xref: /cloud-hypervisor/hypervisor/src/mshv/mod.rs (revision 5641e3a283db4149052b1e9278c640bcef8a000e)
// SPDX-License-Identifier: Apache-2.0 OR BSD-3-Clause
//
// Copyright © 2020, Microsoft Corporation
//

use crate::arch::emulator::{PlatformEmulator, PlatformError};

#[cfg(target_arch = "x86_64")]
use crate::arch::x86::emulator::{Emulator, EmulatorCpuState};
use crate::cpu;
use crate::cpu::Vcpu;
use crate::hypervisor;
use crate::vec_with_array_field;
use crate::vm::{self, InterruptSourceConfig, VmOps};
use crate::HypervisorType;
pub use mshv_bindings::*;
use mshv_ioctls::{set_registers_64, Mshv, NoDatamatch, VcpuFd, VmFd, VmType};
use std::any::Any;
use std::collections::HashMap;
use std::sync::{Arc, RwLock};
use vfio_ioctls::VfioDeviceFd;
use vm::DataMatch;

#[cfg(feature = "sev_snp")]
mod snp_constants;
// x86_64 dependencies
#[cfg(target_arch = "x86_64")]
pub mod x86_64;
#[cfg(feature = "sev_snp")]
use snp_constants::*;

use crate::{
    ClockData, CpuState, IoEventAddress, IrqRoutingEntry, MpState, UserMemoryRegion,
    USER_MEMORY_REGION_EXECUTE, USER_MEMORY_REGION_READ, USER_MEMORY_REGION_WRITE,
};
#[cfg(feature = "sev_snp")]
use igvm_defs::IGVM_VHS_SNP_ID_BLOCK;
use vmm_sys_util::eventfd::EventFd;
#[cfg(target_arch = "x86_64")]
pub use x86_64::VcpuMshvState;
#[cfg(target_arch = "x86_64")]
pub use x86_64::*;

#[cfg(target_arch = "x86_64")]
use std::fs::File;
use std::os::unix::io::AsRawFd;

#[cfg(target_arch = "x86_64")]
use crate::arch::x86::{CpuIdEntry, FpuState, MsrEntry};

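// Bits passed to the MSHV dirty-page-tracking ioctls (assumed to match the
// hypervisor ABI for clearing/setting dirty state).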
const DIRTY_BITMAP_CLEAR_DIRTY: u64 = 0x4;
const DIRTY_BITMAP_SET_DIRTY: u64 = 0x8;

///
/// Export generically-named wrappers of mshv-bindings for Unix-based platforms
///
pub use {
    mshv_bindings::mshv_create_device as CreateDevice,
    mshv_bindings::mshv_device_attr as DeviceAttr, mshv_ioctls::DeviceFd,
};

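/// Shift for converting between byte addresses and 4 KiB page frame numbers
/// (1 << 12 == 4096).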
pub const PAGE_SHIFT: usize = 12;

impl From<mshv_user_mem_region> for UserMemoryRegion {
    fn from(region: mshv_user_mem_region) -> Self {
        let mut flags: u32 = 0;
        if region.flags & HV_MAP_GPA_READABLE != 0 {
            flags |= USER_MEMORY_REGION_READ;
        }
        if region.flags & HV_MAP_GPA_WRITABLE != 0 {
            flags |= USER_MEMORY_REGION_WRITE;
        }
        if region.flags & HV_MAP_GPA_EXECUTABLE != 0 {
            flags |= USER_MEMORY_REGION_EXECUTE;
        }

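        // mshv keeps only the guest page frame number, so rebuild the full
        // guest-physical address by adding back the sub-page offset carried
        // in the userspace address.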
        UserMemoryRegion {
            guest_phys_addr: (region.guest_pfn << PAGE_SHIFT as u64)
                + (region.userspace_addr & ((1 << PAGE_SHIFT) - 1)),
            memory_size: region.size,
            userspace_addr: region.userspace_addr,
            flags,
            ..Default::default()
        }
    }
}

impl From<UserMemoryRegion> for mshv_user_mem_region {
    fn from(region: UserMemoryRegion) -> Self {
        let mut flags: u32 = 0;
        if region.flags & USER_MEMORY_REGION_READ != 0 {
            flags |= HV_MAP_GPA_READABLE;
        }
        if region.flags & USER_MEMORY_REGION_WRITE != 0 {
            flags |= HV_MAP_GPA_WRITABLE;
        }
        if region.flags & USER_MEMORY_REGION_EXECUTE != 0 {
            flags |= HV_MAP_GPA_EXECUTABLE;
        }

        mshv_user_mem_region {
            guest_pfn: region.guest_phys_addr >> PAGE_SHIFT,
            size: region.memory_size,
            userspace_addr: region.userspace_addr,
            flags,
        }
    }
}

impl From<mshv_ioctls::IoEventAddress> for IoEventAddress {
    fn from(a: mshv_ioctls::IoEventAddress) -> Self {
        match a {
            mshv_ioctls::IoEventAddress::Pio(x) => Self::Pio(x),
            mshv_ioctls::IoEventAddress::Mmio(x) => Self::Mmio(x),
        }
    }
}

impl From<IoEventAddress> for mshv_ioctls::IoEventAddress {
    fn from(a: IoEventAddress) -> Self {
        match a {
            IoEventAddress::Pio(x) => Self::Pio(x),
            IoEventAddress::Mmio(x) => Self::Mmio(x),
        }
    }
}

impl From<VcpuMshvState> for CpuState {
    fn from(s: VcpuMshvState) -> Self {
        CpuState::Mshv(s)
    }
}

impl From<CpuState> for VcpuMshvState {
    fn from(s: CpuState) -> Self {
        match s {
            CpuState::Mshv(s) => s,
            /* Needed in case other hypervisors are enabled */
            #[allow(unreachable_patterns)]
            _ => panic!("CpuState is not valid"),
        }
    }
}

impl From<mshv_msi_routing_entry> for IrqRoutingEntry {
    fn from(s: mshv_msi_routing_entry) -> Self {
        IrqRoutingEntry::Mshv(s)
    }
}

impl From<IrqRoutingEntry> for mshv_msi_routing_entry {
    fn from(e: IrqRoutingEntry) -> Self {
        match e {
            IrqRoutingEntry::Mshv(e) => e,
            /* Needed in case other hypervisors are enabled */
            #[allow(unreachable_patterns)]
            _ => panic!("IrqRoutingEntry is not valid"),
        }
    }
}

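/// Bookkeeping for a memory slot while dirty-page logging is enabled:
/// the guest PFN and size needed to address its dirty bitmap.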
struct MshvDirtyLogSlot {
    guest_pfn: u64,
    memory_size: u64,
}

/// Wrapper over mshv system ioctls.
pub struct MshvHypervisor {
    mshv: Mshv,
}

impl MshvHypervisor {
    #[cfg(target_arch = "x86_64")]
    ///
    /// Retrieve the list of MSRs supported by MSHV.
    ///
    fn get_msr_list(&self) -> hypervisor::Result<MsrList> {
        self.mshv
            .get_msr_index_list()
            .map_err(|e| hypervisor::HypervisorError::GetMsrList(e.into()))
    }
}

impl MshvHypervisor {
    /// Create a hypervisor based on Mshv
    #[allow(clippy::new_ret_no_self)]
    pub fn new() -> hypervisor::Result<Arc<dyn hypervisor::Hypervisor>> {
        let mshv_obj =
            Mshv::new().map_err(|e| hypervisor::HypervisorError::HypervisorCreate(e.into()))?;
        Ok(Arc::new(MshvHypervisor { mshv: mshv_obj }))
    }
    /// Check if the hypervisor is available
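    ///
    /// # Examples
    ///
    /// ```
    /// # use hypervisor::mshv::MshvHypervisor;
    /// // A minimal sketch: Ok(true) only when /dev/mshv exists.
    /// let available = MshvHypervisor::is_available().unwrap();
    /// ```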
    pub fn is_available() -> hypervisor::Result<bool> {
        match std::fs::metadata("/dev/mshv") {
            Ok(_) => Ok(true),
            Err(err) if err.kind() == std::io::ErrorKind::NotFound => Ok(false),
            Err(err) => Err(hypervisor::HypervisorError::HypervisorAvailableCheck(
                err.into(),
            )),
        }
    }
}

/// Implementation of Hypervisor trait for Mshv
///
/// # Examples
///
/// ```
/// # use hypervisor::mshv::MshvHypervisor;
/// # use std::sync::Arc;
/// let mshv = MshvHypervisor::new().unwrap();
/// let hypervisor = Arc::new(mshv);
/// let vm = hypervisor.create_vm().expect("new VM fd creation failed");
/// ```
impl hypervisor::Hypervisor for MshvHypervisor {
    ///
    /// Returns the type of the hypervisor
    ///
    fn hypervisor_type(&self) -> HypervisorType {
        HypervisorType::Mshv
    }

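    /// Create an MSHV VM of the given partition type (e.g. a regular or an
    /// SEV-SNP partition), retrying the creation ioctl if it is interrupted.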
    fn create_vm_with_type(&self, vm_type: u64) -> hypervisor::Result<Arc<dyn crate::Vm>> {
        let mshv_vm_type: VmType = match VmType::try_from(vm_type) {
            Ok(vm_type) => vm_type,
            Err(_) => return Err(hypervisor::HypervisorError::UnsupportedVmType()),
        };
        let fd: VmFd;
        loop {
            match self.mshv.create_vm_with_type(mshv_vm_type) {
                Ok(res) => fd = res,
                Err(e) => {
                    if e.errno() == libc::EINTR {
                        // If the returned error is EINTR, the ioctl was
                        // interrupted and we have to retry, as this cannot
                        // be considered a regular error.
                        continue;
                    } else {
                        return Err(hypervisor::HypervisorError::VmCreate(e.into()));
                    }
                }
            }
            break;
        }

        // Set additional partition property for SEV-SNP partition.
        if mshv_vm_type == VmType::Snp {
            let snp_policy = snp::get_default_snp_guest_policy();
            let vmgexit_offloads = snp::get_default_vmgexit_offload_features();
            // SAFETY: access union fields
            unsafe {
                debug!(
                    "Setting the partition isolation policy as: 0x{:x}",
                    snp_policy.as_uint64
                );
                fd.set_partition_property(
                    hv_partition_property_code_HV_PARTITION_PROPERTY_ISOLATION_POLICY,
                    snp_policy.as_uint64,
                )
                .map_err(|e| hypervisor::HypervisorError::SetPartitionProperty(e.into()))?;
                debug!(
                    "Setting the partition property to enable VMGEXIT offloads as: 0x{:x}",
                    vmgexit_offloads.as_uint64
                );
                fd.set_partition_property(
                    hv_partition_property_code_HV_PARTITION_PROPERTY_SEV_VMGEXIT_OFFLOADS,
                    vmgexit_offloads.as_uint64,
                )
                .map_err(|e| hypervisor::HypervisorError::SetPartitionProperty(e.into()))?;
            }
        }

        // The default Microsoft Hypervisor behavior for an unimplemented MSR
        // is to send a fault to the guest if it tries to access it. It is
        // possible to override this behavior with a more suitable option,
        // i.e., ignore writes from the guest and return zero when the guest
        // attempts to read an unimplemented MSR.
        fd.set_partition_property(
            hv_partition_property_code_HV_PARTITION_PROPERTY_UNIMPLEMENTED_MSR_ACTION,
            hv_unimplemented_msr_action_HV_UNIMPLEMENTED_MSR_ACTION_IGNORE_WRITE_READ_ZERO as u64,
        )
        .map_err(|e| hypervisor::HypervisorError::SetPartitionProperty(e.into()))?;

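        // Cache the indices of the MSRs supported by the hypervisor; this
        // list seeds the per-vCPU MSR set used when saving/restoring state.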
        let msr_list = self.get_msr_list()?;
        let num_msrs = msr_list.as_fam_struct_ref().nmsrs as usize;
        let mut msrs: Vec<MsrEntry> = vec![MsrEntry::default(); num_msrs];
        let indices = msr_list.as_slice();
        for (pos, index) in indices.iter().enumerate() {
            msrs[pos].index = *index;
        }
        let vm_fd = Arc::new(fd);

        Ok(Arc::new(MshvVm {
            fd: vm_fd,
            msrs,
            dirty_log_slots: Arc::new(RwLock::new(HashMap::new())),
        }))
    }
    /// Create an MSHV VM object and return it as a Vm trait object
    ///
    /// # Examples
    ///
    /// ```
    /// # extern crate hypervisor;
    /// # use hypervisor::mshv::MshvHypervisor;
    /// use hypervisor::mshv::MshvVm;
    /// let hypervisor = MshvHypervisor::new().unwrap();
    /// let vm = hypervisor.create_vm().unwrap();
    /// ```
    fn create_vm(&self) -> hypervisor::Result<Arc<dyn vm::Vm>> {
        let vm_type = 0;
        self.create_vm_with_type(vm_type)
    }
    ///
    /// Get the supported CpuID
    ///
    fn get_supported_cpuid(&self) -> hypervisor::Result<Vec<CpuIdEntry>> {
        Ok(Vec::new())
    }

    /// Get maximum number of vCPUs
    fn get_max_vcpus(&self) -> u32 {
        // TODO: Using HV_MAXIMUM_PROCESSORS would be better
        // but the ioctl API is limited to u8
        256
    }
}

/// Vcpu struct for Microsoft Hypervisor
pub struct MshvVcpu {
    fd: VcpuFd,
    vp_index: u8,
    cpuid: Vec<CpuIdEntry>,
    msrs: Vec<MsrEntry>,
    vm_ops: Option<Arc<dyn vm::VmOps>>,
    #[cfg(feature = "sev_snp")]
    vm_fd: Arc<VmFd>,
}

/// Implementation of Vcpu trait for Microsoft Hypervisor
///
/// # Examples
///
/// ```
/// # use hypervisor::mshv::MshvHypervisor;
/// # use std::sync::Arc;
/// let mshv = MshvHypervisor::new().unwrap();
/// let hypervisor = Arc::new(mshv);
/// let vm = hypervisor.create_vm().expect("new VM fd creation failed");
/// let vcpu = vm.create_vcpu(0, None).unwrap();
/// ```
impl cpu::Vcpu for MshvVcpu {
    #[cfg(target_arch = "x86_64")]
    ///
    /// Returns the vCPU general purpose registers.
    ///
    fn get_regs(&self) -> cpu::Result<crate::arch::x86::StandardRegisters> {
        Ok(self
            .fd
            .get_regs()
            .map_err(|e| cpu::HypervisorCpuError::GetStandardRegs(e.into()))?
            .into())
    }

    #[cfg(target_arch = "x86_64")]
    ///
    /// Sets the vCPU general purpose registers.
    ///
    fn set_regs(&self, regs: &crate::arch::x86::StandardRegisters) -> cpu::Result<()> {
        let regs = (*regs).into();
        self.fd
            .set_regs(&regs)
            .map_err(|e| cpu::HypervisorCpuError::SetStandardRegs(e.into()))
    }

    #[cfg(target_arch = "x86_64")]
    ///
    /// Returns the vCPU special registers.
    ///
    fn get_sregs(&self) -> cpu::Result<crate::arch::x86::SpecialRegisters> {
        Ok(self
            .fd
            .get_sregs()
            .map_err(|e| cpu::HypervisorCpuError::GetSpecialRegs(e.into()))?
            .into())
    }

    #[cfg(target_arch = "x86_64")]
    ///
    /// Sets the vCPU special registers.
    ///
    fn set_sregs(&self, sregs: &crate::arch::x86::SpecialRegisters) -> cpu::Result<()> {
        let sregs = (*sregs).into();
        self.fd
            .set_sregs(&sregs)
            .map_err(|e| cpu::HypervisorCpuError::SetSpecialRegs(e.into()))
    }

    #[cfg(target_arch = "x86_64")]
    ///
    /// Returns the floating point state (FPU) from the vCPU.
    ///
    fn get_fpu(&self) -> cpu::Result<FpuState> {
        Ok(self
            .fd
            .get_fpu()
            .map_err(|e| cpu::HypervisorCpuError::GetFloatingPointRegs(e.into()))?
            .into())
    }

    #[cfg(target_arch = "x86_64")]
    ///
    /// Set the floating point state (FPU) of a vCPU.
    ///
    fn set_fpu(&self, fpu: &FpuState) -> cpu::Result<()> {
        let fpu: mshv_bindings::FloatingPointUnit = (*fpu).clone().into();
        self.fd
            .set_fpu(&fpu)
            .map_err(|e| cpu::HypervisorCpuError::SetFloatingPointRegs(e.into()))
    }

    #[cfg(target_arch = "x86_64")]
    ///
    /// Returns the model-specific registers (MSR) for this vCPU.
    ///
    fn get_msrs(&self, msrs: &mut Vec<MsrEntry>) -> cpu::Result<usize> {
        let mshv_msrs: Vec<msr_entry> = msrs.iter().map(|e| (*e).into()).collect();
        let mut mshv_msrs = MsrEntries::from_entries(&mshv_msrs).unwrap();
        let succ = self
            .fd
            .get_msrs(&mut mshv_msrs)
            .map_err(|e| cpu::HypervisorCpuError::GetMsrEntries(e.into()))?;

        msrs[..succ].copy_from_slice(
            &mshv_msrs.as_slice()[..succ]
                .iter()
                .map(|e| (*e).into())
                .collect::<Vec<MsrEntry>>(),
        );

        Ok(succ)
    }

    #[cfg(target_arch = "x86_64")]
    ///
    /// Setup the model-specific registers (MSR) for this vCPU.
    /// Returns the number of MSR entries actually written.
    ///
    fn set_msrs(&self, msrs: &[MsrEntry]) -> cpu::Result<usize> {
        let mshv_msrs: Vec<msr_entry> = msrs.iter().map(|e| (*e).into()).collect();
        let mshv_msrs = MsrEntries::from_entries(&mshv_msrs).unwrap();
        self.fd
            .set_msrs(&mshv_msrs)
            .map_err(|e| cpu::HypervisorCpuError::SetMsrEntries(e.into()))
    }

    #[cfg(target_arch = "x86_64")]
    ///
    /// X86 specific call to enable HyperV SynIC
    ///
    fn enable_hyperv_synic(&self) -> cpu::Result<()> {
        /* We always have SynIC enabled on MSHV */
        Ok(())
    }

    #[allow(non_upper_case_globals)]
    fn run(&self) -> std::result::Result<cpu::VmExit, cpu::HypervisorCpuError> {
        let hv_message: hv_message = hv_message::default();
        match self.fd.run(hv_message) {
            Ok(x) => match x.header.message_type {
                hv_message_type_HVMSG_X64_HALT => {
                    debug!("HALT");
                    Ok(cpu::VmExit::Reset)
                }
                hv_message_type_HVMSG_UNRECOVERABLE_EXCEPTION => {
                    warn!("TRIPLE FAULT");
                    Ok(cpu::VmExit::Shutdown)
                }
                hv_message_type_HVMSG_X64_IO_PORT_INTERCEPT => {
                    let info = x.to_ioport_info().unwrap();
                    let access_info = info.access_info;
                    // SAFETY: access_info is valid, otherwise we won't be here
                    let len = unsafe { access_info.__bindgen_anon_1.access_size() } as usize;
                    let is_write = info.header.intercept_access_type == 1;
                    let port = info.port_number;
                    let mut data: [u8; 4] = [0; 4];
                    let mut ret_rax = info.rax;

                    /*
                     * XXX: Ignore QEMU fw_cfg (0x5xx) and debug console (0x402) ports.
                     *
                     * Cloud Hypervisor doesn't support fw_cfg at the moment. It does support 0x402
                     * under the "fwdebug" feature flag. But that feature is not enabled by default
                     * and is considered legacy.
                     *
                     * OVMF unconditionally pokes these IO ports with string IO.
                     *
                     * Instead of implementing string IO support, which would not accomplish much
                     * at this point, skip those ports explicitly to avoid panicking.
                     *
                     * Proper string IO support can be added once we gain the ability to translate
                     * guest virtual addresses to guest physical addresses on MSHV.
                     */
                    match port {
                        0x402 | 0x510 | 0x511 | 0x514 => {
                            let insn_len = info.header.instruction_length() as u64;

                            /* Advance RIP and update RAX */
                            let arr_reg_name_value = [
                                (
                                    hv_register_name_HV_X64_REGISTER_RIP,
                                    info.header.rip + insn_len,
                                ),
                                (hv_register_name_HV_X64_REGISTER_RAX, ret_rax),
                            ];
                            set_registers_64!(self.fd, arr_reg_name_value)
                                .map_err(|e| cpu::HypervisorCpuError::SetRegister(e.into()))?;
                            return Ok(cpu::VmExit::Ignore);
                        }
                        _ => {}
                    }

                    assert!(
                        // SAFETY: access_info is valid, otherwise we won't be here
                        (unsafe { access_info.__bindgen_anon_1.string_op() } != 1),
                        "String IN/OUT not supported"
                    );
                    assert!(
                        // SAFETY: access_info is valid, otherwise we won't be here
                        (unsafe { access_info.__bindgen_anon_1.rep_prefix() } != 1),
                        "Rep IN/OUT not supported"
                    );

                    if is_write {
                        let data = (info.rax as u32).to_le_bytes();
                        if let Some(vm_ops) = &self.vm_ops {
                            vm_ops
                                .pio_write(port.into(), &data[0..len])
                                .map_err(|e| cpu::HypervisorCpuError::RunVcpu(e.into()))?;
                        }
                    } else {
                        if let Some(vm_ops) = &self.vm_ops {
                            vm_ops
                                .pio_read(port.into(), &mut data[0..len])
                                .map_err(|e| cpu::HypervisorCpuError::RunVcpu(e.into()))?;
                        }

                        let v = u32::from_le_bytes(data);
                        /* Preserve high bits in EAX but clear out high bits in RAX */
                        let mask = 0xffffffff >> (32 - len * 8);
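                        // e.g. a 1-byte IN gives mask == 0xff: only AL is
                        // replaced by the value that was read.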
                        let eax = (info.rax as u32 & !mask) | (v & mask);
                        ret_rax = eax as u64;
                    }

                    let insn_len = info.header.instruction_length() as u64;

                    /* Advance RIP and update RAX */
                    let arr_reg_name_value = [
                        (
                            hv_register_name_HV_X64_REGISTER_RIP,
                            info.header.rip + insn_len,
                        ),
                        (hv_register_name_HV_X64_REGISTER_RAX, ret_rax),
                    ];
                    set_registers_64!(self.fd, arr_reg_name_value)
                        .map_err(|e| cpu::HypervisorCpuError::SetRegister(e.into()))?;
                    Ok(cpu::VmExit::Ignore)
                }
                hv_message_type_HVMSG_UNMAPPED_GPA => {
                    let info = x.to_memory_info().unwrap();
                    let insn_len = info.instruction_byte_count as usize;
                    assert!(insn_len > 0 && insn_len <= 16);

                    let mut context = MshvEmulatorContext {
                        vcpu: self,
                        map: (info.guest_virtual_address, info.guest_physical_address),
                    };

                    // Create a new emulator.
                    let mut emul = Emulator::new(&mut context);

                    // Emulate the trapped instruction, and only the first one.
                    let new_state = emul
                        .emulate_first_insn(self.vp_index as usize, &info.instruction_bytes)
                        .map_err(|e| cpu::HypervisorCpuError::RunVcpu(e.into()))?;

                    // Set CPU state back.
                    context
                        .set_cpu_state(self.vp_index as usize, new_state)
                        .map_err(|e| cpu::HypervisorCpuError::RunVcpu(e.into()))?;

                    Ok(cpu::VmExit::Ignore)
                }
                hv_message_type_HVMSG_UNACCEPTED_GPA => {
                    let info = x.to_memory_info().unwrap();
                    let gva = info.guest_virtual_address;
                    let gpa = info.guest_physical_address;

                    Err(cpu::HypervisorCpuError::RunVcpu(anyhow!(
                        "Unhandled VCPU exit: Unaccepted GPA({:x}) found at GVA({:x})",
                        gpa,
                        gva,
                    )))
                }
                hv_message_type_HVMSG_X64_CPUID_INTERCEPT => {
                    let info = x.to_cpuid_info().unwrap();
                    debug!("cpuid eax: {:x}", { info.rax });
                    Ok(cpu::VmExit::Ignore)
                }
                hv_message_type_HVMSG_X64_MSR_INTERCEPT => {
                    let info = x.to_msr_info().unwrap();
                    if info.header.intercept_access_type == 0 {
                        debug!("msr read: {:x}", { info.msr_number });
                    } else {
                        debug!("msr write: {:x}", { info.msr_number });
                    }
                    Ok(cpu::VmExit::Ignore)
                }
                hv_message_type_HVMSG_X64_EXCEPTION_INTERCEPT => {
                    //TODO: Handler for VMCALL here.
                    let info = x.to_exception_info().unwrap();
                    debug!("Exception Info {:?}", { info.exception_vector });
                    Ok(cpu::VmExit::Ignore)
                }
                hv_message_type_HVMSG_X64_APIC_EOI => {
                    let info = x.to_apic_eoi_info().unwrap();
                    // The kernel should dispatch the EOI to the correct thread.
                    // Check the VP index is the same as the one we have.
                    assert!(info.vp_index == self.vp_index as u32);
                    // The interrupt vector in info is u32, but x86 only supports 256 vectors.
                    // There is no good way to recover from this if the hypervisor messes around.
                    // Just unwrap.
                    Ok(cpu::VmExit::IoapicEoi(
                        info.interrupt_vector.try_into().unwrap(),
                    ))
                }
                #[cfg(feature = "sev_snp")]
                hv_message_type_HVMSG_X64_SEV_VMGEXIT_INTERCEPT => {
                    let info = x.to_vmg_intercept_info().unwrap();
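                    // The GHCB MSR encodes the requested operation in its low
                    // GHCB_INFO_BIT_WIDTH bits and operation-specific data in
                    // the remaining high bits.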
                    let ghcb_data = info.ghcb_msr >> GHCB_INFO_BIT_WIDTH;
                    let ghcb_msr = svm_ghcb_msr {
                        as_uint64: info.ghcb_msr,
                    };
                    // SAFETY: Accessing a union element from bindgen generated bindings.
                    let ghcb_op = unsafe { ghcb_msr.__bindgen_anon_2.ghcb_info() as u32 };
                    // Sanity check on the header fields before handling other operations.
                    assert!(info.header.intercept_access_type == HV_INTERCEPT_ACCESS_EXECUTE as u8);

                    match ghcb_op {
                        GHCB_INFO_HYP_FEATURE_REQUEST => {
                            // Pre-condition: GHCB data must be zero
                            assert!(ghcb_data == 0);
                            let mut ghcb_response = GHCB_INFO_HYP_FEATURE_RESPONSE as u64;
                            // Indicate support for basic SEV-SNP features
                            ghcb_response |=
                                (GHCB_HYP_FEATURE_SEV_SNP << GHCB_INFO_BIT_WIDTH) as u64;
                            // Indicate support for SEV-SNP AP creation
                            ghcb_response |= (GHCB_HYP_FEATURE_SEV_SNP_AP_CREATION
                                << GHCB_INFO_BIT_WIDTH)
                                as u64;
                            debug!(
                                "GHCB_INFO_HYP_FEATURE_REQUEST: Supported features: {:0x}",
                                ghcb_response
                            );
                            let arr_reg_name_value =
                                [(hv_register_name_HV_X64_REGISTER_GHCB, ghcb_response)];
                            set_registers_64!(self.fd, arr_reg_name_value)
                                .map_err(|e| cpu::HypervisorCpuError::SetRegister(e.into()))?;
                        }
                        GHCB_INFO_REGISTER_REQUEST => {
                            let mut ghcb_gpa = hv_x64_register_sev_ghcb::default();
                            // SAFETY: Accessing a union element from bindgen generated bindings.
                            unsafe {
                                ghcb_gpa.__bindgen_anon_1.set_enabled(1);
                                ghcb_gpa
                                    .__bindgen_anon_1
                                    .set_page_number(ghcb_msr.__bindgen_anon_2.gpa_page_number());
                            }
                            // SAFETY: Accessing a union element from bindgen generated bindings.
                            let reg_name_value = unsafe {
                                [(
                                    hv_register_name_HV_X64_REGISTER_SEV_GHCB_GPA,
                                    ghcb_gpa.as_uint64,
                                )]
                            };

                            set_registers_64!(self.fd, reg_name_value)
                                .map_err(|e| cpu::HypervisorCpuError::SetRegister(e.into()))?;

                            let mut resp_ghcb_msr = svm_ghcb_msr::default();
                            // SAFETY: Accessing a union element from bindgen generated bindings.
                            unsafe {
                                resp_ghcb_msr
                                    .__bindgen_anon_2
                                    .set_ghcb_info(GHCB_INFO_REGISTER_RESPONSE as u64);
                                resp_ghcb_msr.__bindgen_anon_2.set_gpa_page_number(
                                    ghcb_msr.__bindgen_anon_2.gpa_page_number(),
                                );
                            }
                            // SAFETY: Accessing a union element from bindgen generated bindings.
                            let reg_name_value = unsafe {
                                [(
                                    hv_register_name_HV_X64_REGISTER_GHCB,
                                    resp_ghcb_msr.as_uint64,
                                )]
                            };

                            set_registers_64!(self.fd, reg_name_value)
                                .map_err(|e| cpu::HypervisorCpuError::SetRegister(e.into()))?;
                        }
                        GHCB_INFO_SEV_INFO_REQUEST => {
                            let sev_cpuid_function = 0x8000_001F;
                            let cpu_leaf = self
                                .fd
                                .get_cpuid_values(sev_cpuid_function, 0, 0, 0)
                                .unwrap();
                            let ebx = cpu_leaf[1];
                            // The first 6 bits of EBX hold the page table encryption bit number
                            let pbit_encryption = (ebx & 0x3f) as u8;
                            let mut ghcb_response = GHCB_INFO_SEV_INFO_RESPONSE as u64;

                            // GHCBData[63:48] specifies the maximum GHCB protocol version supported
                            ghcb_response |= (GHCB_PROTOCOL_VERSION_MAX as u64) << 48;
                            // GHCBData[47:32] specifies the minimum GHCB protocol version supported
                            ghcb_response |= (GHCB_PROTOCOL_VERSION_MIN as u64) << 32;
                            // GHCBData[31:24] specifies the SEV page table encryption bit number.
                            ghcb_response |= (pbit_encryption as u64) << 24;

                            let arr_reg_name_value =
                                [(hv_register_name_HV_X64_REGISTER_GHCB, ghcb_response)];
                            set_registers_64!(self.fd, arr_reg_name_value)
                                .map_err(|e| cpu::HypervisorCpuError::SetRegister(e.into()))?;
                        }
                        GHCB_INFO_NORMAL => {
                            let exit_code =
                                info.__bindgen_anon_2.__bindgen_anon_1.sw_exit_code as u32;
                            // SAFETY: Accessing a union element from bindgen generated bindings.
                            let pfn = unsafe { ghcb_msr.__bindgen_anon_2.gpa_page_number() };
                            let ghcb_gpa = pfn << GHCB_INFO_BIT_WIDTH;
                            match exit_code {
                                SVM_EXITCODE_HV_DOORBELL_PAGE => {
                                    let exit_info1 =
                                        info.__bindgen_anon_2.__bindgen_anon_1.sw_exit_info1 as u32;
                                    match exit_info1 {
                                        SVM_NAE_HV_DOORBELL_PAGE_GET_PREFERRED => {
                                            // Hypervisor does not have any preference for doorbell GPA.
                                            let preferred_doorbell_gpa: u64 = 0xFFFFFFFFFFFFFFFF;
                                            let mut swei2_rw_gpa_arg =
                                                mshv_bindings::mshv_read_write_gpa {
                                                    base_gpa: ghcb_gpa + GHCB_SW_EXITINFO2_OFFSET,
                                                    byte_count: std::mem::size_of::<u64>() as u32,
                                                    ..Default::default()
                                                };
                                            swei2_rw_gpa_arg.data.copy_from_slice(
                                                &preferred_doorbell_gpa.to_le_bytes(),
                                            );
                                            self.fd.gpa_write(&mut swei2_rw_gpa_arg).map_err(
                                                |e| cpu::HypervisorCpuError::GpaWrite(e.into()),
                                            )?;
                                        }
                                        SVM_NAE_HV_DOORBELL_PAGE_SET => {
                                            let exit_info2 = info
                                                .__bindgen_anon_2
                                                .__bindgen_anon_1
                                                .sw_exit_info2;
                                            let mut ghcb_doorbell_gpa =
                                                hv_x64_register_sev_hv_doorbell::default();
                                            // SAFETY: Accessing a union element from bindgen generated bindings.
                                            unsafe {
                                                ghcb_doorbell_gpa.__bindgen_anon_1.set_enabled(1);
                                                ghcb_doorbell_gpa
                                                    .__bindgen_anon_1
                                                    .set_page_number(exit_info2 >> PAGE_SHIFT);
                                            }
                                            // SAFETY: Accessing a union element from bindgen generated bindings.
                                            let reg_names = unsafe {
                                                [(
                                                    hv_register_name_HV_X64_REGISTER_SEV_DOORBELL_GPA,
                                                    ghcb_doorbell_gpa.as_uint64,
                                                )]
                                            };
                                            set_registers_64!(self.fd, reg_names).map_err(|e| {
                                                cpu::HypervisorCpuError::SetRegister(e.into())
                                            })?;

                                            let mut swei2_rw_gpa_arg =
                                                mshv_bindings::mshv_read_write_gpa {
                                                    base_gpa: ghcb_gpa + GHCB_SW_EXITINFO2_OFFSET,
                                                    byte_count: std::mem::size_of::<u64>() as u32,
                                                    ..Default::default()
                                                };
                                            swei2_rw_gpa_arg.data[0..8]
                                                .copy_from_slice(&exit_info2.to_le_bytes());
                                            self.fd.gpa_write(&mut swei2_rw_gpa_arg).map_err(
                                                |e| cpu::HypervisorCpuError::GpaWrite(e.into()),
                                            )?;

                                            // Clear the SW_EXIT_INFO1 register to indicate no error
                                            let mut swei1_rw_gpa_arg =
                                                mshv_bindings::mshv_read_write_gpa {
                                                    base_gpa: ghcb_gpa + GHCB_SW_EXITINFO1_OFFSET,
                                                    byte_count: std::mem::size_of::<u64>() as u32,
                                                    ..Default::default()
                                                };
                                            self.fd.gpa_write(&mut swei1_rw_gpa_arg).map_err(
                                                |e| cpu::HypervisorCpuError::GpaWrite(e.into()),
                                            )?;
                                        }
                                        SVM_NAE_HV_DOORBELL_PAGE_QUERY => {
                                            let mut reg_assocs = [hv_register_assoc {
                                                name: hv_register_name_HV_X64_REGISTER_SEV_DOORBELL_GPA,
                                                ..Default::default()
                                            }];
                                            self.fd.get_reg(&mut reg_assocs).unwrap();
                                            // SAFETY: Accessing a union element from bindgen generated bindings.
                                            let doorbell_gpa = unsafe { reg_assocs[0].value.reg64 };
                                            let mut swei2_rw_gpa_arg =
                                                mshv_bindings::mshv_read_write_gpa {
                                                    base_gpa: ghcb_gpa + GHCB_SW_EXITINFO2_OFFSET,
                                                    byte_count: std::mem::size_of::<u64>() as u32,
                                                    ..Default::default()
                                                };
                                            swei2_rw_gpa_arg
                                                .data
                                                .copy_from_slice(&doorbell_gpa.to_le_bytes());
                                            self.fd.gpa_write(&mut swei2_rw_gpa_arg).map_err(
                                                |e| cpu::HypervisorCpuError::GpaWrite(e.into()),
                                            )?;
                                        }
                                        SVM_NAE_HV_DOORBELL_PAGE_CLEAR => {
                                            let mut swei2_rw_gpa_arg =
                                                mshv_bindings::mshv_read_write_gpa {
                                                    base_gpa: ghcb_gpa + GHCB_SW_EXITINFO2_OFFSET,
                                                    byte_count: std::mem::size_of::<u64>() as u32,
                                                    ..Default::default()
                                                };
                                            self.fd.gpa_write(&mut swei2_rw_gpa_arg).map_err(
                                                |e| cpu::HypervisorCpuError::GpaWrite(e.into()),
                                            )?;
                                        }
                                        _ => {
                                            panic!(
                                                "SVM_EXITCODE_HV_DOORBELL_PAGE: Unhandled exit code: {:0x}",
                                                exit_info1
                                            );
                                        }
                                    }
                                }
                                SVM_EXITCODE_SNP_EXTENDED_GUEST_REQUEST => {
                                    warn!("Fetching extended guest request is not supported");
                                    // Extended guest request is not supported by the hypervisor,
                                    // so return the error to the guest: 0x6 means
                                    // `The NAE event was not valid`.
                                    // Reference: GHCB Spec, page 42.
                                    let value: u64 = 0x6;
                                    let mut swei2_rw_gpa_arg = mshv_bindings::mshv_read_write_gpa {
                                        base_gpa: ghcb_gpa + GHCB_SW_EXITINFO2_OFFSET,
                                        byte_count: std::mem::size_of::<u64>() as u32,
                                        ..Default::default()
                                    };
                                    swei2_rw_gpa_arg.data.copy_from_slice(&value.to_le_bytes());
                                    self.fd
                                        .gpa_write(&mut swei2_rw_gpa_arg)
                                        .map_err(|e| cpu::HypervisorCpuError::GpaWrite(e.into()))?;
                                }
                                SVM_EXITCODE_IOIO_PROT => {
                                    let exit_info1 =
                                        info.__bindgen_anon_2.__bindgen_anon_1.sw_exit_info1 as u32;
                                    let port_info = hv_sev_vmgexit_port_info {
                                        as_uint32: exit_info1,
                                    };

                                    let port =
                                        // SAFETY: Accessing a union element from bindgen generated bindings.
                                        unsafe { port_info.__bindgen_anon_1.intercepted_port() };
                                    let mut len = 4;
                                    // SAFETY: Accessing a union element from bindgen generated bindings.
                                    unsafe {
                                        if port_info.__bindgen_anon_1.operand_size_16bit() == 1 {
                                            len = 2;
                                        } else if port_info.__bindgen_anon_1.operand_size_8bit()
                                            == 1
                                        {
                                            len = 1;
                                        }
                                    }
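                                    // GHCB IOIO: access type 0 means an OUT
                                    // (guest write to the port), per the spec.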
                                    let is_write =
                                        // SAFETY: Accessing a union element from bindgen generated bindings.
                                        unsafe { port_info.__bindgen_anon_1.access_type() == 0 };
                                    let mut rax_rw_gpa_arg: mshv_read_write_gpa =
                                        mshv_bindings::mshv_read_write_gpa {
                                            base_gpa: ghcb_gpa + GHCB_RAX_OFFSET,
                                            byte_count: std::mem::size_of::<u64>() as u32,
                                            ..Default::default()
                                        };
                                    self.fd
                                        .gpa_read(&mut rax_rw_gpa_arg)
                                        .map_err(|e| cpu::HypervisorCpuError::GpaRead(e.into()))?;

                                    if is_write {
                                        if let Some(vm_ops) = &self.vm_ops {
                                            vm_ops
                                                .pio_write(
                                                    port.into(),
                                                    &rax_rw_gpa_arg.data[0..len],
                                                )
                                                .map_err(|e| {
                                                    cpu::HypervisorCpuError::RunVcpu(e.into())
                                                })?;
                                        }
                                    } else {
                                        if let Some(vm_ops) = &self.vm_ops {
                                            vm_ops
                                                .pio_read(
                                                    port.into(),
                                                    &mut rax_rw_gpa_arg.data[0..len],
                                                )
                                                .map_err(|e| {
                                                    cpu::HypervisorCpuError::RunVcpu(e.into())
                                                })?;
                                        }

                                        self.fd.gpa_write(&mut rax_rw_gpa_arg).map_err(|e| {
                                            cpu::HypervisorCpuError::GpaWrite(e.into())
                                        })?;
                                    }

                                    // Clear the SW_EXIT_INFO1 register to indicate no error
                                    let mut swei1_rw_gpa_arg = mshv_bindings::mshv_read_write_gpa {
                                        base_gpa: ghcb_gpa + GHCB_SW_EXITINFO1_OFFSET,
                                        byte_count: std::mem::size_of::<u64>() as u32,
                                        ..Default::default()
                                    };
                                    self.fd
                                        .gpa_write(&mut swei1_rw_gpa_arg)
                                        .map_err(|e| cpu::HypervisorCpuError::GpaWrite(e.into()))?;
                                }
                                SVM_EXITCODE_MMIO_READ => {
                                    let src_gpa =
                                        info.__bindgen_anon_2.__bindgen_anon_1.sw_exit_info1;
                                    let dst_gpa = info.__bindgen_anon_2.__bindgen_anon_1.sw_scratch;
                                    let data_len =
                                        info.__bindgen_anon_2.__bindgen_anon_1.sw_exit_info2
                                            as usize;
                                    // Sanity check to make sure data len is within supported range.
                                    assert!(data_len <= 0x8);

                                    let mut data: Vec<u8> = vec![0; data_len];
                                    if let Some(vm_ops) = &self.vm_ops {
                                        vm_ops.mmio_read(src_gpa, &mut data[0..data_len]).map_err(
                                            |e| cpu::HypervisorCpuError::RunVcpu(e.into()),
                                        )?;
                                    }
                                    let mut arg: mshv_read_write_gpa =
                                        mshv_bindings::mshv_read_write_gpa {
                                            base_gpa: dst_gpa,
                                            byte_count: data_len as u32,
                                            ..Default::default()
                                        };
                                    arg.data[0..data_len].copy_from_slice(&data);

                                    self.fd
                                        .gpa_write(&mut arg)
                                        .map_err(|e| cpu::HypervisorCpuError::GpaWrite(e.into()))?;
                                }
                                SVM_EXITCODE_MMIO_WRITE => {
                                    let dst_gpa =
                                        info.__bindgen_anon_2.__bindgen_anon_1.sw_exit_info1;
                                    let src_gpa = info.__bindgen_anon_2.__bindgen_anon_1.sw_scratch;
                                    let data_len =
                                        info.__bindgen_anon_2.__bindgen_anon_1.sw_exit_info2
                                            as usize;
                                    // Sanity check to make sure data len is within supported range.
                                    assert!(data_len <= 0x8);
                                    let mut arg: mshv_read_write_gpa =
                                        mshv_bindings::mshv_read_write_gpa {
                                            base_gpa: src_gpa,
                                            byte_count: data_len as u32,
                                            ..Default::default()
                                        };

                                    self.fd
                                        .gpa_read(&mut arg)
                                        .map_err(|e| cpu::HypervisorCpuError::GpaRead(e.into()))?;

                                    if let Some(vm_ops) = &self.vm_ops {
                                        vm_ops
                                            .mmio_write(dst_gpa, &arg.data[0..data_len])
                                            .map_err(|e| {
                                                cpu::HypervisorCpuError::RunVcpu(e.into())
                                            })?;
                                    }
                                }
                                SVM_EXITCODE_SNP_GUEST_REQUEST => {
                                    let req_gpa =
                                        info.__bindgen_anon_2.__bindgen_anon_1.sw_exit_info1;
                                    let rsp_gpa =
                                        info.__bindgen_anon_2.__bindgen_anon_1.sw_exit_info2;

                                    let mshv_psp_req =
                                        mshv_issue_psp_guest_request { req_gpa, rsp_gpa };
                                    self.vm_fd
                                        .psp_issue_guest_request(&mshv_psp_req)
                                        .map_err(|e| cpu::HypervisorCpuError::RunVcpu(e.into()))?;

                                    debug!(
                                        "SNP guest request: req_gpa {:0x} rsp_gpa {:0x}",
                                        req_gpa, rsp_gpa
                                    );

                                    let mut swei2_rw_gpa_arg = mshv_bindings::mshv_read_write_gpa {
                                        base_gpa: ghcb_gpa + GHCB_SW_EXITINFO2_OFFSET,
                                        byte_count: std::mem::size_of::<u64>() as u32,
                                        ..Default::default()
                                    };
                                    self.fd
                                        .gpa_write(&mut swei2_rw_gpa_arg)
                                        .map_err(|e| cpu::HypervisorCpuError::GpaWrite(e.into()))?;
                                }
                                SVM_EXITCODE_SNP_AP_CREATION => {
                                    let vmsa_gpa =
                                        info.__bindgen_anon_2.__bindgen_anon_1.sw_exit_info2;
                                    let apic_id =
                                        info.__bindgen_anon_2.__bindgen_anon_1.sw_exit_info1 >> 32;
                                    debug!(
                                        "SNP AP CREATE REQUEST with VMSA GPA {:0x}, and APIC ID {:?}",
                                        vmsa_gpa, apic_id
                                    );

                                    let mshv_ap_create_req = mshv_sev_snp_ap_create {
                                        vp_id: apic_id,
                                        vmsa_gpa,
                                    };
                                    self.vm_fd
                                        .sev_snp_ap_create(&mshv_ap_create_req)
                                        .map_err(|e| cpu::HypervisorCpuError::RunVcpu(e.into()))?;

                                    let mut swei2_rw_gpa_arg = mshv_bindings::mshv_read_write_gpa {
                                        base_gpa: ghcb_gpa + GHCB_SW_EXITINFO2_OFFSET,
                                        byte_count: std::mem::size_of::<u64>() as u32,
                                        ..Default::default()
                                    };
                                    self.fd
                                        .gpa_write(&mut swei2_rw_gpa_arg)
                                        .map_err(|e| cpu::HypervisorCpuError::GpaWrite(e.into()))?;
                                }
                                _ => panic!(
                                    "GHCB_INFO_NORMAL: Unhandled exit code: {:0x}",
                                    exit_code
                                ),
                            }
                        }
                        _ => panic!("Unsupported VMGEXIT operation: {:0x}", ghcb_op),
                    }
1061 
1062                     Ok(cpu::VmExit::Ignore)
1063                 }
1064                 exit => Err(cpu::HypervisorCpuError::RunVcpu(anyhow!(
1065                     "Unhandled VCPU exit {:?}",
1066                     exit
1067                 ))),
1068             },
1069 
1070             Err(e) => match e.errno() {
1071                 libc::EAGAIN | libc::EINTR => Ok(cpu::VmExit::Ignore),
1072                 _ => Err(cpu::HypervisorCpuError::RunVcpu(anyhow!(
1073                     "VCPU error {:?}",
1074                     e
1075                 ))),
1076             },
1077         }
1078     }
1079 
1080     #[cfg(target_arch = "x86_64")]
1081     ///
1082     /// X86 specific call to setup the CPUID registers.
1083     ///
1084     fn set_cpuid2(&self, cpuid: &[CpuIdEntry]) -> cpu::Result<()> {
1085         let cpuid: Vec<mshv_bindings::hv_cpuid_entry> = cpuid.iter().map(|e| (*e).into()).collect();
1086         let mshv_cpuid = <CpuId>::from_entries(&cpuid)
1087             .map_err(|_| cpu::HypervisorCpuError::SetCpuid(anyhow!("failed to create CpuId")))?;
1088 
1089         self.fd
1090             .register_intercept_result_cpuid(&mshv_cpuid)
1091             .map_err(|e| cpu::HypervisorCpuError::SetCpuid(e.into()))
1092     }
1093 
1094     #[cfg(target_arch = "x86_64")]
1095     ///
1096     /// X86 specific call to retrieve the CPUID registers.
1097     ///
1098     fn get_cpuid2(&self, _num_entries: usize) -> cpu::Result<Vec<CpuIdEntry>> {
1099         Ok(self.cpuid.clone())
1100     }
1101 
1102     #[cfg(target_arch = "x86_64")]
1103     ///
1104     /// X86 specific call to retrieve the values of a CPUID leaf.
1105     ///
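    /// # Examples
    ///
    /// A minimal sketch of reading a leaf; leaf 0x1 and the zeroed XFEM/XSS
    /// inputs are illustrative values, and the hidden imports assume the
    /// crate's root re-exports of the `Hypervisor`, `Vm` and `Vcpu` traits:
    ///
    /// ```no_run
    /// # extern crate hypervisor;
    /// # use hypervisor::mshv::MshvHypervisor;
    /// # use hypervisor::{Hypervisor, Vcpu, Vm};
    /// # use std::sync::Arc;
    /// let mshv = MshvHypervisor::new().unwrap();
    /// let hypervisor = Arc::new(mshv);
    /// let vm = hypervisor.create_vm().unwrap();
    /// let vcpu = vm.create_vcpu(0, None).unwrap();
    /// // EAX/EBX/ECX/EDX for leaf 0x1 (processor info and feature bits).
    /// let [_eax, _ebx, _ecx, _edx] = vcpu.get_cpuid_values(0x1, 0, 0, 0).unwrap();
    /// ```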
1106     fn get_cpuid_values(
1107         &self,
1108         function: u32,
1109         index: u32,
1110         xfem: u64,
1111         xss: u64,
1112     ) -> cpu::Result<[u32; 4]> {
1113         self.fd
1114             .get_cpuid_values(function, index, xfem, xss)
1115             .map_err(|e| cpu::HypervisorCpuError::GetCpuidVales(e.into()))
1116     }
1117 
1118     #[cfg(target_arch = "x86_64")]
1119     ///
1120     /// Returns the state of the LAPIC (Local Advanced Programmable Interrupt Controller).
1121     ///
1122     fn get_lapic(&self) -> cpu::Result<crate::arch::x86::LapicState> {
1123         Ok(self
1124             .fd
1125             .get_lapic()
1126             .map_err(|e| cpu::HypervisorCpuError::GetlapicState(e.into()))?
1127             .into())
1128     }
1129 
1130     #[cfg(target_arch = "x86_64")]
1131     ///
1132     /// Sets the state of the LAPIC (Local Advanced Programmable Interrupt Controller).
1133     ///
1134     fn set_lapic(&self, lapic: &crate::arch::x86::LapicState) -> cpu::Result<()> {
1135         let lapic: mshv_bindings::LapicState = (*lapic).clone().into();
1136         self.fd
1137             .set_lapic(&lapic)
1138             .map_err(|e| cpu::HypervisorCpuError::SetLapicState(e.into()))
1139     }
1140 
1141     ///
1142     /// Returns the vcpu's current "multiprocessing state".
1143     ///
1144     fn get_mp_state(&self) -> cpu::Result<MpState> {
1145         Ok(MpState::Mshv)
1146     }
1147 
1148     ///
1149     /// Sets the vcpu's current "multiprocessing state".
1150     ///
1151     fn set_mp_state(&self, _mp_state: MpState) -> cpu::Result<()> {
1152         Ok(())
1153     }
1154 
1155     ///
1156     /// Set CPU state
1157     ///
1158     fn set_state(&self, state: &CpuState) -> cpu::Result<()> {
1159         let state: VcpuMshvState = state.clone().into();
1160         self.set_msrs(&state.msrs)?;
1161         self.set_vcpu_events(&state.vcpu_events)?;
1162         self.set_regs(&state.regs.into())?;
1163         self.set_sregs(&state.sregs.into())?;
1164         self.set_fpu(&state.fpu)?;
1165         self.set_xcrs(&state.xcrs)?;
1166         self.set_lapic(&state.lapic)?;
1167         self.set_xsave(&state.xsave)?;
1168         // These registers are global and need to be set only for the first VCPU,
1169         // as the Microsoft Hypervisor allows setting them for only one VCPU.
1170         if self.vp_index == 0 {
1171             self.fd
1172                 .set_misc_regs(&state.misc)
1173                 .map_err(|e| cpu::HypervisorCpuError::SetMiscRegs(e.into()))?
1174         }
1175         self.fd
1176             .set_debug_regs(&state.dbg)
1177             .map_err(|e| cpu::HypervisorCpuError::SetDebugRegs(e.into()))?;
1178         Ok(())
1179     }
1180 
1181     ///
1182     /// Get CPU State
1183     ///
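    /// # Examples
    ///
    /// A minimal snapshot/restore sketch (assumes an MSHV host; the hidden
    /// imports assume the crate's root re-exports of the traits):
    ///
    /// ```no_run
    /// # extern crate hypervisor;
    /// # use hypervisor::mshv::MshvHypervisor;
    /// # use hypervisor::{Hypervisor, Vcpu, Vm};
    /// # use std::sync::Arc;
    /// let mshv = MshvHypervisor::new().unwrap();
    /// let hypervisor = Arc::new(mshv);
    /// let vm = hypervisor.create_vm().unwrap();
    /// let vcpu = vm.create_vcpu(0, None).unwrap();
    /// // Capture the complete vCPU state ...
    /// let state = vcpu.state().unwrap();
    /// // ... and restore it later, e.g. on the destination of a migration.
    /// vcpu.set_state(&state).unwrap();
    /// ```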
1184     fn state(&self) -> cpu::Result<CpuState> {
1185         let regs = self.get_regs()?;
1186         let sregs = self.get_sregs()?;
1187         let xcrs = self.get_xcrs()?;
1188         let fpu = self.get_fpu()?;
1189         let vcpu_events = self.get_vcpu_events()?;
1190         let mut msrs = self.msrs.clone();
1191         self.get_msrs(&mut msrs)?;
1192         let lapic = self.get_lapic()?;
1193         let xsave = self.get_xsave()?;
1194         let misc = self
1195             .fd
1196             .get_misc_regs()
1197             .map_err(|e| cpu::HypervisorCpuError::GetMiscRegs(e.into()))?;
1198         let dbg = self
1199             .fd
1200             .get_debug_regs()
1201             .map_err(|e| cpu::HypervisorCpuError::GetDebugRegs(e.into()))?;
1202 
1203         Ok(VcpuMshvState {
1204             msrs,
1205             vcpu_events,
1206             regs: regs.into(),
1207             sregs: sregs.into(),
1208             fpu,
1209             xcrs,
1210             lapic,
1211             dbg,
1212             xsave,
1213             misc,
1214         }
1215         .into())
1216     }
1217 
1218     #[cfg(target_arch = "x86_64")]
1219     ///
1220     /// Translate guest virtual address to guest physical address
1221     ///
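    /// # Examples
    ///
    /// A minimal sketch; the GVA and the read-validation flag are
    /// illustrative (`HV_TRANSLATE_GVA_VALIDATE_READ` comes from the
    /// `mshv_bindings` re-export):
    ///
    /// ```no_run
    /// # extern crate hypervisor;
    /// # use hypervisor::mshv::{MshvHypervisor, HV_TRANSLATE_GVA_VALIDATE_READ};
    /// # use hypervisor::{Hypervisor, Vcpu, Vm};
    /// # use std::sync::Arc;
    /// let mshv = MshvHypervisor::new().unwrap();
    /// let hypervisor = Arc::new(mshv);
    /// let vm = hypervisor.create_vm().unwrap();
    /// let vcpu = vm.create_vcpu(0, None).unwrap();
    /// let (_gpa, _result_code) = vcpu
    ///     .translate_gva(0x1000, HV_TRANSLATE_GVA_VALIDATE_READ as u64)
    ///     .unwrap();
    /// ```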
1222     fn translate_gva(&self, gva: u64, flags: u64) -> cpu::Result<(u64, u32)> {
1223         let r = self
1224             .fd
1225             .translate_gva(gva, flags)
1226             .map_err(|e| cpu::HypervisorCpuError::TranslateVirtualAddress(e.into()))?;
1227 
1228         let gpa = r.0;
1229         // SAFETY: r is valid, otherwise this function will have returned
1230         // SAFETY: r is valid, otherwise this function would already have returned
1231 
1232         Ok((gpa, result_code))
1233     }
1234 
1235     #[cfg(target_arch = "x86_64")]
1236     ///
1237     /// Return the list of initial MSR entries for a VCPU
1238     ///
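    /// # Examples
    ///
    /// A minimal sketch of seeding a fresh vCPU with its boot-time MSRs
    /// (hidden imports assume the crate's root re-exports of the traits):
    ///
    /// ```no_run
    /// # extern crate hypervisor;
    /// # use hypervisor::mshv::MshvHypervisor;
    /// # use hypervisor::{Hypervisor, Vcpu, Vm};
    /// # use std::sync::Arc;
    /// let mshv = MshvHypervisor::new().unwrap();
    /// let hypervisor = Arc::new(mshv);
    /// let vm = hypervisor.create_vm().unwrap();
    /// let vcpu = vm.create_vcpu(0, None).unwrap();
    /// let entries = vcpu.boot_msr_entries();
    /// vcpu.set_msrs(&entries).unwrap();
    /// ```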
1239     fn boot_msr_entries(&self) -> Vec<MsrEntry> {
1240         use crate::arch::x86::{msr_index, MTRR_ENABLE, MTRR_MEM_TYPE_WB};
1241 
1242         [
1243             msr!(msr_index::MSR_IA32_SYSENTER_CS),
1244             msr!(msr_index::MSR_IA32_SYSENTER_ESP),
1245             msr!(msr_index::MSR_IA32_SYSENTER_EIP),
1246             msr!(msr_index::MSR_STAR),
1247             msr!(msr_index::MSR_CSTAR),
1248             msr!(msr_index::MSR_LSTAR),
1249             msr!(msr_index::MSR_KERNEL_GS_BASE),
1250             msr!(msr_index::MSR_SYSCALL_MASK),
1251             msr_data!(msr_index::MSR_MTRRdefType, MTRR_ENABLE | MTRR_MEM_TYPE_WB),
1252         ]
1253         .to_vec()
1254     }
1255 
1256     ///
1257     /// Sets the AMD specific vcpu's sev control register.
1258     ///
1259     #[cfg(feature = "sev_snp")]
1260     fn set_sev_control_register(&self, vmsa_pfn: u64) -> cpu::Result<()> {
1261         let sev_control_reg = snp::get_sev_control_register(vmsa_pfn);
1262 
1263         self.fd
1264             .set_sev_control_register(sev_control_reg)
1265             .map_err(|e| cpu::HypervisorCpuError::SetSevControlRegister(e.into()))
1266     }
1267 }
1268 
1269 impl MshvVcpu {
1270     #[cfg(target_arch = "x86_64")]
1271     ///
1272     /// X86 specific call that returns the vcpu's current "xsave struct".
1273     ///
1274     fn get_xsave(&self) -> cpu::Result<Xsave> {
1275         self.fd
1276             .get_xsave()
1277             .map_err(|e| cpu::HypervisorCpuError::GetXsaveState(e.into()))
1278     }
1279 
1280     #[cfg(target_arch = "x86_64")]
1281     ///
1282     /// X86 specific call that sets the vcpu's current "xsave struct".
1283     ///
1284     fn set_xsave(&self, xsave: &Xsave) -> cpu::Result<()> {
1285         self.fd
1286             .set_xsave(xsave)
1287             .map_err(|e| cpu::HypervisorCpuError::SetXsaveState(e.into()))
1288     }
1289 
1290     #[cfg(target_arch = "x86_64")]
1291     ///
1292     /// X86 specific call that returns the vcpu's current "xcrs".
1293     ///
1294     fn get_xcrs(&self) -> cpu::Result<ExtendedControlRegisters> {
1295         self.fd
1296             .get_xcrs()
1297             .map_err(|e| cpu::HypervisorCpuError::GetXcsr(e.into()))
1298     }
1299 
1300     #[cfg(target_arch = "x86_64")]
1301     ///
1302     /// X86 specific call that sets the vcpu's current "xcrs".
1303     ///
1304     fn set_xcrs(&self, xcrs: &ExtendedControlRegisters) -> cpu::Result<()> {
1305         self.fd
1306             .set_xcrs(xcrs)
1307             .map_err(|e| cpu::HypervisorCpuError::SetXcsr(e.into()))
1308     }
1309 
1310     #[cfg(target_arch = "x86_64")]
1311     ///
1312     /// Returns currently pending exceptions, interrupts, and NMIs as well as related
1313     /// states of the vcpu.
1314     ///
1315     fn get_vcpu_events(&self) -> cpu::Result<VcpuEvents> {
1316         self.fd
1317             .get_vcpu_events()
1318             .map_err(|e| cpu::HypervisorCpuError::GetVcpuEvents(e.into()))
1319     }
1320 
1321     #[cfg(target_arch = "x86_64")]
1322     ///
1323     /// Sets pending exceptions, interrupts, and NMIs as well as related states
1324     /// of the vcpu.
1325     ///
1326     fn set_vcpu_events(&self, events: &VcpuEvents) -> cpu::Result<()> {
1327         self.fd
1328             .set_vcpu_events(events)
1329             .map_err(|e| cpu::HypervisorCpuError::SetVcpuEvents(e.into()))
1330     }
1331 }
1332 
1333 struct MshvEmulatorContext<'a> {
1334     vcpu: &'a MshvVcpu,
1335     map: (u64, u64), // Initial GVA to GPA mapping provided by the hypervisor
1336 }
1337 
1338 impl<'a> MshvEmulatorContext<'a> {
1339     // Do the actual gva -> gpa translation
1340     #[allow(non_upper_case_globals)]
1341     fn translate(&self, gva: u64) -> Result<u64, PlatformError> {
1342         if self.map.0 == gva {
1343             return Ok(self.map.1);
1344         }
1345 
1346         // TODO: More fine-grained control for the flags
1347         let flags = HV_TRANSLATE_GVA_VALIDATE_READ | HV_TRANSLATE_GVA_VALIDATE_WRITE;
1348 
1349         let (gpa, result_code) = self
1350             .vcpu
1351             .translate_gva(gva, flags.into())
1352             .map_err(|e| PlatformError::TranslateVirtualAddress(anyhow!(e)))?;
1353 
1354         match result_code {
1355             hv_translate_gva_result_code_HV_TRANSLATE_GVA_SUCCESS => Ok(gpa),
1356             _ => Err(PlatformError::TranslateVirtualAddress(anyhow!(result_code))),
1357         }
1358     }
1359 }
1360 
1361 /// Platform emulation for Hyper-V
1362 impl<'a> PlatformEmulator for MshvEmulatorContext<'a> {
1363     type CpuState = EmulatorCpuState;
1364 
1365     fn read_memory(&self, gva: u64, data: &mut [u8]) -> Result<(), PlatformError> {
1366         let gpa = self.translate(gva)?;
1367         debug!(
1368             "mshv emulator: memory read {} bytes from [{:#x} -> {:#x}]",
1369             data.len(),
1370             gva,
1371             gpa
1372         );
1373 
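        // Attempt the read from regular guest memory first; if the GPA is
        // not backed by RAM (e.g. it belongs to a device), fall back to the
        // MMIO path so the access is emulated instead.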
1374         if let Some(vm_ops) = &self.vcpu.vm_ops {
1375             if vm_ops.guest_mem_read(gpa, data).is_err() {
1376                 vm_ops
1377                     .mmio_read(gpa, data)
1378                     .map_err(|e| PlatformError::MemoryReadFailure(e.into()))?;
1379             }
1380         }
1381 
1382         Ok(())
1383     }
1384 
1385     fn write_memory(&mut self, gva: u64, data: &[u8]) -> Result<(), PlatformError> {
1386         let gpa = self.translate(gva)?;
1387         debug!(
1388             "mshv emulator: memory write {} bytes at [{:#x} -> {:#x}]",
1389             data.len(),
1390             gva,
1391             gpa
1392         );
1393 
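        // Mirror the read path: try a plain guest memory write first and
        // fall back to emulated MMIO when the GPA is not RAM-backed.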
1394         if let Some(vm_ops) = &self.vcpu.vm_ops {
1395             if vm_ops.guest_mem_write(gpa, data).is_err() {
1396                 vm_ops
1397                     .mmio_write(gpa, data)
1398                     .map_err(|e| PlatformError::MemoryWriteFailure(e.into()))?;
1399             }
1400         }
1401 
1402         Ok(())
1403     }
1404 
1405     fn cpu_state(&self, cpu_id: usize) -> Result<Self::CpuState, PlatformError> {
1406         if cpu_id != self.vcpu.vp_index as usize {
1407             return Err(PlatformError::GetCpuStateFailure(anyhow!(
1408                 "CPU id mismatch {:?} {:?}",
1409                 cpu_id,
1410                 self.vcpu.vp_index
1411             )));
1412         }
1413 
1414         let regs = self
1415             .vcpu
1416             .get_regs()
1417             .map_err(|e| PlatformError::GetCpuStateFailure(e.into()))?;
1418         let sregs = self
1419             .vcpu
1420             .get_sregs()
1421             .map_err(|e| PlatformError::GetCpuStateFailure(e.into()))?;
1422 
1423         debug!("mshv emulator: Getting new CPU state");
1424         debug!("mshv emulator: {:#x?}", regs);
1425 
1426         Ok(EmulatorCpuState { regs, sregs })
1427     }
1428 
1429     fn set_cpu_state(&self, cpu_id: usize, state: Self::CpuState) -> Result<(), PlatformError> {
1430         if cpu_id != self.vcpu.vp_index as usize {
1431             return Err(PlatformError::SetCpuStateFailure(anyhow!(
1432                 "CPU id mismatch {:?} {:?}",
1433                 cpu_id,
1434                 self.vcpu.vp_index
1435             )));
1436         }
1437 
1438         debug!("mshv emulator: Setting new CPU state");
1439         debug!("mshv emulator: {:#x?}", state.regs);
1440 
1441         self.vcpu
1442             .set_regs(&state.regs)
1443             .map_err(|e| PlatformError::SetCpuStateFailure(e.into()))?;
1444         self.vcpu
1445             .set_sregs(&state.sregs)
1446             .map_err(|e| PlatformError::SetCpuStateFailure(e.into()))
1447     }
1448 
1449     fn gva_to_gpa(&self, gva: u64) -> Result<u64, PlatformError> {
1450         self.translate(gva)
1451     }
1452 
1453     fn fetch(&self, _ip: u64, _instruction_bytes: &mut [u8]) -> Result<(), PlatformError> {
1454         Err(PlatformError::MemoryReadFailure(anyhow!("unimplemented")))
1455     }
1456 }
1457 
1458 /// Wrapper over Mshv VM ioctls.
1459 pub struct MshvVm {
1460     fd: Arc<VmFd>,
1461     msrs: Vec<MsrEntry>,
1462     dirty_log_slots: Arc<RwLock<HashMap<u64, MshvDirtyLogSlot>>>,
1463 }
1464 
1465 impl MshvVm {
1466     ///
1467     /// Creates an in-kernel device.
1468     ///
1469     /// See the documentation for `MSHV_CREATE_DEVICE`.
1470     fn create_device(&self, device: &mut CreateDevice) -> vm::Result<VfioDeviceFd> {
1471         let device_fd = self
1472             .fd
1473             .create_device(device)
1474             .map_err(|e| vm::HypervisorVmError::CreateDevice(e.into()))?;
1475         Ok(VfioDeviceFd::new_from_mshv(device_fd))
1476     }
1477 }
1478 
1479 ///
1480 /// Implementation of Vm trait for Mshv
1481 ///
1482 /// # Examples
1483 ///
1484 /// ```
1485 /// # extern crate hypervisor;
1486 /// # use hypervisor::mshv::MshvHypervisor;
1487 /// # use std::sync::Arc;
1488 /// let mshv = MshvHypervisor::new().unwrap();
1489 /// let hypervisor = Arc::new(mshv);
1490 /// let vm = hypervisor.create_vm().expect("new VM fd creation failed");
1491 /// ```
1492 impl vm::Vm for MshvVm {
1493     #[cfg(target_arch = "x86_64")]
1494     ///
1495     /// Sets the address of the one-page region in the VM's address space.
1496     ///
1497     fn set_identity_map_address(&self, _address: u64) -> vm::Result<()> {
1498         Ok(())
1499     }
1500 
1501     #[cfg(target_arch = "x86_64")]
1502     ///
1503     /// Sets the address of the three-page region in the VM's address space.
1504     ///
1505     fn set_tss_address(&self, _offset: usize) -> vm::Result<()> {
1506         Ok(())
1507     }
1508 
1509     ///
1510     /// Creates an in-kernel interrupt controller.
1511     ///
1512     fn create_irq_chip(&self) -> vm::Result<()> {
1513         Ok(())
1514     }
1515 
1516     ///
1517     /// Registers an event that will, when signaled, trigger the `gsi` IRQ.
1518     ///
1519     fn register_irqfd(&self, fd: &EventFd, gsi: u32) -> vm::Result<()> {
1520         debug!("register_irqfd fd {} gsi {}", fd.as_raw_fd(), gsi);
1521 
1522         self.fd
1523             .register_irqfd(fd, gsi)
1524             .map_err(|e| vm::HypervisorVmError::RegisterIrqFd(e.into()))?;
1525 
1526         Ok(())
1527     }
1528 
1529     ///
1530     /// Unregisters an event that will, when signaled, trigger the `gsi` IRQ.
1531     ///
1532     fn unregister_irqfd(&self, fd: &EventFd, gsi: u32) -> vm::Result<()> {
1533         debug!("unregister_irqfd fd {} gsi {}", fd.as_raw_fd(), gsi);
1534 
1535         self.fd
1536             .unregister_irqfd(fd, gsi)
1537             .map_err(|e| vm::HypervisorVmError::UnregisterIrqFd(e.into()))?;
1538 
1539         Ok(())
1540     }
1541 
1542     ///
1543     /// Creates a vCPU for the given id and returns it as a `Vcpu` trait object.
1544     ///
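    /// # Examples
    ///
    /// A minimal sketch (no `VmOps` backend is wired up here; the hidden
    /// imports assume the crate's root re-exports of the traits):
    ///
    /// ```no_run
    /// # extern crate hypervisor;
    /// # use hypervisor::mshv::MshvHypervisor;
    /// # use hypervisor::{Hypervisor, Vm};
    /// # use std::sync::Arc;
    /// let mshv = MshvHypervisor::new().unwrap();
    /// let hypervisor = Arc::new(mshv);
    /// let vm = hypervisor.create_vm().unwrap();
    /// let vcpu = vm.create_vcpu(0, None).unwrap();
    /// ```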
1545     fn create_vcpu(
1546         &self,
1547         id: u8,
1548         vm_ops: Option<Arc<dyn VmOps>>,
1549     ) -> vm::Result<Arc<dyn cpu::Vcpu>> {
1550         let vcpu_fd = self
1551             .fd
1552             .create_vcpu(id)
1553             .map_err(|e| vm::HypervisorVmError::CreateVcpu(e.into()))?;
1554         let vcpu = MshvVcpu {
1555             fd: vcpu_fd,
1556             vp_index: id,
1557             cpuid: Vec::new(),
1558             msrs: self.msrs.clone(),
1559             vm_ops,
1560             #[cfg(feature = "sev_snp")]
1561             vm_fd: self.fd.clone(),
1562         };
1563         Ok(Arc::new(vcpu))
1564     }
1565 
1566     #[cfg(target_arch = "x86_64")]
1567     fn enable_split_irq(&self) -> vm::Result<()> {
1568         Ok(())
1569     }
1570 
1571     #[cfg(target_arch = "x86_64")]
1572     fn enable_sgx_attribute(&self, _file: File) -> vm::Result<()> {
1573         Ok(())
1574     }
1575 
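    ///
    /// Registers an event to be signaled whenever the guest writes to the
    /// given address (optionally only when the written value matches).
    ///
    /// # Examples
    ///
    /// A minimal sketch; the MMIO address and datamatch value are made up:
    ///
    /// ```no_run
    /// # extern crate hypervisor;
    /// # extern crate vmm_sys_util;
    /// # use hypervisor::mshv::MshvHypervisor;
    /// # use hypervisor::vm::DataMatch;
    /// # use hypervisor::{Hypervisor, IoEventAddress, Vm};
    /// # use std::sync::Arc;
    /// # use vmm_sys_util::eventfd::EventFd;
    /// let mshv = MshvHypervisor::new().unwrap();
    /// let hypervisor = Arc::new(mshv);
    /// let vm = hypervisor.create_vm().unwrap();
    /// let evt = EventFd::new(0).unwrap();
    /// // Signal `evt` only when the guest writes 0x42 to the address.
    /// vm.register_ioevent(
    ///     &evt,
    ///     &IoEventAddress::Mmio(0xd000_0000),
    ///     Some(DataMatch::DataMatch32(0x42)),
    /// )
    /// .unwrap();
    /// ```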
1576     fn register_ioevent(
1577         &self,
1578         fd: &EventFd,
1579         addr: &IoEventAddress,
1580         datamatch: Option<DataMatch>,
1581     ) -> vm::Result<()> {
1582         let addr = &mshv_ioctls::IoEventAddress::from(*addr);
1583         debug!(
1584             "register_ioevent fd {} addr {:x?} datamatch {:?}",
1585             fd.as_raw_fd(),
1586             addr,
1587             datamatch
1588         );
1589         if let Some(dm) = datamatch {
1590             match dm {
1591                 vm::DataMatch::DataMatch32(mshv_dm32) => self
1592                     .fd
1593                     .register_ioevent(fd, addr, mshv_dm32)
1594                     .map_err(|e| vm::HypervisorVmError::RegisterIoEvent(e.into())),
1595                 vm::DataMatch::DataMatch64(mshv_dm64) => self
1596                     .fd
1597                     .register_ioevent(fd, addr, mshv_dm64)
1598                     .map_err(|e| vm::HypervisorVmError::RegisterIoEvent(e.into())),
1599             }
1600         } else {
1601             self.fd
1602                 .register_ioevent(fd, addr, NoDatamatch)
1603                 .map_err(|e| vm::HypervisorVmError::RegisterIoEvent(e.into()))
1604         }
1605     }
1606 
1607     /// Unregisters an event from the address it was previously registered to.
1608     fn unregister_ioevent(&self, fd: &EventFd, addr: &IoEventAddress) -> vm::Result<()> {
1609         let addr = &mshv_ioctls::IoEventAddress::from(*addr);
1610         debug!("unregister_ioevent fd {} addr {:x?}", fd.as_raw_fd(), addr);
1611 
1612         self.fd
1613             .unregister_ioevent(fd, addr, NoDatamatch)
1614             .map_err(|e| vm::HypervisorVmError::UnregisterIoEvent(e.into()))
1615     }
1616 
1617     /// Creates a guest physical memory region.
1618     fn create_user_memory_region(&self, user_memory_region: UserMemoryRegion) -> vm::Result<()> {
1619         let user_memory_region: mshv_user_mem_region = user_memory_region.into();
1620         // We keep track of the slots whether they are read-only or not:
1621         // for a read-only slot the hypervisor can enable the dirty bits,
1622         // but a VM exit happens before the dirty bits are set.
1623         self.dirty_log_slots.write().unwrap().insert(
1624             user_memory_region.guest_pfn,
1625             MshvDirtyLogSlot {
1626                 guest_pfn: user_memory_region.guest_pfn,
1627                 memory_size: user_memory_region.size,
1628             },
1629         );
1630 
1631         self.fd
1632             .map_user_memory(user_memory_region)
1633             .map_err(|e| vm::HypervisorVmError::CreateUserMemory(e.into()))?;
1634         Ok(())
1635     }
1636 
1637     /// Removes a guest physical memory region.
1638     fn remove_user_memory_region(&self, user_memory_region: UserMemoryRegion) -> vm::Result<()> {
1639         let user_memory_region: mshv_user_mem_region = user_memory_region.into();
1640         // Remove the corresponding entry from "self.dirty_log_slots" if needed
1641         self.dirty_log_slots
1642             .write()
1643             .unwrap()
1644             .remove(&user_memory_region.guest_pfn);
1645 
1646         self.fd
1647             .unmap_user_memory(user_memory_region)
1648             .map_err(|e| vm::HypervisorVmError::RemoveUserMemory(e.into()))?;
1649         Ok(())
1650     }
1651 
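    ///
    /// Builds a `UserMemoryRegion` from raw parameters; internally the GPA
    /// is turned into a page frame number by shifting right by `PAGE_SHIFT`.
    ///
    /// # Examples
    ///
    /// A minimal sketch; the addresses are illustrative (a real
    /// `userspace_addr` would come from an mmap-backed allocation):
    ///
    /// ```no_run
    /// # extern crate hypervisor;
    /// # use hypervisor::mshv::MshvHypervisor;
    /// # use hypervisor::{Hypervisor, Vm};
    /// # use std::sync::Arc;
    /// let mshv = MshvHypervisor::new().unwrap();
    /// let hypervisor = Arc::new(mshv);
    /// let vm = hypervisor.create_vm().unwrap();
    /// // GPA 0x10_0000 becomes guest_pfn 0x100 (0x10_0000 >> 12).
    /// let region = vm.make_user_memory_region(0, 0x10_0000, 0x10_0000, 0x7f00_0000, false, false);
    /// vm.create_user_memory_region(region).unwrap();
    /// ```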
1652     fn make_user_memory_region(
1653         &self,
1654         _slot: u32,
1655         guest_phys_addr: u64,
1656         memory_size: u64,
1657         userspace_addr: u64,
1658         readonly: bool,
1659         _log_dirty_pages: bool,
1660     ) -> UserMemoryRegion {
1661         let mut flags = HV_MAP_GPA_READABLE | HV_MAP_GPA_EXECUTABLE;
1662         if !readonly {
1663             flags |= HV_MAP_GPA_WRITABLE;
1664         }
1665 
1666         mshv_user_mem_region {
1667             flags,
1668             guest_pfn: guest_phys_addr >> PAGE_SHIFT,
1669             size: memory_size,
1670             userspace_addr,
1671         }
1672         .into()
1673     }
1674 
1675     fn create_passthrough_device(&self) -> vm::Result<VfioDeviceFd> {
1676         let mut vfio_dev = mshv_create_device {
1677             type_: mshv_device_type_MSHV_DEV_TYPE_VFIO,
1678             fd: 0,
1679             flags: 0,
1680         };
1681 
1682         self.create_device(&mut vfio_dev)
1683             .map_err(|e| vm::HypervisorVmError::CreatePassthroughDevice(e.into()))
1684     }
1685 
1686     ///
1687     /// Constructs a routing entry
1688     ///
1689     fn make_routing_entry(&self, gsi: u32, config: &InterruptSourceConfig) -> IrqRoutingEntry {
1690         match config {
1691             InterruptSourceConfig::MsiIrq(cfg) => mshv_msi_routing_entry {
1692                 gsi,
1693                 address_lo: cfg.low_addr,
1694                 address_hi: cfg.high_addr,
1695                 data: cfg.data,
1696             }
1697             .into(),
1698             _ => {
1699                 unreachable!()
1700             }
1701         }
1702     }
1703 
1704     fn set_gsi_routing(&self, entries: &[IrqRoutingEntry]) -> vm::Result<()> {
1705         let mut msi_routing =
1706             vec_with_array_field::<mshv_msi_routing, mshv_msi_routing_entry>(entries.len());
1707         msi_routing[0].nr = entries.len() as u32;
1708 
1709         let entries: Vec<mshv_msi_routing_entry> = entries
1710             .iter()
1711             .map(|entry| match entry {
1712                 IrqRoutingEntry::Mshv(e) => *e,
1713                 #[allow(unreachable_patterns)]
1714                 _ => panic!("IrqRoutingEntry type is wrong"),
1715             })
1716             .collect();
1717 
1718         // SAFETY: msi_routing initialized with entries.len() and now it is being turned into
1719         // entries_slice with entries.len() again. It is guaranteed to be large enough to hold
1720         // everything from entries.
1721         unsafe {
1722             let entries_slice: &mut [mshv_msi_routing_entry] =
1723                 msi_routing[0].entries.as_mut_slice(entries.len());
1724             entries_slice.copy_from_slice(&entries);
1725         }
1726 
1727         self.fd
1728             .set_msi_routing(&msi_routing[0])
1729             .map_err(|e| vm::HypervisorVmError::SetGsiRouting(e.into()))
1730     }
1731 
1732     ///
1733     /// Start logging dirty pages
1734     ///
1735     fn start_dirty_log(&self) -> vm::Result<()> {
1736         self.fd
1737             .enable_dirty_page_tracking()
1738             .map_err(|e| vm::HypervisorVmError::StartDirtyLog(e.into()))
1739     }
1740 
1741     ///
1742     /// Stop logging dirty pages
1743     ///
1744     fn stop_dirty_log(&self) -> vm::Result<()> {
1745         let dirty_log_slots = self.dirty_log_slots.read().unwrap();
1746         // Before disabling dirty page tracking we need to set
1747         // the dirty bits in the hypervisor; this is a requirement
1748         // of the Microsoft Hypervisor.
1749         for (_, s) in dirty_log_slots.iter() {
1750             self.fd
1751                 .get_dirty_log(s.guest_pfn, s.memory_size as usize, DIRTY_BITMAP_SET_DIRTY)
1752                 .map_err(|e| vm::HypervisorVmError::StopDirtyLog(e.into()))?;
1753         }
1754         self.fd
1755             .disable_dirty_page_tracking()
1756             .map_err(|e| vm::HypervisorVmError::StopDirtyLog(e.into()))?;
1757         Ok(())
1758     }
1759 
1760     ///
1761     /// Get dirty pages bitmap (one bit per page)
1762     ///
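    /// # Examples
    ///
    /// A minimal sketch; the GPA range is illustrative, and each set bit in
    /// the returned words marks one dirty 4 KiB page:
    ///
    /// ```no_run
    /// # extern crate hypervisor;
    /// # use hypervisor::mshv::MshvHypervisor;
    /// # use hypervisor::{Hypervisor, Vm};
    /// # use std::sync::Arc;
    /// let mshv = MshvHypervisor::new().unwrap();
    /// let hypervisor = Arc::new(mshv);
    /// let vm = hypervisor.create_vm().unwrap();
    /// vm.start_dirty_log().unwrap();
    /// let bitmap = vm.get_dirty_log(0, 0x10_0000, 0x20_0000).unwrap();
    /// let dirty_pages: u32 = bitmap.iter().map(|w| w.count_ones()).sum();
    /// ```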
1763     fn get_dirty_log(&self, _slot: u32, base_gpa: u64, memory_size: u64) -> vm::Result<Vec<u64>> {
1764         self.fd
1765             .get_dirty_log(
1766                 base_gpa >> PAGE_SHIFT,
1767                 memory_size as usize,
1768                 DIRTY_BITMAP_CLEAR_DIRTY,
1769             )
1770             .map_err(|e| vm::HypervisorVmError::GetDirtyLog(e.into()))
1771     }
1772 
1773     /// Retrieve guest clock.
1774     #[cfg(target_arch = "x86_64")]
1775     fn get_clock(&self) -> vm::Result<ClockData> {
1776         Ok(ClockData::Mshv)
1777     }
1778 
1779     /// Set guest clock.
1780     #[cfg(target_arch = "x86_64")]
1781     fn set_clock(&self, _data: &ClockData) -> vm::Result<()> {
1782         Ok(())
1783     }
1784 
1785     /// Downcast to the underlying MshvVm type
1786     fn as_any(&self) -> &dyn Any {
1787         self
1788     }
1789 
1790     /// Initialize the SEV-SNP VM
1791     #[cfg(feature = "sev_snp")]
1792     fn sev_snp_init(&self) -> vm::Result<()> {
1793         self.fd
1794             .set_partition_property(
1795                 hv_partition_property_code_HV_PARTITION_PROPERTY_ISOLATION_STATE,
1796                 hv_partition_isolation_state_HV_PARTITION_ISOLATION_SECURE as u64,
1797             )
1798             .map_err(|e| vm::HypervisorVmError::InitializeSevSnp(e.into()))
1799     }
1800 
1801     ///
1802     /// Imports isolated pages; these pages will be used
1803     /// for the PSP (Platform Security Processor) measurement.
1804     #[cfg(feature = "sev_snp")]
1805     fn import_isolated_pages(
1806         &self,
1807         page_type: u32,
1808         page_size: u32,
1809         pages: &[u64],
1810     ) -> vm::Result<()> {
1811         if pages.is_empty() {
1812             return Ok(());
1813         }
1814 
1815         let mut isolated_pages =
1816             vec_with_array_field::<mshv_import_isolated_pages, u64>(pages.len());
1817         isolated_pages[0].num_pages = pages.len() as u64;
1818         isolated_pages[0].page_type = page_type;
1819         isolated_pages[0].page_size = page_size;
1820         // SAFETY: isolated_pages initialized with pages.len() and now it is being turned into
1821         // pages_slice with pages.len() again. It is guaranteed to be large enough to hold
1822         // everything from pages.
1823         unsafe {
1824             let pages_slice: &mut [u64] = isolated_pages[0].page_number.as_mut_slice(pages.len());
1825             pages_slice.copy_from_slice(pages);
1826         }
1827         self.fd
1828             .import_isolated_pages(&isolated_pages[0])
1829             .map_err(|e| vm::HypervisorVmError::ImportIsolatedPages(e.into()))
1830     }
1831 
1832     ///
1833     /// Complete isolated import, telling the hypervisor that
1834     /// importing the pages to guest memory is complete.
1835     ///
1836     #[cfg(feature = "sev_snp")]
1837     fn complete_isolated_import(
1838         &self,
1839         snp_id_block: IGVM_VHS_SNP_ID_BLOCK,
1840         host_data: [u8; 32],
1841         id_block_enabled: u8,
1842     ) -> vm::Result<()> {
1843         let mut auth_info = hv_snp_id_auth_info {
1844             id_key_algorithm: snp_id_block.id_key_algorithm,
1845             auth_key_algorithm: snp_id_block.author_key_algorithm,
1846             ..Default::default()
1847         };
1848         // Each of the r and s components is 576 bits (72 bytes) long.
1849         auth_info.id_block_signature[..SIG_R_COMPONENT_SIZE_IN_BYTES]
1850             .copy_from_slice(snp_id_block.id_key_signature.r_comp.as_ref());
1851         auth_info.id_block_signature
1852             [SIG_R_COMPONENT_SIZE_IN_BYTES..SIG_R_AND_S_COMPONENT_SIZE_IN_BYTES]
1853             .copy_from_slice(snp_id_block.id_key_signature.s_comp.as_ref());
1854         auth_info.id_key[..ECDSA_CURVE_ID_SIZE_IN_BYTES]
1855             .copy_from_slice(snp_id_block.id_public_key.curve.to_le_bytes().as_ref());
1856         auth_info.id_key[ECDSA_SIG_X_COMPONENT_START..ECDSA_SIG_X_COMPONENT_END]
1857             .copy_from_slice(snp_id_block.id_public_key.qx.as_ref());
1858         auth_info.id_key[ECDSA_SIG_Y_COMPONENT_START..ECDSA_SIG_Y_COMPONENT_END]
1859             .copy_from_slice(snp_id_block.id_public_key.qy.as_ref());
1860 
1861         let data = mshv_complete_isolated_import {
1862             import_data: hv_partition_complete_isolated_import_data {
1863                 psp_parameters: hv_psp_launch_finish_data {
1864                     id_block: hv_snp_id_block {
1865                         launch_digest: snp_id_block.ld,
1866                         family_id: snp_id_block.family_id,
1867                         image_id: snp_id_block.image_id,
1868                         version: snp_id_block.version,
1869                         guest_svn: snp_id_block.guest_svn,
1870                         policy: get_default_snp_guest_policy(),
1871                     },
1872                     id_auth_info: auth_info,
1873                     host_data,
1874                     id_block_enabled,
1875                     author_key_enabled: 0,
1876                 },
1877             },
1878         };
1879         self.fd
1880             .complete_isolated_import(&data)
1881             .map_err(|e| vm::HypervisorVmError::CompleteIsolatedImport(e.into()))
1882     }
1883 }
1884