// SPDX-License-Identifier: Apache-2.0 OR BSD-3-Clause
//
// Copyright © 2020, Microsoft Corporation
//

use crate::arch::emulator::{PlatformEmulator, PlatformError};

#[cfg(target_arch = "x86_64")]
use crate::arch::x86::emulator::{Emulator, EmulatorCpuState};
use crate::cpu;
use crate::cpu::Vcpu;
use crate::hypervisor;
use crate::vec_with_array_field;
use crate::vm::{self, InterruptSourceConfig, VmOps};
use crate::HypervisorType;
pub use mshv_bindings::*;
use mshv_ioctls::{set_registers_64, Mshv, NoDatamatch, VcpuFd, VmFd, VmType};
use std::any::Any;
use std::collections::HashMap;
use std::sync::{Arc, RwLock};
use vfio_ioctls::VfioDeviceFd;
use vm::DataMatch;

#[cfg(feature = "sev_snp")]
mod snp_constants;
// x86_64 dependencies
#[cfg(target_arch = "x86_64")]
pub mod x86_64;
#[cfg(feature = "sev_snp")]
use snp_constants::*;

use crate::{
    ClockData, CpuState, IoEventAddress, IrqRoutingEntry, MpState, UserMemoryRegion,
    USER_MEMORY_REGION_ADJUSTABLE, USER_MEMORY_REGION_EXECUTE, USER_MEMORY_REGION_READ,
    USER_MEMORY_REGION_WRITE,
};
#[cfg(feature = "sev_snp")]
use igvm_defs::IGVM_VHS_SNP_ID_BLOCK;
use vmm_sys_util::eventfd::EventFd;
#[cfg(target_arch = "x86_64")]
pub use x86_64::VcpuMshvState;
#[cfg(target_arch = "x86_64")]
pub use x86_64::*;

#[cfg(target_arch = "x86_64")]
use std::fs::File;
use std::os::unix::io::AsRawFd;

#[cfg(target_arch = "x86_64")]
use crate::arch::x86::{CpuIdEntry, FpuState, MsrEntry};

const DIRTY_BITMAP_CLEAR_DIRTY: u64 = 0x4;
const DIRTY_BITMAP_SET_DIRTY: u64 = 0x8;
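// These flags select whether fetching the dirty-log bitmap also clears or sets
// the per-page dirty state. A hedged sketch of the intended call, assuming the
// get_dirty_log wrapper exposed by mshv-ioctls:
//
//     // Fetch the bitmap and atomically clear the dirty bits.
//     vm_fd.get_dirty_log(base_guest_pfn, memory_size, DIRTY_BITMAP_CLEAR_DIRTY)?;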

///
/// Export generically-named wrappers of mshv-bindings for Unix-based platforms
///
pub use {
    mshv_bindings::mshv_create_device as CreateDevice,
    mshv_bindings::mshv_device_attr as DeviceAttr, mshv_ioctls::DeviceFd,
};

pub const PAGE_SHIFT: usize = 12;
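// PAGE_SHIFT of 12 corresponds to the 4 KiB guest page granularity assumed
// throughout this module. A worked example of the PFN <-> GPA arithmetic used
// below:
//
//     let pfn: u64 = 0x1234;
//     let gpa = pfn << PAGE_SHIFT; // 0x1234 << 12 == 0x0123_4000
//     assert_eq!(gpa >> PAGE_SHIFT, pfn);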

impl From<mshv_user_mem_region> for UserMemoryRegion {
    fn from(region: mshv_user_mem_region) -> Self {
        let mut flags: u32 = 0;
        if region.flags & HV_MAP_GPA_READABLE != 0 {
            flags |= USER_MEMORY_REGION_READ;
        }
        if region.flags & HV_MAP_GPA_WRITABLE != 0 {
            flags |= USER_MEMORY_REGION_WRITE;
        }
        if region.flags & HV_MAP_GPA_EXECUTABLE != 0 {
            flags |= USER_MEMORY_REGION_EXECUTE;
        }
        if region.flags & HV_MAP_GPA_ADJUSTABLE != 0 {
            flags |= USER_MEMORY_REGION_ADJUSTABLE;
        }

        UserMemoryRegion {
            guest_phys_addr: (region.guest_pfn << PAGE_SHIFT as u64)
                + (region.userspace_addr & ((1 << PAGE_SHIFT) - 1)),
            memory_size: region.size,
            userspace_addr: region.userspace_addr,
            flags,
            ..Default::default()
        }
    }
}
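
// Note: mshv_user_mem_region stores only a page frame number, so the
// conversion above rebuilds the GPA from the PFN and then re-applies the
// sub-page offset of the userspace mapping. An illustrative round trip:
//
//     guest_pfn = 0x100, userspace_addr = 0x7f00_0000_0ab0
//     guest_phys_addr = (0x100 << 12) + (0x7f00_0000_0ab0 & 0xfff) == 0x100ab0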

impl From<UserMemoryRegion> for mshv_user_mem_region {
    fn from(region: UserMemoryRegion) -> Self {
        let mut flags: u32 = 0;
        if region.flags & USER_MEMORY_REGION_READ != 0 {
            flags |= HV_MAP_GPA_READABLE;
        }
        if region.flags & USER_MEMORY_REGION_WRITE != 0 {
            flags |= HV_MAP_GPA_WRITABLE;
        }
        if region.flags & USER_MEMORY_REGION_EXECUTE != 0 {
            flags |= HV_MAP_GPA_EXECUTABLE;
        }
        if region.flags & USER_MEMORY_REGION_ADJUSTABLE != 0 {
            flags |= HV_MAP_GPA_ADJUSTABLE;
        }

        mshv_user_mem_region {
            guest_pfn: region.guest_phys_addr >> PAGE_SHIFT,
            size: region.memory_size,
            userspace_addr: region.userspace_addr,
            flags,
        }
    }
}

impl From<mshv_ioctls::IoEventAddress> for IoEventAddress {
    fn from(a: mshv_ioctls::IoEventAddress) -> Self {
        match a {
            mshv_ioctls::IoEventAddress::Pio(x) => Self::Pio(x),
            mshv_ioctls::IoEventAddress::Mmio(x) => Self::Mmio(x),
        }
    }
}

impl From<IoEventAddress> for mshv_ioctls::IoEventAddress {
    fn from(a: IoEventAddress) -> Self {
        match a {
            IoEventAddress::Pio(x) => Self::Pio(x),
            IoEventAddress::Mmio(x) => Self::Mmio(x),
        }
    }
}

impl From<VcpuMshvState> for CpuState {
    fn from(s: VcpuMshvState) -> Self {
        CpuState::Mshv(s)
    }
}

impl From<CpuState> for VcpuMshvState {
    fn from(s: CpuState) -> Self {
        match s {
            CpuState::Mshv(s) => s,
            /* Needed in case other hypervisors are enabled */
            #[allow(unreachable_patterns)]
            _ => panic!("CpuState is not valid"),
        }
    }
}

impl From<mshv_msi_routing_entry> for IrqRoutingEntry {
    fn from(s: mshv_msi_routing_entry) -> Self {
        IrqRoutingEntry::Mshv(s)
    }
}

impl From<IrqRoutingEntry> for mshv_msi_routing_entry {
    fn from(e: IrqRoutingEntry) -> Self {
        match e {
            IrqRoutingEntry::Mshv(e) => e,
            /* Needed in case other hypervisors are enabled */
            #[allow(unreachable_patterns)]
            _ => panic!("IrqRoutingEntry is not valid"),
        }
    }
}

struct MshvDirtyLogSlot {
    guest_pfn: u64,
    memory_size: u64,
}

/// Wrapper over mshv system ioctls.
pub struct MshvHypervisor {
    mshv: Mshv,
}

impl MshvHypervisor {
    #[cfg(target_arch = "x86_64")]
    ///
    /// Retrieve the list of MSRs supported by MSHV.
    ///
    fn get_msr_list(&self) -> hypervisor::Result<MsrList> {
        self.mshv
            .get_msr_index_list()
            .map_err(|e| hypervisor::HypervisorError::GetMsrList(e.into()))
    }
}

impl MshvHypervisor {
    /// Create a hypervisor based on Mshv
    #[allow(clippy::new_ret_no_self)]
    pub fn new() -> hypervisor::Result<Arc<dyn hypervisor::Hypervisor>> {
        let mshv_obj =
            Mshv::new().map_err(|e| hypervisor::HypervisorError::HypervisorCreate(e.into()))?;
        Ok(Arc::new(MshvHypervisor { mshv: mshv_obj }))
    }
    /// Check if the hypervisor is available
    pub fn is_available() -> hypervisor::Result<bool> {
        match std::fs::metadata("/dev/mshv") {
            Ok(_) => Ok(true),
            Err(err) if err.kind() == std::io::ErrorKind::NotFound => Ok(false),
            Err(err) => Err(hypervisor::HypervisorError::HypervisorAvailableCheck(
                err.into(),
            )),
        }
    }
}

/// Implementation of Hypervisor trait for Mshv
///
/// # Examples
///
/// ```
/// # use hypervisor::mshv::MshvHypervisor;
/// # use std::sync::Arc;
/// let mshv = MshvHypervisor::new().unwrap();
/// let hypervisor = Arc::new(mshv);
/// let vm = hypervisor.create_vm().expect("new VM fd creation failed");
/// ```
impl hypervisor::Hypervisor for MshvHypervisor {
    ///
    /// Returns the type of the hypervisor
    ///
    fn hypervisor_type(&self) -> HypervisorType {
        HypervisorType::Mshv
    }

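    /// Create a VM with a specific MSHV VM type (for example an SEV-SNP VM
    /// when the "sev_snp" feature is enabled).
    ///
    /// # Examples
    ///
    /// A minimal sketch, passing `0` for a regular VM exactly as `create_vm`
    /// below does (requires an MSHV-capable host):
    ///
    /// ```ignore
    /// # use hypervisor::mshv::MshvHypervisor;
    /// let hypervisor = MshvHypervisor::new().unwrap();
    /// let vm = hypervisor.create_vm_with_type(0).unwrap();
    /// ```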
    fn create_vm_with_type(&self, vm_type: u64) -> hypervisor::Result<Arc<dyn crate::Vm>> {
        let mshv_vm_type: VmType = match VmType::try_from(vm_type) {
            Ok(vm_type) => vm_type,
            Err(_) => return Err(hypervisor::HypervisorError::UnsupportedVmType()),
        };
        let fd: VmFd;
        loop {
            match self.mshv.create_vm_with_type(mshv_vm_type) {
                Ok(res) => fd = res,
                Err(e) => {
                    if e.errno() == libc::EINTR {
                        // If the error returned is EINTR, the ioctl was
                        // interrupted and we have to retry, as this cannot
                        // be considered a regular error.
                        continue;
                    } else {
                        return Err(hypervisor::HypervisorError::VmCreate(e.into()));
                    }
                }
            }
            break;
        }

        // Set additional partition property for SEV-SNP partition.
        if mshv_vm_type == VmType::Snp {
            let snp_policy = snp::get_default_snp_guest_policy();
            let vmgexit_offloads = snp::get_default_vmgexit_offload_features();
            // SAFETY: access union fields
            unsafe {
                debug!(
                    "Setting the partition isolation policy as: 0x{:x}",
                    snp_policy.as_uint64
                );
                fd.set_partition_property(
                    hv_partition_property_code_HV_PARTITION_PROPERTY_ISOLATION_POLICY,
                    snp_policy.as_uint64,
                )
                .map_err(|e| hypervisor::HypervisorError::SetPartitionProperty(e.into()))?;
                debug!(
                    "Setting the partition property to enable VMGEXIT offloads as: 0x{:x}",
                    vmgexit_offloads.as_uint64
                );
                fd.set_partition_property(
                    hv_partition_property_code_HV_PARTITION_PROPERTY_SEV_VMGEXIT_OFFLOADS,
                    vmgexit_offloads.as_uint64,
                )
                .map_err(|e| hypervisor::HypervisorError::SetPartitionProperty(e.into()))?;
            }
        }

        // The default Microsoft Hypervisor behavior for an unimplemented MSR
        // is to send a fault to the guest when it tries to access it. It is
        // possible to override this behavior with a more suitable option,
        // i.e., ignore writes from the guest and return zero when the guest
        // attempts to read an unimplemented MSR.
        fd.set_partition_property(
            hv_partition_property_code_HV_PARTITION_PROPERTY_UNIMPLEMENTED_MSR_ACTION,
            hv_unimplemented_msr_action_HV_UNIMPLEMENTED_MSR_ACTION_IGNORE_WRITE_READ_ZERO as u64,
        )
        .map_err(|e| hypervisor::HypervisorError::SetPartitionProperty(e.into()))?;

        let msr_list = self.get_msr_list()?;
        let num_msrs = msr_list.as_fam_struct_ref().nmsrs as usize;
        let mut msrs: Vec<MsrEntry> = vec![
            MsrEntry {
                ..Default::default()
            };
            num_msrs
        ];
        let indices = msr_list.as_slice();
        for (pos, index) in indices.iter().enumerate() {
            msrs[pos].index = *index;
        }
        let vm_fd = Arc::new(fd);

        Ok(Arc::new(MshvVm {
            fd: vm_fd,
            msrs,
            dirty_log_slots: Arc::new(RwLock::new(HashMap::new())),
            #[cfg(feature = "sev_snp")]
            sev_snp_enabled: mshv_vm_type == VmType::Snp,
        }))
    }

    /// Create an MSHV VM object and return it as a Vm trait object
    ///
    /// # Examples
    ///
    /// ```
    /// # extern crate hypervisor;
    /// # use hypervisor::mshv::MshvHypervisor;
    /// use hypervisor::mshv::MshvVm;
    /// let hypervisor = MshvHypervisor::new().unwrap();
    /// let vm = hypervisor.create_vm().unwrap();
    /// ```
    fn create_vm(&self) -> hypervisor::Result<Arc<dyn vm::Vm>> {
        let vm_type = 0;
        self.create_vm_with_type(vm_type)
    }
    ///
    /// Get the supported CpuID
    ///
    fn get_supported_cpuid(&self) -> hypervisor::Result<Vec<CpuIdEntry>> {
        Ok(Vec::new())
    }

    /// Get maximum number of vCPUs
    fn get_max_vcpus(&self) -> u32 {
        // TODO: Using HV_MAXIMUM_PROCESSORS would be better
        // but the ioctl API is limited to u8
        256
    }
}

/// Vcpu struct for Microsoft Hypervisor
pub struct MshvVcpu {
    fd: VcpuFd,
    vp_index: u8,
    cpuid: Vec<CpuIdEntry>,
    msrs: Vec<MsrEntry>,
    vm_ops: Option<Arc<dyn vm::VmOps>>,
    #[cfg(feature = "sev_snp")]
    vm_fd: Arc<VmFd>,
}

/// Implementation of Vcpu trait for Microsoft Hypervisor
///
/// # Examples
///
/// ```
/// # use hypervisor::mshv::MshvHypervisor;
/// # use std::sync::Arc;
/// let mshv = MshvHypervisor::new().unwrap();
/// let hypervisor = Arc::new(mshv);
/// let vm = hypervisor.create_vm().expect("new VM fd creation failed");
/// let vcpu = vm.create_vcpu(0, None).unwrap();
/// ```
impl cpu::Vcpu for MshvVcpu {
    #[cfg(target_arch = "x86_64")]
    ///
    /// Returns the vCPU general purpose registers.
    ///
    fn get_regs(&self) -> cpu::Result<crate::arch::x86::StandardRegisters> {
        Ok(self
            .fd
            .get_regs()
            .map_err(|e| cpu::HypervisorCpuError::GetStandardRegs(e.into()))?
            .into())
    }

    #[cfg(target_arch = "x86_64")]
    ///
    /// Sets the vCPU general purpose registers.
    ///
    fn set_regs(&self, regs: &crate::arch::x86::StandardRegisters) -> cpu::Result<()> {
        let regs = (*regs).into();
        self.fd
            .set_regs(&regs)
            .map_err(|e| cpu::HypervisorCpuError::SetStandardRegs(e.into()))
    }

    #[cfg(target_arch = "x86_64")]
    ///
    /// Returns the vCPU special registers.
    ///
    fn get_sregs(&self) -> cpu::Result<crate::arch::x86::SpecialRegisters> {
        Ok(self
            .fd
            .get_sregs()
            .map_err(|e| cpu::HypervisorCpuError::GetSpecialRegs(e.into()))?
            .into())
    }

    #[cfg(target_arch = "x86_64")]
    ///
    /// Sets the vCPU special registers.
    ///
    fn set_sregs(&self, sregs: &crate::arch::x86::SpecialRegisters) -> cpu::Result<()> {
        let sregs = (*sregs).into();
        self.fd
            .set_sregs(&sregs)
            .map_err(|e| cpu::HypervisorCpuError::SetSpecialRegs(e.into()))
    }

    #[cfg(target_arch = "x86_64")]
    ///
    /// Returns the floating point state (FPU) from the vCPU.
    ///
    fn get_fpu(&self) -> cpu::Result<FpuState> {
        Ok(self
            .fd
            .get_fpu()
            .map_err(|e| cpu::HypervisorCpuError::GetFloatingPointRegs(e.into()))?
            .into())
    }

    #[cfg(target_arch = "x86_64")]
    ///
    /// Set the floating point state (FPU) of a vCPU.
    ///
    fn set_fpu(&self, fpu: &FpuState) -> cpu::Result<()> {
        let fpu: mshv_bindings::FloatingPointUnit = (*fpu).clone().into();
        self.fd
            .set_fpu(&fpu)
            .map_err(|e| cpu::HypervisorCpuError::SetFloatingPointRegs(e.into()))
    }

    #[cfg(target_arch = "x86_64")]
    ///
    /// Returns the model-specific registers (MSR) for this vCPU.
    ///
    fn get_msrs(&self, msrs: &mut Vec<MsrEntry>) -> cpu::Result<usize> {
        let mshv_msrs: Vec<msr_entry> = msrs.iter().map(|e| (*e).into()).collect();
        let mut mshv_msrs = MsrEntries::from_entries(&mshv_msrs).unwrap();
        let succ = self
            .fd
            .get_msrs(&mut mshv_msrs)
            .map_err(|e| cpu::HypervisorCpuError::GetMsrEntries(e.into()))?;

        msrs[..succ].copy_from_slice(
            &mshv_msrs.as_slice()[..succ]
                .iter()
                .map(|e| (*e).into())
                .collect::<Vec<MsrEntry>>(),
        );

        Ok(succ)
    }

    #[cfg(target_arch = "x86_64")]
    ///
    /// Set up the model-specific registers (MSR) for this vCPU.
    /// Returns the number of MSR entries actually written.
    ///
    fn set_msrs(&self, msrs: &[MsrEntry]) -> cpu::Result<usize> {
        let mshv_msrs: Vec<msr_entry> = msrs.iter().map(|e| (*e).into()).collect();
        let mshv_msrs = MsrEntries::from_entries(&mshv_msrs).unwrap();
        self.fd
            .set_msrs(&mshv_msrs)
            .map_err(|e| cpu::HypervisorCpuError::SetMsrEntries(e.into()))
    }

    #[cfg(target_arch = "x86_64")]
    ///
    /// X86 specific call to enable HyperV SynIC
    ///
    fn enable_hyperv_synic(&self) -> cpu::Result<()> {
        /* We always have SynIC enabled on MSHV */
        Ok(())
    }

    #[allow(non_upper_case_globals)]
    fn run(&self) -> std::result::Result<cpu::VmExit, cpu::HypervisorCpuError> {
        let hv_message: hv_message = hv_message::default();
        match self.fd.run(hv_message) {
            Ok(x) => match x.header.message_type {
                hv_message_type_HVMSG_X64_HALT => {
                    debug!("HALT");
                    Ok(cpu::VmExit::Reset)
                }
                hv_message_type_HVMSG_UNRECOVERABLE_EXCEPTION => {
                    warn!("TRIPLE FAULT");
                    Ok(cpu::VmExit::Shutdown)
                }
                hv_message_type_HVMSG_X64_IO_PORT_INTERCEPT => {
                    let info = x.to_ioport_info().unwrap();
                    let access_info = info.access_info;
                    // SAFETY: access_info is valid, otherwise we won't be here
                    let len = unsafe { access_info.__bindgen_anon_1.access_size() } as usize;
                    let is_write = info.header.intercept_access_type == 1;
                    let port = info.port_number;
                    let mut data: [u8; 4] = [0; 4];
                    let mut ret_rax = info.rax;

                    /*
                     * XXX: Ignore QEMU fw_cfg (0x5xx) and debug console (0x402) ports.
                     *
                     * Cloud Hypervisor doesn't support fw_cfg at the moment. It does support 0x402
                     * under the "fwdebug" feature flag. But that feature is not enabled by default
                     * and is considered legacy.
                     *
                     * OVMF unconditionally pokes these IO ports with string IO.
                     *
                     * Instead of trying to implement string IO support, which would not accomplish
                     * much right now, skip those ports explicitly to avoid panicking.
                     *
                     * Proper string IO support can be added once we gain the ability to translate
                     * guest virtual addresses to guest physical addresses on MSHV.
                     */
                    match port {
                        0x402 | 0x510 | 0x511 | 0x514 => {
                            let insn_len = info.header.instruction_length() as u64;

                            /* Advance RIP and update RAX */
                            let arr_reg_name_value = [
                                (
                                    hv_register_name_HV_X64_REGISTER_RIP,
                                    info.header.rip + insn_len,
                                ),
                                (hv_register_name_HV_X64_REGISTER_RAX, ret_rax),
                            ];
                            set_registers_64!(self.fd, arr_reg_name_value)
                                .map_err(|e| cpu::HypervisorCpuError::SetRegister(e.into()))?;
                            return Ok(cpu::VmExit::Ignore);
                        }
                        _ => {}
                    }

                    assert!(
                        // SAFETY: access_info is valid, otherwise we won't be here
                        (unsafe { access_info.__bindgen_anon_1.string_op() } != 1),
                        "String IN/OUT not supported"
                    );
                    assert!(
                        // SAFETY: access_info is valid, otherwise we won't be here
                        (unsafe { access_info.__bindgen_anon_1.rep_prefix() } != 1),
                        "Rep IN/OUT not supported"
                    );

                    if is_write {
                        let data = (info.rax as u32).to_le_bytes();
                        if let Some(vm_ops) = &self.vm_ops {
                            vm_ops
                                .pio_write(port.into(), &data[0..len])
                                .map_err(|e| cpu::HypervisorCpuError::RunVcpu(e.into()))?;
                        }
                    } else {
                        if let Some(vm_ops) = &self.vm_ops {
                            vm_ops
                                .pio_read(port.into(), &mut data[0..len])
                                .map_err(|e| cpu::HypervisorCpuError::RunVcpu(e.into()))?;
                        }

                        let v = u32::from_le_bytes(data);
                        /* Preserve high bits in EAX but clear out high bits in RAX */
                        let mask = 0xffffffff >> (32 - len * 8);
                        let eax = (info.rax as u32 & !mask) | (v & mask);
                        ret_rax = eax as u64;
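                        // Worked example of the masking above, for len == 1:
                        //     mask = 0xffffffff >> (32 - 8)               == 0xff
                        //     info.rax as u32 = 0x1234_5678, v = 0xab
                        //     eax = (0x1234_5678 & !0xff) | (0xab & 0xff) == 0x1234_56ab
                        //     ret_rax                                     == 0x0000_0000_1234_56ab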
                    }

                    let insn_len = info.header.instruction_length() as u64;

                    /* Advance RIP and update RAX */
                    let arr_reg_name_value = [
                        (
                            hv_register_name_HV_X64_REGISTER_RIP,
                            info.header.rip + insn_len,
                        ),
                        (hv_register_name_HV_X64_REGISTER_RAX, ret_rax),
                    ];
                    set_registers_64!(self.fd, arr_reg_name_value)
                        .map_err(|e| cpu::HypervisorCpuError::SetRegister(e.into()))?;
                    Ok(cpu::VmExit::Ignore)
                }
                hv_message_type_HVMSG_UNMAPPED_GPA => {
                    let info = x.to_memory_info().unwrap();
                    let insn_len = info.instruction_byte_count as usize;
                    assert!(insn_len > 0 && insn_len <= 16);

                    let mut context = MshvEmulatorContext {
                        vcpu: self,
                        map: (info.guest_virtual_address, info.guest_physical_address),
                    };

                    // Create a new emulator.
                    let mut emul = Emulator::new(&mut context);

                    // Emulate the trapped instruction, and only the first one.
                    let new_state = emul
                        .emulate_first_insn(self.vp_index as usize, &info.instruction_bytes)
                        .map_err(|e| cpu::HypervisorCpuError::RunVcpu(e.into()))?;

                    // Set CPU state back.
                    context
                        .set_cpu_state(self.vp_index as usize, new_state)
                        .map_err(|e| cpu::HypervisorCpuError::RunVcpu(e.into()))?;

                    Ok(cpu::VmExit::Ignore)
                }
                hv_message_type_HVMSG_UNACCEPTED_GPA => {
                    let info = x.to_memory_info().unwrap();
                    let gva = info.guest_virtual_address;
                    let gpa = info.guest_physical_address;

                    Err(cpu::HypervisorCpuError::RunVcpu(anyhow!(
                        "Unhandled VCPU exit: Unaccepted GPA({:x}) found at GVA({:x})",
                        gpa,
                        gva,
                    )))
                }
                hv_message_type_HVMSG_X64_CPUID_INTERCEPT => {
                    let info = x.to_cpuid_info().unwrap();
                    debug!("cpuid eax: {:x}", { info.rax });
                    Ok(cpu::VmExit::Ignore)
                }
                hv_message_type_HVMSG_X64_MSR_INTERCEPT => {
                    let info = x.to_msr_info().unwrap();
                    if info.header.intercept_access_type == 0 {
                        debug!("msr read: {:x}", { info.msr_number });
                    } else {
                        debug!("msr write: {:x}", { info.msr_number });
                    }
                    Ok(cpu::VmExit::Ignore)
                }
                hv_message_type_HVMSG_X64_EXCEPTION_INTERCEPT => {
                    //TODO: Handler for VMCALL here.
                    let info = x.to_exception_info().unwrap();
                    debug!("Exception Info {:?}", { info.exception_vector });
                    Ok(cpu::VmExit::Ignore)
                }
                hv_message_type_HVMSG_X64_APIC_EOI => {
                    let info = x.to_apic_eoi_info().unwrap();
                    // The kernel should dispatch the EOI to the correct thread.
                    // Check the VP index is the same as the one we have.
                    assert!(info.vp_index == self.vp_index as u32);
                    // The interrupt vector in info is u32, but x86 only supports 256 vectors.
                    // There is no good way to recover from this if the hypervisor messes around.
                    // Just unwrap.
                    Ok(cpu::VmExit::IoapicEoi(
                        info.interrupt_vector.try_into().unwrap(),
                    ))
                }
                #[cfg(feature = "sev_snp")]
                hv_message_type_HVMSG_X64_SEV_VMGEXIT_INTERCEPT => {
                    let info = x.to_vmg_intercept_info().unwrap();
                    let ghcb_data = info.ghcb_msr >> GHCB_INFO_BIT_WIDTH;
                    let ghcb_msr = svm_ghcb_msr {
                        as_uint64: info.ghcb_msr,
                    };
                    // SAFETY: Accessing a union element from bindgen generated bindings.
                    let ghcb_op = unsafe { ghcb_msr.__bindgen_anon_2.ghcb_info() as u32 };
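                    // The GHCB MSR protocol packs an operation code into the low
                    // GHCB_INFO_BIT_WIDTH bits and its operand into the remaining
                    // high bits. With the 12-bit info field the GHCB spec defines,
                    // for example:
                    //     info.ghcb_msr = 0x0000_0000_0008_1012
                    //     ghcb_op       = 0x012 (bits 11:0)
                    //     ghcb_data     = 0x81  (bits 63:12)
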
                    // Sanity check on the header fields before handling other operations.
                    assert!(info.header.intercept_access_type == HV_INTERCEPT_ACCESS_EXECUTE as u8);

                    match ghcb_op {
                        GHCB_INFO_HYP_FEATURE_REQUEST => {
                            // Pre-condition: GHCB data must be zero
                            assert!(ghcb_data == 0);
                            let mut ghcb_response = GHCB_INFO_HYP_FEATURE_RESPONSE as u64;
                            // Indicate support for basic SEV-SNP features
                            ghcb_response |=
                                (GHCB_HYP_FEATURE_SEV_SNP << GHCB_INFO_BIT_WIDTH) as u64;
                            // Indicate support for SEV-SNP AP creation
                            ghcb_response |= (GHCB_HYP_FEATURE_SEV_SNP_AP_CREATION
                                << GHCB_INFO_BIT_WIDTH)
                                as u64;
                            debug!(
                                "GHCB_INFO_HYP_FEATURE_REQUEST: Supported features: {:0x}",
                                ghcb_response
                            );
                            let arr_reg_name_value =
                                [(hv_register_name_HV_X64_REGISTER_GHCB, ghcb_response)];
                            set_registers_64!(self.fd, arr_reg_name_value)
                                .map_err(|e| cpu::HypervisorCpuError::SetRegister(e.into()))?;
                        }
                        GHCB_INFO_REGISTER_REQUEST => {
                            let mut ghcb_gpa = hv_x64_register_sev_ghcb::default();
                            // SAFETY: Accessing a union element from bindgen generated bindings.
                            unsafe {
                                ghcb_gpa.__bindgen_anon_1.set_enabled(1);
                                ghcb_gpa
                                    .__bindgen_anon_1
                                    .set_page_number(ghcb_msr.__bindgen_anon_2.gpa_page_number());
                            }
                            // SAFETY: Accessing a union element from bindgen generated bindings.
                            let reg_name_value = unsafe {
                                [(
                                    hv_register_name_HV_X64_REGISTER_SEV_GHCB_GPA,
                                    ghcb_gpa.as_uint64,
                                )]
                            };

                            set_registers_64!(self.fd, reg_name_value)
                                .map_err(|e| cpu::HypervisorCpuError::SetRegister(e.into()))?;

                            let mut resp_ghcb_msr = svm_ghcb_msr::default();
                            // SAFETY: Accessing a union element from bindgen generated bindings.
                            unsafe {
                                resp_ghcb_msr
                                    .__bindgen_anon_2
                                    .set_ghcb_info(GHCB_INFO_REGISTER_RESPONSE as u64);
                                resp_ghcb_msr.__bindgen_anon_2.set_gpa_page_number(
                                    ghcb_msr.__bindgen_anon_2.gpa_page_number(),
                                );
                            }
                            // SAFETY: Accessing a union element from bindgen generated bindings.
                            let reg_name_value = unsafe {
                                [(
                                    hv_register_name_HV_X64_REGISTER_GHCB,
                                    resp_ghcb_msr.as_uint64,
                                )]
                            };

                            set_registers_64!(self.fd, reg_name_value)
                                .map_err(|e| cpu::HypervisorCpuError::SetRegister(e.into()))?;
                        }
                        GHCB_INFO_SEV_INFO_REQUEST => {
                            let sev_cpuid_function = 0x8000_001F;
                            let cpu_leaf = self
                                .fd
                                .get_cpuid_values(sev_cpuid_function, 0, 0, 0)
                                .unwrap();
                            let ebx = cpu_leaf[1];
                            // The low 6 bits of EBX give the page table encryption bit number
                            let pbit_encryption = (ebx & 0x3f) as u8;
                            let mut ghcb_response = GHCB_INFO_SEV_INFO_RESPONSE as u64;

                            // GHCBData[63:48] specifies the maximum GHCB protocol version supported
                            ghcb_response |= (GHCB_PROTOCOL_VERSION_MAX as u64) << 48;
                            // GHCBData[47:32] specifies the minimum GHCB protocol version supported
                            ghcb_response |= (GHCB_PROTOCOL_VERSION_MIN as u64) << 32;
                            // GHCBData[31:24] specifies the SEV page table encryption bit number.
                            ghcb_response |= (pbit_encryption as u64) << 24;
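                            // Illustration of the assembled response, assuming
                            // hypothetical values VERSION_MAX == 2, VERSION_MIN == 1,
                            // pbit_encryption == 51 (0x33) and response code 0x001:
                            //     ghcb_response == 0x0002_0001_3300_0001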

                            let arr_reg_name_value =
                                [(hv_register_name_HV_X64_REGISTER_GHCB, ghcb_response)];
                            set_registers_64!(self.fd, arr_reg_name_value)
                                .map_err(|e| cpu::HypervisorCpuError::SetRegister(e.into()))?;
                        }
                        GHCB_INFO_NORMAL => {
                            let exit_code =
                                info.__bindgen_anon_2.__bindgen_anon_1.sw_exit_code as u32;
                            // SAFETY: Accessing a union element from bindgen generated bindings.
                            let pfn = unsafe { ghcb_msr.__bindgen_anon_2.gpa_page_number() };
                            let ghcb_gpa = pfn << GHCB_INFO_BIT_WIDTH;
                            match exit_code {
                                SVM_EXITCODE_HV_DOORBELL_PAGE => {
                                    let exit_info1 =
                                        info.__bindgen_anon_2.__bindgen_anon_1.sw_exit_info1 as u32;
                                    match exit_info1 {
                                        SVM_NAE_HV_DOORBELL_PAGE_GET_PREFERRED => {
                                            // Hypervisor does not have any preference for doorbell GPA.
                                            let preferred_doorbell_gpa: u64 = 0xFFFFFFFFFFFFFFFF;
                                            let mut swei2_rw_gpa_arg =
                                                mshv_bindings::mshv_read_write_gpa {
                                                    base_gpa: ghcb_gpa + GHCB_SW_EXITINFO2_OFFSET,
                                                    byte_count: std::mem::size_of::<u64>() as u32,
                                                    ..Default::default()
                                                };
                                            swei2_rw_gpa_arg.data.copy_from_slice(
                                                &preferred_doorbell_gpa.to_le_bytes(),
                                            );
                                            self.fd.gpa_write(&mut swei2_rw_gpa_arg).map_err(
                                                |e| cpu::HypervisorCpuError::GpaWrite(e.into()),
                                            )?;
                                        }
                                        SVM_NAE_HV_DOORBELL_PAGE_SET => {
                                            let exit_info2 = info
                                                .__bindgen_anon_2
                                                .__bindgen_anon_1
                                                .sw_exit_info2;
                                            let mut ghcb_doorbell_gpa =
                                                hv_x64_register_sev_hv_doorbell::default();
                                            // SAFETY: Accessing a union element from bindgen generated bindings.
                                            unsafe {
                                                ghcb_doorbell_gpa.__bindgen_anon_1.set_enabled(1);
                                                ghcb_doorbell_gpa
                                                    .__bindgen_anon_1
                                                    .set_page_number(exit_info2 >> PAGE_SHIFT);
                                            }
                                            // SAFETY: Accessing a union element from bindgen generated bindings.
                                            let reg_names = unsafe {
                                                [(
                                                    hv_register_name_HV_X64_REGISTER_SEV_DOORBELL_GPA,
                                                    ghcb_doorbell_gpa.as_uint64,
                                                )]
                                            };
                                            set_registers_64!(self.fd, reg_names).map_err(|e| {
                                                cpu::HypervisorCpuError::SetRegister(e.into())
                                            })?;

                                            let mut swei2_rw_gpa_arg =
                                                mshv_bindings::mshv_read_write_gpa {
                                                    base_gpa: ghcb_gpa + GHCB_SW_EXITINFO2_OFFSET,
                                                    byte_count: std::mem::size_of::<u64>() as u32,
                                                    ..Default::default()
                                                };
                                            swei2_rw_gpa_arg.data[0..8]
                                                .copy_from_slice(&exit_info2.to_le_bytes());
                                            self.fd.gpa_write(&mut swei2_rw_gpa_arg).map_err(
                                                |e| cpu::HypervisorCpuError::GpaWrite(e.into()),
                                            )?;

                                            // Clear the SW_EXIT_INFO1 register to indicate no error
                                            let mut swei1_rw_gpa_arg =
                                                mshv_bindings::mshv_read_write_gpa {
                                                    base_gpa: ghcb_gpa + GHCB_SW_EXITINFO1_OFFSET,
                                                    byte_count: std::mem::size_of::<u64>() as u32,
                                                    ..Default::default()
                                                };
                                            self.fd.gpa_write(&mut swei1_rw_gpa_arg).map_err(
                                                |e| cpu::HypervisorCpuError::GpaWrite(e.into()),
                                            )?;
                                        }
                                        SVM_NAE_HV_DOORBELL_PAGE_QUERY => {
                                            let mut reg_assocs = [hv_register_assoc {
                                                name: hv_register_name_HV_X64_REGISTER_SEV_DOORBELL_GPA,
                                                ..Default::default()
                                            }];
                                            self.fd.get_reg(&mut reg_assocs).unwrap();
                                            // SAFETY: Accessing a union element from bindgen generated bindings.
                                            let doorbell_gpa = unsafe { reg_assocs[0].value.reg64 };
                                            let mut swei2_rw_gpa_arg =
                                                mshv_bindings::mshv_read_write_gpa {
                                                    base_gpa: ghcb_gpa + GHCB_SW_EXITINFO2_OFFSET,
                                                    byte_count: std::mem::size_of::<u64>() as u32,
                                                    ..Default::default()
                                                };
                                            swei2_rw_gpa_arg
                                                .data
                                                .copy_from_slice(&doorbell_gpa.to_le_bytes());
                                            self.fd.gpa_write(&mut swei2_rw_gpa_arg).map_err(
                                                |e| cpu::HypervisorCpuError::GpaWrite(e.into()),
                                            )?;
                                        }
                                        SVM_NAE_HV_DOORBELL_PAGE_CLEAR => {
                                            let mut swei2_rw_gpa_arg =
                                                mshv_bindings::mshv_read_write_gpa {
                                                    base_gpa: ghcb_gpa + GHCB_SW_EXITINFO2_OFFSET,
                                                    byte_count: std::mem::size_of::<u64>() as u32,
                                                    ..Default::default()
                                                };
                                            self.fd.gpa_write(&mut swei2_rw_gpa_arg).map_err(
                                                |e| cpu::HypervisorCpuError::GpaWrite(e.into()),
                                            )?;
                                        }
                                        _ => {
                                            panic!(
                                                "SVM_EXITCODE_HV_DOORBELL_PAGE: Unhandled exit code: {:0x}",
                                                exit_info1
                                            );
                                        }
                                    }
                                }
                                SVM_EXITCODE_SNP_EXTENDED_GUEST_REQUEST => {
                                    warn!("Fetching extended guest request is not supported");
                                    // Extended guest request is not supported by the Hypervisor
                                    // Returning the error to the guest
                                    // 0x6 means `The NAE event was not valid`
                                    // Reference: GHCB Spec, page 42
                                    let value: u64 = 0x6;
                                    let mut swei2_rw_gpa_arg = mshv_bindings::mshv_read_write_gpa {
                                        base_gpa: ghcb_gpa + GHCB_SW_EXITINFO2_OFFSET,
                                        byte_count: std::mem::size_of::<u64>() as u32,
                                        ..Default::default()
                                    };
                                    swei2_rw_gpa_arg.data.copy_from_slice(&value.to_le_bytes());
                                    self.fd
                                        .gpa_write(&mut swei2_rw_gpa_arg)
                                        .map_err(|e| cpu::HypervisorCpuError::GpaWrite(e.into()))?;
                                }
                                SVM_EXITCODE_IOIO_PROT => {
                                    let exit_info1 =
                                        info.__bindgen_anon_2.__bindgen_anon_1.sw_exit_info1 as u32;
                                    let port_info = hv_sev_vmgexit_port_info {
                                        as_uint32: exit_info1,
                                    };

                                    let port =
                                        // SAFETY: Accessing a union element from bindgen generated bindings.
                                        unsafe { port_info.__bindgen_anon_1.intercepted_port() };
                                    let mut len = 4;
                                    // SAFETY: Accessing a union element from bindgen generated bindings.
                                    unsafe {
                                        if port_info.__bindgen_anon_1.operand_size_16bit() == 1 {
                                            len = 2;
                                        } else if port_info.__bindgen_anon_1.operand_size_8bit()
                                            == 1
                                        {
                                            len = 1;
                                        }
                                    }
                                    let is_write =
                                        // SAFETY: Accessing a union element from bindgen generated bindings.
                                        unsafe { port_info.__bindgen_anon_1.access_type() == 0 };
                                    let mut rax_rw_gpa_arg: mshv_read_write_gpa =
                                        mshv_bindings::mshv_read_write_gpa {
                                            base_gpa: ghcb_gpa + GHCB_RAX_OFFSET,
                                            byte_count: std::mem::size_of::<u64>() as u32,
                                            ..Default::default()
                                        };
                                    self.fd
                                        .gpa_read(&mut rax_rw_gpa_arg)
                                        .map_err(|e| cpu::HypervisorCpuError::GpaRead(e.into()))?;

                                    if is_write {
                                        if let Some(vm_ops) = &self.vm_ops {
                                            vm_ops
                                                .pio_write(
                                                    port.into(),
                                                    &rax_rw_gpa_arg.data[0..len],
                                                )
                                                .map_err(|e| {
                                                    cpu::HypervisorCpuError::RunVcpu(e.into())
                                                })?;
                                        }
                                    } else {
                                        if let Some(vm_ops) = &self.vm_ops {
                                            vm_ops
                                                .pio_read(
                                                    port.into(),
                                                    &mut rax_rw_gpa_arg.data[0..len],
                                                )
                                                .map_err(|e| {
                                                    cpu::HypervisorCpuError::RunVcpu(e.into())
                                                })?;
                                        }

                                        self.fd.gpa_write(&mut rax_rw_gpa_arg).map_err(|e| {
                                            cpu::HypervisorCpuError::GpaWrite(e.into())
                                        })?;
                                    }

                                    // Clear the SW_EXIT_INFO1 register to indicate no error
                                    let mut swei1_rw_gpa_arg = mshv_bindings::mshv_read_write_gpa {
                                        base_gpa: ghcb_gpa + GHCB_SW_EXITINFO1_OFFSET,
                                        byte_count: std::mem::size_of::<u64>() as u32,
                                        ..Default::default()
                                    };
                                    self.fd
                                        .gpa_write(&mut swei1_rw_gpa_arg)
                                        .map_err(|e| cpu::HypervisorCpuError::GpaWrite(e.into()))?;
                                }
                                SVM_EXITCODE_MMIO_READ => {
                                    let src_gpa =
                                        info.__bindgen_anon_2.__bindgen_anon_1.sw_exit_info1;
                                    let dst_gpa = info.__bindgen_anon_2.__bindgen_anon_1.sw_scratch;
                                    let data_len =
                                        info.__bindgen_anon_2.__bindgen_anon_1.sw_exit_info2
                                            as usize;
                                    // Sanity check to make sure data len is within supported range.
                                    assert!(data_len <= 0x8);

                                    let mut data: Vec<u8> = vec![0; data_len];
                                    if let Some(vm_ops) = &self.vm_ops {
                                        vm_ops.mmio_read(src_gpa, &mut data[0..data_len]).map_err(
                                            |e| cpu::HypervisorCpuError::RunVcpu(e.into()),
                                        )?;
                                    }
                                    let mut arg: mshv_read_write_gpa =
                                        mshv_bindings::mshv_read_write_gpa {
                                            base_gpa: dst_gpa,
                                            byte_count: data_len as u32,
                                            ..Default::default()
                                        };
                                    arg.data[0..data_len].copy_from_slice(&data);

                                    self.fd
                                        .gpa_write(&mut arg)
                                        .map_err(|e| cpu::HypervisorCpuError::GpaWrite(e.into()))?;
                                }
                                SVM_EXITCODE_MMIO_WRITE => {
                                    let dst_gpa =
                                        info.__bindgen_anon_2.__bindgen_anon_1.sw_exit_info1;
                                    let src_gpa = info.__bindgen_anon_2.__bindgen_anon_1.sw_scratch;
                                    let data_len =
                                        info.__bindgen_anon_2.__bindgen_anon_1.sw_exit_info2
                                            as usize;
                                    // Sanity check to make sure data len is within supported range.
                                    assert!(data_len <= 0x8);
                                    let mut arg: mshv_read_write_gpa =
                                        mshv_bindings::mshv_read_write_gpa {
                                            base_gpa: src_gpa,
                                            byte_count: data_len as u32,
                                            ..Default::default()
                                        };

                                    self.fd
                                        .gpa_read(&mut arg)
                                        .map_err(|e| cpu::HypervisorCpuError::GpaRead(e.into()))?;

                                    if let Some(vm_ops) = &self.vm_ops {
                                        vm_ops
                                            .mmio_write(dst_gpa, &arg.data[0..data_len])
                                            .map_err(|e| {
                                                cpu::HypervisorCpuError::RunVcpu(e.into())
                                            })?;
                                    }
                                }
                                SVM_EXITCODE_SNP_GUEST_REQUEST => {
                                    let req_gpa =
                                        info.__bindgen_anon_2.__bindgen_anon_1.sw_exit_info1;
                                    let rsp_gpa =
                                        info.__bindgen_anon_2.__bindgen_anon_1.sw_exit_info2;

                                    let mshv_psp_req =
                                        mshv_issue_psp_guest_request { req_gpa, rsp_gpa };
                                    self.vm_fd
                                        .psp_issue_guest_request(&mshv_psp_req)
                                        .map_err(|e| cpu::HypervisorCpuError::RunVcpu(e.into()))?;

                                    debug!(
                                        "SNP guest request: req_gpa {:0x} rsp_gpa {:0x}",
                                        req_gpa, rsp_gpa
                                    );

                                    let mut swei2_rw_gpa_arg = mshv_bindings::mshv_read_write_gpa {
                                        base_gpa: ghcb_gpa + GHCB_SW_EXITINFO2_OFFSET,
                                        byte_count: std::mem::size_of::<u64>() as u32,
                                        ..Default::default()
                                    };
                                    self.fd
                                        .gpa_write(&mut swei2_rw_gpa_arg)
                                        .map_err(|e| cpu::HypervisorCpuError::GpaWrite(e.into()))?;
                                }
                                SVM_EXITCODE_SNP_AP_CREATION => {
                                    let vmsa_gpa =
                                        info.__bindgen_anon_2.__bindgen_anon_1.sw_exit_info2;
                                    let apic_id =
                                        info.__bindgen_anon_2.__bindgen_anon_1.sw_exit_info1 >> 32;
                                    debug!(
                                        "SNP AP CREATE REQUEST with VMSA GPA {:0x}, and APIC ID {:?}",
                                        vmsa_gpa, apic_id
                                    );

                                    let mshv_ap_create_req = mshv_sev_snp_ap_create {
                                        vp_id: apic_id,
                                        vmsa_gpa,
                                    };
                                    self.vm_fd
                                        .sev_snp_ap_create(&mshv_ap_create_req)
                                        .map_err(|e| cpu::HypervisorCpuError::RunVcpu(e.into()))?;

                                    let mut swei2_rw_gpa_arg = mshv_bindings::mshv_read_write_gpa {
                                        base_gpa: ghcb_gpa + GHCB_SW_EXITINFO2_OFFSET,
                                        byte_count: std::mem::size_of::<u64>() as u32,
                                        ..Default::default()
                                    };
                                    self.fd
                                        .gpa_write(&mut swei2_rw_gpa_arg)
                                        .map_err(|e| cpu::HypervisorCpuError::GpaWrite(e.into()))?;
                                }
                                _ => panic!(
                                    "GHCB_INFO_NORMAL: Unhandled exit code: {:0x}",
                                    exit_code
                                ),
                            }
                        }
                        _ => panic!("Unsupported VMGEXIT operation: {:0x}", ghcb_op),
                    }

                    Ok(cpu::VmExit::Ignore)
                }
                exit => Err(cpu::HypervisorCpuError::RunVcpu(anyhow!(
                    "Unhandled VCPU exit {:?}",
                    exit
                ))),
            },

            Err(e) => match e.errno() {
                libc::EAGAIN | libc::EINTR => Ok(cpu::VmExit::Ignore),
                _ => Err(cpu::HypervisorCpuError::RunVcpu(anyhow!(
                    "VCPU error {:?}",
                    e
                ))),
            },
        }
    }

    #[cfg(target_arch = "x86_64")]
    ///
    /// X86 specific call to set up the CPUID registers.
1092     ///
1093     fn set_cpuid2(&self, cpuid: &[CpuIdEntry]) -> cpu::Result<()> {
1094         let cpuid: Vec<mshv_bindings::hv_cpuid_entry> = cpuid.iter().map(|e| (*e).into()).collect();
1095         let mshv_cpuid = <CpuId>::from_entries(&cpuid)
1096             .map_err(|_| cpu::HypervisorCpuError::SetCpuid(anyhow!("failed to create CpuId")))?;
1097 
1098         self.fd
1099             .register_intercept_result_cpuid(&mshv_cpuid)
1100             .map_err(|e| cpu::HypervisorCpuError::SetCpuid(e.into()))
1101     }
1102 
1103     #[cfg(target_arch = "x86_64")]
1104     ///
1105     /// X86 specific call to retrieve the CPUID registers.
1106     ///
1107     fn get_cpuid2(&self, _num_entries: usize) -> cpu::Result<Vec<CpuIdEntry>> {
1108         Ok(self.cpuid.clone())
1109     }
1110 
1111     #[cfg(target_arch = "x86_64")]
1112     ///
1113     /// X86 specific call to retrieve a CPUID leaf
1114     ///
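    ///
    /// # Example
    ///
    /// An illustrative sketch querying leaf 0 (vendor string), with XFEM
    /// and XSS left at zero:
    ///
    /// ```ignore
    /// let [eax, ebx, ecx, edx] = vcpu.get_cpuid_values(0x0, 0, 0, 0).unwrap();
    /// // ebx/edx/ecx hold the vendor string, e.g. "GenuineIntel".
    /// ```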
1115     fn get_cpuid_values(
1116         &self,
1117         function: u32,
1118         index: u32,
1119         xfem: u64,
1120         xss: u64,
1121     ) -> cpu::Result<[u32; 4]> {
1122         self.fd
1123             .get_cpuid_values(function, index, xfem, xss)
1124             .map_err(|e| cpu::HypervisorCpuError::GetCpuidVales(e.into()))
1125     }
1126 
1127     #[cfg(target_arch = "x86_64")]
1128     ///
1129     /// Returns the state of the LAPIC (Local Advanced Programmable Interrupt Controller).
1130     ///
1131     fn get_lapic(&self) -> cpu::Result<crate::arch::x86::LapicState> {
1132         Ok(self
1133             .fd
1134             .get_lapic()
1135             .map_err(|e| cpu::HypervisorCpuError::GetlapicState(e.into()))?
1136             .into())
1137     }
1138 
1139     #[cfg(target_arch = "x86_64")]
1140     ///
1141     /// Sets the state of the LAPIC (Local Advanced Programmable Interrupt Controller).
1142     ///
1143     fn set_lapic(&self, lapic: &crate::arch::x86::LapicState) -> cpu::Result<()> {
1144         let lapic: mshv_bindings::LapicState = (*lapic).clone().into();
1145         self.fd
1146             .set_lapic(&lapic)
1147             .map_err(|e| cpu::HypervisorCpuError::SetLapicState(e.into()))
1148     }
1149 
1150     ///
1151     /// Returns the vcpu's current "multiprocessing state".
1152     ///
1153     fn get_mp_state(&self) -> cpu::Result<MpState> {
1154         Ok(MpState::Mshv)
1155     }
1156 
1157     ///
1158     /// Sets the vcpu's current "multiprocessing state".
1159     ///
1160     fn set_mp_state(&self, _mp_state: MpState) -> cpu::Result<()> {
1161         Ok(())
1162     }
1163 
1164     ///
1165     /// Set CPU state
1166     ///
1167     fn set_state(&self, state: &CpuState) -> cpu::Result<()> {
1168         let state: VcpuMshvState = state.clone().into();
1169         self.set_msrs(&state.msrs)?;
1170         self.set_vcpu_events(&state.vcpu_events)?;
1171         self.set_regs(&state.regs.into())?;
1172         self.set_sregs(&state.sregs.into())?;
1173         self.set_fpu(&state.fpu)?;
1174         self.set_xcrs(&state.xcrs)?;
1175         self.set_lapic(&state.lapic)?;
1176         self.set_xsave(&state.xsave)?;
1177         // These registers are global and need to be set only for the first VCPU,
1178         // as the Microsoft Hypervisor allows setting them for only one VCPU.
1179         if self.vp_index == 0 {
1180             self.fd
1181                 .set_misc_regs(&state.misc)
1182                 .map_err(|e| cpu::HypervisorCpuError::SetMiscRegs(e.into()))?
1183         }
1184         self.fd
1185             .set_debug_regs(&state.dbg)
1186             .map_err(|e| cpu::HypervisorCpuError::SetDebugRegs(e.into()))?;
1187         Ok(())
1188     }
1189 
1190     ///
1191     /// Get CPU State
1192     ///
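    ///
    /// # Example
    ///
    /// An illustrative snapshot/restore sketch, assuming the vCPU is paused
    /// while its state is captured:
    ///
    /// ```ignore
    /// let state = vcpu.state().unwrap();
    /// // ... serialize the state, migrate, or simply hold on to it ...
    /// vcpu.set_state(&state).unwrap();
    /// ```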
1193     fn state(&self) -> cpu::Result<CpuState> {
1194         let regs = self.get_regs()?;
1195         let sregs = self.get_sregs()?;
1196         let xcrs = self.get_xcrs()?;
1197         let fpu = self.get_fpu()?;
1198         let vcpu_events = self.get_vcpu_events()?;
1199         let mut msrs = self.msrs.clone();
1200         self.get_msrs(&mut msrs)?;
1201         let lapic = self.get_lapic()?;
1202         let xsave = self.get_xsave()?;
1203         let misc = self
1204             .fd
1205             .get_misc_regs()
1206             .map_err(|e| cpu::HypervisorCpuError::GetMiscRegs(e.into()))?;
1207         let dbg = self
1208             .fd
1209             .get_debug_regs()
1210             .map_err(|e| cpu::HypervisorCpuError::GetDebugRegs(e.into()))?;
1211 
1212         Ok(VcpuMshvState {
1213             msrs,
1214             vcpu_events,
1215             regs: regs.into(),
1216             sregs: sregs.into(),
1217             fpu,
1218             xcrs,
1219             lapic,
1220             dbg,
1221             xsave,
1222             misc,
1223         }
1224         .into())
1225     }
1226 
1227     #[cfg(target_arch = "x86_64")]
1228     ///
1229     /// Translate guest virtual address to guest physical address
1230     ///
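    ///
    /// # Example
    ///
    /// An illustrative sketch; `gva` is assumed to be mapped readable and
    /// writable, and the flag constants come from mshv_bindings:
    ///
    /// ```ignore
    /// let flags = (HV_TRANSLATE_GVA_VALIDATE_READ | HV_TRANSLATE_GVA_VALIDATE_WRITE) as u64;
    /// let (gpa, result_code) = vcpu.translate_gva(gva, flags).unwrap();
    /// // On success, result_code equals
    /// // hv_translate_gva_result_code_HV_TRANSLATE_GVA_SUCCESS.
    /// ```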
1231     fn translate_gva(&self, gva: u64, flags: u64) -> cpu::Result<(u64, u32)> {
1232         let r = self
1233             .fd
1234             .translate_gva(gva, flags)
1235             .map_err(|e| cpu::HypervisorCpuError::TranslateVirtualAddress(e.into()))?;
1236 
1237         let gpa = r.0;
1238         // SAFETY: r is valid, otherwise this function would have returned early
1239         let result_code = unsafe { r.1.__bindgen_anon_1.result_code };
1240 
1241         Ok((gpa, result_code))
1242     }
1243 
1244     #[cfg(target_arch = "x86_64")]
1245     ///
1246     /// Return the list of initial MSR entries for a VCPU
1247     ///
1248     fn boot_msr_entries(&self) -> Vec<MsrEntry> {
1249         use crate::arch::x86::{msr_index, MTRR_ENABLE, MTRR_MEM_TYPE_WB};
1250 
1251         [
1252             msr!(msr_index::MSR_IA32_SYSENTER_CS),
1253             msr!(msr_index::MSR_IA32_SYSENTER_ESP),
1254             msr!(msr_index::MSR_IA32_SYSENTER_EIP),
1255             msr!(msr_index::MSR_STAR),
1256             msr!(msr_index::MSR_CSTAR),
1257             msr!(msr_index::MSR_LSTAR),
1258             msr!(msr_index::MSR_KERNEL_GS_BASE),
1259             msr!(msr_index::MSR_SYSCALL_MASK),
1260             msr_data!(msr_index::MSR_MTRRdefType, MTRR_ENABLE | MTRR_MEM_TYPE_WB),
1261         ]
1262         .to_vec()
1263     }
1264 
1265     ///
1266     /// Sets the AMD-specific SEV control register for the vcpu.
1267     ///
1268     #[cfg(feature = "sev_snp")]
1269     fn set_sev_control_register(&self, vmsa_pfn: u64) -> cpu::Result<()> {
1270         let sev_control_reg = snp::get_sev_control_register(vmsa_pfn);
1271 
1272         self.fd
1273             .set_sev_control_register(sev_control_reg)
1274             .map_err(|e| cpu::HypervisorCpuError::SetSevControlRegister(e.into()))
1275     }
1276 }
1277 
1278 impl MshvVcpu {
1279     #[cfg(target_arch = "x86_64")]
1280     ///
1281     /// X86 specific call that returns the vcpu's current "xsave struct".
1282     ///
1283     fn get_xsave(&self) -> cpu::Result<Xsave> {
1284         self.fd
1285             .get_xsave()
1286             .map_err(|e| cpu::HypervisorCpuError::GetXsaveState(e.into()))
1287     }
1288 
1289     #[cfg(target_arch = "x86_64")]
1290     ///
1291     /// X86 specific call that sets the vcpu's current "xsave struct".
1292     ///
1293     fn set_xsave(&self, xsave: &Xsave) -> cpu::Result<()> {
1294         self.fd
1295             .set_xsave(xsave)
1296             .map_err(|e| cpu::HypervisorCpuError::SetXsaveState(e.into()))
1297     }
1298 
1299     #[cfg(target_arch = "x86_64")]
1300     ///
1301     /// X86 specific call that returns the vcpu's current "xcrs".
1302     ///
1303     fn get_xcrs(&self) -> cpu::Result<ExtendedControlRegisters> {
1304         self.fd
1305             .get_xcrs()
1306             .map_err(|e| cpu::HypervisorCpuError::GetXcsr(e.into()))
1307     }
1308 
1309     #[cfg(target_arch = "x86_64")]
1310     ///
1311     /// X86 specific call that sets the vcpu's current "xcrs".
1312     ///
1313     fn set_xcrs(&self, xcrs: &ExtendedControlRegisters) -> cpu::Result<()> {
1314         self.fd
1315             .set_xcrs(xcrs)
1316             .map_err(|e| cpu::HypervisorCpuError::SetXcsr(e.into()))
1317     }
1318 
1319     #[cfg(target_arch = "x86_64")]
1320     ///
1321     /// Returns currently pending exceptions, interrupts, and NMIs as well as related
1322     /// states of the vcpu.
1323     ///
1324     fn get_vcpu_events(&self) -> cpu::Result<VcpuEvents> {
1325         self.fd
1326             .get_vcpu_events()
1327             .map_err(|e| cpu::HypervisorCpuError::GetVcpuEvents(e.into()))
1328     }
1329 
1330     #[cfg(target_arch = "x86_64")]
1331     ///
1332     /// Sets pending exceptions, interrupts, and NMIs as well as related states
1333     /// of the vcpu.
1334     ///
1335     fn set_vcpu_events(&self, events: &VcpuEvents) -> cpu::Result<()> {
1336         self.fd
1337             .set_vcpu_events(events)
1338             .map_err(|e| cpu::HypervisorCpuError::SetVcpuEvents(e.into()))
1339     }
1340 }
1341 
1342 struct MshvEmulatorContext<'a> {
1343     vcpu: &'a MshvVcpu,
1344     map: (u64, u64), // Initial GVA to GPA mapping provided by the hypervisor
1345 }
1346 
1347 impl<'a> MshvEmulatorContext<'a> {
1348     // Do the actual gva -> gpa translation
1349     #[allow(non_upper_case_globals)]
1350     fn translate(&self, gva: u64) -> Result<u64, PlatformError> {
1351         if self.map.0 == gva {
1352             return Ok(self.map.1);
1353         }
1354 
1355         // TODO: More fine-grained control for the flags
1356         let flags = HV_TRANSLATE_GVA_VALIDATE_READ | HV_TRANSLATE_GVA_VALIDATE_WRITE;
1357 
1358         let (gpa, result_code) = self
1359             .vcpu
1360             .translate_gva(gva, flags.into())
1361             .map_err(|e| PlatformError::TranslateVirtualAddress(anyhow!(e)))?;
1362 
1363         match result_code {
1364             hv_translate_gva_result_code_HV_TRANSLATE_GVA_SUCCESS => Ok(gpa),
1365             _ => Err(PlatformError::TranslateVirtualAddress(anyhow!(result_code))),
1366         }
1367     }
1368 }
1369 
1370 /// Platform emulation for Hyper-V
1371 impl<'a> PlatformEmulator for MshvEmulatorContext<'a> {
1372     type CpuState = EmulatorCpuState;
1373 
1374     fn read_memory(&self, gva: u64, data: &mut [u8]) -> Result<(), PlatformError> {
1375         let gpa = self.translate(gva)?;
1376         debug!(
1377             "mshv emulator: memory read {} bytes from [{:#x} -> {:#x}]",
1378             data.len(),
1379             gva,
1380             gpa
1381         );
1382 
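        // Prefer direct guest-memory access; fall back to MMIO emulation
        // when the GPA is not backed by guest RAM.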
1383         if let Some(vm_ops) = &self.vcpu.vm_ops {
1384             if vm_ops.guest_mem_read(gpa, data).is_err() {
1385                 vm_ops
1386                     .mmio_read(gpa, data)
1387                     .map_err(|e| PlatformError::MemoryReadFailure(e.into()))?;
1388             }
1389         }
1390 
1391         Ok(())
1392     }
1393 
1394     fn write_memory(&mut self, gva: u64, data: &[u8]) -> Result<(), PlatformError> {
1395         let gpa = self.translate(gva)?;
1396         debug!(
1397             "mshv emulator: memory write {} bytes at [{:#x} -> {:#x}]",
1398             data.len(),
1399             gva,
1400             gpa
1401         );
1402 
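        // Mirror read_memory(): try guest RAM first, then fall back to the
        // MMIO path when the GPA is not backed by guest RAM.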
1403         if let Some(vm_ops) = &self.vcpu.vm_ops {
1404             if vm_ops.guest_mem_write(gpa, data).is_err() {
1405                 vm_ops
1406                     .mmio_write(gpa, data)
1407                     .map_err(|e| PlatformError::MemoryWriteFailure(e.into()))?;
1408             }
1409         }
1410 
1411         Ok(())
1412     }
1413 
1414     fn cpu_state(&self, cpu_id: usize) -> Result<Self::CpuState, PlatformError> {
1415         if cpu_id != self.vcpu.vp_index as usize {
1416             return Err(PlatformError::GetCpuStateFailure(anyhow!(
1417                 "CPU id mismatch {:?} {:?}",
1418                 cpu_id,
1419                 self.vcpu.vp_index
1420             )));
1421         }
1422 
1423         let regs = self
1424             .vcpu
1425             .get_regs()
1426             .map_err(|e| PlatformError::GetCpuStateFailure(e.into()))?;
1427         let sregs = self
1428             .vcpu
1429             .get_sregs()
1430             .map_err(|e| PlatformError::GetCpuStateFailure(e.into()))?;
1431 
1432         debug!("mshv emulator: Getting new CPU state");
1433         debug!("mshv emulator: {:#x?}", regs);
1434 
1435         Ok(EmulatorCpuState { regs, sregs })
1436     }
1437 
1438     fn set_cpu_state(&self, cpu_id: usize, state: Self::CpuState) -> Result<(), PlatformError> {
1439         if cpu_id != self.vcpu.vp_index as usize {
1440             return Err(PlatformError::SetCpuStateFailure(anyhow!(
1441                 "CPU id mismatch {:?} {:?}",
1442                 cpu_id,
1443                 self.vcpu.vp_index
1444             )));
1445         }
1446 
1447         debug!("mshv emulator: Setting new CPU state");
1448         debug!("mshv emulator: {:#x?}", state.regs);
1449 
1450         self.vcpu
1451             .set_regs(&state.regs)
1452             .map_err(|e| PlatformError::SetCpuStateFailure(e.into()))?;
1453         self.vcpu
1454             .set_sregs(&state.sregs)
1455             .map_err(|e| PlatformError::SetCpuStateFailure(e.into()))
1456     }
1457 
1458     fn gva_to_gpa(&self, gva: u64) -> Result<u64, PlatformError> {
1459         self.translate(gva)
1460     }
1461 
1462     fn fetch(&self, _ip: u64, _instruction_bytes: &mut [u8]) -> Result<(), PlatformError> {
1463         Err(PlatformError::MemoryReadFailure(anyhow!("unimplemented")))
1464     }
1465 }
1466 
1467 /// Wrapper over Mshv VM ioctls.
1468 pub struct MshvVm {
1469     fd: Arc<VmFd>,
1470     msrs: Vec<MsrEntry>,
1471     dirty_log_slots: Arc<RwLock<HashMap<u64, MshvDirtyLogSlot>>>,
1472     #[cfg(feature = "sev_snp")]
1473     sev_snp_enabled: bool,
1474 }
1475 
1476 impl MshvVm {
1477     ///
1478     /// Creates an in-kernel device.
1479     ///
1480     /// See the documentation for `MSHV_CREATE_DEVICE`.
1481     fn create_device(&self, device: &mut CreateDevice) -> vm::Result<VfioDeviceFd> {
1482         let device_fd = self
1483             .fd
1484             .create_device(device)
1485             .map_err(|e| vm::HypervisorVmError::CreateDevice(e.into()))?;
1486         Ok(VfioDeviceFd::new_from_mshv(device_fd))
1487     }
1488 }
1489 
1490 ///
1491 /// Implementation of Vm trait for Mshv
1492 ///
1493 /// # Examples
1494 ///
1495 /// ```
1496 /// # extern crate hypervisor;
1497 /// # use hypervisor::mshv::MshvHypervisor;
1498 /// # use std::sync::Arc;
1499 /// let mshv = MshvHypervisor::new().unwrap();
1500 /// let hypervisor = Arc::new(mshv);
1501 /// let vm = hypervisor.create_vm().expect("new VM fd creation failed");
1502 /// ```
1503 impl vm::Vm for MshvVm {
1504     #[cfg(target_arch = "x86_64")]
1505     ///
1506     /// Sets the address of the one-page region in the VM's address space.
1507     ///
1508     fn set_identity_map_address(&self, _address: u64) -> vm::Result<()> {
1509         Ok(())
1510     }
1511 
1512     #[cfg(target_arch = "x86_64")]
1513     ///
1514     /// Sets the address of the three-page region in the VM's address space.
1515     ///
1516     fn set_tss_address(&self, _offset: usize) -> vm::Result<()> {
1517         Ok(())
1518     }
1519 
1520     ///
1521     /// Creates an in-kernel interrupt controller.
1522     ///
1523     fn create_irq_chip(&self) -> vm::Result<()> {
1524         Ok(())
1525     }
1526 
1527     ///
1528     /// Registers an event that will, when signaled, trigger the `gsi` IRQ.
1529     ///
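    ///
    /// # Example
    ///
    /// An illustrative sketch wiring an `EventFd` to GSI 5:
    ///
    /// ```ignore
    /// let evt = EventFd::new(libc::EFD_NONBLOCK).unwrap();
    /// vm.register_irqfd(&evt, 5).unwrap();
    /// // Writing to `evt` now injects the IRQ into the guest.
    /// evt.write(1).unwrap();
    /// ```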
1530     fn register_irqfd(&self, fd: &EventFd, gsi: u32) -> vm::Result<()> {
1531         debug!("register_irqfd fd {} gsi {}", fd.as_raw_fd(), gsi);
1532 
1533         self.fd
1534             .register_irqfd(fd, gsi)
1535             .map_err(|e| vm::HypervisorVmError::RegisterIrqFd(e.into()))?;
1536 
1537         Ok(())
1538     }
1539 
1540     ///
1541     /// Unregisters an event that will, when signaled, trigger the `gsi` IRQ.
1542     ///
1543     fn unregister_irqfd(&self, fd: &EventFd, gsi: u32) -> vm::Result<()> {
1544         debug!("unregister_irqfd fd {} gsi {}", fd.as_raw_fd(), gsi);
1545 
1546         self.fd
1547             .unregister_irqfd(fd, gsi)
1548             .map_err(|e| vm::HypervisorVmError::UnregisterIrqFd(e.into()))?;
1549 
1550         Ok(())
1551     }
1552 
1553     ///
1554     /// Creates a vCPU for the given id, wrapping a newly created VcpuFd.
1555     ///
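    ///
    /// # Example
    ///
    /// An illustrative sketch continuing from the `create_vm` example above;
    /// passing `None` skips the `VmOps` hookup used for MMIO/PIO handling:
    ///
    /// ```ignore
    /// let vcpu = vm.create_vcpu(0, None).expect("new vCPU fd creation failed");
    /// ```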
1556     fn create_vcpu(
1557         &self,
1558         id: u8,
1559         vm_ops: Option<Arc<dyn VmOps>>,
1560     ) -> vm::Result<Arc<dyn cpu::Vcpu>> {
1561         let vcpu_fd = self
1562             .fd
1563             .create_vcpu(id)
1564             .map_err(|e| vm::HypervisorVmError::CreateVcpu(e.into()))?;
1565         let vcpu = MshvVcpu {
1566             fd: vcpu_fd,
1567             vp_index: id,
1568             cpuid: Vec::new(),
1569             msrs: self.msrs.clone(),
1570             vm_ops,
1571             #[cfg(feature = "sev_snp")]
1572             vm_fd: self.fd.clone(),
1573         };
1574         Ok(Arc::new(vcpu))
1575     }
1576 
1577     #[cfg(target_arch = "x86_64")]
1578     fn enable_split_irq(&self) -> vm::Result<()> {
1579         Ok(())
1580     }
1581 
1582     #[cfg(target_arch = "x86_64")]
1583     fn enable_sgx_attribute(&self, _file: File) -> vm::Result<()> {
1584         Ok(())
1585     }
1586 
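    /// Registers an event to be signaled whenever the guest writes to the
    /// given PIO/MMIO address, optionally filtered by a datamatch value.
    /// This is a no-op when SEV-SNP is enabled.
    ///
    /// # Example
    ///
    /// An illustrative sketch with a 32-bit datamatch on an MMIO address
    /// (the address is made up):
    ///
    /// ```ignore
    /// let evt = EventFd::new(libc::EFD_NONBLOCK).unwrap();
    /// let addr = IoEventAddress::Mmio(0xd000_0000);
    /// vm.register_ioevent(&evt, &addr, Some(DataMatch::DataMatch32(0x1234)))
    ///     .unwrap();
    /// ```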
1587     fn register_ioevent(
1588         &self,
1589         fd: &EventFd,
1590         addr: &IoEventAddress,
1591         datamatch: Option<DataMatch>,
1592     ) -> vm::Result<()> {
1593         #[cfg(feature = "sev_snp")]
1594         if self.sev_snp_enabled {
1595             return Ok(());
1596         }
1597 
1598         let addr = &mshv_ioctls::IoEventAddress::from(*addr);
1599         debug!(
1600             "register_ioevent fd {} addr {:x?} datamatch {:?}",
1601             fd.as_raw_fd(),
1602             addr,
1603             datamatch
1604         );
1605         if let Some(dm) = datamatch {
1606             match dm {
1607                 vm::DataMatch::DataMatch32(mshv_dm32) => self
1608                     .fd
1609                     .register_ioevent(fd, addr, mshv_dm32)
1610                     .map_err(|e| vm::HypervisorVmError::RegisterIoEvent(e.into())),
1611                 vm::DataMatch::DataMatch64(mshv_dm64) => self
1612                     .fd
1613                     .register_ioevent(fd, addr, mshv_dm64)
1614                     .map_err(|e| vm::HypervisorVmError::RegisterIoEvent(e.into())),
1615             }
1616         } else {
1617             self.fd
1618                 .register_ioevent(fd, addr, NoDatamatch)
1619                 .map_err(|e| vm::HypervisorVmError::RegisterIoEvent(e.into()))
1620         }
1621     }
1622 
1623     /// Unregister an event from a certain address it has been previously registered to.
1624     fn unregister_ioevent(&self, fd: &EventFd, addr: &IoEventAddress) -> vm::Result<()> {
1625         let addr = &mshv_ioctls::IoEventAddress::from(*addr);
1626         debug!("unregister_ioevent fd {} addr {:x?}", fd.as_raw_fd(), addr);
1627 
1628         self.fd
1629             .unregister_ioevent(fd, addr, NoDatamatch)
1630             .map_err(|e| vm::HypervisorVmError::UnregisterIoEvent(e.into()))
1631     }
1632 
1633     /// Creates a guest physical memory region.
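    ///
    /// # Example
    ///
    /// An illustrative sketch pairing this with `make_user_memory_region`;
    /// `host_addr` is assumed to be a page-aligned mapping owned by the VMM:
    ///
    /// ```ignore
    /// let region = vm.make_user_memory_region(
    ///     0,           // slot (unused by MSHV)
    ///     0x10_0000,   // guest physical address
    ///     0x20_0000,   // size: 2 MiB
    ///     host_addr,   // userspace address backing the region
    ///     false,       // not read-only
    ///     false,       // dirty-page logging is tracked internally
    /// );
    /// vm.create_user_memory_region(region).unwrap();
    /// ```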
1634     fn create_user_memory_region(&self, user_memory_region: UserMemoryRegion) -> vm::Result<()> {
1635         let user_memory_region: mshv_user_mem_region = user_memory_region.into();
1636         // We keep track of the slots regardless of whether they are read-only.
1637         // For read-only slots the hypervisor can enable dirty-bit tracking,
1638         // but a VM exit happens before the dirty bits are set.
1639         self.dirty_log_slots.write().unwrap().insert(
1640             user_memory_region.guest_pfn,
1641             MshvDirtyLogSlot {
1642                 guest_pfn: user_memory_region.guest_pfn,
1643                 memory_size: user_memory_region.size,
1644             },
1645         );
1646 
1647         self.fd
1648             .map_user_memory(user_memory_region)
1649             .map_err(|e| vm::HypervisorVmError::CreateUserMemory(e.into()))?;
1650         Ok(())
1651     }
1652 
1653     /// Removes a guest physical memory region.
1654     fn remove_user_memory_region(&self, user_memory_region: UserMemoryRegion) -> vm::Result<()> {
1655         let user_memory_region: mshv_user_mem_region = user_memory_region.into();
1656         // Remove the corresponding entry from "self.dirty_log_slots" if needed
1657         self.dirty_log_slots
1658             .write()
1659             .unwrap()
1660             .remove(&user_memory_region.guest_pfn);
1661 
1662         self.fd
1663             .unmap_user_memory(user_memory_region)
1664             .map_err(|e| vm::HypervisorVmError::RemoveUserMemory(e.into()))?;
1665         Ok(())
1666     }
1667 
1668     fn make_user_memory_region(
1669         &self,
1670         _slot: u32,
1671         guest_phys_addr: u64,
1672         memory_size: u64,
1673         userspace_addr: u64,
1674         readonly: bool,
1675         _log_dirty_pages: bool,
1676     ) -> UserMemoryRegion {
1677         let mut flags = HV_MAP_GPA_READABLE | HV_MAP_GPA_EXECUTABLE | HV_MAP_GPA_ADJUSTABLE;
1678         if !readonly {
1679             flags |= HV_MAP_GPA_WRITABLE;
1680         }
1681 
1682         mshv_user_mem_region {
1683             flags,
1684             guest_pfn: guest_phys_addr >> PAGE_SHIFT,
1685             size: memory_size,
1686             userspace_addr,
1687         }
1688         .into()
1689     }
1690 
1691     fn create_passthrough_device(&self) -> vm::Result<VfioDeviceFd> {
1692         let mut vfio_dev = mshv_create_device {
1693             type_: mshv_device_type_MSHV_DEV_TYPE_VFIO,
1694             fd: 0,
1695             flags: 0,
1696         };
1697 
1698         self.create_device(&mut vfio_dev)
1699             .map_err(|e| vm::HypervisorVmError::CreatePassthroughDevice(e.into()))
1700     }
1701 
1702     ///
1703     /// Constructs a routing entry
1704     ///
1705     fn make_routing_entry(&self, gsi: u32, config: &InterruptSourceConfig) -> IrqRoutingEntry {
1706         match config {
1707             InterruptSourceConfig::MsiIrq(cfg) => mshv_msi_routing_entry {
1708                 gsi,
1709                 address_lo: cfg.low_addr,
1710                 address_hi: cfg.high_addr,
1711                 data: cfg.data,
1712             }
1713             .into(),
1714             _ => {
1715                 unreachable!()
1716             }
1717         }
1718     }
1719 
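    ///
    /// Replaces the VM-wide MSI routing table with `entries`.
    ///
    /// # Example
    ///
    /// An illustrative sketch, assuming `cfg` is an `MsiIrqSourceConfig`
    /// built by the VMM:
    ///
    /// ```ignore
    /// let entry = vm.make_routing_entry(0, &InterruptSourceConfig::MsiIrq(cfg));
    /// vm.set_gsi_routing(&[entry]).unwrap();
    /// ```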
1720     fn set_gsi_routing(&self, entries: &[IrqRoutingEntry]) -> vm::Result<()> {
1721         let mut msi_routing =
1722             vec_with_array_field::<mshv_msi_routing, mshv_msi_routing_entry>(entries.len());
1723         msi_routing[0].nr = entries.len() as u32;
1724 
1725         let entries: Vec<mshv_msi_routing_entry> = entries
1726             .iter()
1727             .map(|entry| match entry {
1728                 IrqRoutingEntry::Mshv(e) => *e,
1729                 #[allow(unreachable_patterns)]
1730                 _ => panic!("IrqRoutingEntry type is wrong"),
1731             })
1732             .collect();
1733 
1734         // SAFETY: msi_routing initialized with entries.len() and now it is being turned into
1735         // entries_slice with entries.len() again. It is guaranteed to be large enough to hold
1736         // everything from entries.
1737         unsafe {
1738             let entries_slice: &mut [mshv_msi_routing_entry] =
1739                 msi_routing[0].entries.as_mut_slice(entries.len());
1740             entries_slice.copy_from_slice(&entries);
1741         }
1742 
1743         self.fd
1744             .set_msi_routing(&msi_routing[0])
1745             .map_err(|e| vm::HypervisorVmError::SetGsiRouting(e.into()))
1746     }
1747 
1748     ///
1749     /// Start logging dirty pages
1750     ///
1751     fn start_dirty_log(&self) -> vm::Result<()> {
1752         self.fd
1753             .enable_dirty_page_tracking()
1754             .map_err(|e| vm::HypervisorVmError::StartDirtyLog(e.into()))
1755     }
1756 
1757     ///
1758     /// Stop logging dirty pages
1759     ///
1760     fn stop_dirty_log(&self) -> vm::Result<()> {
1761         let dirty_log_slots = self.dirty_log_slots.read().unwrap();
1762         // Before disabling dirty page tracking we need to
1763         // set the dirty bits in the hypervisor; this is a
1764         // requirement of the Microsoft Hypervisor.
1765         for (_, s) in dirty_log_slots.iter() {
1766             self.fd
1767                 .get_dirty_log(s.guest_pfn, s.memory_size as usize, DIRTY_BITMAP_SET_DIRTY)
1768                 .map_err(|e| vm::HypervisorVmError::StopDirtyLog(e.into()))?;
1769         }
1770         self.fd
1771             .disable_dirty_page_tracking()
1772             .map_err(|e| vm::HypervisorVmError::StopDirtyLog(e.into()))?;
1773         Ok(())
1774     }
1775 
1776     ///
1777     /// Get dirty pages bitmap (one bit per page)
1778     ///
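    ///
    /// # Example
    ///
    /// An illustrative sketch that walks the returned bitmap (64 pages per
    /// `u64` word) and computes the GPA of each dirty page:
    ///
    /// ```ignore
    /// let base_gpa: u64 = 0;
    /// let bitmap = vm.get_dirty_log(0, base_gpa, 0x20_0000).unwrap();
    /// for (i, word) in bitmap.iter().enumerate() {
    ///     for bit in 0..64u64 {
    ///         if word & (1u64 << bit) != 0 {
    ///             let _gpa = base_gpa + ((i as u64 * 64 + bit) << PAGE_SHIFT);
    ///             // The page at `_gpa` was written since the last query.
    ///         }
    ///     }
    /// }
    /// ```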
1779     fn get_dirty_log(&self, _slot: u32, base_gpa: u64, memory_size: u64) -> vm::Result<Vec<u64>> {
1780         self.fd
1781             .get_dirty_log(
1782                 base_gpa >> PAGE_SHIFT,
1783                 memory_size as usize,
1784                 DIRTY_BITMAP_CLEAR_DIRTY,
1785             )
1786             .map_err(|e| vm::HypervisorVmError::GetDirtyLog(e.into()))
1787     }
1788 
1789     /// Retrieve guest clock.
1790     #[cfg(target_arch = "x86_64")]
1791     fn get_clock(&self) -> vm::Result<ClockData> {
1792         Ok(ClockData::Mshv)
1793     }
1794 
1795     /// Set guest clock.
1796     #[cfg(target_arch = "x86_64")]
1797     fn set_clock(&self, _data: &ClockData) -> vm::Result<()> {
1798         Ok(())
1799     }
1800 
1801     /// Downcast to the underlying MshvVm type
1802     fn as_any(&self) -> &dyn Any {
1803         self
1804     }
1805 
1806     /// Initialize the SEV-SNP VM
1807     #[cfg(feature = "sev_snp")]
1808     fn sev_snp_init(&self) -> vm::Result<()> {
1809         self.fd
1810             .set_partition_property(
1811                 hv_partition_property_code_HV_PARTITION_PROPERTY_ISOLATION_STATE,
1812                 hv_partition_isolation_state_HV_PARTITION_ISOLATION_SECURE as u64,
1813             )
1814             .map_err(|e| vm::HypervisorVmError::InitializeSevSnp(e.into()))
1815     }
1816 
1817     ///
1818     /// Import isolated pages; these pages will be used
1819     /// for the PSP (Platform Security Processor) measurement.
1820     #[cfg(feature = "sev_snp")]
1821     fn import_isolated_pages(
1822         &self,
1823         page_type: u32,
1824         page_size: u32,
1825         pages: &[u64],
1826     ) -> vm::Result<()> {
1827         if pages.is_empty() {
1828             return Ok(());
1829         }
1830 
1831         let mut isolated_pages =
1832             vec_with_array_field::<mshv_import_isolated_pages, u64>(pages.len());
1833         isolated_pages[0].num_pages = pages.len() as u64;
1834         isolated_pages[0].page_type = page_type;
1835         isolated_pages[0].page_size = page_size;
1836         // SAFETY: isolated_pages initialized with pages.len() and now it is being turned into
1837         // pages_slice with pages.len() again. It is guaranteed to be large enough to hold
1838         // everything from pages.
1839         unsafe {
1840             let pages_slice: &mut [u64] = isolated_pages[0].page_number.as_mut_slice(pages.len());
1841             pages_slice.copy_from_slice(pages);
1842         }
1843         self.fd
1844             .import_isolated_pages(&isolated_pages[0])
1845             .map_err(|e| vm::HypervisorVmError::ImportIsolatedPages(e.into()))
1846     }
1847 
1848     ///
1849     /// Complete isolated import, telling the hypervisor that
1850     /// importing the pages to guest memory is complete.
1851     ///
1852     #[cfg(feature = "sev_snp")]
1853     fn complete_isolated_import(
1854         &self,
1855         snp_id_block: IGVM_VHS_SNP_ID_BLOCK,
1856         host_data: [u8; 32],
1857         id_block_enabled: u8,
1858     ) -> vm::Result<()> {
1859         let mut auth_info = hv_snp_id_auth_info {
1860             id_key_algorithm: snp_id_block.id_key_algorithm,
1861             auth_key_algorithm: snp_id_block.author_key_algorithm,
1862             ..Default::default()
1863         };
1864         // Each of the r and s components is 576 bits (72 bytes) long
1865         auth_info.id_block_signature[..SIG_R_COMPONENT_SIZE_IN_BYTES]
1866             .copy_from_slice(snp_id_block.id_key_signature.r_comp.as_ref());
1867         auth_info.id_block_signature
1868             [SIG_R_COMPONENT_SIZE_IN_BYTES..SIG_R_AND_S_COMPONENT_SIZE_IN_BYTES]
1869             .copy_from_slice(snp_id_block.id_key_signature.s_comp.as_ref());
1870         auth_info.id_key[..ECDSA_CURVE_ID_SIZE_IN_BYTES]
1871             .copy_from_slice(snp_id_block.id_public_key.curve.to_le_bytes().as_ref());
1872         auth_info.id_key[ECDSA_SIG_X_COMPONENT_START..ECDSA_SIG_X_COMPONENT_END]
1873             .copy_from_slice(snp_id_block.id_public_key.qx.as_ref());
1874         auth_info.id_key[ECDSA_SIG_Y_COMPONENT_START..ECDSA_SIG_Y_COMPONENT_END]
1875             .copy_from_slice(snp_id_block.id_public_key.qy.as_ref());
1876 
1877         let data = mshv_complete_isolated_import {
1878             import_data: hv_partition_complete_isolated_import_data {
1879                 psp_parameters: hv_psp_launch_finish_data {
1880                     id_block: hv_snp_id_block {
1881                         launch_digest: snp_id_block.ld,
1882                         family_id: snp_id_block.family_id,
1883                         image_id: snp_id_block.image_id,
1884                         version: snp_id_block.version,
1885                         guest_svn: snp_id_block.guest_svn,
1886                         policy: get_default_snp_guest_policy(),
1887                     },
1888                     id_auth_info: auth_info,
1889                     host_data,
1890                     id_block_enabled,
1891                     author_key_enabled: 0,
1892                 },
1893             },
1894         };
1895         self.fd
1896             .complete_isolated_import(&data)
1897             .map_err(|e| vm::HypervisorVmError::CompleteIsolatedImport(e.into()))
1898     }
1899 }
1900