xref: /cloud-hypervisor/hypervisor/src/mshv/mod.rs (revision 4d7a4c598ac247aaf770b00dfb057cdac891f67d)
// SPDX-License-Identifier: Apache-2.0 OR BSD-3-Clause
//
// Copyright © 2020, Microsoft Corporation
//

use crate::arch::emulator::{PlatformEmulator, PlatformError};

#[cfg(target_arch = "x86_64")]
use crate::arch::x86::emulator::{Emulator, EmulatorCpuState};
use crate::cpu;
use crate::cpu::Vcpu;
use crate::hypervisor;
use crate::vec_with_array_field;
use crate::vm::{self, InterruptSourceConfig, VmOps};
use crate::HypervisorType;
pub use mshv_bindings::*;
use mshv_ioctls::{set_registers_64, Mshv, NoDatamatch, VcpuFd, VmFd, VmType};
use std::any::Any;
use std::collections::HashMap;
use std::sync::{Arc, RwLock};
use vfio_ioctls::VfioDeviceFd;
use vm::DataMatch;

#[cfg(feature = "sev_snp")]
mod snp_constants;
// x86_64 dependencies
#[cfg(target_arch = "x86_64")]
pub mod x86_64;
#[cfg(feature = "sev_snp")]
use snp_constants::*;

use crate::{
    ClockData, CpuState, IoEventAddress, IrqRoutingEntry, MpState, UserMemoryRegion,
    USER_MEMORY_REGION_EXECUTE, USER_MEMORY_REGION_READ, USER_MEMORY_REGION_WRITE,
};
#[cfg(feature = "sev_snp")]
use igvm_defs::IGVM_VHS_SNP_ID_BLOCK;
use vmm_sys_util::eventfd::EventFd;
#[cfg(target_arch = "x86_64")]
pub use x86_64::VcpuMshvState;
#[cfg(target_arch = "x86_64")]
pub use x86_64::*;

#[cfg(target_arch = "x86_64")]
use std::fs::File;
use std::os::unix::io::AsRawFd;

#[cfg(target_arch = "x86_64")]
use crate::arch::x86::{CpuIdEntry, FpuState, MsrEntry};

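// Flags used when harvesting the dirty-page bitmap; judging by the constant
// names, 0x4 clears and 0x8 sets the dirty state of the tracked pages.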
const DIRTY_BITMAP_CLEAR_DIRTY: u64 = 0x4;
const DIRTY_BITMAP_SET_DIRTY: u64 = 0x8;

///
/// Export generically-named wrappers of mshv-bindings for Unix-based platforms
///
pub use {
    mshv_bindings::mshv_create_device as CreateDevice,
    mshv_bindings::mshv_device_attr as DeviceAttr, mshv_ioctls::DeviceFd,
};

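/// Shift for the 4 KiB page size assumed throughout this module
/// (1 << PAGE_SHIFT == 4096); used to convert between guest physical
/// addresses and guest page frame numbers (PFNs).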
pub const PAGE_SHIFT: usize = 12;

impl From<mshv_user_mem_region> for UserMemoryRegion {
    fn from(region: mshv_user_mem_region) -> Self {
        let mut flags: u32 = 0;
        if region.flags & HV_MAP_GPA_READABLE != 0 {
            flags |= USER_MEMORY_REGION_READ;
        }
        if region.flags & HV_MAP_GPA_WRITABLE != 0 {
            flags |= USER_MEMORY_REGION_WRITE;
        }
        if region.flags & HV_MAP_GPA_EXECUTABLE != 0 {
            flags |= USER_MEMORY_REGION_EXECUTE;
        }

        UserMemoryRegion {
            guest_phys_addr: (region.guest_pfn << PAGE_SHIFT as u64)
                + (region.userspace_addr & ((1 << PAGE_SHIFT) - 1)),
            memory_size: region.size,
            userspace_addr: region.userspace_addr,
            flags,
            ..Default::default()
        }
    }
}

impl From<UserMemoryRegion> for mshv_user_mem_region {
    fn from(region: UserMemoryRegion) -> Self {
        let mut flags: u32 = 0;
        if region.flags & USER_MEMORY_REGION_READ != 0 {
            flags |= HV_MAP_GPA_READABLE;
        }
        if region.flags & USER_MEMORY_REGION_WRITE != 0 {
            flags |= HV_MAP_GPA_WRITABLE;
        }
        if region.flags & USER_MEMORY_REGION_EXECUTE != 0 {
            flags |= HV_MAP_GPA_EXECUTABLE;
        }

        mshv_user_mem_region {
            guest_pfn: region.guest_phys_addr >> PAGE_SHIFT,
            size: region.memory_size,
            userspace_addr: region.userspace_addr,
            flags,
        }
    }
}
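// A minimal sketch of how the conversion above maps fields; the values are
// hypothetical and only illustrate the flag translation and the GPA-to-PFN
// split:
//
//     let region = UserMemoryRegion {
//         guest_phys_addr: 0x10_0000,
//         memory_size: 0x20_0000,
//         userspace_addr: 0x7f00_0000_0000,
//         flags: USER_MEMORY_REGION_READ | USER_MEMORY_REGION_WRITE,
//         ..Default::default()
//     };
//     let mshv_region = mshv_user_mem_region::from(region);
//     assert_eq!(mshv_region.guest_pfn, 0x10_0000 >> PAGE_SHIFT);
//     assert_eq!(mshv_region.flags, HV_MAP_GPA_READABLE | HV_MAP_GPA_WRITABLE);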

impl From<mshv_ioctls::IoEventAddress> for IoEventAddress {
    fn from(a: mshv_ioctls::IoEventAddress) -> Self {
        match a {
            mshv_ioctls::IoEventAddress::Pio(x) => Self::Pio(x),
            mshv_ioctls::IoEventAddress::Mmio(x) => Self::Mmio(x),
        }
    }
}

impl From<IoEventAddress> for mshv_ioctls::IoEventAddress {
    fn from(a: IoEventAddress) -> Self {
        match a {
            IoEventAddress::Pio(x) => Self::Pio(x),
            IoEventAddress::Mmio(x) => Self::Mmio(x),
        }
    }
}

impl From<VcpuMshvState> for CpuState {
    fn from(s: VcpuMshvState) -> Self {
        CpuState::Mshv(s)
    }
}

impl From<CpuState> for VcpuMshvState {
    fn from(s: CpuState) -> Self {
        match s {
            CpuState::Mshv(s) => s,
            /* Needed in case other hypervisors are enabled */
            #[allow(unreachable_patterns)]
            _ => panic!("CpuState is not valid"),
        }
    }
}

impl From<mshv_msi_routing_entry> for IrqRoutingEntry {
    fn from(s: mshv_msi_routing_entry) -> Self {
        IrqRoutingEntry::Mshv(s)
    }
}

impl From<IrqRoutingEntry> for mshv_msi_routing_entry {
    fn from(e: IrqRoutingEntry) -> Self {
        match e {
            IrqRoutingEntry::Mshv(e) => e,
            /* Needed in case other hypervisors are enabled */
            #[allow(unreachable_patterns)]
            _ => panic!("IrqRoutingEntry is not valid"),
        }
    }
}

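/// Bookkeeping for a memory slot that has dirty-page logging enabled:
/// the slot's base guest PFN and its size in bytes.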
struct MshvDirtyLogSlot {
    guest_pfn: u64,
    memory_size: u64,
}

/// Wrapper over mshv system ioctls.
pub struct MshvHypervisor {
    mshv: Mshv,
}

impl MshvHypervisor {
    #[cfg(target_arch = "x86_64")]
    ///
    /// Retrieve the list of MSRs supported by MSHV.
    ///
    fn get_msr_list(&self) -> hypervisor::Result<MsrList> {
        self.mshv
            .get_msr_index_list()
            .map_err(|e| hypervisor::HypervisorError::GetMsrList(e.into()))
    }
}

impl MshvHypervisor {
    /// Create a hypervisor based on Mshv
    #[allow(clippy::new_ret_no_self)]
    pub fn new() -> hypervisor::Result<Arc<dyn hypervisor::Hypervisor>> {
        let mshv_obj =
            Mshv::new().map_err(|e| hypervisor::HypervisorError::HypervisorCreate(e.into()))?;
        Ok(Arc::new(MshvHypervisor { mshv: mshv_obj }))
    }
    /// Check if the hypervisor is available
    pub fn is_available() -> hypervisor::Result<bool> {
        match std::fs::metadata("/dev/mshv") {
            Ok(_) => Ok(true),
            Err(err) if err.kind() == std::io::ErrorKind::NotFound => Ok(false),
            Err(err) => Err(hypervisor::HypervisorError::HypervisorAvailableCheck(
                err.into(),
            )),
        }
    }
}
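// A sketch of the intended probe-then-create flow (hypothetical caller code):
// check for /dev/mshv before constructing the hypervisor handle.
//
//     if MshvHypervisor::is_available()? {
//         let hypervisor = MshvHypervisor::new()?;
//         // create VMs through the `hypervisor::Hypervisor` trait below
//     }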
/// Implementation of Hypervisor trait for Mshv
///
/// # Examples
///
/// ```
/// # use hypervisor::mshv::MshvHypervisor;
/// # use std::sync::Arc;
/// let mshv = MshvHypervisor::new().unwrap();
/// let hypervisor = Arc::new(mshv);
/// let vm = hypervisor.create_vm().expect("new VM fd creation failed");
/// ```
impl hypervisor::Hypervisor for MshvHypervisor {
    ///
    /// Returns the type of the hypervisor
    ///
    fn hypervisor_type(&self) -> HypervisorType {
        HypervisorType::Mshv
    }

    fn create_vm_with_type(&self, vm_type: u64) -> hypervisor::Result<Arc<dyn crate::Vm>> {
        let mshv_vm_type: VmType = match VmType::try_from(vm_type) {
            Ok(vm_type) => vm_type,
            Err(_) => return Err(hypervisor::HypervisorError::UnsupportedVmType()),
        };
        let fd: VmFd;
        loop {
            match self.mshv.create_vm_with_type(mshv_vm_type) {
                Ok(res) => fd = res,
                Err(e) => {
                    if e.errno() == libc::EINTR {
                        // If the error returned is EINTR, the ioctl was
                        // interrupted; we have to retry, as this can't be
                        // considered a regular error.
                        continue;
                    } else {
                        return Err(hypervisor::HypervisorError::VmCreate(e.into()));
                    }
                }
            }
            break;
        }

        // Set additional partition property for SEV-SNP partition.
        if mshv_vm_type == VmType::Snp {
            let snp_policy = snp::get_default_snp_guest_policy();
            let vmgexit_offloads = snp::get_default_vmgexit_offload_features();
            // SAFETY: access union fields
            unsafe {
                debug!(
                    "Setting the partition isolation policy as: 0x{:x}",
                    snp_policy.as_uint64
                );
                fd.set_partition_property(
                    hv_partition_property_code_HV_PARTITION_PROPERTY_ISOLATION_POLICY,
                    snp_policy.as_uint64,
                )
                .map_err(|e| hypervisor::HypervisorError::SetPartitionProperty(e.into()))?;
                debug!(
                    "Setting the partition property to enable VMGEXIT offloads as: 0x{:x}",
                    vmgexit_offloads.as_uint64
                );
                fd.set_partition_property(
                    hv_partition_property_code_HV_PARTITION_PROPERTY_SEV_VMGEXIT_OFFLOADS,
                    vmgexit_offloads.as_uint64,
                )
                .map_err(|e| hypervisor::HypervisorError::SetPartitionProperty(e.into()))?;
            }
        }

        // The default Microsoft Hypervisor behavior for unimplemented MSRs is to
        // send a fault to the guest if it tries to access them. Override this with
        // a more suitable option: ignore writes from the guest and return zero on
        // attempts to read an unimplemented MSR.
        fd.set_partition_property(
            hv_partition_property_code_HV_PARTITION_PROPERTY_UNIMPLEMENTED_MSR_ACTION,
            hv_unimplemented_msr_action_HV_UNIMPLEMENTED_MSR_ACTION_IGNORE_WRITE_READ_ZERO as u64,
        )
        .map_err(|e| hypervisor::HypervisorError::SetPartitionProperty(e.into()))?;

        let msr_list = self.get_msr_list()?;
        let num_msrs = msr_list.as_fam_struct_ref().nmsrs as usize;
        let mut msrs: Vec<MsrEntry> = vec![
            MsrEntry {
                ..Default::default()
            };
            num_msrs
        ];
        let indices = msr_list.as_slice();
        for (pos, index) in indices.iter().enumerate() {
            msrs[pos].index = *index;
        }
        let vm_fd = Arc::new(fd);

        Ok(Arc::new(MshvVm {
            fd: vm_fd,
            msrs,
            dirty_log_slots: Arc::new(RwLock::new(HashMap::new())),
        }))
    }

    /// Create an MSHV VM object and return it as a Vm trait object
    ///
    /// # Examples
    ///
    /// ```
    /// # extern crate hypervisor;
    /// # use hypervisor::mshv::MshvHypervisor;
    /// use hypervisor::mshv::MshvVm;
    /// let hypervisor = MshvHypervisor::new().unwrap();
    /// let vm = hypervisor.create_vm().unwrap();
    /// ```
    fn create_vm(&self) -> hypervisor::Result<Arc<dyn vm::Vm>> {
        let vm_type = 0;
        self.create_vm_with_type(vm_type)
    }
    ///
    /// Get the supported CpuID
    ///
    fn get_supported_cpuid(&self) -> hypervisor::Result<Vec<CpuIdEntry>> {
        Ok(Vec::new())
    }

    /// Get maximum number of vCPUs
    fn get_max_vcpus(&self) -> u32 {
        // TODO: Using HV_MAXIMUM_PROCESSORS would be better
        // but the ioctl API is limited to u8
        256
    }
}

/// Vcpu struct for Microsoft Hypervisor
pub struct MshvVcpu {
    fd: VcpuFd,
    vp_index: u8,
    cpuid: Vec<CpuIdEntry>,
    msrs: Vec<MsrEntry>,
    vm_ops: Option<Arc<dyn vm::VmOps>>,
}

/// Implementation of Vcpu trait for Microsoft Hypervisor
///
/// # Examples
///
/// ```
/// # use hypervisor::mshv::MshvHypervisor;
/// # use std::sync::Arc;
/// let mshv = MshvHypervisor::new().unwrap();
/// let hypervisor = Arc::new(mshv);
/// let vm = hypervisor.create_vm().expect("new VM fd creation failed");
/// let vcpu = vm.create_vcpu(0, None).unwrap();
/// ```
impl cpu::Vcpu for MshvVcpu {
    #[cfg(target_arch = "x86_64")]
    ///
    /// Returns the vCPU general purpose registers.
    ///
    fn get_regs(&self) -> cpu::Result<crate::arch::x86::StandardRegisters> {
        Ok(self
            .fd
            .get_regs()
            .map_err(|e| cpu::HypervisorCpuError::GetStandardRegs(e.into()))?
            .into())
    }
    #[cfg(target_arch = "x86_64")]
    ///
    /// Sets the vCPU general purpose registers.
    ///
    fn set_regs(&self, regs: &crate::arch::x86::StandardRegisters) -> cpu::Result<()> {
        let regs = (*regs).into();
        self.fd
            .set_regs(&regs)
            .map_err(|e| cpu::HypervisorCpuError::SetStandardRegs(e.into()))
    }
    #[cfg(target_arch = "x86_64")]
    ///
    /// Returns the vCPU special registers.
    ///
    fn get_sregs(&self) -> cpu::Result<crate::arch::x86::SpecialRegisters> {
        Ok(self
            .fd
            .get_sregs()
            .map_err(|e| cpu::HypervisorCpuError::GetSpecialRegs(e.into()))?
            .into())
    }
    #[cfg(target_arch = "x86_64")]
    ///
    /// Sets the vCPU special registers.
    ///
    fn set_sregs(&self, sregs: &crate::arch::x86::SpecialRegisters) -> cpu::Result<()> {
        let sregs = (*sregs).into();
        self.fd
            .set_sregs(&sregs)
            .map_err(|e| cpu::HypervisorCpuError::SetSpecialRegs(e.into()))
    }
    #[cfg(target_arch = "x86_64")]
    ///
    /// Returns the floating point state (FPU) from the vCPU.
    ///
    fn get_fpu(&self) -> cpu::Result<FpuState> {
        Ok(self
            .fd
            .get_fpu()
            .map_err(|e| cpu::HypervisorCpuError::GetFloatingPointRegs(e.into()))?
            .into())
    }
    #[cfg(target_arch = "x86_64")]
    ///
    /// Set the floating point state (FPU) of a vCPU.
    ///
    fn set_fpu(&self, fpu: &FpuState) -> cpu::Result<()> {
        let fpu: mshv_bindings::FloatingPointUnit = (*fpu).clone().into();
        self.fd
            .set_fpu(&fpu)
            .map_err(|e| cpu::HypervisorCpuError::SetFloatingPointRegs(e.into()))
    }

    #[cfg(target_arch = "x86_64")]
    ///
    /// Returns the model-specific registers (MSR) for this vCPU.
    ///
    fn get_msrs(&self, msrs: &mut Vec<MsrEntry>) -> cpu::Result<usize> {
        let mshv_msrs: Vec<msr_entry> = msrs.iter().map(|e| (*e).into()).collect();
        let mut mshv_msrs = MsrEntries::from_entries(&mshv_msrs).unwrap();
        let succ = self
            .fd
            .get_msrs(&mut mshv_msrs)
            .map_err(|e| cpu::HypervisorCpuError::GetMsrEntries(e.into()))?;

        msrs[..succ].copy_from_slice(
            &mshv_msrs.as_slice()[..succ]
                .iter()
                .map(|e| (*e).into())
                .collect::<Vec<MsrEntry>>(),
        );

        Ok(succ)
    }
    #[cfg(target_arch = "x86_64")]
    ///
    /// Setup the model-specific registers (MSR) for this vCPU.
    /// Returns the number of MSR entries actually written.
    ///
    fn set_msrs(&self, msrs: &[MsrEntry]) -> cpu::Result<usize> {
        let mshv_msrs: Vec<msr_entry> = msrs.iter().map(|e| (*e).into()).collect();
        let mshv_msrs = MsrEntries::from_entries(&mshv_msrs).unwrap();
        self.fd
            .set_msrs(&mshv_msrs)
            .map_err(|e| cpu::HypervisorCpuError::SetMsrEntries(e.into()))
    }

    #[cfg(target_arch = "x86_64")]
    ///
    /// X86 specific call to enable HyperV SynIC
    ///
    fn enable_hyperv_synic(&self) -> cpu::Result<()> {
        /* We always have SynIC enabled on MSHV */
        Ok(())
    }
    #[allow(non_upper_case_globals)]
    fn run(&self) -> std::result::Result<cpu::VmExit, cpu::HypervisorCpuError> {
        let hv_message: hv_message = hv_message::default();
        match self.fd.run(hv_message) {
            Ok(x) => match x.header.message_type {
                hv_message_type_HVMSG_X64_HALT => {
                    debug!("HALT");
                    Ok(cpu::VmExit::Reset)
                }
                hv_message_type_HVMSG_UNRECOVERABLE_EXCEPTION => {
                    warn!("TRIPLE FAULT");
                    Ok(cpu::VmExit::Shutdown)
                }
                hv_message_type_HVMSG_X64_IO_PORT_INTERCEPT => {
                    let info = x.to_ioport_info().unwrap();
                    let access_info = info.access_info;
                    // SAFETY: access_info is valid, otherwise we won't be here
                    let len = unsafe { access_info.__bindgen_anon_1.access_size() } as usize;
                    let is_write = info.header.intercept_access_type == 1;
                    let port = info.port_number;
                    let mut data: [u8; 4] = [0; 4];
                    let mut ret_rax = info.rax;

                    /*
                     * XXX: Ignore QEMU fw_cfg (0x5xx) and debug console (0x402) ports.
                     *
                     * Cloud Hypervisor doesn't support fw_cfg at the moment. It does support 0x402
                     * under the "fwdebug" feature flag. But that feature is not enabled by default
                     * and is considered legacy.
                     *
                     * OVMF unconditionally pokes these IO ports with string IO.
                     *
                     * Instead of trying to implement string IO support, which would not accomplish
                     * much right now, skip those ports explicitly to avoid panicking.
                     *
                     * Proper string IO support can be added once we gain the ability to translate
                     * guest virtual addresses to guest physical addresses on MSHV.
                     */
                    match port {
                        0x402 | 0x510 | 0x511 | 0x514 => {
                            let insn_len = info.header.instruction_length() as u64;

                            /* Advance RIP and update RAX */
                            let arr_reg_name_value = [
                                (
                                    hv_register_name_HV_X64_REGISTER_RIP,
                                    info.header.rip + insn_len,
                                ),
                                (hv_register_name_HV_X64_REGISTER_RAX, ret_rax),
                            ];
                            set_registers_64!(self.fd, arr_reg_name_value)
                                .map_err(|e| cpu::HypervisorCpuError::SetRegister(e.into()))?;
                            return Ok(cpu::VmExit::Ignore);
                        }
                        _ => {}
                    }

                    assert!(
                        // SAFETY: access_info is valid, otherwise we won't be here
                        (unsafe { access_info.__bindgen_anon_1.string_op() } != 1),
                        "String IN/OUT not supported"
                    );
                    assert!(
                        // SAFETY: access_info is valid, otherwise we won't be here
                        (unsafe { access_info.__bindgen_anon_1.rep_prefix() } != 1),
                        "Rep IN/OUT not supported"
                    );

                    if is_write {
                        let data = (info.rax as u32).to_le_bytes();
                        if let Some(vm_ops) = &self.vm_ops {
                            vm_ops
                                .pio_write(port.into(), &data[0..len])
                                .map_err(|e| cpu::HypervisorCpuError::RunVcpu(e.into()))?;
                        }
                    } else {
                        if let Some(vm_ops) = &self.vm_ops {
                            vm_ops
                                .pio_read(port.into(), &mut data[0..len])
                                .map_err(|e| cpu::HypervisorCpuError::RunVcpu(e.into()))?;
                        }

                        let v = u32::from_le_bytes(data);
                        /* Preserve high bits in EAX but clear out high bits in RAX */
                        let mask = 0xffffffff >> (32 - len * 8);
                        let eax = (info.rax as u32 & !mask) | (v & mask);
                        ret_rax = eax as u64;
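                        // Worked example (hypothetical values): a 1-byte IN that
                        // reads v = 0xAB with prior rax = 0x1122_3344 gives
                        // mask = 0xff, eax = 0x1122_33AB, and
                        // ret_rax = 0x0000_0000_1122_33AB.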
                    }

                    let insn_len = info.header.instruction_length() as u64;

                    /* Advance RIP and update RAX */
                    let arr_reg_name_value = [
                        (
                            hv_register_name_HV_X64_REGISTER_RIP,
                            info.header.rip + insn_len,
                        ),
                        (hv_register_name_HV_X64_REGISTER_RAX, ret_rax),
                    ];
                    set_registers_64!(self.fd, arr_reg_name_value)
                        .map_err(|e| cpu::HypervisorCpuError::SetRegister(e.into()))?;
                    Ok(cpu::VmExit::Ignore)
                }
                hv_message_type_HVMSG_UNMAPPED_GPA => {
                    let info = x.to_memory_info().unwrap();
                    let insn_len = info.instruction_byte_count as usize;
                    assert!(insn_len > 0 && insn_len <= 16);

                    let mut context = MshvEmulatorContext {
                        vcpu: self,
                        map: (info.guest_virtual_address, info.guest_physical_address),
                    };

                    // Create a new emulator.
                    let mut emul = Emulator::new(&mut context);

                    // Emulate the trapped instruction, and only the first one.
                    let new_state = emul
                        .emulate_first_insn(self.vp_index as usize, &info.instruction_bytes)
                        .map_err(|e| cpu::HypervisorCpuError::RunVcpu(e.into()))?;

                    // Set CPU state back.
                    context
                        .set_cpu_state(self.vp_index as usize, new_state)
                        .map_err(|e| cpu::HypervisorCpuError::RunVcpu(e.into()))?;

                    Ok(cpu::VmExit::Ignore)
                }
                hv_message_type_HVMSG_X64_CPUID_INTERCEPT => {
                    let info = x.to_cpuid_info().unwrap();
                    debug!("cpuid eax: {:x}", { info.rax });
                    Ok(cpu::VmExit::Ignore)
                }
                hv_message_type_HVMSG_X64_MSR_INTERCEPT => {
                    let info = x.to_msr_info().unwrap();
                    if info.header.intercept_access_type == 0 {
                        debug!("msr read: {:x}", { info.msr_number });
                    } else {
                        debug!("msr write: {:x}", { info.msr_number });
                    }
                    Ok(cpu::VmExit::Ignore)
                }
                hv_message_type_HVMSG_X64_EXCEPTION_INTERCEPT => {
                    //TODO: Handler for VMCALL here.
                    let info = x.to_exception_info().unwrap();
                    debug!("Exception Info {:?}", { info.exception_vector });
                    Ok(cpu::VmExit::Ignore)
                }
                hv_message_type_HVMSG_X64_APIC_EOI => {
                    let info = x.to_apic_eoi_info().unwrap();
                    // The kernel should dispatch the EOI to the correct thread.
                    // Check the VP index is the same as the one we have.
                    assert!(info.vp_index == self.vp_index as u32);
                    // The interrupt vector in info is u32, but x86 only supports 256 vectors.
                    // There is no good way to recover from this if the hypervisor messes around.
                    // Just unwrap.
                    Ok(cpu::VmExit::IoapicEoi(
                        info.interrupt_vector.try_into().unwrap(),
                    ))
                }
                #[cfg(feature = "sev_snp")]
                hv_message_type_HVMSG_X64_SEV_VMGEXIT_INTERCEPT => {
                    let info = x.to_vmg_intercept_info().unwrap();
                    let ghcb_data = info.ghcb_msr >> GHCB_INFO_BIT_WIDTH;
                    let ghcb_msr = svm_ghcb_msr {
                        as_uint64: info.ghcb_msr,
                    };
                    // SAFETY: Accessing a union element from bindgen generated bindings.
                    let ghcb_op = unsafe { ghcb_msr.__bindgen_anon_2.ghcb_info() as u32 };
                    // Sanity check on the header fields before handling other operations.
                    assert!(info.header.intercept_access_type == HV_INTERCEPT_ACCESS_EXECUTE as u8);

                    match ghcb_op {
                        GHCB_INFO_HYP_FEATURE_REQUEST => {
                            // Pre-condition: GHCB data must be zero
                            assert!(ghcb_data == 0);
                            let mut ghcb_response = GHCB_INFO_HYP_FEATURE_RESPONSE as u64;
                            // Indicate support for basic SEV-SNP features
                            ghcb_response |=
                                (GHCB_HYP_FEATURE_SEV_SNP << GHCB_INFO_BIT_WIDTH) as u64;
                            // Indicate support for SEV-SNP AP creation
                            ghcb_response |= (GHCB_HYP_FEATURE_SEV_SNP_AP_CREATION
                                << GHCB_INFO_BIT_WIDTH)
                                as u64;
                            debug!(
                                "GHCB_INFO_HYP_FEATURE_REQUEST: Supported features: {:0x}",
                                ghcb_response
                            );
                            let arr_reg_name_value =
                                [(hv_register_name_HV_X64_REGISTER_GHCB, ghcb_response)];
                            set_registers_64!(self.fd, arr_reg_name_value)
                                .map_err(|e| cpu::HypervisorCpuError::SetRegister(e.into()))?;
                        }
                        GHCB_INFO_REGISTER_REQUEST => {
                            let mut ghcb_gpa = hv_x64_register_sev_ghcb::default();
                            // SAFETY: Accessing a union element from bindgen generated bindings.
                            unsafe {
                                ghcb_gpa.__bindgen_anon_1.set_enabled(1);
                                ghcb_gpa
                                    .__bindgen_anon_1
                                    .set_page_number(ghcb_msr.__bindgen_anon_2.gpa_page_number());
                            }
                            // SAFETY: Accessing a union element from bindgen generated bindings.
                            let reg_name_value = unsafe {
                                [(
                                    hv_register_name_HV_X64_REGISTER_SEV_GHCB_GPA,
                                    ghcb_gpa.as_uint64,
                                )]
                            };

                            set_registers_64!(self.fd, reg_name_value)
                                .map_err(|e| cpu::HypervisorCpuError::SetRegister(e.into()))?;

                            let mut resp_ghcb_msr = svm_ghcb_msr::default();
                            // SAFETY: Accessing a union element from bindgen generated bindings.
                            unsafe {
                                resp_ghcb_msr
                                    .__bindgen_anon_2
                                    .set_ghcb_info(GHCB_INFO_REGISTER_RESPONSE as u64);
                                resp_ghcb_msr.__bindgen_anon_2.set_gpa_page_number(
                                    ghcb_msr.__bindgen_anon_2.gpa_page_number(),
                                );
                            }
                            // SAFETY: Accessing a union element from bindgen generated bindings.
                            let reg_name_value = unsafe {
                                [(
                                    hv_register_name_HV_X64_REGISTER_GHCB,
                                    resp_ghcb_msr.as_uint64,
                                )]
                            };

                            set_registers_64!(self.fd, reg_name_value)
                                .map_err(|e| cpu::HypervisorCpuError::SetRegister(e.into()))?;
                        }
                        GHCB_INFO_SEV_INFO_REQUEST => {
                            let sev_cpuid_function = 0x8000_001F;
                            let cpu_leaf = self
                                .fd
                                .get_cpuid_values(sev_cpuid_function, 0, 0, 0)
                                .unwrap();
                            let ebx = cpu_leaf[1];
                            // The low 6 bits of EBX hold the page table encryption bit number
                            let pbit_encryption = (ebx & 0x3f) as u8;
                            let mut ghcb_response = GHCB_INFO_SEV_INFO_RESPONSE as u64;

                            // GHCBData[63:48] specifies the maximum GHCB protocol version supported
                            ghcb_response |= (GHCB_PROTOCOL_VERSION_MAX as u64) << 48;
                            // GHCBData[47:32] specifies the minimum GHCB protocol version supported
                            ghcb_response |= (GHCB_PROTOCOL_VERSION_MIN as u64) << 32;
                            // GHCBData[31:24] specifies the SEV page table encryption bit number.
                            ghcb_response |= (pbit_encryption as u64) << 24;
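                            // Layout sketch with hypothetical values (max = 2,
                            // min = 1, encryption bit 51): the response packs
                            // (2 << 48) | (1 << 32) | (51 << 24) on top of
                            // GHCB_INFO_SEV_INFO_RESPONSE.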

                            let arr_reg_name_value =
                                [(hv_register_name_HV_X64_REGISTER_GHCB, ghcb_response)];
                            set_registers_64!(self.fd, arr_reg_name_value)
                                .map_err(|e| cpu::HypervisorCpuError::SetRegister(e.into()))?;
                        }
                        GHCB_INFO_NORMAL => {
                            let exit_code =
                                info.__bindgen_anon_2.__bindgen_anon_1.sw_exit_code as u32;
                            // SAFETY: Accessing a union element from bindgen generated bindings.
                            let pfn = unsafe { ghcb_msr.__bindgen_anon_2.gpa_page_number() };
                            let ghcb_gpa = pfn << GHCB_INFO_BIT_WIDTH;
                            match exit_code {
                                SVM_EXITCODE_HV_DOORBELL_PAGE => {
                                    let exit_info1 =
                                        info.__bindgen_anon_2.__bindgen_anon_1.sw_exit_info1 as u32;
                                    match exit_info1 {
                                        SVM_NAE_HV_DOORBELL_PAGE_GET_PREFERRED => {
                                            // Hypervisor does not have any preference for doorbell GPA.
                                            let preferred_doorbell_gpa: u64 = 0xFFFFFFFFFFFFFFFF;
                                            let mut swei2_rw_gpa_arg =
                                                mshv_bindings::mshv_read_write_gpa {
                                                    base_gpa: ghcb_gpa + GHCB_SW_EXITINFO2_OFFSET,
                                                    byte_count: std::mem::size_of::<u64>() as u32,
                                                    ..Default::default()
                                                };
                                            swei2_rw_gpa_arg.data.copy_from_slice(
                                                &preferred_doorbell_gpa.to_le_bytes(),
                                            );
                                            self.fd.gpa_write(&mut swei2_rw_gpa_arg).map_err(
                                                |e| cpu::HypervisorCpuError::GpaWrite(e.into()),
                                            )?;
                                        }
                                        SVM_NAE_HV_DOORBELL_PAGE_SET => {
                                            let exit_info2 = info
                                                .__bindgen_anon_2
                                                .__bindgen_anon_1
                                                .sw_exit_info2;
                                            let mut ghcb_doorbell_gpa =
                                                hv_x64_register_sev_hv_doorbell::default();
                                            // SAFETY: Accessing a union element from bindgen generated bindings.
                                            unsafe {
                                                ghcb_doorbell_gpa.__bindgen_anon_1.set_enabled(1);
                                                ghcb_doorbell_gpa
                                                    .__bindgen_anon_1
                                                    .set_page_number(exit_info2 >> PAGE_SHIFT);
                                            }
                                            // SAFETY: Accessing a union element from bindgen generated bindings.
                                            let reg_names = unsafe {
                                                [(
                                                    hv_register_name_HV_X64_REGISTER_SEV_DOORBELL_GPA,
                                                    ghcb_doorbell_gpa.as_uint64,
                                                )]
                                            };
                                            set_registers_64!(self.fd, reg_names).map_err(|e| {
                                                cpu::HypervisorCpuError::SetRegister(e.into())
                                            })?;

                                            let mut swei2_rw_gpa_arg =
                                                mshv_bindings::mshv_read_write_gpa {
                                                    base_gpa: ghcb_gpa + GHCB_SW_EXITINFO2_OFFSET,
                                                    byte_count: std::mem::size_of::<u64>() as u32,
                                                    ..Default::default()
                                                };
                                            swei2_rw_gpa_arg.data[0..8]
                                                .copy_from_slice(&exit_info2.to_le_bytes());
                                            self.fd.gpa_write(&mut swei2_rw_gpa_arg).map_err(
                                                |e| cpu::HypervisorCpuError::GpaWrite(e.into()),
                                            )?;

                                            // Clear the SW_EXIT_INFO1 register to indicate no error
                                            let mut swei1_rw_gpa_arg =
                                                mshv_bindings::mshv_read_write_gpa {
                                                    base_gpa: ghcb_gpa + GHCB_SW_EXITINFO1_OFFSET,
                                                    byte_count: std::mem::size_of::<u64>() as u32,
                                                    ..Default::default()
                                                };
                                            self.fd.gpa_write(&mut swei1_rw_gpa_arg).map_err(
                                                |e| cpu::HypervisorCpuError::GpaWrite(e.into()),
                                            )?;
                                        }
                                        SVM_NAE_HV_DOORBELL_PAGE_QUERY => {
                                            let mut reg_assocs = [hv_register_assoc {
                                                name: hv_register_name_HV_X64_REGISTER_SEV_DOORBELL_GPA,
                                                ..Default::default()
                                            }];
                                            self.fd.get_reg(&mut reg_assocs).unwrap();
                                            // SAFETY: Accessing a union element from bindgen generated bindings.
                                            let doorbell_gpa = unsafe { reg_assocs[0].value.reg64 };
                                            let mut swei2_rw_gpa_arg =
                                                mshv_bindings::mshv_read_write_gpa {
                                                    base_gpa: ghcb_gpa + GHCB_SW_EXITINFO2_OFFSET,
                                                    byte_count: std::mem::size_of::<u64>() as u32,
                                                    ..Default::default()
                                                };
                                            swei2_rw_gpa_arg
                                                .data
                                                .copy_from_slice(&doorbell_gpa.to_le_bytes());
                                            self.fd.gpa_write(&mut swei2_rw_gpa_arg).map_err(
                                                |e| cpu::HypervisorCpuError::GpaWrite(e.into()),
                                            )?;
                                        }
                                        SVM_NAE_HV_DOORBELL_PAGE_CLEAR => {
                                            let mut swei2_rw_gpa_arg =
                                                mshv_bindings::mshv_read_write_gpa {
                                                    base_gpa: ghcb_gpa + GHCB_SW_EXITINFO2_OFFSET,
                                                    byte_count: std::mem::size_of::<u64>() as u32,
                                                    ..Default::default()
                                                };
                                            self.fd.gpa_write(&mut swei2_rw_gpa_arg).map_err(
                                                |e| cpu::HypervisorCpuError::GpaWrite(e.into()),
                                            )?;
                                        }
                                        _ => {
                                            panic!(
                                                "SVM_EXITCODE_HV_DOORBELL_PAGE: Unhandled exit code: {:0x}",
                                                exit_info1
                                            );
                                        }
                                    }
                                }
                                SVM_EXITCODE_SNP_EXTENDED_GUEST_REQUEST => {
                                    warn!("Fetching extended guest request is not supported");
                                    // Extended guest request is not supported by the Hypervisor
                                    // Returning the error to the guest
                                    // 0x6 means `The NAE event was not valid`
                                    // Reference: GHCB Spec, page 42
                                    let value: u64 = 0x6;
                                    let mut swei2_rw_gpa_arg = mshv_bindings::mshv_read_write_gpa {
                                        base_gpa: ghcb_gpa + GHCB_SW_EXITINFO2_OFFSET,
                                        byte_count: std::mem::size_of::<u64>() as u32,
                                        ..Default::default()
                                    };
                                    swei2_rw_gpa_arg.data.copy_from_slice(&value.to_le_bytes());
                                    self.fd
                                        .gpa_write(&mut swei2_rw_gpa_arg)
                                        .map_err(|e| cpu::HypervisorCpuError::GpaWrite(e.into()))?;
                                }
                                SVM_EXITCODE_IOIO_PROT => {
                                    let exit_info1 =
                                        info.__bindgen_anon_2.__bindgen_anon_1.sw_exit_info1 as u32;
                                    let port_info = hv_sev_vmgexit_port_info {
                                        as_uint32: exit_info1,
                                    };

                                    let port =
                                        // SAFETY: Accessing a union element from bindgen generated bindings.
                                        unsafe { port_info.__bindgen_anon_1.intercepted_port() };
                                    let mut len = 4;
                                    // SAFETY: Accessing a union element from bindgen generated bindings.
                                    unsafe {
                                        if port_info.__bindgen_anon_1.operand_size_16bit() == 1 {
                                            len = 2;
                                        } else if port_info.__bindgen_anon_1.operand_size_8bit()
                                            == 1
                                        {
                                            len = 1;
                                        }
                                    }
                                    let is_write =
                                        // SAFETY: Accessing a union element from bindgen generated bindings.
                                        unsafe { port_info.__bindgen_anon_1.access_type() == 0 };
                                    let mut rax_rw_gpa_arg: mshv_read_write_gpa =
                                        mshv_bindings::mshv_read_write_gpa {
                                            base_gpa: ghcb_gpa + GHCB_RAX_OFFSET,
                                            byte_count: std::mem::size_of::<u64>() as u32,
                                            ..Default::default()
                                        };
                                    self.fd
                                        .gpa_read(&mut rax_rw_gpa_arg)
                                        .map_err(|e| cpu::HypervisorCpuError::GpaRead(e.into()))?;

                                    if is_write {
                                        if let Some(vm_ops) = &self.vm_ops {
                                            vm_ops
                                                .pio_write(
                                                    port.into(),
                                                    &rax_rw_gpa_arg.data[0..len],
                                                )
                                                .map_err(|e| {
                                                    cpu::HypervisorCpuError::RunVcpu(e.into())
                                                })?;
                                        }
                                    } else {
                                        if let Some(vm_ops) = &self.vm_ops {
                                            vm_ops
                                                .pio_read(
                                                    port.into(),
                                                    &mut rax_rw_gpa_arg.data[0..len],
                                                )
                                                .map_err(|e| {
                                                    cpu::HypervisorCpuError::RunVcpu(e.into())
                                                })?;
                                        }

                                        self.fd.gpa_write(&mut rax_rw_gpa_arg).map_err(|e| {
                                            cpu::HypervisorCpuError::GpaWrite(e.into())
                                        })?;
                                    }

                                    // Clear the SW_EXIT_INFO1 register to indicate no error
                                    let mut swei1_rw_gpa_arg = mshv_bindings::mshv_read_write_gpa {
                                        base_gpa: ghcb_gpa + GHCB_SW_EXITINFO1_OFFSET,
                                        byte_count: std::mem::size_of::<u64>() as u32,
                                        ..Default::default()
                                    };
                                    self.fd
                                        .gpa_write(&mut swei1_rw_gpa_arg)
                                        .map_err(|e| cpu::HypervisorCpuError::GpaWrite(e.into()))?;
                                }
                                SVM_EXITCODE_MMIO_READ => {
                                    let src_gpa =
                                        info.__bindgen_anon_2.__bindgen_anon_1.sw_exit_info1;
                                    let dst_gpa = info.__bindgen_anon_2.__bindgen_anon_1.sw_scratch;
                                    let data_len =
                                        info.__bindgen_anon_2.__bindgen_anon_1.sw_exit_info2
                                            as usize;
                                    // Sanity check to make sure data len is within supported range.
                                    assert!(data_len <= 0x8);

                                    let mut data: Vec<u8> = vec![0; data_len];
                                    if let Some(vm_ops) = &self.vm_ops {
                                        vm_ops.mmio_read(src_gpa, &mut data[0..data_len]).map_err(
                                            |e| cpu::HypervisorCpuError::RunVcpu(e.into()),
                                        )?;
                                    }
                                    let mut arg: mshv_read_write_gpa =
                                        mshv_bindings::mshv_read_write_gpa {
                                            base_gpa: dst_gpa,
                                            byte_count: data_len as u32,
                                            ..Default::default()
                                        };
                                    arg.data[0..data_len].copy_from_slice(&data);

                                    self.fd
                                        .gpa_write(&mut arg)
                                        .map_err(|e| cpu::HypervisorCpuError::GpaWrite(e.into()))?;
                                }
                                SVM_EXITCODE_MMIO_WRITE => {
                                    let dst_gpa =
                                        info.__bindgen_anon_2.__bindgen_anon_1.sw_exit_info1;
                                    let src_gpa = info.__bindgen_anon_2.__bindgen_anon_1.sw_scratch;
                                    let data_len =
                                        info.__bindgen_anon_2.__bindgen_anon_1.sw_exit_info2
                                            as usize;
                                    // Sanity check to make sure data len is within supported range.
                                    assert!(data_len <= 0x8);
                                    let mut arg: mshv_read_write_gpa =
                                        mshv_bindings::mshv_read_write_gpa {
                                            base_gpa: src_gpa,
                                            byte_count: data_len as u32,
                                            ..Default::default()
                                        };

                                    self.fd
                                        .gpa_read(&mut arg)
                                        .map_err(|e| cpu::HypervisorCpuError::GpaRead(e.into()))?;

                                    if let Some(vm_ops) = &self.vm_ops {
                                        vm_ops
                                            .mmio_write(dst_gpa, &arg.data[0..data_len])
                                            .map_err(|e| {
                                                cpu::HypervisorCpuError::RunVcpu(e.into())
                                            })?;
                                    }
                                }
                                _ => panic!(
                                    "GHCB_INFO_NORMAL: Unhandled exit code: {:0x}",
                                    exit_code
                                ),
                            }
                        }
                        _ => panic!("Unsupported VMGEXIT operation: {:0x}", ghcb_op),
                    }

                    Ok(cpu::VmExit::Ignore)
                }
                exit => Err(cpu::HypervisorCpuError::RunVcpu(anyhow!(
                    "Unhandled VCPU exit {:?}",
                    exit
                ))),
            },

            Err(e) => match e.errno() {
                libc::EAGAIN | libc::EINTR => Ok(cpu::VmExit::Ignore),
                _ => Err(cpu::HypervisorCpuError::RunVcpu(anyhow!(
                    "VCPU error {:?}",
                    e
                ))),
            },
        }
    }
    #[cfg(target_arch = "x86_64")]
    ///
    /// X86 specific call to setup the CPUID registers.
    ///
    fn set_cpuid2(&self, cpuid: &[CpuIdEntry]) -> cpu::Result<()> {
        let cpuid: Vec<mshv_bindings::hv_cpuid_entry> = cpuid.iter().map(|e| (*e).into()).collect();
        let mshv_cpuid = <CpuId>::from_entries(&cpuid)
            .map_err(|_| cpu::HypervisorCpuError::SetCpuid(anyhow!("failed to create CpuId")))?;

        self.fd
            .register_intercept_result_cpuid(&mshv_cpuid)
            .map_err(|e| cpu::HypervisorCpuError::SetCpuid(e.into()))
    }
    #[cfg(target_arch = "x86_64")]
    ///
    /// X86 specific call to retrieve the CPUID registers.
    ///
    fn get_cpuid2(&self, _num_entries: usize) -> cpu::Result<Vec<CpuIdEntry>> {
        Ok(self.cpuid.clone())
    }
    #[cfg(target_arch = "x86_64")]
    ///
    /// Returns the state of the LAPIC (Local Advanced Programmable Interrupt Controller).
    ///
    fn get_lapic(&self) -> cpu::Result<crate::arch::x86::LapicState> {
        Ok(self
            .fd
            .get_lapic()
            .map_err(|e| cpu::HypervisorCpuError::GetlapicState(e.into()))?
            .into())
    }
    #[cfg(target_arch = "x86_64")]
    ///
    /// Sets the state of the LAPIC (Local Advanced Programmable Interrupt Controller).
    ///
    fn set_lapic(&self, lapic: &crate::arch::x86::LapicState) -> cpu::Result<()> {
        let lapic: mshv_bindings::LapicState = (*lapic).clone().into();
        self.fd
            .set_lapic(&lapic)
            .map_err(|e| cpu::HypervisorCpuError::SetLapicState(e.into()))
    }
    ///
    /// Returns the vcpu's current "multiprocessing state".
    ///
    fn get_mp_state(&self) -> cpu::Result<MpState> {
        Ok(MpState::Mshv)
    }
    ///
    /// Sets the vcpu's current "multiprocessing state".
    ///
    fn set_mp_state(&self, _mp_state: MpState) -> cpu::Result<()> {
        Ok(())
    }
    ///
    /// Set CPU state
    ///
    fn set_state(&self, state: &CpuState) -> cpu::Result<()> {
        let state: VcpuMshvState = state.clone().into();
        self.set_msrs(&state.msrs)?;
        self.set_vcpu_events(&state.vcpu_events)?;
        self.set_regs(&state.regs.into())?;
        self.set_sregs(&state.sregs.into())?;
        self.set_fpu(&state.fpu)?;
        self.set_xcrs(&state.xcrs)?;
        self.set_lapic(&state.lapic)?;
        self.set_xsave(&state.xsave)?;
        // These registers are global and need to be set only for the first vCPU,
        // as the Microsoft Hypervisor allows setting this register on only one vCPU.
1073         if self.vp_index == 0 {
1074             self.fd
1075                 .set_misc_regs(&state.misc)
1076                 .map_err(|e| cpu::HypervisorCpuError::SetMiscRegs(e.into()))?
1077         }
1078         self.fd
1079             .set_debug_regs(&state.dbg)
1080             .map_err(|e| cpu::HypervisorCpuError::SetDebugRegs(e.into()))?;
1081         Ok(())
1082     }
1083     ///
1084     /// Get CPU State
1085     ///
1086     fn state(&self) -> cpu::Result<CpuState> {
1087         let regs = self.get_regs()?;
1088         let sregs = self.get_sregs()?;
1089         let xcrs = self.get_xcrs()?;
1090         let fpu = self.get_fpu()?;
1091         let vcpu_events = self.get_vcpu_events()?;
1092         let mut msrs = self.msrs.clone();
1093         self.get_msrs(&mut msrs)?;
1094         let lapic = self.get_lapic()?;
1095         let xsave = self.get_xsave()?;
1096         let misc = self
1097             .fd
1098             .get_misc_regs()
1099             .map_err(|e| cpu::HypervisorCpuError::GetMiscRegs(e.into()))?;
1100         let dbg = self
1101             .fd
1102             .get_debug_regs()
1103             .map_err(|e| cpu::HypervisorCpuError::GetDebugRegs(e.into()))?;
1104 
1105         Ok(VcpuMshvState {
1106             msrs,
1107             vcpu_events,
1108             regs: regs.into(),
1109             sregs: sregs.into(),
1110             fpu,
1111             xcrs,
1112             lapic,
1113             dbg,
1114             xsave,
1115             misc,
1116         }
1117         .into())
1118     }
1119     #[cfg(target_arch = "x86_64")]
1120     ///
1121     /// Translate guest virtual address to guest physical address
1122     ///
1123     fn translate_gva(&self, gva: u64, flags: u64) -> cpu::Result<(u64, u32)> {
1124         let r = self
1125             .fd
1126             .translate_gva(gva, flags)
1127             .map_err(|e| cpu::HypervisorCpuError::TranslateVirtualAddress(e.into()))?;
1128 
1129         let gpa = r.0;
1130         // SAFETY: r is valid, otherwise this function will have returned
1131         let result_code = unsafe { r.1.__bindgen_anon_1.result_code };
1132 
1133         Ok((gpa, result_code))
1134     }
1135     #[cfg(target_arch = "x86_64")]
1136     ///
1137     /// Return the list of initial MSR entries for a VCPU
1138     ///
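    /// These are typically written once before boot; a minimal sketch:
    ///
    /// ```ignore
    /// let msrs = vcpu.boot_msr_entries();
    /// vcpu.set_msrs(&msrs)?;
    /// ```
    ///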
1139     fn boot_msr_entries(&self) -> Vec<MsrEntry> {
1140         use crate::arch::x86::{msr_index, MTRR_ENABLE, MTRR_MEM_TYPE_WB};
1141 
1142         [
1143             msr!(msr_index::MSR_IA32_SYSENTER_CS),
1144             msr!(msr_index::MSR_IA32_SYSENTER_ESP),
1145             msr!(msr_index::MSR_IA32_SYSENTER_EIP),
1146             msr!(msr_index::MSR_STAR),
1147             msr!(msr_index::MSR_CSTAR),
1148             msr!(msr_index::MSR_LSTAR),
1149             msr!(msr_index::MSR_KERNEL_GS_BASE),
1150             msr!(msr_index::MSR_SYSCALL_MASK),
1151             msr_data!(msr_index::MSR_MTRRdefType, MTRR_ENABLE | MTRR_MEM_TYPE_WB),
1152         ]
1153         .to_vec()
1154     }
1155 }
1156 
1157 impl MshvVcpu {
1158     #[cfg(target_arch = "x86_64")]
1159     ///
1160     /// X86 specific call that returns the vcpu's current "xsave struct".
1161     ///
1162     fn get_xsave(&self) -> cpu::Result<Xsave> {
1163         self.fd
1164             .get_xsave()
1165             .map_err(|e| cpu::HypervisorCpuError::GetXsaveState(e.into()))
1166     }
1167     #[cfg(target_arch = "x86_64")]
1168     ///
1169     /// X86 specific call that sets the vcpu's current "xsave struct".
1170     ///
1171     fn set_xsave(&self, xsave: &Xsave) -> cpu::Result<()> {
1172         self.fd
1173             .set_xsave(xsave)
1174             .map_err(|e| cpu::HypervisorCpuError::SetXsaveState(e.into()))
1175     }
1176     #[cfg(target_arch = "x86_64")]
1177     ///
1178     /// X86 specific call that returns the vcpu's current "xcrs".
1179     ///
1180     fn get_xcrs(&self) -> cpu::Result<ExtendedControlRegisters> {
1181         self.fd
1182             .get_xcrs()
1183             .map_err(|e| cpu::HypervisorCpuError::GetXcsr(e.into()))
1184     }
1185     #[cfg(target_arch = "x86_64")]
1186     ///
1187     /// X86 specific call that sets the vcpu's current "xcrs".
1188     ///
1189     fn set_xcrs(&self, xcrs: &ExtendedControlRegisters) -> cpu::Result<()> {
1190         self.fd
1191             .set_xcrs(xcrs)
1192             .map_err(|e| cpu::HypervisorCpuError::SetXcsr(e.into()))
1193     }
1194     #[cfg(target_arch = "x86_64")]
1195     ///
1196     /// Returns currently pending exceptions, interrupts, and NMIs as well as related
1197     /// states of the vcpu.
1198     ///
1199     fn get_vcpu_events(&self) -> cpu::Result<VcpuEvents> {
1200         self.fd
1201             .get_vcpu_events()
1202             .map_err(|e| cpu::HypervisorCpuError::GetVcpuEvents(e.into()))
1203     }
1204     #[cfg(target_arch = "x86_64")]
1205     ///
1206     /// Sets pending exceptions, interrupts, and NMIs as well as related states
1207     /// of the vcpu.
1208     ///
1209     fn set_vcpu_events(&self, events: &VcpuEvents) -> cpu::Result<()> {
1210         self.fd
1211             .set_vcpu_events(events)
1212             .map_err(|e| cpu::HypervisorCpuError::SetVcpuEvents(e.into()))
1213     }
1214 }
1215 
1216 struct MshvEmulatorContext<'a> {
1217     vcpu: &'a MshvVcpu,
1218     map: (u64, u64), // Initial GVA to GPA mapping provided by the hypervisor
1219 }
1220 
1221 impl<'a> MshvEmulatorContext<'a> {
1222     // Do the actual gva -> gpa translation
1223     #[allow(non_upper_case_globals)]
1224     fn translate(&self, gva: u64) -> Result<u64, PlatformError> {
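        // Fast path: the intercept message already supplied a GVA -> GPA
        // mapping for this address; reuse it before asking the hypervisor.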
1225         if self.map.0 == gva {
1226             return Ok(self.map.1);
1227         }
1228 
1229         // TODO: More fine-grained control for the flags
1230         let flags = HV_TRANSLATE_GVA_VALIDATE_READ | HV_TRANSLATE_GVA_VALIDATE_WRITE;
1231 
1232         let (gpa, result_code) = self
1233             .vcpu
1234             .translate_gva(gva, flags.into())
1235             .map_err(|e| PlatformError::TranslateVirtualAddress(anyhow!(e)))?;
1236 
1237         match result_code {
1238             hv_translate_gva_result_code_HV_TRANSLATE_GVA_SUCCESS => Ok(gpa),
1239             _ => Err(PlatformError::TranslateVirtualAddress(anyhow!(result_code))),
1240         }
1241     }
1242 }
1243 
1244 /// Platform emulation for Hyper-V
1245 impl<'a> PlatformEmulator for MshvEmulatorContext<'a> {
1246     type CpuState = EmulatorCpuState;
1247 
1248     fn read_memory(&self, gva: u64, data: &mut [u8]) -> Result<(), PlatformError> {
1249         let gpa = self.translate(gva)?;
1250         debug!(
1251             "mshv emulator: memory read {} bytes from [{:#x} -> {:#x}]",
1252             data.len(),
1253             gva,
1254             gpa
1255         );
1256 
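        // Prefer direct guest-memory access; if the GPA is not backed by RAM
        // (e.g. it belongs to an emulated device), fall back to MMIO handling.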
1257         if let Some(vm_ops) = &self.vcpu.vm_ops {
1258             if vm_ops.guest_mem_read(gpa, data).is_err() {
1259                 vm_ops
1260                     .mmio_read(gpa, data)
1261                     .map_err(|e| PlatformError::MemoryReadFailure(e.into()))?;
1262             }
1263         }
1264 
1265         Ok(())
1266     }
1267 
1268     fn write_memory(&mut self, gva: u64, data: &[u8]) -> Result<(), PlatformError> {
1269         let gpa = self.translate(gva)?;
1270         debug!(
1271             "mshv emulator: memory write {} bytes at [{:#x} -> {:#x}]",
1272             data.len(),
1273             gva,
1274             gpa
1275         );
1276 
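        // Mirror the read path: try guest memory first, then fall back to an
        // MMIO write for GPAs not backed by RAM.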
1277         if let Some(vm_ops) = &self.vcpu.vm_ops {
1278             if vm_ops.guest_mem_write(gpa, data).is_err() {
1279                 vm_ops
1280                     .mmio_write(gpa, data)
1281                     .map_err(|e| PlatformError::MemoryWriteFailure(e.into()))?;
1282             }
1283         }
1284 
1285         Ok(())
1286     }
1287 
1288     fn cpu_state(&self, cpu_id: usize) -> Result<Self::CpuState, PlatformError> {
1289         if cpu_id != self.vcpu.vp_index as usize {
1290             return Err(PlatformError::GetCpuStateFailure(anyhow!(
1291                 "CPU id mismatch {:?} {:?}",
1292                 cpu_id,
1293                 self.vcpu.vp_index
1294             )));
1295         }
1296 
1297         let regs = self
1298             .vcpu
1299             .get_regs()
1300             .map_err(|e| PlatformError::GetCpuStateFailure(e.into()))?;
1301         let sregs = self
1302             .vcpu
1303             .get_sregs()
1304             .map_err(|e| PlatformError::GetCpuStateFailure(e.into()))?;
1305 
1306         debug!("mshv emulator: Getting new CPU state");
1307         debug!("mshv emulator: {:#x?}", regs);
1308 
1309         Ok(EmulatorCpuState { regs, sregs })
1310     }
1311 
1312     fn set_cpu_state(&self, cpu_id: usize, state: Self::CpuState) -> Result<(), PlatformError> {
1313         if cpu_id != self.vcpu.vp_index as usize {
1314             return Err(PlatformError::SetCpuStateFailure(anyhow!(
1315                 "CPU id mismatch {:?} {:?}",
1316                 cpu_id,
1317                 self.vcpu.vp_index
1318             )));
1319         }
1320 
1321         debug!("mshv emulator: Setting new CPU state");
1322         debug!("mshv emulator: {:#x?}", state.regs);
1323 
1324         self.vcpu
1325             .set_regs(&state.regs)
1326             .map_err(|e| PlatformError::SetCpuStateFailure(e.into()))?;
1327         self.vcpu
1328             .set_sregs(&state.sregs)
1329             .map_err(|e| PlatformError::SetCpuStateFailure(e.into()))
1330     }
1331 
1332     fn gva_to_gpa(&self, gva: u64) -> Result<u64, PlatformError> {
1333         self.translate(gva)
1334     }
1335 
1336     fn fetch(&self, _ip: u64, _instruction_bytes: &mut [u8]) -> Result<(), PlatformError> {
1337         Err(PlatformError::MemoryReadFailure(anyhow!("unimplemented")))
1338     }
1339 }
1340 
1341 /// Wrapper over Mshv VM ioctls.
1342 pub struct MshvVm {
1343     fd: Arc<VmFd>,
1344     msrs: Vec<MsrEntry>,
1345     dirty_log_slots: Arc<RwLock<HashMap<u64, MshvDirtyLogSlot>>>,
1346 }
1347 
1348 impl MshvVm {
1349     ///
1350     /// Creates an in-kernel device.
1351     ///
1352     /// See the documentation for `MSHV_CREATE_DEVICE`.
1353     fn create_device(&self, device: &mut CreateDevice) -> vm::Result<VfioDeviceFd> {
1354         let device_fd = self
1355             .fd
1356             .create_device(device)
1357             .map_err(|e| vm::HypervisorVmError::CreateDevice(e.into()))?;
1358         Ok(VfioDeviceFd::new_from_mshv(device_fd))
1359     }
1360 }
1361 
1362 ///
1363 /// Implementation of Vm trait for Mshv
1364 ///
1365 /// # Examples
1366 ///
1367 /// ```
1368 /// # extern crate hypervisor;
1369 /// # use hypervisor::mshv::MshvHypervisor;
1370 /// # use std::sync::Arc;
1371 /// let mshv = MshvHypervisor::new().unwrap();
1372 /// let hypervisor = Arc::new(mshv);
1373 /// let vm = hypervisor.create_vm().expect("new VM fd creation failed");
1374 /// ```
1375 impl vm::Vm for MshvVm {
1376     #[cfg(target_arch = "x86_64")]
1377     ///
1378     /// Sets the address of the one-page region in the VM's address space.
1379     ///
1380     fn set_identity_map_address(&self, _address: u64) -> vm::Result<()> {
1381         Ok(())
1382     }
1383     #[cfg(target_arch = "x86_64")]
1384     ///
1385     /// Sets the address of the three-page region in the VM's address space.
1386     ///
1387     fn set_tss_address(&self, _offset: usize) -> vm::Result<()> {
1388         Ok(())
1389     }
1390     ///
1391     /// Creates an in-kernel interrupt controller.
1392     ///
1393     fn create_irq_chip(&self) -> vm::Result<()> {
1394         Ok(())
1395     }
1396     ///
1397     /// Registers an event that will, when signaled, trigger the `gsi` IRQ.
1398     ///
1399     fn register_irqfd(&self, fd: &EventFd, gsi: u32) -> vm::Result<()> {
1400         debug!("register_irqfd fd {} gsi {}", fd.as_raw_fd(), gsi);
1401 
1402         self.fd
1403             .register_irqfd(fd, gsi)
1404             .map_err(|e| vm::HypervisorVmError::RegisterIrqFd(e.into()))?;
1405 
1406         Ok(())
1407     }
1408     ///
1409     /// Unregisters an event that will, when signaled, trigger the `gsi` IRQ.
1410     ///
1411     fn unregister_irqfd(&self, fd: &EventFd, gsi: u32) -> vm::Result<()> {
1412         debug!("unregister_irqfd fd {} gsi {}", fd.as_raw_fd(), gsi);
1413 
1414         self.fd
1415             .unregister_irqfd(fd, gsi)
1416             .map_err(|e| vm::HypervisorVmError::UnregisterIrqFd(e.into()))?;
1417 
1418         Ok(())
1419     }
1420     ///
1421     /// Creates a vcpu for this VM and returns it as a `Vcpu` trait object.
1422     ///
1423     fn create_vcpu(
1424         &self,
1425         id: u8,
1426         vm_ops: Option<Arc<dyn VmOps>>,
1427     ) -> vm::Result<Arc<dyn cpu::Vcpu>> {
1428         let vcpu_fd = self
1429             .fd
1430             .create_vcpu(id)
1431             .map_err(|e| vm::HypervisorVmError::CreateVcpu(e.into()))?;
1432         let vcpu = MshvVcpu {
1433             fd: vcpu_fd,
1434             vp_index: id,
1435             cpuid: Vec::new(),
1436             msrs: self.msrs.clone(),
1437             vm_ops,
1438         };
1439         Ok(Arc::new(vcpu))
1440     }
1441     #[cfg(target_arch = "x86_64")]
1442     fn enable_split_irq(&self) -> vm::Result<()> {
1443         Ok(())
1444     }
1445     #[cfg(target_arch = "x86_64")]
1446     fn enable_sgx_attribute(&self, _file: File) -> vm::Result<()> {
1447         Ok(())
1448     }
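    ///
    /// Registers an event to be signaled when the guest writes to `addr`,
    /// optionally only when the written value matches `datamatch`.
    ///
    /// A sketch of wiring up a doorbell (address and value hypothetical):
    ///
    /// ```ignore
    /// let doorbell = EventFd::new(libc::EFD_NONBLOCK).unwrap();
    /// let addr = IoEventAddress::Mmio(0xd000_0000);
    /// vm.register_ioevent(&doorbell, &addr, Some(DataMatch::DataMatch32(1)))?;
    /// ```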
1449     fn register_ioevent(
1450         &self,
1451         fd: &EventFd,
1452         addr: &IoEventAddress,
1453         datamatch: Option<DataMatch>,
1454     ) -> vm::Result<()> {
1455         let addr = &mshv_ioctls::IoEventAddress::from(*addr);
1456         debug!(
1457             "register_ioevent fd {} addr {:x?} datamatch {:?}",
1458             fd.as_raw_fd(),
1459             addr,
1460             datamatch
1461         );
1462         if let Some(dm) = datamatch {
1463             match dm {
1464                 vm::DataMatch::DataMatch32(mshv_dm32) => self
1465                     .fd
1466                     .register_ioevent(fd, addr, mshv_dm32)
1467                     .map_err(|e| vm::HypervisorVmError::RegisterIoEvent(e.into())),
1468                 vm::DataMatch::DataMatch64(mshv_dm64) => self
1469                     .fd
1470                     .register_ioevent(fd, addr, mshv_dm64)
1471                     .map_err(|e| vm::HypervisorVmError::RegisterIoEvent(e.into())),
1472             }
1473         } else {
1474             self.fd
1475                 .register_ioevent(fd, addr, NoDatamatch)
1476                 .map_err(|e| vm::HypervisorVmError::RegisterIoEvent(e.into()))
1477         }
1478     }
1479     /// Unregisters an event from the address it was previously registered to.
1480     fn unregister_ioevent(&self, fd: &EventFd, addr: &IoEventAddress) -> vm::Result<()> {
1481         let addr = &mshv_ioctls::IoEventAddress::from(*addr);
1482         debug!("unregister_ioevent fd {} addr {:x?}", fd.as_raw_fd(), addr);
1483 
1484         self.fd
1485             .unregister_ioevent(fd, addr, NoDatamatch)
1486             .map_err(|e| vm::HypervisorVmError::UnregisterIoEvent(e.into()))
1487     }
1488 
1489     /// Creates a guest physical memory region.
1490     fn create_user_memory_region(&self, user_memory_region: UserMemoryRegion) -> vm::Result<()> {
1491         let user_memory_region: mshv_user_mem_region = user_memory_region.into();
1492         // We track all slots regardless of whether they are read-only.
1493         // For read-only regions the hypervisor can still enable dirty-bit
1494         // tracking, but a VM exit happens before the dirty bits are set.
1495         self.dirty_log_slots.write().unwrap().insert(
1496             user_memory_region.guest_pfn,
1497             MshvDirtyLogSlot {
1498                 guest_pfn: user_memory_region.guest_pfn,
1499                 memory_size: user_memory_region.size,
1500             },
1501         );
1502 
1503         self.fd
1504             .map_user_memory(user_memory_region)
1505             .map_err(|e| vm::HypervisorVmError::CreateUserMemory(e.into()))?;
1506         Ok(())
1507     }
1508 
1509     /// Removes a guest physical memory region.
1510     fn remove_user_memory_region(&self, user_memory_region: UserMemoryRegion) -> vm::Result<()> {
1511         let user_memory_region: mshv_user_mem_region = user_memory_region.into();
1512         // Remove the corresponding entry from `self.dirty_log_slots` if needed.
1513         self.dirty_log_slots
1514             .write()
1515             .unwrap()
1516             .remove(&user_memory_region.guest_pfn);
1517 
1518         self.fd
1519             .unmap_user_memory(user_memory_region)
1520             .map_err(|e| vm::HypervisorVmError::RemoveUserMemory(e.into()))?;
1521         Ok(())
1522     }
1523 
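    /// Builds a `UserMemoryRegion` carrying MSHV mapping flags.
    ///
    /// A sketch of mapping 2 MiB of writable guest RAM (addresses are
    /// hypothetical, with `host_addr` pointing at a valid allocation):
    ///
    /// ```ignore
    /// let region = vm.make_user_memory_region(
    ///     0,             // slot: unused on MSHV
    ///     0x1_0000_0000, // guest physical address
    ///     0x20_0000,     // size: 2 MiB
    ///     host_addr,     // userspace address
    ///     false,         // not read-only
    ///     false,         // dirty-page logging: unused here
    /// );
    /// vm.create_user_memory_region(region)?;
    /// ```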
1524     fn make_user_memory_region(
1525         &self,
1526         _slot: u32,
1527         guest_phys_addr: u64,
1528         memory_size: u64,
1529         userspace_addr: u64,
1530         readonly: bool,
1531         _log_dirty_pages: bool,
1532     ) -> UserMemoryRegion {
1533         let mut flags = HV_MAP_GPA_READABLE | HV_MAP_GPA_EXECUTABLE;
1534         if !readonly {
1535             flags |= HV_MAP_GPA_WRITABLE;
1536         }
1537 
1538         mshv_user_mem_region {
1539             flags,
1540             guest_pfn: guest_phys_addr >> PAGE_SHIFT,
1541             size: memory_size,
1542             userspace_addr,
1543         }
1544         .into()
1545     }
1546 
1547     fn create_passthrough_device(&self) -> vm::Result<VfioDeviceFd> {
1548         let mut vfio_dev = mshv_create_device {
1549             type_: mshv_device_type_MSHV_DEV_TYPE_VFIO,
1550             fd: 0,
1551             flags: 0,
1552         };
1553 
1554         self.create_device(&mut vfio_dev)
1555             .map_err(|e| vm::HypervisorVmError::CreatePassthroughDevice(e.into()))
1556     }
1557 
1558     ///
1559     /// Constructs a routing entry
1560     ///
1561     fn make_routing_entry(&self, gsi: u32, config: &InterruptSourceConfig) -> IrqRoutingEntry {
1562         match config {
1563             InterruptSourceConfig::MsiIrq(cfg) => mshv_msi_routing_entry {
1564                 gsi,
1565                 address_lo: cfg.low_addr,
1566                 address_hi: cfg.high_addr,
1567                 data: cfg.data,
1568             }
1569             .into(),
1570             _ => {
1571                 unreachable!()
1572             }
1573         }
1574     }
1575 
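    ///
    /// Commits the full set of MSI routing entries in a single call; the
    /// entries are typically built with `make_routing_entry`. A sketch
    /// (`gsi` and `cfg` hypothetical):
    ///
    /// ```ignore
    /// let entry = vm.make_routing_entry(gsi, &InterruptSourceConfig::MsiIrq(cfg));
    /// vm.set_gsi_routing(&[entry])?;
    /// ```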
1576     fn set_gsi_routing(&self, entries: &[IrqRoutingEntry]) -> vm::Result<()> {
1577         let mut msi_routing =
1578             vec_with_array_field::<mshv_msi_routing, mshv_msi_routing_entry>(entries.len());
1579         msi_routing[0].nr = entries.len() as u32;
1580 
1581         let entries: Vec<mshv_msi_routing_entry> = entries
1582             .iter()
1583             .map(|entry| match entry {
1584                 IrqRoutingEntry::Mshv(e) => *e,
1585                 #[allow(unreachable_patterns)]
1586                 _ => panic!("IrqRoutingEntry type is wrong"),
1587             })
1588             .collect();
1589 
1590         // SAFETY: msi_routing initialized with entries.len() and now it is being turned into
1591         // entries_slice with entries.len() again. It is guaranteed to be large enough to hold
1592         // everything from entries.
1593         unsafe {
1594             let entries_slice: &mut [mshv_msi_routing_entry] =
1595                 msi_routing[0].entries.as_mut_slice(entries.len());
1596             entries_slice.copy_from_slice(&entries);
1597         }
1598 
1599         self.fd
1600             .set_msi_routing(&msi_routing[0])
1601             .map_err(|e| vm::HypervisorVmError::SetGsiRouting(e.into()))
1602     }
1603     ///
1604     /// Start logging dirty pages
1605     ///
1606     fn start_dirty_log(&self) -> vm::Result<()> {
1607         self.fd
1608             .enable_dirty_page_tracking()
1609             .map_err(|e| vm::HypervisorVmError::StartDirtyLog(e.into()))
1610     }
1611     ///
1612     /// Stop logging dirty pages
1613     ///
1614     fn stop_dirty_log(&self) -> vm::Result<()> {
1615         let dirty_log_slots = self.dirty_log_slots.read().unwrap();
1616         // Before disabling dirty page tracking we need to set the
1617         // dirty bits in the hypervisor; this is a requirement of the
1618         // Microsoft Hypervisor.
1619         for (_, s) in dirty_log_slots.iter() {
1620             self.fd
1621                 .get_dirty_log(s.guest_pfn, s.memory_size as usize, DIRTY_BITMAP_SET_DIRTY)
1622                 .map_err(|e| vm::HypervisorVmError::StopDirtyLog(e.into()))?;
1623         }
1624         self.fd
1625             .disable_dirty_page_tracking()
1626             .map_err(|e| vm::HypervisorVmError::StopDirtyLog(e.into()))?;
1627         Ok(())
1628     }
1629     ///
1630     /// Get dirty pages bitmap (one bit per page)
1631     ///
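    /// Each returned `u64` covers 64 guest pages; a sketch of counting the
    /// pages dirtied since the last call:
    ///
    /// ```ignore
    /// let bitmap = vm.get_dirty_log(0, base_gpa, memory_size)?;
    /// let dirty_pages: u32 = bitmap.iter().map(|w| w.count_ones()).sum();
    /// ```
    ///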
1632     fn get_dirty_log(&self, _slot: u32, base_gpa: u64, memory_size: u64) -> vm::Result<Vec<u64>> {
1633         self.fd
1634             .get_dirty_log(
1635                 base_gpa >> PAGE_SHIFT,
1636                 memory_size as usize,
1637                 DIRTY_BITMAP_CLEAR_DIRTY,
1638             )
1639             .map_err(|e| vm::HypervisorVmError::GetDirtyLog(e.into()))
1640     }
1641     /// Retrieve guest clock.
1642     #[cfg(target_arch = "x86_64")]
1643     fn get_clock(&self) -> vm::Result<ClockData> {
1644         Ok(ClockData::Mshv)
1645     }
1646     /// Set guest clock.
1647     #[cfg(target_arch = "x86_64")]
1648     fn set_clock(&self, _data: &ClockData) -> vm::Result<()> {
1649         Ok(())
1650     }
1651     /// Downcast to the underlying MshvVm type
1652     fn as_any(&self) -> &dyn Any {
1653         self
1654     }
1655     /// Initialize the SEV-SNP VM
1656     #[cfg(feature = "sev_snp")]
1657     fn sev_snp_init(&self) -> vm::Result<()> {
1658         self.fd
1659             .set_partition_property(
1660                 hv_partition_property_code_HV_PARTITION_PROPERTY_ISOLATION_STATE,
1661                 hv_partition_isolation_state_HV_PARTITION_ISOLATION_SECURE as u64,
1662             )
1663             .map_err(|e| vm::HypervisorVmError::InitializeSevSnp(e.into()))
1664     }
1665 
1666     #[cfg(feature = "sev_snp")]
1667     fn import_isolated_pages(
1668         &self,
1669         page_type: u32,
1670         page_size: u32,
1671         pages: &[u64],
1672     ) -> vm::Result<()> {
1673         if pages.is_empty() {
1674             return Ok(());
1675         }
1676 
1677         let mut isolated_pages =
1678             vec_with_array_field::<mshv_import_isolated_pages, u64>(pages.len());
1679         isolated_pages[0].num_pages = pages.len() as u64;
1680         isolated_pages[0].page_type = page_type;
1681         isolated_pages[0].page_size = page_size;
1682         // SAFETY: isolated_pages initialized with pages.len() and now it is being turned into
1683         // pages_slice with pages.len() again. It is guaranteed to be large enough to hold
1684         // everything from pages.
1685         unsafe {
1686             let pages_slice: &mut [u64] = isolated_pages[0].page_number.as_mut_slice(pages.len());
1687             pages_slice.copy_from_slice(pages);
1688         }
1689         self.fd
1690             .import_isolated_pages(&isolated_pages[0])
1691             .map_err(|e| vm::HypervisorVmError::ImportIsolatedPages(e.into()))
1692     }
1693     #[cfg(feature = "sev_snp")]
1694     fn complete_isolated_import(
1695         &self,
1696         snp_id_block: IGVM_VHS_SNP_ID_BLOCK,
1697         host_data: &[u8],
1698         id_block_enabled: u8,
1699     ) -> vm::Result<()> {
1700         let mut auth_info = hv_snp_id_auth_info {
1701             id_key_algorithm: snp_id_block.id_key_algorithm,
1702             auth_key_algorithm: snp_id_block.author_key_algorithm,
1703             ..Default::default()
1704         };
1705         // Each of the r/s components is 576 bits (72 bytes) long
1706         auth_info.id_block_signature[..SIG_R_COMPONENT_SIZE_IN_BYTES]
1707             .copy_from_slice(snp_id_block.id_key_signature.r_comp.as_ref());
1708         auth_info.id_block_signature
1709             [SIG_R_COMPONENT_SIZE_IN_BYTES..SIG_R_AND_S_COMPONENT_SIZE_IN_BYTES]
1710             .copy_from_slice(snp_id_block.id_key_signature.s_comp.as_ref());
1711         auth_info.id_key[..ECDSA_CURVE_ID_SIZE_IN_BYTES]
1712             .copy_from_slice(snp_id_block.id_public_key.curve.to_le_bytes().as_ref());
1713         auth_info.id_key[ECDSA_SIG_X_COMPONENT_START..ECDSA_SIG_X_COMPONENT_END]
1714             .copy_from_slice(snp_id_block.id_public_key.qx.as_ref());
1715         auth_info.id_key[ECDSA_SIG_Y_COMPONENT_START..ECDSA_SIG_Y_COMPONENT_END]
1716             .copy_from_slice(snp_id_block.id_public_key.qy.as_ref());
1717 
1718         let data = mshv_complete_isolated_import {
1719             import_data: hv_partition_complete_isolated_import_data {
1720                 psp_parameters: hv_psp_launch_finish_data {
1721                     id_block: hv_snp_id_block {
1722                         launch_digest: snp_id_block.ld,
1723                         family_id: snp_id_block.family_id,
1724                         image_id: snp_id_block.image_id,
1725                         version: snp_id_block.version,
1726                         guest_svn: snp_id_block.guest_svn,
1727                         policy: get_default_snp_guest_policy(),
1728                     },
1729                     id_auth_info: auth_info,
1730                     host_data: host_data[0..32].try_into().unwrap(),
1731                     id_block_enabled,
1732                     author_key_enabled: 0,
1733                 },
1734             },
1735         };
1736         self.fd
1737             .complete_isolated_import(&data)
1738             .map_err(|e| vm::HypervisorVmError::CompleteIsolatedImport(e.into()))
1739     }
1740 }
1741