xref: /cloud-hypervisor/hypervisor/src/mshv/mod.rs (revision fa2b5ca12b35b23c9def34e0d03c2050f247451f)
// SPDX-License-Identifier: Apache-2.0 OR BSD-3-Clause
//
// Copyright © 2020, Microsoft Corporation
//

use std::any::Any;
use std::collections::HashMap;
#[cfg(feature = "sev_snp")]
use std::num::NonZeroUsize;
use std::sync::{Arc, RwLock};

#[cfg(feature = "sev_snp")]
use arc_swap::ArcSwap;
use mshv_bindings::*;
#[cfg(target_arch = "x86_64")]
use mshv_ioctls::InterruptRequest;
use mshv_ioctls::{set_registers_64, Mshv, NoDatamatch, VcpuFd, VmFd, VmType};
use vfio_ioctls::VfioDeviceFd;
use vm::DataMatch;
#[cfg(feature = "sev_snp")]
use vm_memory::bitmap::AtomicBitmap;

#[cfg(target_arch = "x86_64")]
use crate::arch::emulator::PlatformEmulator;
#[cfg(target_arch = "x86_64")]
use crate::arch::x86::emulator::Emulator;
#[cfg(target_arch = "aarch64")]
use crate::mshv::aarch64::emulator;
use crate::mshv::emulator::MshvEmulatorContext;
use crate::vm::{self, InterruptSourceConfig, VmOps};
use crate::{cpu, hypervisor, vec_with_array_field, HypervisorType};
#[cfg(feature = "sev_snp")]
mod snp_constants;
// x86_64 dependencies
#[cfg(target_arch = "x86_64")]
pub mod x86_64;
// aarch64 dependencies
#[cfg(target_arch = "aarch64")]
pub mod aarch64;
#[cfg(target_arch = "x86_64")]
use std::fs::File;
use std::os::unix::io::AsRawFd;
#[cfg(target_arch = "aarch64")]
use std::sync::Mutex;

#[cfg(target_arch = "aarch64")]
use aarch64::gic::MshvGicV2M;
#[cfg(target_arch = "aarch64")]
pub use aarch64::VcpuMshvState;
#[cfg(feature = "sev_snp")]
use igvm_defs::IGVM_VHS_SNP_ID_BLOCK;
#[cfg(feature = "sev_snp")]
use snp_constants::*;
use vmm_sys_util::eventfd::EventFd;
#[cfg(target_arch = "x86_64")]
pub use x86_64::*;
#[cfg(target_arch = "x86_64")]
pub use x86_64::{emulator, VcpuMshvState};
///
/// Export generically-named wrappers of mshv-bindings for Unix-based platforms
///
pub use {
    mshv_bindings::mshv_create_device as CreateDevice,
    mshv_bindings::mshv_device_attr as DeviceAttr, mshv_ioctls, mshv_ioctls::DeviceFd,
};

#[cfg(target_arch = "aarch64")]
use crate::arch::aarch64::gic::{Vgic, VgicConfig};
#[cfg(target_arch = "aarch64")]
use crate::arch::aarch64::regs;
#[cfg(target_arch = "x86_64")]
use crate::arch::x86::{CpuIdEntry, FpuState, MsrEntry};
#[cfg(target_arch = "x86_64")]
use crate::ClockData;
use crate::{
    CpuState, IoEventAddress, IrqRoutingEntry, MpState, UserMemoryRegion,
    USER_MEMORY_REGION_ADJUSTABLE, USER_MEMORY_REGION_EXECUTE, USER_MEMORY_REGION_READ,
    USER_MEMORY_REGION_WRITE,
};

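/// Shift between guest page frame numbers and byte addresses (4 KiB pages).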
pub const PAGE_SHIFT: usize = 12;

impl From<mshv_user_mem_region> for UserMemoryRegion {
    fn from(region: mshv_user_mem_region) -> Self {
        let mut flags: u32 = USER_MEMORY_REGION_READ | USER_MEMORY_REGION_ADJUSTABLE;
        if region.flags & (1 << MSHV_SET_MEM_BIT_WRITABLE) != 0 {
            flags |= USER_MEMORY_REGION_WRITE;
        }
        if region.flags & (1 << MSHV_SET_MEM_BIT_EXECUTABLE) != 0 {
            flags |= USER_MEMORY_REGION_EXECUTE;
        }

        UserMemoryRegion {
            guest_phys_addr: (region.guest_pfn << PAGE_SHIFT as u64)
                + (region.userspace_addr & ((1 << PAGE_SHIFT) - 1)),
            memory_size: region.size,
            userspace_addr: region.userspace_addr,
            flags,
            ..Default::default()
        }
    }
}

#[cfg(target_arch = "x86_64")]
impl From<MshvClockData> for ClockData {
    fn from(d: MshvClockData) -> Self {
        ClockData::Mshv(d)
    }
}

#[cfg(target_arch = "x86_64")]
impl From<ClockData> for MshvClockData {
    fn from(ms: ClockData) -> Self {
        match ms {
            ClockData::Mshv(s) => s,
            /* Needed in case other hypervisors are enabled */
            #[allow(unreachable_patterns)]
            _ => unreachable!("MSHV clock data is not valid"),
        }
    }
}

impl From<UserMemoryRegion> for mshv_user_mem_region {
    fn from(region: UserMemoryRegion) -> Self {
        let mut flags: u8 = 0;
        if region.flags & USER_MEMORY_REGION_WRITE != 0 {
            flags |= 1 << MSHV_SET_MEM_BIT_WRITABLE;
        }
        if region.flags & USER_MEMORY_REGION_EXECUTE != 0 {
            flags |= 1 << MSHV_SET_MEM_BIT_EXECUTABLE;
        }

        mshv_user_mem_region {
            guest_pfn: region.guest_phys_addr >> PAGE_SHIFT,
            size: region.memory_size,
            userspace_addr: region.userspace_addr,
            flags,
            ..Default::default()
        }
    }
}
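
// Illustrative sketch (not in the original source): the two conversions above
// are inverses up to sub-page offsets. Assuming the bindgen constants and
// Default impls used elsewhere in this module, a round trip looks like:
//
//     let region = mshv_user_mem_region {
//         guest_pfn: 0x100,
//         size: 0x2000,
//         userspace_addr: 0x7f00_0000_0123,
//         flags: 1 << MSHV_SET_MEM_BIT_WRITABLE,
//         ..Default::default()
//     };
//     let umr: UserMemoryRegion = region.into();
//     // The sub-page offset of userspace_addr is folded into the GPA:
//     assert_eq!(umr.guest_phys_addr, (0x100u64 << PAGE_SHIFT) + 0x123);
//     // Converting back drops the offset again (guest_pfn = GPA >> PAGE_SHIFT):
//     let back: mshv_user_mem_region = umr.into();
//     assert_eq!(back.guest_pfn, 0x100);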

impl From<mshv_ioctls::IoEventAddress> for IoEventAddress {
    fn from(a: mshv_ioctls::IoEventAddress) -> Self {
        match a {
            mshv_ioctls::IoEventAddress::Pio(x) => Self::Pio(x),
            mshv_ioctls::IoEventAddress::Mmio(x) => Self::Mmio(x),
        }
    }
}

impl From<IoEventAddress> for mshv_ioctls::IoEventAddress {
    fn from(a: IoEventAddress) -> Self {
        match a {
            IoEventAddress::Pio(x) => Self::Pio(x),
            IoEventAddress::Mmio(x) => Self::Mmio(x),
        }
    }
}

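// The From impls below wrap MSHV-specific state in the hypervisor-agnostic
// enums (CpuState, StandardRegisters, IrqRoutingEntry, ...) and unwrap it
// again. The panics in the unwrap direction can only fire if state produced
// by another enabled hypervisor (e.g. KVM) is fed back into MSHV.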
impl From<VcpuMshvState> for CpuState {
    fn from(s: VcpuMshvState) -> Self {
        CpuState::Mshv(s)
    }
}

impl From<CpuState> for VcpuMshvState {
    fn from(s: CpuState) -> Self {
        match s {
            CpuState::Mshv(s) => s,
            /* Needed in case other hypervisors are enabled */
            #[allow(unreachable_patterns)]
            _ => panic!("CpuState is not valid"),
        }
    }
}

impl From<mshv_bindings::StandardRegisters> for crate::StandardRegisters {
    fn from(s: mshv_bindings::StandardRegisters) -> Self {
        crate::StandardRegisters::Mshv(s)
    }
}

impl From<crate::StandardRegisters> for mshv_bindings::StandardRegisters {
    fn from(e: crate::StandardRegisters) -> Self {
        match e {
            crate::StandardRegisters::Mshv(e) => e,
            /* Needed in case other hypervisors are enabled */
            #[allow(unreachable_patterns)]
            _ => panic!("StandardRegisters are not valid"),
        }
    }
}

impl From<mshv_user_irq_entry> for IrqRoutingEntry {
    fn from(s: mshv_user_irq_entry) -> Self {
        IrqRoutingEntry::Mshv(s)
    }
}

impl From<IrqRoutingEntry> for mshv_user_irq_entry {
    fn from(e: IrqRoutingEntry) -> Self {
        match e {
            IrqRoutingEntry::Mshv(e) => e,
            /* Needed in case other hypervisors are enabled */
            #[allow(unreachable_patterns)]
            _ => panic!("IrqRoutingEntry is not valid"),
        }
    }
}

#[cfg(target_arch = "aarch64")]
impl From<mshv_bindings::MshvRegList> for crate::RegList {
    fn from(s: mshv_bindings::MshvRegList) -> Self {
        crate::RegList::Mshv(s)
    }
}

#[cfg(target_arch = "aarch64")]
impl From<crate::RegList> for mshv_bindings::MshvRegList {
    fn from(e: crate::RegList) -> Self {
        match e {
            crate::RegList::Mshv(e) => e,
            /* Needed in case other hypervisors are enabled */
            #[allow(unreachable_patterns)]
            _ => panic!("RegList is not valid"),
        }
    }
}

#[cfg(target_arch = "aarch64")]
impl From<mshv_bindings::MshvVcpuInit> for crate::VcpuInit {
    fn from(s: mshv_bindings::MshvVcpuInit) -> Self {
        crate::VcpuInit::Mshv(s)
    }
}

#[cfg(target_arch = "aarch64")]
impl From<crate::VcpuInit> for mshv_bindings::MshvVcpuInit {
    fn from(e: crate::VcpuInit) -> Self {
        match e {
            crate::VcpuInit::Mshv(e) => e,
            /* Needed in case other hypervisors are enabled */
            #[allow(unreachable_patterns)]
            _ => panic!("VcpuInit is not valid"),
        }
    }
}

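/// Bookkeeping for a guest memory slot whose dirty-page log is tracked.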
struct MshvDirtyLogSlot {
    guest_pfn: u64,
    memory_size: u64,
}

/// Wrapper over mshv system ioctls.
pub struct MshvHypervisor {
    mshv: Mshv,
}

impl MshvHypervisor {
    #[cfg(target_arch = "x86_64")]
    ///
    /// Retrieve the list of MSRs supported by MSHV.
    ///
    fn get_msr_list(&self) -> hypervisor::Result<Vec<u32>> {
        self.mshv
            .get_msr_index_list()
            .map_err(|e| hypervisor::HypervisorError::GetMsrList(e.into()))
    }

    fn create_vm_with_type_and_memory_int(
        &self,
        vm_type: u64,
        #[cfg(feature = "sev_snp")] _mem_size: Option<u64>,
    ) -> hypervisor::Result<Arc<dyn crate::Vm>> {
        let mshv_vm_type: VmType = match VmType::try_from(vm_type) {
            Ok(vm_type) => vm_type,
            Err(_) => return Err(hypervisor::HypervisorError::UnsupportedVmType()),
        };
        let fd: VmFd;
        loop {
            match self.mshv.create_vm_with_type(mshv_vm_type) {
                Ok(res) => fd = res,
                Err(e) => {
                    if e.errno() == libc::EINTR {
                        // If the error is EINTR, the ioctl was interrupted and
                        // we have to retry, as this can't be considered a
                        // regular error.
                        continue;
                    } else {
                        return Err(hypervisor::HypervisorError::VmCreate(e.into()));
                    }
                }
            }
            break;
        }

        // Set additional partition property for SEV-SNP partition.
        #[cfg(target_arch = "x86_64")]
        if mshv_vm_type == VmType::Snp {
            let snp_policy = snp::get_default_snp_guest_policy();
            let vmgexit_offloads = snp::get_default_vmgexit_offload_features();
            // SAFETY: access union fields
            unsafe {
                debug!(
                    "Setting the partition isolation policy as: 0x{:x}",
                    snp_policy.as_uint64
                );
                fd.set_partition_property(
                    hv_partition_property_code_HV_PARTITION_PROPERTY_ISOLATION_POLICY,
                    snp_policy.as_uint64,
                )
                .map_err(|e| hypervisor::HypervisorError::SetPartitionProperty(e.into()))?;
                debug!(
                    "Setting the partition property to enable VMGEXIT offloads as: 0x{:x}",
                    vmgexit_offloads.as_uint64
                );
                fd.set_partition_property(
                    hv_partition_property_code_HV_PARTITION_PROPERTY_SEV_VMGEXIT_OFFLOADS,
                    vmgexit_offloads.as_uint64,
                )
                .map_err(|e| hypervisor::HypervisorError::SetPartitionProperty(e.into()))?;
            }
        }

        // The default Microsoft Hypervisor behavior for an unimplemented MSR
        // is to send a fault to the guest when it tries to access it. It is
        // possible to override this behavior with a more suitable option,
        // i.e. ignore writes from the guest and return zero when the guest
        // attempts to read an unimplemented MSR.
        #[cfg(target_arch = "x86_64")]
        fd.set_partition_property(
            hv_partition_property_code_HV_PARTITION_PROPERTY_UNIMPLEMENTED_MSR_ACTION,
            hv_unimplemented_msr_action_HV_UNIMPLEMENTED_MSR_ACTION_IGNORE_WRITE_READ_ZERO as u64,
        )
        .map_err(|e| hypervisor::HypervisorError::SetPartitionProperty(e.into()))?;

        // Always create a frozen partition
        fd.set_partition_property(
            hv_partition_property_code_HV_PARTITION_PROPERTY_TIME_FREEZE,
            1u64,
        )
        .map_err(|e| hypervisor::HypervisorError::SetPartitionProperty(e.into()))?;

        let vm_fd = Arc::new(fd);

        #[cfg(target_arch = "x86_64")]
        {
            let msr_list = self.get_msr_list()?;
            let mut msrs: Vec<MsrEntry> = vec![
                MsrEntry {
                    ..Default::default()
                };
                msr_list.len()
            ];
            for (pos, index) in msr_list.iter().enumerate() {
                msrs[pos].index = *index;
            }

            Ok(Arc::new(MshvVm {
                fd: vm_fd,
                msrs,
                dirty_log_slots: Arc::new(RwLock::new(HashMap::new())),
                #[cfg(feature = "sev_snp")]
                sev_snp_enabled: mshv_vm_type == VmType::Snp,
                #[cfg(feature = "sev_snp")]
                host_access_pages: ArcSwap::new(
                    AtomicBitmap::new(
                        _mem_size.unwrap_or_default() as usize,
                        NonZeroUsize::new(HV_PAGE_SIZE).unwrap(),
                    )
                    .into(),
                ),
            }))
        }

        #[cfg(target_arch = "aarch64")]
        {
            Ok(Arc::new(MshvVm {
                fd: vm_fd,
                dirty_log_slots: Arc::new(RwLock::new(HashMap::new())),
            }))
        }
    }
}

impl MshvHypervisor {
    /// Create a hypervisor based on Mshv
    #[allow(clippy::new_ret_no_self)]
    pub fn new() -> hypervisor::Result<Arc<dyn hypervisor::Hypervisor>> {
        let mshv_obj =
            Mshv::new().map_err(|e| hypervisor::HypervisorError::HypervisorCreate(e.into()))?;
        Ok(Arc::new(MshvHypervisor { mshv: mshv_obj }))
    }

    /// Check if the hypervisor is available
    pub fn is_available() -> hypervisor::Result<bool> {
        match std::fs::metadata("/dev/mshv") {
            Ok(_) => Ok(true),
            Err(err) if err.kind() == std::io::ErrorKind::NotFound => Ok(false),
            Err(err) => Err(hypervisor::HypervisorError::HypervisorAvailableCheck(
                err.into(),
            )),
        }
    }
}

/// Implementation of Hypervisor trait for Mshv
///
/// # Examples
///
/// ```
/// use hypervisor::mshv::MshvHypervisor;
/// use std::sync::Arc;
/// let mshv = MshvHypervisor::new().unwrap();
/// let hypervisor = Arc::new(mshv);
/// let vm = hypervisor.create_vm().expect("new VM fd creation failed");
/// ```
impl hypervisor::Hypervisor for MshvHypervisor {
    ///
    /// Returns the type of the hypervisor
    ///
    fn hypervisor_type(&self) -> HypervisorType {
        HypervisorType::Mshv
    }

    ///
    /// Create a Vm of a specific type using the underlying hypervisor, passing
    /// the memory size (the size argument is only present when the `sev_snp`
    /// feature is enabled). Return a hypervisor-agnostic Vm trait object.
    ///
    /// # Examples
    ///
    /// ```
    /// use hypervisor::mshv::MshvHypervisor;
    /// let hypervisor = MshvHypervisor::new().unwrap();
    /// #[cfg(feature = "sev_snp")]
    /// let vm = hypervisor
    ///     .create_vm_with_type_and_memory(0, 512 * 1024 * 1024)
    ///     .unwrap();
    /// ```
    fn create_vm_with_type_and_memory(
        &self,
        vm_type: u64,
        #[cfg(feature = "sev_snp")] _mem_size: u64,
    ) -> hypervisor::Result<Arc<dyn vm::Vm>> {
        self.create_vm_with_type_and_memory_int(
            vm_type,
            #[cfg(feature = "sev_snp")]
            Some(_mem_size),
        )
    }

    fn create_vm_with_type(&self, vm_type: u64) -> hypervisor::Result<Arc<dyn crate::Vm>> {
        self.create_vm_with_type_and_memory_int(
            vm_type,
            #[cfg(feature = "sev_snp")]
            None,
        )
    }

    /// Create a mshv vm object and return the object as Vm trait object
    ///
    /// # Examples
    ///
    /// ```
    /// # extern crate hypervisor;
    /// use hypervisor::mshv::MshvHypervisor;
    /// use hypervisor::mshv::MshvVm;
    /// let hypervisor = MshvHypervisor::new().unwrap();
    /// let vm = hypervisor.create_vm().unwrap();
    /// ```
    fn create_vm(&self) -> hypervisor::Result<Arc<dyn vm::Vm>> {
        let vm_type = 0;
        self.create_vm_with_type(vm_type)
    }

    #[cfg(target_arch = "x86_64")]
    ///
    /// Get the supported CpuID
    ///
    fn get_supported_cpuid(&self) -> hypervisor::Result<Vec<CpuIdEntry>> {
        let mut cpuid = Vec::new();
        let functions: [u32; 2] = [0x1, 0xb];

        for function in functions {
            cpuid.push(CpuIdEntry {
                function,
                ..Default::default()
            });
        }
        Ok(cpuid)
    }

    /// Get maximum number of vCPUs
    fn get_max_vcpus(&self) -> u32 {
        // TODO: Using HV_MAXIMUM_PROCESSORS would be better
        // but the ioctl API is limited to u8
        256
    }

    fn get_guest_debug_hw_bps(&self) -> usize {
        0
    }

    #[cfg(target_arch = "aarch64")]
    ///
    /// Retrieve AArch64 host maximum IPA size supported by MSHV.
    ///
    fn get_host_ipa_limit(&self) -> i32 {
        let host_ipa = self.mshv.get_host_partition_property(
            hv_partition_property_code_HV_PARTITION_PROPERTY_PHYSICAL_ADDRESS_WIDTH,
        );

        match host_ipa {
            Ok(ipa) => ipa.try_into().unwrap(),
            Err(e) => {
                panic!("Failed to get host IPA limit: {:?}", e);
            }
        }
    }
}
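
// Illustrative sketch (not part of the original source): probing for MSHV
// before creating a VM. This requires /dev/mshv to exist at runtime.
//
//     if MshvHypervisor::is_available().unwrap_or(false) {
//         let hv = MshvHypervisor::new().unwrap();
//         let _vm = hv.create_vm().unwrap();
//     }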

#[cfg(feature = "sev_snp")]
struct Ghcb(*mut svm_ghcb_base);

#[cfg(feature = "sev_snp")]
// SAFETY: the struct wraps a pointer to the GHCB page in the hypervisor,
// so it is safe to send across threads.
unsafe impl Send for Ghcb {}

#[cfg(feature = "sev_snp")]
// SAFETY: Sync is only required to satisfy the Vcpu trait bounds; the GHCB
// pointer is not functionally accessed from multiple threads concurrently.
unsafe impl Sync for Ghcb {}

/// Vcpu struct for Microsoft Hypervisor
#[allow(dead_code)]
pub struct MshvVcpu {
    /// Underlying MSHV vCPU fd
    fd: VcpuFd,
    /// Index of this virtual processor within the partition
    vp_index: u8,
    /// Cached CPUID entries for this vCPU
    #[cfg(target_arch = "x86_64")]
    cpuid: Vec<CpuIdEntry>,
    /// MSR entries supported by the partition
    #[cfg(target_arch = "x86_64")]
    msrs: Vec<MsrEntry>,
    /// Callbacks used to forward PIO/MMIO accesses during vCPU exits
    vm_ops: Option<Arc<dyn vm::VmOps>>,
    /// Partition (VM) fd this vCPU belongs to
    vm_fd: Arc<VmFd>,
    /// GHCB page registered by a SEV-SNP guest, if any
    #[cfg(feature = "sev_snp")]
    ghcb: Option<Ghcb>,
    /// Bitmap of guest pages currently accessible to the host
    #[cfg(feature = "sev_snp")]
    host_access_pages: ArcSwap<AtomicBitmap>,
}

/// Implementation of Vcpu trait for Microsoft Hypervisor
///
/// # Examples
///
/// ```
/// use hypervisor::mshv::MshvHypervisor;
/// use std::sync::Arc;
/// let mshv = MshvHypervisor::new().unwrap();
/// let hypervisor = Arc::new(mshv);
/// let vm = hypervisor.create_vm().expect("new VM fd creation failed");
/// let vcpu = vm.create_vcpu(0, None).unwrap();
/// ```
impl cpu::Vcpu for MshvVcpu {
    ///
    /// Returns StandardRegisters with default value set
    ///
    fn create_standard_regs(&self) -> crate::StandardRegisters {
        mshv_bindings::StandardRegisters::default().into()
    }

    ///
    /// Returns the vCPU general purpose registers.
    ///
    fn get_regs(&self) -> cpu::Result<crate::StandardRegisters> {
        Ok(self
            .fd
            .get_regs()
            .map_err(|e| cpu::HypervisorCpuError::GetStandardRegs(e.into()))?
            .into())
    }

    ///
    /// Sets the vCPU general purpose registers.
    ///
    fn set_regs(&self, regs: &crate::StandardRegisters) -> cpu::Result<()> {
        let regs = (*regs).into();
        self.fd
            .set_regs(&regs)
            .map_err(|e| cpu::HypervisorCpuError::SetStandardRegs(e.into()))
    }

    #[cfg(target_arch = "x86_64")]
    ///
    /// Returns the vCPU special registers.
    ///
    fn get_sregs(&self) -> cpu::Result<crate::arch::x86::SpecialRegisters> {
        Ok(self
            .fd
            .get_sregs()
            .map_err(|e| cpu::HypervisorCpuError::GetSpecialRegs(e.into()))?
            .into())
    }

    #[cfg(target_arch = "x86_64")]
    ///
    /// Sets the vCPU special registers.
    ///
    fn set_sregs(&self, sregs: &crate::arch::x86::SpecialRegisters) -> cpu::Result<()> {
        let sregs = (*sregs).into();
        self.fd
            .set_sregs(&sregs)
            .map_err(|e| cpu::HypervisorCpuError::SetSpecialRegs(e.into()))
    }

    #[cfg(target_arch = "x86_64")]
    ///
    /// Returns the floating point state (FPU) from the vCPU.
    ///
    fn get_fpu(&self) -> cpu::Result<FpuState> {
        Ok(self
            .fd
            .get_fpu()
            .map_err(|e| cpu::HypervisorCpuError::GetFloatingPointRegs(e.into()))?
            .into())
    }

    #[cfg(target_arch = "x86_64")]
    ///
    /// Set the floating point state (FPU) of a vCPU.
    ///
    fn set_fpu(&self, fpu: &FpuState) -> cpu::Result<()> {
        let fpu: mshv_bindings::FloatingPointUnit = (*fpu).clone().into();
        self.fd
            .set_fpu(&fpu)
            .map_err(|e| cpu::HypervisorCpuError::SetFloatingPointRegs(e.into()))
    }

    #[cfg(target_arch = "x86_64")]
    ///
    /// Returns the model-specific registers (MSR) for this vCPU.
    ///
    fn get_msrs(&self, msrs: &mut Vec<MsrEntry>) -> cpu::Result<usize> {
        let mshv_msrs: Vec<msr_entry> = msrs.iter().map(|e| (*e).into()).collect();
        let mut mshv_msrs = MsrEntries::from_entries(&mshv_msrs).unwrap();
        let succ = self
            .fd
            .get_msrs(&mut mshv_msrs)
            .map_err(|e| cpu::HypervisorCpuError::GetMsrEntries(e.into()))?;

        msrs[..succ].copy_from_slice(
            &mshv_msrs.as_slice()[..succ]
                .iter()
                .map(|e| (*e).into())
                .collect::<Vec<MsrEntry>>(),
        );

        Ok(succ)
    }

    #[cfg(target_arch = "x86_64")]
    ///
    /// Set up the model-specific registers (MSR) for this vCPU.
    /// Returns the number of MSR entries actually written.
    ///
    fn set_msrs(&self, msrs: &[MsrEntry]) -> cpu::Result<usize> {
        let mshv_msrs: Vec<msr_entry> = msrs.iter().map(|e| (*e).into()).collect();
        let mshv_msrs = MsrEntries::from_entries(&mshv_msrs).unwrap();
        self.fd
            .set_msrs(&mshv_msrs)
            .map_err(|e| cpu::HypervisorCpuError::SetMsrEntries(e.into()))
    }

    #[cfg(target_arch = "x86_64")]
    ///
    /// X86 specific call to enable HyperV SynIC
    ///
    fn enable_hyperv_synic(&self) -> cpu::Result<()> {
        /* SynIC is always enabled on MSHV */
        Ok(())
    }

    #[allow(non_upper_case_globals)]
    fn run(&self) -> std::result::Result<cpu::VmExit, cpu::HypervisorCpuError> {
        match self.fd.run() {
            Ok(x) => match x.header.message_type {
                hv_message_type_HVMSG_X64_HALT => {
                    debug!("HALT");
                    Ok(cpu::VmExit::Reset)
                }
                hv_message_type_HVMSG_UNRECOVERABLE_EXCEPTION => {
                    warn!("TRIPLE FAULT");
                    Ok(cpu::VmExit::Shutdown)
                }
                #[cfg(target_arch = "x86_64")]
                hv_message_type_HVMSG_X64_IO_PORT_INTERCEPT => {
                    let info = x.to_ioport_info().unwrap();
                    let access_info = info.access_info;
                    // SAFETY: access_info is valid, otherwise we won't be here
                    let len = unsafe { access_info.__bindgen_anon_1.access_size() } as usize;
                    let is_write = info.header.intercept_access_type == 1;
                    let port = info.port_number;
                    let mut data: [u8; 4] = [0; 4];
                    let mut ret_rax = info.rax;

                    /*
                     * XXX: Ignore QEMU fw_cfg (0x5xx) and debug console (0x402) ports.
                     *
                     * Cloud Hypervisor doesn't support fw_cfg at the moment. It does support 0x402
                     * under the "fwdebug" feature flag, but that feature is not enabled by default
                     * and is considered legacy.
                     *
                     * OVMF unconditionally pokes these IO ports with string IO.
                     *
                     * Instead of implementing string IO support now, which would not accomplish
                     * much at this point, skip these ports explicitly to avoid panicking.
                     *
                     * Proper string IO support can be added once we gain the ability to translate
                     * guest virtual addresses to guest physical addresses on MSHV.
                     */
                    match port {
                        0x402 | 0x510 | 0x511 | 0x514 => {
                            self.advance_rip_update_rax(&info, ret_rax)?;
                            return Ok(cpu::VmExit::Ignore);
                        }
                        _ => {}
                    }

                    assert!(
                        // SAFETY: access_info is valid, otherwise we won't be here
                        (unsafe { access_info.__bindgen_anon_1.string_op() } != 1),
                        "String IN/OUT not supported"
                    );
                    assert!(
                        // SAFETY: access_info is valid, otherwise we won't be here
                        (unsafe { access_info.__bindgen_anon_1.rep_prefix() } != 1),
                        "Rep IN/OUT not supported"
                    );

                    if is_write {
                        let data = (info.rax as u32).to_le_bytes();
                        if let Some(vm_ops) = &self.vm_ops {
                            vm_ops
                                .pio_write(port.into(), &data[0..len])
                                .map_err(|e| cpu::HypervisorCpuError::RunVcpu(e.into()))?;
                        }
                    } else {
                        if let Some(vm_ops) = &self.vm_ops {
                            vm_ops
                                .pio_read(port.into(), &mut data[0..len])
                                .map_err(|e| cpu::HypervisorCpuError::RunVcpu(e.into()))?;
                        }

                        let v = u32::from_le_bytes(data);
                        /* Preserve high bits in EAX but clear out high bits in RAX */
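                        // Worked example (illustrative): for a 1-byte IN with
                        // rax = 0x1234 and v = 0xab, mask = 0xff, so
                        // eax = (0x1234 & !0xff) | (0xab & 0xff) = 0x12ab and
                        // ret_rax = 0x12ab, with RAX bits 63:32 cleared.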
                        let mask = 0xffffffff >> (32 - len * 8);
                        let eax = (info.rax as u32 & !mask) | (v & mask);
                        ret_rax = eax as u64;
                    }

                    self.advance_rip_update_rax(&info, ret_rax)?;
                    Ok(cpu::VmExit::Ignore)
                }
                #[cfg(target_arch = "aarch64")]
                hv_message_type_HVMSG_UNMAPPED_GPA => {
                    let info = x.to_memory_info().unwrap();
                    let gva = info.guest_virtual_address;
                    let gpa = info.guest_physical_address;

                    debug!("Unmapped GPA exit: GVA {:x} GPA {:x}", gva, gpa);

                    let context = MshvEmulatorContext {
                        vcpu: self,
                        map: (gva, gpa),
                        syndrome: info.syndrome,
                        instruction_bytes: info.instruction_bytes,
                        instruction_byte_count: info.instruction_byte_count,
                        // SAFETY: Accessing a union element from bindgen generated bindings.
                        interruption_pending: unsafe {
                            info.header
                                .execution_state
                                .__bindgen_anon_1
                                .interruption_pending()
                                != 0
                        },
                        pc: info.header.pc,
                    };

                    let mut emulator = emulator::Emulator::new(context);
                    emulator
                        .emulate()
                        .map_err(|e| cpu::HypervisorCpuError::RunVcpu(e.into()))?;

                    Ok(cpu::VmExit::Ignore)
                }
                #[cfg(target_arch = "x86_64")]
                msg_type @ (hv_message_type_HVMSG_UNMAPPED_GPA
                | hv_message_type_HVMSG_GPA_INTERCEPT) => {
                    let info = x.to_memory_info().unwrap();
                    let insn_len = info.instruction_byte_count as usize;
                    let gva = info.guest_virtual_address;
                    let gpa = info.guest_physical_address;

                    debug!("Exit ({:?}) GVA {:x} GPA {:x}", msg_type, gva, gpa);

                    let mut context = MshvEmulatorContext {
                        vcpu: self,
                        map: (gva, gpa),
                    };

                    // Create a new emulator.
                    let mut emul = Emulator::new(&mut context);

                    // Emulate the trapped instruction, and only the first one.
                    let new_state = emul
                        .emulate_first_insn(
                            self.vp_index as usize,
                            &info.instruction_bytes[..insn_len],
                        )
                        .map_err(|e| cpu::HypervisorCpuError::RunVcpu(e.into()))?;

                    // Set CPU state back.
                    context
                        .set_cpu_state(self.vp_index as usize, new_state)
                        .map_err(|e| cpu::HypervisorCpuError::RunVcpu(e.into()))?;

                    Ok(cpu::VmExit::Ignore)
                }
                #[cfg(feature = "sev_snp")]
                hv_message_type_HVMSG_GPA_ATTRIBUTE_INTERCEPT => {
                    let info = x.to_gpa_attribute_info().unwrap();
                    let host_vis = info.__bindgen_anon_1.host_visibility();
                    if host_vis >= HV_MAP_GPA_READABLE | HV_MAP_GPA_WRITABLE {
                        warn!("Ignored attribute intercept with full host visibility");
                        return Ok(cpu::VmExit::Ignore);
                    }

                    let num_ranges = info.__bindgen_anon_1.range_count();
                    assert!(num_ranges >= 1);
                    if num_ranges > 1 {
                        return Err(cpu::HypervisorCpuError::RunVcpu(anyhow!(
                            "Unhandled VCPU exit(GPA_ATTRIBUTE_INTERCEPT): Expected num_ranges to be 1 but found num_ranges {:?}",
                            num_ranges
                        )));
                    }

                    // TODO: we could also deny the request with HvCallCompleteIntercept
                    let mut gpas = Vec::new();
                    let ranges = info.ranges;
                    let (gfn_start, gfn_count) = snp::parse_gpa_range(ranges[0]).unwrap();
                    debug!(
                        "Releasing pages: gfn_start: {:x?}, gfn_count: {:?}",
                        gfn_start, gfn_count
                    );
                    let gpa_start = gfn_start * HV_PAGE_SIZE as u64;
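                    // Illustrative example: gfn_start = 0x100 and gfn_count = 2
                    // expand to GPAs [0x100000, 0x101000] with 4 KiB HV pages.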
                    for i in 0..gfn_count {
                        gpas.push(gpa_start + i * HV_PAGE_SIZE as u64);
                    }

                    let mut gpa_list =
                        vec_with_array_field::<mshv_modify_gpa_host_access, u64>(gpas.len());
                    gpa_list[0].page_count = gpas.len() as u64;
                    gpa_list[0].flags = 0;
                    if host_vis & HV_MAP_GPA_READABLE != 0 {
                        gpa_list[0].flags |= 1 << MSHV_GPA_HOST_ACCESS_BIT_READABLE;
                    }
                    if host_vis & HV_MAP_GPA_WRITABLE != 0 {
                        gpa_list[0].flags |= 1 << MSHV_GPA_HOST_ACCESS_BIT_WRITABLE;
                    }

                    // SAFETY: gpa_list initialized with gpas.len() and now it is being turned into
                    // gpas_slice with gpas.len() again. It is guaranteed to be large enough to hold
                    // everything from gpas.
                    unsafe {
                        let gpas_slice: &mut [u64] =
                            gpa_list[0].guest_pfns.as_mut_slice(gpas.len());
                        gpas_slice.copy_from_slice(gpas.as_slice());
                    }

                    self.vm_fd
                        .modify_gpa_host_access(&gpa_list[0])
                        .map_err(|e| cpu::HypervisorCpuError::RunVcpu(anyhow!(
                            "Unhandled VCPU exit: attribute intercept - couldn't modify host access {}", e
                        )))?;
                    // Guest is revoking the shared access, so we need to update the bitmap
                    self.host_access_pages.rcu(|_bitmap| {
                        let bm = self.host_access_pages.load().as_ref().clone();
                        bm.reset_addr_range(gpa_start as usize, gfn_count as usize);
                        bm
                    });
                    Ok(cpu::VmExit::Ignore)
                }
                #[cfg(target_arch = "x86_64")]
                hv_message_type_HVMSG_UNACCEPTED_GPA => {
                    let info = x.to_memory_info().unwrap();
                    let gva = info.guest_virtual_address;
                    let gpa = info.guest_physical_address;

                    Err(cpu::HypervisorCpuError::RunVcpu(anyhow!(
                        "Unhandled VCPU exit: Unaccepted GPA({:x}) found at GVA({:x})",
                        gpa,
                        gva,
                    )))
                }
                #[cfg(target_arch = "x86_64")]
                hv_message_type_HVMSG_X64_CPUID_INTERCEPT => {
                    let info = x.to_cpuid_info().unwrap();
                    debug!("cpuid eax: {:x}", { info.rax });
                    Ok(cpu::VmExit::Ignore)
                }
                #[cfg(target_arch = "x86_64")]
                hv_message_type_HVMSG_X64_MSR_INTERCEPT => {
                    let info = x.to_msr_info().unwrap();
                    if info.header.intercept_access_type == 0 {
                        debug!("msr read: {:x}", { info.msr_number });
                    } else {
                        debug!("msr write: {:x}", { info.msr_number });
                    }
                    Ok(cpu::VmExit::Ignore)
                }
                #[cfg(target_arch = "x86_64")]
                hv_message_type_HVMSG_X64_EXCEPTION_INTERCEPT => {
                    //TODO: Handler for VMCALL here.
                    let info = x.to_exception_info().unwrap();
                    debug!("Exception Info {:?}", { info.exception_vector });
                    Ok(cpu::VmExit::Ignore)
                }
                #[cfg(target_arch = "x86_64")]
                hv_message_type_HVMSG_X64_APIC_EOI => {
                    let info = x.to_apic_eoi_info().unwrap();
                    // The kernel should dispatch the EOI to the correct thread.
                    // Check that the VP index is the same as the one we have.
                    assert!(info.vp_index == self.vp_index as u32);
                    // The interrupt vector in info is u32, but x86 only supports 256
                    // vectors. There is no good way to recover if the hypervisor
                    // returns a bogus value, so just unwrap.
                    Ok(cpu::VmExit::IoapicEoi(
                        info.interrupt_vector.try_into().unwrap(),
                    ))
                }
                #[cfg(feature = "sev_snp")]
                hv_message_type_HVMSG_X64_SEV_VMGEXIT_INTERCEPT => {
                    let info = x.to_vmg_intercept_info().unwrap();
                    let ghcb_data = info.ghcb_msr >> GHCB_INFO_BIT_WIDTH;
                    let ghcb_msr = svm_ghcb_msr {
                        as_uint64: info.ghcb_msr,
                    };
                    // It is safe to unwrap here: for a SEV-SNP guest the GHCB
                    // pointer is already wrapped in the Option, and this code
                    // is not reached otherwise.
                    let ghcb = self.ghcb.as_ref().unwrap().0;

                    // SAFETY: Accessing a union element from bindgen generated bindings.
                    let ghcb_op = unsafe { ghcb_msr.__bindgen_anon_2.ghcb_info() as u32 };
                    // Sanity check on the header fields before handling other operations.
                    assert!(info.header.intercept_access_type == HV_INTERCEPT_ACCESS_EXECUTE as u8);

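                    // The low GHCB_INFO_BIT_WIDTH bits of the GHCB MSR select the
                    // requested operation (ghcb_op); the remaining high bits carry
                    // operation-specific data, extracted into ghcb_data above.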
                    match ghcb_op {
                        GHCB_INFO_HYP_FEATURE_REQUEST => {
                            // Pre-condition: GHCB data must be zero
                            assert!(ghcb_data == 0);
                            let mut ghcb_response = GHCB_INFO_HYP_FEATURE_RESPONSE as u64;
                            // Indicate support for basic SEV-SNP features
                            ghcb_response |=
                                (GHCB_HYP_FEATURE_SEV_SNP << GHCB_INFO_BIT_WIDTH) as u64;
                            // Indicate support for SEV-SNP AP creation
                            ghcb_response |= (GHCB_HYP_FEATURE_SEV_SNP_AP_CREATION
                                << GHCB_INFO_BIT_WIDTH)
                                as u64;
                            debug!(
                                "GHCB_INFO_HYP_FEATURE_REQUEST: Supported features: {:0x}",
                                ghcb_response
                            );
                            let arr_reg_name_value =
                                [(hv_register_name_HV_X64_REGISTER_GHCB, ghcb_response)];
                            set_registers_64!(self.fd, arr_reg_name_value)
                                .map_err(|e| cpu::HypervisorCpuError::SetRegister(e.into()))?;
                        }
                        GHCB_INFO_REGISTER_REQUEST => {
                            let mut ghcb_gpa = hv_x64_register_sev_ghcb::default();

                            // Disable the previously used GHCB page.
                            self.disable_prev_ghcb_page()?;

                            // SAFETY: Accessing a union element from bindgen generated bindings.
                            unsafe {
                                ghcb_gpa.__bindgen_anon_1.set_enabled(1);
                                ghcb_gpa
                                    .__bindgen_anon_1
                                    .set_page_number(ghcb_msr.__bindgen_anon_2.gpa_page_number());
                            }
                            // SAFETY: Accessing a union element from bindgen generated bindings.
                            let reg_name_value = unsafe {
                                [(
                                    hv_register_name_HV_X64_REGISTER_SEV_GHCB_GPA,
                                    ghcb_gpa.as_uint64,
                                )]
                            };

                            set_registers_64!(self.fd, reg_name_value)
                                .map_err(|e| cpu::HypervisorCpuError::SetRegister(e.into()))?;

                            let mut resp_ghcb_msr = svm_ghcb_msr::default();
                            // SAFETY: Accessing a union element from bindgen generated bindings.
                            unsafe {
                                resp_ghcb_msr
                                    .__bindgen_anon_2
                                    .set_ghcb_info(GHCB_INFO_REGISTER_RESPONSE as u64);
                                resp_ghcb_msr.__bindgen_anon_2.set_gpa_page_number(
                                    ghcb_msr.__bindgen_anon_2.gpa_page_number(),
                                );
                                debug!("GHCB GPA is {:x}", ghcb_gpa.as_uint64);
                            }
                            // SAFETY: Accessing a union element from bindgen generated bindings.
                            let reg_name_value = unsafe {
                                [(
                                    hv_register_name_HV_X64_REGISTER_GHCB,
                                    resp_ghcb_msr.as_uint64,
                                )]
                            };

                            set_registers_64!(self.fd, reg_name_value)
                                .map_err(|e| cpu::HypervisorCpuError::SetRegister(e.into()))?;
                        }
                        GHCB_INFO_SEV_INFO_REQUEST => {
                            let sev_cpuid_function = 0x8000_001F;
                            let cpu_leaf = self
                                .fd
                                .get_cpuid_values(sev_cpuid_function, 0, 0, 0)
                                .unwrap();
                            let ebx = cpu_leaf[1];
                            // The low 6 bits of EBX hold the page table encryption bit number
                            let pbit_encryption = (ebx & 0x3f) as u8;
                            let mut ghcb_response = GHCB_INFO_SEV_INFO_RESPONSE as u64;

                            // GHCBData[63:48] specifies the maximum GHCB protocol version supported
                            ghcb_response |= (GHCB_PROTOCOL_VERSION_MAX as u64) << 48;
                            // GHCBData[47:32] specifies the minimum GHCB protocol version supported
                            ghcb_response |= (GHCB_PROTOCOL_VERSION_MIN as u64) << 32;
                            // GHCBData[31:24] specifies the SEV page table encryption bit number.
                            ghcb_response |= (pbit_encryption as u64) << 24;

                            let arr_reg_name_value =
                                [(hv_register_name_HV_X64_REGISTER_GHCB, ghcb_response)];
                            set_registers_64!(self.fd, arr_reg_name_value)
                                .map_err(|e| cpu::HypervisorCpuError::SetRegister(e.into()))?;
                        }
                        GHCB_INFO_NORMAL => {
                            let exit_code =
                                info.__bindgen_anon_2.__bindgen_anon_1.sw_exit_code as u32;

                            match exit_code {
                                SVM_EXITCODE_HV_DOORBELL_PAGE => {
                                    let exit_info1 =
                                        info.__bindgen_anon_2.__bindgen_anon_1.sw_exit_info1 as u32;
                                    match exit_info1 {
                                        SVM_NAE_HV_DOORBELL_PAGE_GET_PREFERRED => {
                                            // Hypervisor does not have any preference for doorbell GPA.
                                            let preferred_doorbell_gpa: u64 = 0xFFFFFFFFFFFFFFFF;
                                            set_svm_field_u64_ptr!(
                                                ghcb,
                                                exit_info2,
                                                preferred_doorbell_gpa
                                            );
                                        }
                                        SVM_NAE_HV_DOORBELL_PAGE_SET => {
                                            let exit_info2 = info
                                                .__bindgen_anon_2
                                                .__bindgen_anon_1
                                                .sw_exit_info2;
                                            let mut ghcb_doorbell_gpa =
                                                hv_x64_register_sev_hv_doorbell::default();
                                            // SAFETY: Accessing a union element from bindgen generated bindings.
                                            unsafe {
                                                ghcb_doorbell_gpa.__bindgen_anon_1.set_enabled(1);
                                                ghcb_doorbell_gpa
                                                    .__bindgen_anon_1
                                                    .set_page_number(exit_info2 >> PAGE_SHIFT);
                                            }
                                            // SAFETY: Accessing a union element from bindgen generated bindings.
                                            let reg_names = unsafe {
                                                [(
                                                    hv_register_name_HV_X64_REGISTER_SEV_DOORBELL_GPA,
                                                    ghcb_doorbell_gpa.as_uint64,
                                                )]
                                            };
                                            set_registers_64!(self.fd, reg_names).map_err(|e| {
                                                cpu::HypervisorCpuError::SetRegister(e.into())
                                            })?;

                                            set_svm_field_u64_ptr!(ghcb, exit_info2, exit_info2);

                                            // Clear the SW_EXIT_INFO1 register to indicate no error
                                            self.clear_swexit_info1()?;
                                        }
                                        SVM_NAE_HV_DOORBELL_PAGE_QUERY => {
                                            let mut reg_assocs = [hv_register_assoc {
                                                name: hv_register_name_HV_X64_REGISTER_SEV_DOORBELL_GPA,
                                                ..Default::default()
                                            }];
                                            self.fd.get_reg(&mut reg_assocs).unwrap();
                                            // SAFETY: Accessing a union element from bindgen generated bindings.
                                            let doorbell_gpa = unsafe { reg_assocs[0].value.reg64 };

                                            set_svm_field_u64_ptr!(ghcb, exit_info2, doorbell_gpa);

                                            // Clear the SW_EXIT_INFO1 register to indicate no error
                                            self.clear_swexit_info1()?;
                                        }
                                        SVM_NAE_HV_DOORBELL_PAGE_CLEAR => {
                                            set_svm_field_u64_ptr!(ghcb, exit_info2, 0);
                                        }
                                        _ => {
                                            panic!(
                                                "SVM_EXITCODE_HV_DOORBELL_PAGE: Unhandled exit code: {:0x}",
                                                exit_info1
                                            );
                                        }
                                    }
                                }
                                SVM_EXITCODE_IOIO_PROT => {
                                    let exit_info1 =
                                        info.__bindgen_anon_2.__bindgen_anon_1.sw_exit_info1 as u32;
                                    let port_info = hv_sev_vmgexit_port_info {
                                        as_uint32: exit_info1,
                                    };

                                    let port =
                                        // SAFETY: Accessing a union element from bindgen generated bindings.
                                        unsafe { port_info.__bindgen_anon_1.intercepted_port() };
                                    let mut len = 4;
                                    // SAFETY: Accessing a union element from bindgen generated bindings.
                                    unsafe {
                                        if port_info.__bindgen_anon_1.operand_size_16bit() == 1 {
                                            len = 2;
                                        } else if port_info.__bindgen_anon_1.operand_size_8bit()
                                            == 1
                                        {
                                            len = 1;
                                        }
                                    }
                                    let is_write =
                                        // SAFETY: Accessing a union element from bindgen generated bindings.
                                        unsafe { port_info.__bindgen_anon_1.access_type() == 0 };
                                    // SAFETY: Accessing the field from a mapped address
                                    let mut data = unsafe { (*ghcb).rax.to_le_bytes() };

                                    if is_write {
                                        if let Some(vm_ops) = &self.vm_ops {
                                            vm_ops.pio_write(port.into(), &data[..len]).map_err(
                                                |e| cpu::HypervisorCpuError::RunVcpu(e.into()),
                                            )?;
                                        }
                                    } else {
                                        if let Some(vm_ops) = &self.vm_ops {
                                            vm_ops
                                                .pio_read(port.into(), &mut data[..len])
                                                .map_err(|e| {
                                                    cpu::HypervisorCpuError::RunVcpu(e.into())
                                                })?;
                                        }
                                        set_svm_field_u64_ptr!(ghcb, rax, u64::from_le_bytes(data));
                                    }

                                    // Clear the SW_EXIT_INFO1 register to indicate no error
                                    self.clear_swexit_info1()?;
                                }
                                SVM_EXITCODE_MMIO_READ => {
                                    let src_gpa =
                                        info.__bindgen_anon_2.__bindgen_anon_1.sw_exit_info1;
                                    let data_len =
                                        info.__bindgen_anon_2.__bindgen_anon_1.sw_exit_info2
                                            as usize;
                                    // Sanity check to make sure data len is within supported range.
                                    assert!(data_len <= 0x8);

                                    let mut data: Vec<u8> = vec![0; data_len];
                                    if let Some(vm_ops) = &self.vm_ops {
                                        vm_ops.mmio_read(src_gpa, &mut data).map_err(|e| {
                                            cpu::HypervisorCpuError::RunVcpu(e.into())
                                        })?;
                                    }
                                    // Copy the data to the shared buffer of the GHCB page
                                    let mut buffer_data = [0; 8];
                                    buffer_data[..data_len].copy_from_slice(&data[..data_len]);
                                    // SAFETY: Updating the value of mapped area
                                    unsafe { (*ghcb).shared[0] = u64::from_le_bytes(buffer_data) };

                                    // Clear the SW_EXIT_INFO1 register to indicate no error
                                    self.clear_swexit_info1()?;
                                }
                                SVM_EXITCODE_MMIO_WRITE => {
                                    let dst_gpa =
                                        info.__bindgen_anon_2.__bindgen_anon_1.sw_exit_info1;
                                    let data_len =
                                        info.__bindgen_anon_2.__bindgen_anon_1.sw_exit_info2
                                            as usize;
                                    // Sanity check to make sure data len is within supported range.
                                    assert!(data_len <= 0x8);

                                    let mut data = vec![0; data_len];
                                    // SAFETY: Accessing data from a mapped address
                                    let bytes_shared_ghcb =
                                        unsafe { (*ghcb).shared[0].to_le_bytes() };
                                    data.copy_from_slice(&bytes_shared_ghcb[..data_len]);

                                    if let Some(vm_ops) = &self.vm_ops {
                                        vm_ops.mmio_write(dst_gpa, &data).map_err(|e| {
                                            cpu::HypervisorCpuError::RunVcpu(e.into())
                                        })?;
                                    }

                                    // Clear the SW_EXIT_INFO1 register to indicate no error
                                    self.clear_swexit_info1()?;
                                }
                                SVM_EXITCODE_SNP_GUEST_REQUEST
                                | SVM_EXITCODE_SNP_EXTENDED_GUEST_REQUEST => {
                                    if exit_code == SVM_EXITCODE_SNP_EXTENDED_GUEST_REQUEST {
                                        info!("Fetching extended guest request is not supported");
                                        // We don't support the extended guest request, so we just
                                        // write empty data. This matches the behavior of KVM in
                                        // Linux 6.11.

                                        // Read the data GPA (RAX) and page count (RBX) from the GHCB.
                                        // SAFETY: Accessing data from a mapped address
                                        let data_gpa = unsafe { (*ghcb).rax };
                                        // SAFETY: Accessing data from a mapped address
                                        let data_npages = unsafe { (*ghcb).rbx };

                                        if data_npages > 0 {
                                            // The certificates are terminated by 24 zero bytes.
                                            // TODO: Need to check if data_gpa is the address of the shared buffer in the GHCB page
                                            // in that case we should clear the shared buffer(24 bytes)
                                            self.gpa_write(data_gpa, &[0; 24])?;
                                        }
                                    }

                                    let req_gpa =
                                        info.__bindgen_anon_2.__bindgen_anon_1.sw_exit_info1;
                                    let rsp_gpa =
                                        info.__bindgen_anon_2.__bindgen_anon_1.sw_exit_info2;

                                    let mshv_psp_req =
                                        mshv_issue_psp_guest_request { req_gpa, rsp_gpa };
                                    self.vm_fd
                                        .psp_issue_guest_request(&mshv_psp_req)
                                        .map_err(|e| cpu::HypervisorCpuError::RunVcpu(e.into()))?;

                                    debug!(
                                        "SNP guest request: req_gpa {:0x} rsp_gpa {:0x}",
                                        req_gpa, rsp_gpa
                                    );

                                    set_svm_field_u64_ptr!(ghcb, exit_info2, 0);
                                }
1251                                 SVM_EXITCODE_SNP_AP_CREATION => {
1252                                     let vmsa_gpa =
1253                                         info.__bindgen_anon_2.__bindgen_anon_1.sw_exit_info2;
1254                                     let apic_id =
1255                                         info.__bindgen_anon_2.__bindgen_anon_1.sw_exit_info1 >> 32;
1256                                     debug!(
1257                                         "SNP AP CREATE REQUEST with VMSA GPA {:0x}, and APIC ID {:?}",
1258                                         vmsa_gpa, apic_id
1259                                     );
1260 
1261                                     let mshv_ap_create_req = mshv_sev_snp_ap_create {
1262                                         vp_id: apic_id,
1263                                         vmsa_gpa,
1264                                     };
1265                                     self.vm_fd
1266                                         .sev_snp_ap_create(&mshv_ap_create_req)
1267                                         .map_err(|e| cpu::HypervisorCpuError::RunVcpu(e.into()))?;
1268 
1269                                     // Clear the SW_EXIT_INFO1 register to indicate no error
1270                                     self.clear_swexit_info1()?;
1271                                 }
1272                                 _ => panic!(
1273                                     "GHCB_INFO_NORMAL: Unhandled exit code: {:0x}",
1274                                     exit_code
1275                                 ),
1276                             }
1277                         }
1278                         _ => panic!("Unsupported VMGEXIT operation: {:0x}", ghcb_op),
1279                     }
1280 
1281                     Ok(cpu::VmExit::Ignore)
1282                 }
1283                 exit => Err(cpu::HypervisorCpuError::RunVcpu(anyhow!(
1284                     "Unhandled VCPU exit {:?}",
1285                     exit
1286                 ))),
1287             },
1288 
1289             Err(e) => match e.errno() {
1290                 libc::EAGAIN | libc::EINTR => Ok(cpu::VmExit::Ignore),
1291                 _ => Err(cpu::HypervisorCpuError::RunVcpu(anyhow!(
1292                     "VCPU error {:?}",
1293                     e
1294                 ))),
1295             },
1296         }
1297     }

    #[cfg(target_arch = "aarch64")]
    fn init_pmu(&self, _irq: u32) -> cpu::Result<()> {
        unimplemented!()
    }

    #[cfg(target_arch = "aarch64")]
    fn has_pmu_support(&self) -> bool {
        unimplemented!()
    }

    #[cfg(target_arch = "aarch64")]
    fn setup_regs(&self, cpu_id: u8, boot_ip: u64, fdt_start: u64) -> cpu::Result<()> {
        let arr_reg_name_value = [(
            hv_register_name_HV_ARM64_REGISTER_PSTATE,
            regs::PSTATE_FAULT_BITS_64,
        )];
        set_registers_64!(self.fd, arr_reg_name_value)
            .map_err(|e| cpu::HypervisorCpuError::SetRegister(e.into()))?;

        if cpu_id == 0 {
            let arr_reg_name_value = [
                (hv_register_name_HV_ARM64_REGISTER_PC, boot_ip),
                (hv_register_name_HV_ARM64_REGISTER_X0, fdt_start),
            ];
            set_registers_64!(self.fd, arr_reg_name_value)
                .map_err(|e| cpu::HypervisorCpuError::SetRegister(e.into()))?;
        }

        Ok(())
    }

    #[cfg(target_arch = "aarch64")]
    fn get_sys_reg(&self, sys_reg: u32) -> cpu::Result<u64> {
        let mshv_reg = self.sys_reg_to_mshv_reg(sys_reg)?;

        let mut reg_assocs = [hv_register_assoc {
            name: mshv_reg,
            ..Default::default()
        }];
        self.fd
            .get_reg(&mut reg_assocs)
            .map_err(|e| cpu::HypervisorCpuError::GetRegister(e.into()))?;

        // SAFETY: Accessing a union element from a bindgen-generated definition.
        let res = unsafe { reg_assocs[0].value.reg64 };
        Ok(res)
    }

    #[cfg(target_arch = "aarch64")]
    fn get_reg_list(&self, _reg_list: &mut crate::RegList) -> cpu::Result<()> {
        unimplemented!()
    }

    #[cfg(target_arch = "aarch64")]
    fn vcpu_init(&self, _kvi: &crate::VcpuInit) -> cpu::Result<()> {
        unimplemented!()
    }

    #[cfg(target_arch = "aarch64")]
    fn vcpu_finalize(&self, _feature: i32) -> cpu::Result<()> {
        unimplemented!()
    }

    #[cfg(target_arch = "aarch64")]
    fn vcpu_get_finalized_features(&self) -> i32 {
        unimplemented!()
    }

    #[cfg(target_arch = "aarch64")]
    fn vcpu_set_processor_features(
        &self,
        _vm: &Arc<dyn crate::Vm>,
        _kvi: &mut crate::VcpuInit,
        _id: u8,
    ) -> cpu::Result<()> {
        unimplemented!()
    }

    #[cfg(target_arch = "aarch64")]
    fn create_vcpu_init(&self) -> crate::VcpuInit {
        unimplemented!();
    }

    #[cfg(target_arch = "x86_64")]
    ///
    /// X86-specific call to set up the CPUID registers.
    ///
    fn set_cpuid2(&self, cpuid: &[CpuIdEntry]) -> cpu::Result<()> {
        let cpuid: Vec<mshv_bindings::hv_cpuid_entry> = cpuid.iter().map(|e| (*e).into()).collect();
        let mshv_cpuid = <CpuId>::from_entries(&cpuid)
            .map_err(|_| cpu::HypervisorCpuError::SetCpuid(anyhow!("failed to create CpuId")))?;

        self.fd
            .register_intercept_result_cpuid(&mshv_cpuid)
            .map_err(|e| cpu::HypervisorCpuError::SetCpuid(e.into()))
    }

    #[cfg(target_arch = "x86_64")]
    ///
    /// X86-specific call to retrieve the CPUID registers.
    ///
    fn get_cpuid2(&self, _num_entries: usize) -> cpu::Result<Vec<CpuIdEntry>> {
        Ok(self.cpuid.clone())
    }

    #[cfg(target_arch = "x86_64")]
    ///
    /// X86-specific call to retrieve a CPUID leaf.
    ///
    fn get_cpuid_values(
        &self,
        function: u32,
        index: u32,
        xfem: u64,
        xss: u64,
    ) -> cpu::Result<[u32; 4]> {
        self.fd
            .get_cpuid_values(function, index, xfem, xss)
            .map_err(|e| cpu::HypervisorCpuError::GetCpuidVales(e.into()))
    }

    #[cfg(target_arch = "x86_64")]
    ///
    /// Returns the state of the LAPIC (Local Advanced Programmable Interrupt Controller).
    ///
    fn get_lapic(&self) -> cpu::Result<crate::arch::x86::LapicState> {
        Ok(self
            .fd
            .get_lapic()
            .map_err(|e| cpu::HypervisorCpuError::GetlapicState(e.into()))?
            .into())
    }

    #[cfg(target_arch = "x86_64")]
    ///
    /// Sets the state of the LAPIC (Local Advanced Programmable Interrupt Controller).
    ///
    fn set_lapic(&self, lapic: &crate::arch::x86::LapicState) -> cpu::Result<()> {
        let lapic: mshv_bindings::LapicState = (*lapic).clone().into();
        self.fd
            .set_lapic(&lapic)
            .map_err(|e| cpu::HypervisorCpuError::SetLapicState(e.into()))
    }

    ///
    /// Returns the vcpu's current "multiprocessing state".
    ///
    fn get_mp_state(&self) -> cpu::Result<MpState> {
        Ok(MpState::Mshv)
    }

    ///
    /// Sets the vcpu's current "multiprocessing state".
    ///
    fn set_mp_state(&self, _mp_state: MpState) -> cpu::Result<()> {
        Ok(())
    }

    #[cfg(target_arch = "x86_64")]
    ///
    /// Set CPU state for x86_64 guest.
    ///
    fn set_state(&self, state: &CpuState) -> cpu::Result<()> {
        let mut state: VcpuMshvState = state.clone().into();
        self.set_msrs(&state.msrs)?;
        self.set_vcpu_events(&state.vcpu_events)?;
        self.set_regs(&state.regs.into())?;
        self.set_sregs(&state.sregs.into())?;
        self.set_fpu(&state.fpu)?;
        self.set_xcrs(&state.xcrs)?;
        // The misc registers are global and need to be set only for the first
        // VCPU, as the Microsoft Hypervisor allows setting them on one VCPU only.
        if self.vp_index == 0 {
            self.fd
                .set_misc_regs(&state.misc)
                .map_err(|e| cpu::HypervisorCpuError::SetMiscRegs(e.into()))?
        }
        self.fd
            .set_debug_regs(&state.dbg)
            .map_err(|e| cpu::HypervisorCpuError::SetDebugRegs(e.into()))?;
        self.fd
            .set_all_vp_state_components(&mut state.vp_states)
            .map_err(|e| cpu::HypervisorCpuError::SetAllVpStateComponents(e.into()))?;
        Ok(())
    }

    #[cfg(target_arch = "aarch64")]
    ///
    /// Set CPU state for aarch64 guest.
    ///
    fn set_state(&self, _state: &CpuState) -> cpu::Result<()> {
        unimplemented!()
    }

    #[cfg(target_arch = "x86_64")]
    ///
    /// Get CPU state for x86_64 guest.
    ///
    fn state(&self) -> cpu::Result<CpuState> {
        let regs = self.get_regs()?;
        let sregs = self.get_sregs()?;
        let xcrs = self.get_xcrs()?;
        let fpu = self.get_fpu()?;
        let vcpu_events = self.get_vcpu_events()?;
        let mut msrs = self.msrs.clone();
        self.get_msrs(&mut msrs)?;
        let misc = self
            .fd
            .get_misc_regs()
            .map_err(|e| cpu::HypervisorCpuError::GetMiscRegs(e.into()))?;
        let dbg = self
            .fd
            .get_debug_regs()
            .map_err(|e| cpu::HypervisorCpuError::GetDebugRegs(e.into()))?;
        let vp_states = self
            .fd
            .get_all_vp_state_components()
            .map_err(|e| cpu::HypervisorCpuError::GetAllVpStateComponents(e.into()))?;

        Ok(VcpuMshvState {
            msrs,
            vcpu_events,
            regs: regs.into(),
            sregs: sregs.into(),
            fpu,
            xcrs,
            dbg,
            misc,
            vp_states,
        }
        .into())
    }

    #[cfg(target_arch = "aarch64")]
    ///
    /// Get CPU state for aarch64 guest.
    ///
    fn state(&self) -> cpu::Result<CpuState> {
        unimplemented!()
    }

    #[cfg(target_arch = "x86_64")]
    ///
    /// Translate a guest virtual address to a guest physical address.
    ///
    fn translate_gva(&self, gva: u64, flags: u64) -> cpu::Result<(u64, u32)> {
        let r = self
            .fd
            .translate_gva(gva, flags)
            .map_err(|e| cpu::HypervisorCpuError::TranslateVirtualAddress(e.into()))?;

        let gpa = r.0;
        // SAFETY: r is valid, otherwise this function would have returned already.
        let result_code = unsafe { r.1.__bindgen_anon_1.result_code };

        Ok((gpa, result_code))
    }

    #[cfg(target_arch = "x86_64")]
    ///
    /// Return the list of initial MSR entries for a VCPU.
    ///
    fn boot_msr_entries(&self) -> Vec<MsrEntry> {
        use crate::arch::x86::{msr_index, MTRR_ENABLE, MTRR_MEM_TYPE_WB};

        [
            msr!(msr_index::MSR_IA32_SYSENTER_CS),
            msr!(msr_index::MSR_IA32_SYSENTER_ESP),
            msr!(msr_index::MSR_IA32_SYSENTER_EIP),
            msr!(msr_index::MSR_STAR),
            msr!(msr_index::MSR_CSTAR),
            msr!(msr_index::MSR_LSTAR),
            msr!(msr_index::MSR_KERNEL_GS_BASE),
            msr!(msr_index::MSR_SYSCALL_MASK),
            msr_data!(msr_index::MSR_MTRRdefType, MTRR_ENABLE | MTRR_MEM_TYPE_WB),
        ]
        .to_vec()
    }
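
    // For reference, the MTRRdefType entry above resolves to a concrete value
    // (assuming the usual definitions MTRR_ENABLE == 0x800, i.e. the E flag in
    // bit 11, and MTRR_MEM_TYPE_WB == 0x6 for write-back as the default type):
    //
    //     assert_eq!(MTRR_ENABLE | MTRR_MEM_TYPE_WB, 0x806);
    //
    // so the guest boots with MTRRs enabled and write-back as the default
    // memory type.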

    ///
    /// Sets the AMD specific vcpu's sev control register.
    ///
    #[cfg(feature = "sev_snp")]
    fn set_sev_control_register(&self, vmsa_pfn: u64) -> cpu::Result<()> {
        let sev_control_reg = snp::get_sev_control_register(vmsa_pfn);

        self.fd
            .set_sev_control_register(sev_control_reg)
            .map_err(|e| cpu::HypervisorCpuError::SetSevControlRegister(e.into()))
    }

    #[cfg(target_arch = "x86_64")]
    ///
    /// Trigger NMI interrupt
    ///
    fn nmi(&self) -> cpu::Result<()> {
        let cfg = InterruptRequest {
            interrupt_type: hv_interrupt_type_HV_X64_INTERRUPT_TYPE_NMI,
            apic_id: self.vp_index as u64,
            level_triggered: false,
            vector: 0,
            logical_destination_mode: false,
            long_mode: false,
        };
        self.vm_fd
            .request_virtual_interrupt(&cfg)
            .map_err(|e| cpu::HypervisorCpuError::Nmi(e.into()))
    }

    ///
    /// Set the GICR base address for the vcpu.
    ///
    #[cfg(target_arch = "aarch64")]
    fn set_gic_redistributor_addr(&self, gicr_base_addr: u64) -> cpu::Result<()> {
        debug!(
            "Setting GICR base address to: {:#x}, for vp_index: {:?}",
            gicr_base_addr, self.vp_index
        );
        let arr_reg_name_value = [(
            hv_register_name_HV_ARM64_REGISTER_GICR_BASE_GPA,
            gicr_base_addr,
        )];
        set_registers_64!(self.fd, arr_reg_name_value)
            .map_err(|e| cpu::HypervisorCpuError::SetRegister(e.into()))?;

        Ok(())
    }
}

impl MshvVcpu {
    ///
    /// Deactivate previously used GHCB page.
    ///
    #[cfg(feature = "sev_snp")]
    fn disable_prev_ghcb_page(&self) -> cpu::Result<()> {
        let mut reg_assocs = [hv_register_assoc {
            name: hv_register_name_HV_X64_REGISTER_SEV_GHCB_GPA,
            ..Default::default()
        }];
        self.fd.get_reg(&mut reg_assocs).unwrap();
        // SAFETY: Accessing a union element from bindgen generated bindings.
        let prev_ghcb_gpa = unsafe { reg_assocs[0].value.reg64 };

        debug!("Prev GHCB GPA is {:x}", prev_ghcb_gpa);

        let mut ghcb_gpa = hv_x64_register_sev_ghcb::default();

        // SAFETY: Accessing a union element from bindgen generated bindings.
        unsafe {
            ghcb_gpa.__bindgen_anon_1.set_enabled(0);
            ghcb_gpa.__bindgen_anon_1.set_page_number(prev_ghcb_gpa);
        }

        // SAFETY: Accessing a union element from bindgen generated bindings.
        let reg_name_value = unsafe {
            [(
                hv_register_name_HV_X64_REGISTER_SEV_GHCB_GPA,
                ghcb_gpa.as_uint64,
            )]
        };

        set_registers_64!(self.fd, reg_name_value)
            .map_err(|e| cpu::HypervisorCpuError::SetRegister(e.into()))?;

        Ok(())
    }
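
    // Illustrative sketch of the SEV_GHCB_GPA encoding manipulated above
    // (details of the bit layout are handled by the bindgen accessors; this
    // is an assumption-laden reading, not a spec quote): the register packs
    // an "enabled" flag together with the GHCB page number, which is why
    // deactivation writes the previous page number back with enabled == 0.
    //
    //     let mut ghcb_gpa = hv_x64_register_sev_ghcb::default();
    //     // SAFETY: union field access on a bindgen-generated type.
    //     unsafe {
    //         ghcb_gpa.__bindgen_anon_1.set_enabled(0);
    //         ghcb_gpa.__bindgen_anon_1.set_page_number(prev_ghcb_gpa);
    //     }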
    #[cfg(target_arch = "x86_64")]
    ///
    /// X86-specific call that returns the vcpu's current "xcrs".
    ///
    fn get_xcrs(&self) -> cpu::Result<ExtendedControlRegisters> {
        self.fd
            .get_xcrs()
            .map_err(|e| cpu::HypervisorCpuError::GetXcsr(e.into()))
    }

    #[cfg(target_arch = "x86_64")]
    ///
    /// X86-specific call that sets the vcpu's current "xcrs".
    ///
    fn set_xcrs(&self, xcrs: &ExtendedControlRegisters) -> cpu::Result<()> {
        self.fd
            .set_xcrs(xcrs)
            .map_err(|e| cpu::HypervisorCpuError::SetXcsr(e.into()))
    }

    #[cfg(target_arch = "x86_64")]
    ///
    /// Returns currently pending exceptions, interrupts, and NMIs as well as related
    /// states of the vcpu.
    ///
    fn get_vcpu_events(&self) -> cpu::Result<VcpuEvents> {
        self.fd
            .get_vcpu_events()
            .map_err(|e| cpu::HypervisorCpuError::GetVcpuEvents(e.into()))
    }

    #[cfg(target_arch = "x86_64")]
    ///
    /// Sets pending exceptions, interrupts, and NMIs as well as related states
    /// of the vcpu.
    ///
    fn set_vcpu_events(&self, events: &VcpuEvents) -> cpu::Result<()> {
        self.fd
            .set_vcpu_events(events)
            .map_err(|e| cpu::HypervisorCpuError::SetVcpuEvents(e.into()))
    }

    ///
    /// Clear the SW_EXIT_INFO1 register for SEV-SNP guests.
    ///
    #[cfg(feature = "sev_snp")]
    fn clear_swexit_info1(&self) -> std::result::Result<cpu::VmExit, cpu::HypervisorCpuError> {
        // Clear the SW_EXIT_INFO1 register to indicate no error.
        // unwrap() is safe here: for an SEV-SNP guest the GHCB pointer is
        // always present in the Option, and this code is not reached otherwise.
        let ghcb = self.ghcb.as_ref().unwrap().0;
        set_svm_field_u64_ptr!(ghcb, exit_info1, 0);

        Ok(cpu::VmExit::Ignore)
    }

    #[cfg(feature = "sev_snp")]
    fn gpa_write(&self, gpa: u64, data: &[u8]) -> cpu::Result<()> {
        for (gpa, chunk) in (gpa..)
            .step_by(HV_READ_WRITE_GPA_MAX_SIZE as usize)
            .zip(data.chunks(HV_READ_WRITE_GPA_MAX_SIZE as usize))
        {
            let mut data = [0; HV_READ_WRITE_GPA_MAX_SIZE as usize];
            data[..chunk.len()].copy_from_slice(chunk);

            let mut rw_gpa_arg = mshv_bindings::mshv_read_write_gpa {
                base_gpa: gpa,
                byte_count: chunk.len() as u32,
                data,
                ..Default::default()
            };
            self.fd
                .gpa_write(&mut rw_gpa_arg)
                .map_err(|e| cpu::HypervisorCpuError::GpaWrite(e.into()))?;
        }

        Ok(())
    }
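
    // How the chunking in gpa_write() lines up (an illustrative sketch; the
    // real chunk size is HV_READ_WRITE_GPA_MAX_SIZE, assumed to be 16 bytes
    // here): a 40-byte write at gpa == 0x1000 becomes three hypercalls,
    // because the step_by() iterator and chunks() advance in lockstep:
    //
    //     (0x1000, bytes  0..16)
    //     (0x1010, bytes 16..32)
    //     (0x1020, bytes 32..40)  // byte_count == 8, buffer zero-padded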

    #[cfg(target_arch = "x86_64")]
    fn advance_rip_update_rax(
        &self,
        info: &hv_x64_io_port_intercept_message,
        ret_rax: u64,
    ) -> cpu::Result<()> {
        let insn_len = info.header.instruction_length() as u64;
        /*
         * Advance RIP and update RAX
         * First, try to update the registers using VP register page
         * which is mapped into user space for faster access.
         * If the register page is not available, fall back to regular
         * IOCTL to update the registers.
         */
        if let Some(reg_page) = self.fd.get_vp_reg_page() {
            let vp_reg_page = reg_page.0;
            set_gp_regs_field_ptr!(vp_reg_page, rax, ret_rax);
            // SAFETY: access union fields
            unsafe {
                (*vp_reg_page).__bindgen_anon_1.__bindgen_anon_1.rip = info.header.rip + insn_len;
                (*vp_reg_page).dirty |= 1 << HV_X64_REGISTER_CLASS_IP;
            }
        } else {
            let arr_reg_name_value = [
                (
                    hv_register_name_HV_X64_REGISTER_RIP,
                    info.header.rip + insn_len,
                ),
                (hv_register_name_HV_X64_REGISTER_RAX, ret_rax),
            ];
            set_registers_64!(self.fd, arr_reg_name_value)
                .map_err(|e| cpu::HypervisorCpuError::SetRegister(e.into()))?;
        }
        Ok(())
    }

    #[cfg(target_arch = "aarch64")]
    fn sys_reg_to_mshv_reg(&self, sys_regs: u32) -> cpu::Result<u32> {
        match sys_regs {
            regs::MPIDR_EL1 => Ok(hv_register_name_HV_ARM64_REGISTER_MPIDR_EL1),
            _ => Err(cpu::HypervisorCpuError::UnsupportedSysReg(sys_regs)),
        }
    }
}

/// Wrapper over Mshv VM ioctls.
pub struct MshvVm {
    fd: Arc<VmFd>,
    #[cfg(target_arch = "x86_64")]
    msrs: Vec<MsrEntry>,
    dirty_log_slots: Arc<RwLock<HashMap<u64, MshvDirtyLogSlot>>>,
    #[cfg(feature = "sev_snp")]
    sev_snp_enabled: bool,
    #[cfg(feature = "sev_snp")]
    host_access_pages: ArcSwap<AtomicBitmap>,
}

impl MshvVm {
    ///
    /// Creates an in-kernel device.
    ///
    /// See the documentation for `MSHV_CREATE_DEVICE`.
    fn create_device(&self, device: &mut CreateDevice) -> vm::Result<VfioDeviceFd> {
        let device_fd = self
            .fd
            .create_device(device)
            .map_err(|e| vm::HypervisorVmError::CreateDevice(e.into()))?;
        Ok(VfioDeviceFd::new_from_mshv(device_fd))
    }
}

///
/// Implementation of Vm trait for Mshv
///
/// # Examples
///
/// ```
/// extern crate hypervisor;
/// use hypervisor::mshv::MshvHypervisor;
/// use std::sync::Arc;
/// let mshv = MshvHypervisor::new().unwrap();
/// let hypervisor = Arc::new(mshv);
/// let vm = hypervisor.create_vm().expect("new VM fd creation failed");
/// ```
impl vm::Vm for MshvVm {
    #[cfg(target_arch = "x86_64")]
    ///
    /// Sets the address of the one-page region in the VM's address space.
    ///
    fn set_identity_map_address(&self, _address: u64) -> vm::Result<()> {
        Ok(())
    }

    #[cfg(target_arch = "x86_64")]
    ///
    /// Sets the address of the three-page region in the VM's address space.
    ///
    fn set_tss_address(&self, _offset: usize) -> vm::Result<()> {
        Ok(())
    }

    ///
    /// Creates an in-kernel interrupt controller.
    ///
    fn create_irq_chip(&self) -> vm::Result<()> {
        Ok(())
    }

    ///
    /// Registers an event that will, when signaled, trigger the `gsi` IRQ.
    ///
    fn register_irqfd(&self, fd: &EventFd, gsi: u32) -> vm::Result<()> {
        debug!("register_irqfd fd {} gsi {}", fd.as_raw_fd(), gsi);

        self.fd
            .register_irqfd(fd, gsi)
            .map_err(|e| vm::HypervisorVmError::RegisterIrqFd(e.into()))?;

        Ok(())
    }
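
    // Illustrative usage sketch (the GSI number is hypothetical; EventFd
    // comes from vmm_sys_util, already imported at the top of this module):
    //
    //     let fd = EventFd::new(libc::EFD_NONBLOCK).unwrap();
    //     vm.register_irqfd(&fd, 30).unwrap();
    //     // ... signaling `fd` now injects GSI 30 into the guest ...
    //     vm.unregister_irqfd(&fd, 30).unwrap();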

    ///
    /// Unregisters an event that will, when signaled, trigger the `gsi` IRQ.
    ///
    fn unregister_irqfd(&self, fd: &EventFd, gsi: u32) -> vm::Result<()> {
        debug!("unregister_irqfd fd {} gsi {}", fd.as_raw_fd(), gsi);

        self.fd
            .unregister_irqfd(fd, gsi)
            .map_err(|e| vm::HypervisorVmError::UnregisterIrqFd(e.into()))?;

        Ok(())
    }

    ///
    /// Creates a vcpu with the given id and wraps it in a `cpu::Vcpu` trait object.
    ///
    fn create_vcpu(
        &self,
        id: u8,
        vm_ops: Option<Arc<dyn VmOps>>,
    ) -> vm::Result<Arc<dyn cpu::Vcpu>> {
        let vcpu_fd = self
            .fd
            .create_vcpu(id)
            .map_err(|e| vm::HypervisorVmError::CreateVcpu(e.into()))?;

        /* Map the GHCB page into the VMM (root) address space.
         * The mapping is available after vcpu creation. This address is mapped
         * to the overlay GHCB page of the Microsoft Hypervisor, so we don't
         * have to worry about the scenario where a guest changes the GHCB
         * mapping.
         */
        #[cfg(feature = "sev_snp")]
        let ghcb = if self.sev_snp_enabled {
            // SAFETY: Safe to call as the VCPU already has this mapping available upon creation
            let addr = unsafe {
                libc::mmap(
                    std::ptr::null_mut(),
                    HV_PAGE_SIZE,
                    libc::PROT_READ | libc::PROT_WRITE,
                    libc::MAP_SHARED,
                    vcpu_fd.as_raw_fd(),
                    MSHV_VP_MMAP_OFFSET_GHCB as i64 * libc::sysconf(libc::_SC_PAGE_SIZE),
                )
            };
            if std::ptr::eq(addr, libc::MAP_FAILED) {
                // No point in continuing: without this mmap, VMGEXIT will fail anyway.
                return Err(vm::HypervisorVmError::MmapToRoot);
            }
            Some(Ghcb(addr as *mut svm_ghcb_base))
        } else {
            None
        };
        let vcpu = MshvVcpu {
            fd: vcpu_fd,
            vp_index: id,
            #[cfg(target_arch = "x86_64")]
            cpuid: Vec::new(),
            #[cfg(target_arch = "x86_64")]
            msrs: self.msrs.clone(),
            vm_ops,
            vm_fd: self.fd.clone(),
            #[cfg(feature = "sev_snp")]
            ghcb,
            #[cfg(feature = "sev_snp")]
            host_access_pages: ArcSwap::new(self.host_access_pages.load().clone()),
        };
        Ok(Arc::new(vcpu))
    }
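
    // Extending the module-level example above (a sketch; error handling
    // elided): once a VM exists, vcpus are created through this method and
    // then driven with run():
    //
    //     let vcpu = vm.create_vcpu(0, None).expect("vcpu creation failed");
    //     // ... configure registers, then loop on vcpu.run() ...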

    #[cfg(target_arch = "x86_64")]
    fn enable_split_irq(&self) -> vm::Result<()> {
        Ok(())
    }

    #[cfg(target_arch = "x86_64")]
    fn enable_sgx_attribute(&self, _file: File) -> vm::Result<()> {
        Ok(())
    }

    fn register_ioevent(
        &self,
        fd: &EventFd,
        addr: &IoEventAddress,
        datamatch: Option<DataMatch>,
    ) -> vm::Result<()> {
        #[cfg(feature = "sev_snp")]
        if self.sev_snp_enabled {
            return Ok(());
        }

        let addr = &mshv_ioctls::IoEventAddress::from(*addr);
        debug!(
            "register_ioevent fd {} addr {:x?} datamatch {:?}",
            fd.as_raw_fd(),
            addr,
            datamatch
        );
        if let Some(dm) = datamatch {
            match dm {
                vm::DataMatch::DataMatch32(mshv_dm32) => self
                    .fd
                    .register_ioevent(fd, addr, mshv_dm32)
                    .map_err(|e| vm::HypervisorVmError::RegisterIoEvent(e.into())),
                vm::DataMatch::DataMatch64(mshv_dm64) => self
                    .fd
                    .register_ioevent(fd, addr, mshv_dm64)
                    .map_err(|e| vm::HypervisorVmError::RegisterIoEvent(e.into())),
            }
        } else {
            self.fd
                .register_ioevent(fd, addr, NoDatamatch)
                .map_err(|e| vm::HypervisorVmError::RegisterIoEvent(e.into()))
        }
    }
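
    // Illustrative usage sketch (the MMIO address and datamatch value are
    // hypothetical): signal the EventFd only when the guest writes 0x1 to a
    // doorbell register at 0xd000_0000.
    //
    //     let fd = EventFd::new(libc::EFD_NONBLOCK).unwrap();
    //     let addr = IoEventAddress::Mmio(0xd000_0000);
    //     vm.register_ioevent(&fd, &addr, Some(DataMatch::DataMatch32(0x1)))
    //         .unwrap();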

    /// Unregister an event from a certain address it has been previously registered to.
    fn unregister_ioevent(&self, fd: &EventFd, addr: &IoEventAddress) -> vm::Result<()> {
        #[cfg(feature = "sev_snp")]
        if self.sev_snp_enabled {
            return Ok(());
        }

        let addr = &mshv_ioctls::IoEventAddress::from(*addr);
        debug!("unregister_ioevent fd {} addr {:x?}", fd.as_raw_fd(), addr);

        self.fd
            .unregister_ioevent(fd, addr, NoDatamatch)
            .map_err(|e| vm::HypervisorVmError::UnregisterIoEvent(e.into()))
    }

    /// Creates a guest physical memory region.
    fn create_user_memory_region(&self, user_memory_region: UserMemoryRegion) -> vm::Result<()> {
        let user_memory_region: mshv_user_mem_region = user_memory_region.into();
        // We keep track of the slots whether the region is read-only or not.
        // For a read-only region the hypervisor can still enable the dirty
        // bits, but a VM exit happens before the dirty bits are set.
        self.dirty_log_slots.write().unwrap().insert(
            user_memory_region.guest_pfn,
            MshvDirtyLogSlot {
                guest_pfn: user_memory_region.guest_pfn,
                memory_size: user_memory_region.size,
            },
        );

        self.fd
            .map_user_memory(user_memory_region)
            .map_err(|e| vm::HypervisorVmError::CreateUserMemory(e.into()))?;
        Ok(())
    }

    /// Removes a guest physical memory region.
    fn remove_user_memory_region(&self, user_memory_region: UserMemoryRegion) -> vm::Result<()> {
        let user_memory_region: mshv_user_mem_region = user_memory_region.into();
        // Remove the corresponding entry from "self.dirty_log_slots" if needed
        self.dirty_log_slots
            .write()
            .unwrap()
            .remove(&user_memory_region.guest_pfn);

        self.fd
            .unmap_user_memory(user_memory_region)
            .map_err(|e| vm::HypervisorVmError::RemoveUserMemory(e.into()))?;
        Ok(())
    }

    fn make_user_memory_region(
        &self,
        _slot: u32,
        guest_phys_addr: u64,
        memory_size: u64,
        userspace_addr: u64,
        readonly: bool,
        _log_dirty_pages: bool,
    ) -> UserMemoryRegion {
        let mut flags = 1 << MSHV_SET_MEM_BIT_EXECUTABLE;
        if !readonly {
            flags |= 1 << MSHV_SET_MEM_BIT_WRITABLE;
        }

        mshv_user_mem_region {
            flags,
            guest_pfn: guest_phys_addr >> PAGE_SHIFT,
            size: memory_size,
            userspace_addr,
            ..Default::default()
        }
        .into()
    }
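
    // Worked example for the PFN conversion above (values are illustrative):
    // with PAGE_SHIFT == 12, a region at guest_phys_addr 0x1_0000 becomes
    // guest_pfn 0x10, and the From<mshv_user_mem_region> impl at the top of
    // this module recovers the address with `guest_pfn << PAGE_SHIFT`:
    //
    //     assert_eq!(0x1_0000u64 >> PAGE_SHIFT, 0x10);
    //     assert_eq!(0x10u64 << PAGE_SHIFT, 0x1_0000);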

    fn create_passthrough_device(&self) -> vm::Result<VfioDeviceFd> {
        let mut vfio_dev = mshv_create_device {
            type_: MSHV_DEV_TYPE_VFIO,
            fd: 0,
            flags: 0,
        };

        self.create_device(&mut vfio_dev)
            .map_err(|e| vm::HypervisorVmError::CreatePassthroughDevice(e.into()))
    }

    ///
    /// Constructs a routing entry
    ///
    fn make_routing_entry(&self, gsi: u32, config: &InterruptSourceConfig) -> IrqRoutingEntry {
        match config {
            InterruptSourceConfig::MsiIrq(cfg) => mshv_user_irq_entry {
                gsi,
                address_lo: cfg.low_addr,
                address_hi: cfg.high_addr,
                data: cfg.data,
            }
            .into(),
            _ => {
                unreachable!()
            }
        }
    }

    fn set_gsi_routing(&self, entries: &[IrqRoutingEntry]) -> vm::Result<()> {
        let mut msi_routing =
            vec_with_array_field::<mshv_user_irq_table, mshv_user_irq_entry>(entries.len());
        msi_routing[0].nr = entries.len() as u32;

        let entries: Vec<mshv_user_irq_entry> = entries
            .iter()
            .map(|entry| match entry {
                IrqRoutingEntry::Mshv(e) => *e,
                #[allow(unreachable_patterns)]
                _ => panic!("IrqRoutingEntry type is wrong"),
            })
            .collect();

        // SAFETY: msi_routing was initialized with entries.len() and is now being turned into
        // entries_slice with entries.len() again. It is guaranteed to be large enough to hold
        // everything from entries.
        unsafe {
            let entries_slice: &mut [mshv_user_irq_entry] =
                msi_routing[0].entries.as_mut_slice(entries.len());
            entries_slice.copy_from_slice(&entries);
        }

        self.fd
            .set_msi_routing(&msi_routing[0])
            .map_err(|e| vm::HypervisorVmError::SetGsiRouting(e.into()))
    }

    ///
    /// Start logging dirty pages
    ///
    fn start_dirty_log(&self) -> vm::Result<()> {
        self.fd
            .enable_dirty_page_tracking()
            .map_err(|e| vm::HypervisorVmError::StartDirtyLog(e.into()))
    }

    ///
    /// Stop logging dirty pages
    ///
    fn stop_dirty_log(&self) -> vm::Result<()> {
        let dirty_log_slots = self.dirty_log_slots.read().unwrap();
        // Before disabling the dirty page tracking we need to set the dirty
        // bits in the hypervisor. This is a requirement of the Microsoft
        // Hypervisor.
        for (_, s) in dirty_log_slots.iter() {
            self.fd
                .get_dirty_log(
                    s.guest_pfn,
                    s.memory_size as usize,
                    MSHV_GPAP_ACCESS_OP_SET as u8,
                )
                .map_err(|e| vm::HypervisorVmError::StartDirtyLog(e.into()))?;
        }
        self.fd
            .disable_dirty_page_tracking()
            .map_err(|e| vm::HypervisorVmError::StartDirtyLog(e.into()))?;
        Ok(())
    }

    ///
    /// Get dirty pages bitmap (one bit per page)
    ///
    fn get_dirty_log(&self, _slot: u32, base_gpa: u64, memory_size: u64) -> vm::Result<Vec<u64>> {
        self.fd
            .get_dirty_log(
                base_gpa >> PAGE_SHIFT,
                memory_size as usize,
                MSHV_GPAP_ACCESS_OP_CLEAR as u8,
            )
            .map_err(|e| vm::HypervisorVmError::GetDirtyLog(e.into()))
    }
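
    // Typical dirty-tracking flow, as a sketch (addresses and sizes are
    // illustrative): the returned bitmap packs one bit per page into u64
    // words, so bit `n % 64` of word `n / 64` covers page `n` of the
    // queried range.
    //
    //     vm.start_dirty_log().unwrap();
    //     // ... guest runs, pages get dirtied ...
    //     let bitmap = vm.get_dirty_log(0, 0x1_0000, 0x20_0000).unwrap();
    //     for (word_idx, word) in bitmap.iter().enumerate() {
    //         for bit in 0..64 {
    //             if word & (1 << bit) != 0 {
    //                 let page = word_idx * 64 + bit; // dirty page index
    //                 // ... copy that page to the destination ...
    //             }
    //         }
    //     }
    //     vm.stop_dirty_log().unwrap();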

    /// Retrieve guest clock.
    #[cfg(target_arch = "x86_64")]
    fn get_clock(&self) -> vm::Result<ClockData> {
        let val = self
            .fd
            .get_partition_property(hv_partition_property_code_HV_PARTITION_PROPERTY_REFERENCE_TIME)
            .map_err(|e| vm::HypervisorVmError::GetClock(e.into()))?;
        Ok(MshvClockData { ref_time: val }.into())
    }

    /// Set guest clock.
    #[cfg(target_arch = "x86_64")]
    fn set_clock(&self, data: &ClockData) -> vm::Result<()> {
        let data: MshvClockData = (*data).into();
        self.fd
            .set_partition_property(
                hv_partition_property_code_HV_PARTITION_PROPERTY_REFERENCE_TIME,
                data.ref_time,
            )
            .map_err(|e| vm::HypervisorVmError::SetClock(e.into()))
    }
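
    // Save/restore sketch: the guest clock is just the partition's
    // reference-time property, so preserving it across a snapshot is a
    // round trip through these two calls (typically with the VM paused):
    //
    //     let clock = vm.get_clock().unwrap();
    //     // ... snapshot, migrate, or otherwise suspend the VM ...
    //     vm.set_clock(&clock).unwrap();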

    /// Downcast to the underlying MshvVm type
    fn as_any(&self) -> &dyn Any {
        self
    }

    /// Initialize the SEV-SNP VM
    #[cfg(feature = "sev_snp")]
    fn sev_snp_init(&self) -> vm::Result<()> {
        self.fd
            .set_partition_property(
                hv_partition_property_code_HV_PARTITION_PROPERTY_ISOLATION_STATE,
                hv_partition_isolation_state_HV_PARTITION_ISOLATION_SECURE as u64,
            )
            .map_err(|e| vm::HypervisorVmError::InitializeSevSnp(e.into()))
    }

    ///
    /// Import isolated pages; these pages will be used
    /// for the PSP (Platform Security Processor) measurement.
    #[cfg(feature = "sev_snp")]
    fn import_isolated_pages(
        &self,
        page_type: u32,
        page_size: u32,
        pages: &[u64],
    ) -> vm::Result<()> {
        debug_assert!(page_size == hv_isolated_page_size_HV_ISOLATED_PAGE_SIZE_4KB);
        if pages.is_empty() {
            return Ok(());
        }

        let mut isolated_pages =
            vec_with_array_field::<mshv_import_isolated_pages, u64>(pages.len());
        isolated_pages[0].page_type = page_type as u8;
        isolated_pages[0].page_count = pages.len() as u64;
        // SAFETY: isolated_pages was initialized with pages.len() and is now being turned into
        // pages_slice with pages.len() again. It is guaranteed to be large enough to hold
        // everything from pages.
        unsafe {
            let pages_slice: &mut [u64] = isolated_pages[0].guest_pfns.as_mut_slice(pages.len());
            pages_slice.copy_from_slice(pages);
        }
        self.fd
            .import_isolated_pages(&isolated_pages[0])
            .map_err(|e| vm::HypervisorVmError::ImportIsolatedPages(e.into()))
    }

    ///
    /// Complete the isolated import, telling the hypervisor that
    /// importing the pages to guest memory is complete.
    ///
    #[cfg(feature = "sev_snp")]
    fn complete_isolated_import(
        &self,
        snp_id_block: IGVM_VHS_SNP_ID_BLOCK,
        host_data: [u8; 32],
        id_block_enabled: u8,
    ) -> vm::Result<()> {
        let mut auth_info = hv_snp_id_auth_info {
            id_key_algorithm: snp_id_block.id_key_algorithm,
            auth_key_algorithm: snp_id_block.author_key_algorithm,
            ..Default::default()
        };
        // Each of the r/s signature components is 576 bits (72 bytes) long.
        auth_info.id_block_signature[..SIG_R_COMPONENT_SIZE_IN_BYTES]
            .copy_from_slice(snp_id_block.id_key_signature.r_comp.as_ref());
        auth_info.id_block_signature
            [SIG_R_COMPONENT_SIZE_IN_BYTES..SIG_R_AND_S_COMPONENT_SIZE_IN_BYTES]
            .copy_from_slice(snp_id_block.id_key_signature.s_comp.as_ref());
        auth_info.id_key[..ECDSA_CURVE_ID_SIZE_IN_BYTES]
            .copy_from_slice(snp_id_block.id_public_key.curve.to_le_bytes().as_ref());
        auth_info.id_key[ECDSA_SIG_X_COMPONENT_START..ECDSA_SIG_X_COMPONENT_END]
            .copy_from_slice(snp_id_block.id_public_key.qx.as_ref());
        auth_info.id_key[ECDSA_SIG_Y_COMPONENT_START..ECDSA_SIG_Y_COMPONENT_END]
            .copy_from_slice(snp_id_block.id_public_key.qy.as_ref());

        let data = mshv_complete_isolated_import {
            import_data: hv_partition_complete_isolated_import_data {
                psp_parameters: hv_psp_launch_finish_data {
                    id_block: hv_snp_id_block {
                        launch_digest: snp_id_block.ld,
                        family_id: snp_id_block.family_id,
                        image_id: snp_id_block.image_id,
                        version: snp_id_block.version,
                        guest_svn: snp_id_block.guest_svn,
                        policy: get_default_snp_guest_policy(),
                    },
                    id_auth_info: auth_info,
                    host_data,
                    id_block_enabled,
                    author_key_enabled: 0,
                },
            },
        };
        self.fd
            .complete_isolated_import(&data)
            .map_err(|e| vm::HypervisorVmError::CompleteIsolatedImport(e.into()))
    }
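
    // Layout sketch of the id_block_signature buffer filled in above,
    // assuming the snp_constants sizes match the comment
    // (SIG_R_COMPONENT_SIZE_IN_BYTES == 72, i.e. 576 bits per component):
    //
    //     bytes [0, 72)    r component
    //     bytes [72, 144)  s component
    //
    // id_key starts with the little-endian ECDSA curve id, followed by the
    // qx and qy point coordinates at their fixed offsets.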

    #[cfg(target_arch = "aarch64")]
    fn create_vgic(&self, config: VgicConfig) -> vm::Result<Arc<Mutex<dyn Vgic>>> {
        let gic_device = MshvGicV2M::new(self, config)
            .map_err(|e| vm::HypervisorVmError::CreateVgic(anyhow!("Vgic error {:?}", e)))?;

        // Register GICD address with the hypervisor
        self.fd
            .set_partition_property(
                hv_partition_property_code_HV_PARTITION_PROPERTY_GICD_BASE_ADDRESS,
                gic_device.dist_addr,
            )
            .map_err(|e| {
                vm::HypervisorVmError::CreateVgic(anyhow!("Failed to set GICD address: {}", e))
            })?;

        // Register GITS address with the hypervisor
        self.fd
            .set_partition_property(
                // spellchecker:disable-line
                hv_partition_property_code_HV_PARTITION_PROPERTY_GITS_TRANSLATER_BASE_ADDRESS,
                gic_device.gits_addr,
            )
            .map_err(|e| {
                vm::HypervisorVmError::CreateVgic(anyhow!("Failed to set GITS address: {}", e))
            })?;

        Ok(Arc::new(Mutex::new(gic_device)))
    }

    #[cfg(target_arch = "aarch64")]
    fn get_preferred_target(&self, _kvi: &mut crate::VcpuInit) -> vm::Result<()> {
        unimplemented!()
    }

    /// Pause the VM
    fn pause(&self) -> vm::Result<()> {
        // Freeze the partition
        self.fd
            .set_partition_property(
                hv_partition_property_code_HV_PARTITION_PROPERTY_TIME_FREEZE,
                1u64,
            )
            .map_err(|e| {
                vm::HypervisorVmError::SetVmProperty(anyhow!(
                    "Failed to set partition property: {}",
                    e
                ))
            })
    }

    /// Resume the VM
    fn resume(&self) -> vm::Result<()> {
        // Resume the partition by clearing the TIME_FREEZE property
        self.fd
            .set_partition_property(
                hv_partition_property_code_HV_PARTITION_PROPERTY_TIME_FREEZE,
                0u64,
            )
            .map_err(|e| {
                vm::HypervisorVmError::SetVmProperty(anyhow!(
                    "Failed to set partition property: {}",
                    e
                ))
            })
    }
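
    // Pause/resume are a matched pair around the same TIME_FREEZE property
    // (1 == frozen, 0 == running), so a host-side maintenance window is just:
    //
    //     vm.pause().unwrap();
    //     // ... take a consistent snapshot of guest state ...
    //     vm.resume().unwrap();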

    #[cfg(feature = "sev_snp")]
    fn gain_page_access(&self, gpa: u64, size: u32) -> vm::Result<()> {
        use mshv_ioctls::set_bits;
        const ONE_GB: usize = 1024 * 1024 * 1024;

        if !self.sev_snp_enabled {
            return Ok(());
        }

        let start_gpfn: u64 = gpa >> PAGE_SHIFT;
        let end_gpfn: u64 = (gpa + size as u64 - 1) >> PAGE_SHIFT;

        // Enlarge the bitmap if the last PFN is beyond the bitmap length
        if end_gpfn >= self.host_access_pages.load().as_ref().len() as u64 {
            self.host_access_pages.rcu(|bitmap| {
                let mut bm = bitmap.as_ref().clone();
                bm.enlarge(ONE_GB);
                bm
            });
        }

        let gpas: Vec<u64> = (start_gpfn..=end_gpfn)
            .filter(|x| {
                !self
                    .host_access_pages
                    .load()
                    .as_ref()
                    .is_bit_set(*x as usize)
            })
            .map(|x| x << PAGE_SHIFT)
            .collect();

        if !gpas.is_empty() {
            let mut gpa_list = vec_with_array_field::<mshv_modify_gpa_host_access, u64>(gpas.len());
            gpa_list[0].page_count = gpas.len() as u64;
            gpa_list[0].flags = set_bits!(
                u8,
                MSHV_GPA_HOST_ACCESS_BIT_ACQUIRE,
                MSHV_GPA_HOST_ACCESS_BIT_READABLE,
                MSHV_GPA_HOST_ACCESS_BIT_WRITABLE
            );

            // SAFETY: gpa_list was initialized with gpas.len() and is now being turned into
            // gpas_slice with gpas.len() again. It is guaranteed to be large enough to hold
            // everything from gpas.
            unsafe {
                let gpas_slice: &mut [u64] = gpa_list[0].guest_pfns.as_mut_slice(gpas.len());
                gpas_slice.copy_from_slice(gpas.as_slice());
            }

            self.fd
                .modify_gpa_host_access(&gpa_list[0])
                .map_err(|e| vm::HypervisorVmError::ModifyGpaHostAccess(e.into()))?;

            for acquired_gpa in gpas {
                self.host_access_pages.rcu(|bitmap| {
                    let bm = bitmap.clone();
                    bm.set_bit((acquired_gpa >> PAGE_SHIFT) as usize);
                    bm
                });
            }
        }

        Ok(())
    }
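
    // Worked example for the PFN-range computation above (illustrative
    // values, PAGE_SHIFT == 12): a request for gpa == 0x1800 with
    // size == 0x2000 straddles three 4KiB pages, so
    //
    //     start_gpfn == 0x1800 >> 12                == 1
    //     end_gpfn   == (0x1800 + 0x2000 - 1) >> 12 == 3
    //
    // and PFNs 1..=3 are acquired (minus any already set in the bitmap).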
}