xref: /cloud-hypervisor/hypervisor/src/kvm/mod.rs (revision eeae63b4595fbf0cc69f62b6e9d9a79c543c4ac7)
1 // Copyright © 2024 Institute of Software, CAS. All rights reserved.
2 //
3 // Copyright © 2019 Intel Corporation
4 //
5 // SPDX-License-Identifier: Apache-2.0 OR BSD-3-Clause
6 //
7 // Copyright © 2020, Microsoft Corporation
8 //
9 // Copyright 2018-2019 CrowdStrike, Inc.
10 //
11 //
12 
13 use std::any::Any;
14 use std::collections::HashMap;
15 #[cfg(target_arch = "x86_64")]
16 use std::fs::File;
17 #[cfg(target_arch = "x86_64")]
18 use std::os::unix::io::AsRawFd;
19 #[cfg(feature = "tdx")]
20 use std::os::unix::io::RawFd;
21 use std::result;
22 #[cfg(target_arch = "x86_64")]
23 use std::sync::atomic::{AtomicBool, Ordering};
24 use std::sync::{Arc, Mutex, RwLock};
25 
26 use kvm_ioctls::{NoDatamatch, VcpuFd, VmFd};
27 use vmm_sys_util::eventfd::EventFd;
28 
29 #[cfg(target_arch = "aarch64")]
30 use crate::aarch64::gic::KvmGicV3Its;
31 #[cfg(target_arch = "aarch64")]
32 pub use crate::aarch64::{
33     check_required_kvm_extensions, gic::Gicv3ItsState as GicState, is_system_register, VcpuInit,
34     VcpuKvmState,
35 };
36 #[cfg(target_arch = "aarch64")]
37 use crate::arch::aarch64::gic::{Vgic, VgicConfig};
38 #[cfg(target_arch = "riscv64")]
39 use crate::arch::riscv64::aia::{Vaia, VaiaConfig};
40 #[cfg(target_arch = "riscv64")]
41 use crate::riscv64::aia::KvmAiaImsics;
42 #[cfg(target_arch = "riscv64")]
43 pub use crate::riscv64::{
44     aia::AiaImsicsState as AiaState, check_required_kvm_extensions, is_non_core_register,
45     VcpuKvmState,
46 };
47 use crate::vm::{self, InterruptSourceConfig, VmOps};
48 #[cfg(target_arch = "aarch64")]
49 use crate::{arm64_core_reg_id, offset_of};
50 use crate::{cpu, hypervisor, vec_with_array_field, HypervisorType};
51 #[cfg(target_arch = "riscv64")]
52 use crate::{offset_of, riscv64_reg_id};
53 // x86_64 dependencies
54 #[cfg(target_arch = "x86_64")]
55 pub mod x86_64;
56 #[cfg(target_arch = "aarch64")]
57 use aarch64::{RegList, Register};
58 #[cfg(target_arch = "x86_64")]
59 use kvm_bindings::{
60     kvm_enable_cap, kvm_msr_entry, MsrList, KVM_CAP_HYPERV_SYNIC, KVM_CAP_SPLIT_IRQCHIP,
61     KVM_GUESTDBG_USE_HW_BP,
62 };
63 #[cfg(target_arch = "riscv64")]
64 use riscv64::{RegList, Register};
65 #[cfg(target_arch = "x86_64")]
66 use x86_64::check_required_kvm_extensions;
67 #[cfg(target_arch = "x86_64")]
68 pub use x86_64::{CpuId, ExtendedControlRegisters, MsrEntries, VcpuKvmState};
69 
70 #[cfg(target_arch = "x86_64")]
71 use crate::arch::x86::{
72     CpuIdEntry, FpuState, LapicState, MsrEntry, SpecialRegisters, XsaveState, NUM_IOAPIC_PINS,
73 };
74 #[cfg(target_arch = "x86_64")]
75 use crate::ClockData;
76 use crate::{
77     CpuState, IoEventAddress, IrqRoutingEntry, MpState, StandardRegisters, UserMemoryRegion,
78     USER_MEMORY_REGION_LOG_DIRTY, USER_MEMORY_REGION_READ, USER_MEMORY_REGION_WRITE,
79 };
80 // aarch64 dependencies
81 #[cfg(target_arch = "aarch64")]
82 pub mod aarch64;
83 // riscv64 dependencies
84 #[cfg(target_arch = "riscv64")]
85 pub mod riscv64;
86 #[cfg(target_arch = "aarch64")]
87 use std::mem;
88 
89 ///
90 /// Export generically-named wrappers of kvm-bindings for Unix-based platforms
91 ///
92 #[cfg(any(target_arch = "x86_64", target_arch = "aarch64"))]
93 pub use kvm_bindings::kvm_vcpu_events as VcpuEvents;
94 pub use kvm_bindings::{
95     kvm_clock_data, kvm_create_device, kvm_create_device as CreateDevice,
96     kvm_device_attr as DeviceAttr, kvm_device_type_KVM_DEV_TYPE_VFIO, kvm_guest_debug,
97     kvm_irq_routing, kvm_irq_routing_entry, kvm_mp_state, kvm_run, kvm_userspace_memory_region,
98     KVM_GUESTDBG_ENABLE, KVM_GUESTDBG_SINGLESTEP, KVM_IRQ_ROUTING_IRQCHIP, KVM_IRQ_ROUTING_MSI,
99     KVM_MEM_LOG_DIRTY_PAGES, KVM_MEM_READONLY, KVM_MSI_VALID_DEVID,
100 };
101 #[cfg(target_arch = "aarch64")]
102 use kvm_bindings::{
103     kvm_regs, user_fpsimd_state, user_pt_regs, KVM_GUESTDBG_USE_HW, KVM_NR_SPSR, KVM_REG_ARM64,
104     KVM_REG_ARM64_SYSREG, KVM_REG_ARM64_SYSREG_CRM_MASK, KVM_REG_ARM64_SYSREG_CRN_MASK,
105     KVM_REG_ARM64_SYSREG_OP0_MASK, KVM_REG_ARM64_SYSREG_OP1_MASK, KVM_REG_ARM64_SYSREG_OP2_MASK,
106     KVM_REG_ARM_CORE, KVM_REG_SIZE_U128, KVM_REG_SIZE_U32, KVM_REG_SIZE_U64,
107 };
108 #[cfg(target_arch = "riscv64")]
109 use kvm_bindings::{kvm_riscv_core, user_regs_struct, KVM_REG_RISCV_CORE};
110 #[cfg(feature = "tdx")]
111 use kvm_bindings::{kvm_run__bindgen_ty_1, KVMIO};
112 pub use kvm_ioctls::{Cap, Kvm, VcpuExit};
113 use thiserror::Error;
114 use vfio_ioctls::VfioDeviceFd;
115 #[cfg(feature = "tdx")]
116 use vmm_sys_util::{ioctl::ioctl_with_val, ioctl_ioc_nr, ioctl_iowr_nr};
117 pub use {kvm_bindings, kvm_ioctls};
118 
119 #[cfg(target_arch = "x86_64")]
120 const KVM_CAP_SGX_ATTRIBUTE: u32 = 196;
121 
122 #[cfg(target_arch = "x86_64")]
123 use vmm_sys_util::ioctl_io_nr;
124 #[cfg(all(not(feature = "tdx"), target_arch = "x86_64"))]
125 use vmm_sys_util::ioctl_ioc_nr;
126 
127 #[cfg(target_arch = "x86_64")]
128 ioctl_io_nr!(KVM_NMI, kvm_bindings::KVMIO, 0x9a);
129 
130 #[cfg(feature = "tdx")]
131 const KVM_EXIT_TDX: u32 = 50;
132 #[cfg(feature = "tdx")]
133 const TDG_VP_VMCALL_GET_QUOTE: u64 = 0x10002;
134 #[cfg(feature = "tdx")]
135 const TDG_VP_VMCALL_SETUP_EVENT_NOTIFY_INTERRUPT: u64 = 0x10004;
136 #[cfg(feature = "tdx")]
137 const TDG_VP_VMCALL_SUCCESS: u64 = 0;
138 #[cfg(feature = "tdx")]
139 const TDG_VP_VMCALL_INVALID_OPERAND: u64 = 0x8000000000000000;
140 
141 #[cfg(feature = "tdx")]
142 ioctl_iowr_nr!(KVM_MEMORY_ENCRYPT_OP, KVMIO, 0xba, std::os::raw::c_ulong);
143 
144 #[cfg(feature = "tdx")]
145 #[repr(u32)]
146 enum TdxCommand {
147     Capabilities = 0,
148     InitVm,
149     InitVcpu,
150     InitMemRegion,
151     Finalize,
152 }
153 
154 #[cfg(feature = "tdx")]
155 pub enum TdxExitDetails {
156     GetQuote,
157     SetupEventNotifyInterrupt,
158 }
159 
160 #[cfg(feature = "tdx")]
161 pub enum TdxExitStatus {
162     Success,
163     InvalidOperand,
164 }
165 
166 #[cfg(feature = "tdx")]
167 const TDX_MAX_NR_CPUID_CONFIGS: usize = 6;
168 
169 #[cfg(feature = "tdx")]
170 #[repr(C)]
171 #[derive(Debug, Default)]
172 pub struct TdxCpuidConfig {
173     pub leaf: u32,
174     pub sub_leaf: u32,
175     pub eax: u32,
176     pub ebx: u32,
177     pub ecx: u32,
178     pub edx: u32,
179 }
180 
181 #[cfg(feature = "tdx")]
182 #[repr(C)]
183 #[derive(Debug, Default)]
184 pub struct TdxCapabilities {
185     pub attrs_fixed0: u64,
186     pub attrs_fixed1: u64,
187     pub xfam_fixed0: u64,
188     pub xfam_fixed1: u64,
189     pub nr_cpuid_configs: u32,
190     pub padding: u32,
191     pub cpuid_configs: [TdxCpuidConfig; TDX_MAX_NR_CPUID_CONFIGS],
192 }
193 
194 #[cfg(feature = "tdx")]
195 #[repr(C)] #[derive(Copy, Clone)]
196 pub struct KvmTdxExit {
197     pub type_: u32,
198     pub pad: u32,
199     pub u: KvmTdxExitU,
200 }
201 
202 #[cfg(feature = "tdx")]
203 #[repr(C)]
204 #[derive(Copy, Clone)]
205 pub union KvmTdxExitU {
206     pub vmcall: KvmTdxExitVmcall,
207 }
208 
209 #[cfg(feature = "tdx")]
210 #[repr(C)]
211 #[derive(Debug, Default, Copy, Clone, PartialEq)]
212 pub struct KvmTdxExitVmcall {
213     pub type_: u64,
214     pub subfunction: u64,
215     pub reg_mask: u64,
216     pub in_r12: u64,
217     pub in_r13: u64,
218     pub in_r14: u64,
219     pub in_r15: u64,
220     pub in_rbx: u64,
221     pub in_rdi: u64,
222     pub in_rsi: u64,
223     pub in_r8: u64,
224     pub in_r9: u64,
225     pub in_rdx: u64,
226     pub status_code: u64,
227     pub out_r11: u64,
228     pub out_r12: u64,
229     pub out_r13: u64,
230     pub out_r14: u64,
231     pub out_r15: u64,
232     pub out_rbx: u64,
233     pub out_rdi: u64,
234     pub out_rsi: u64,
235     pub out_r8: u64,
236     pub out_r9: u64,
237     pub out_rdx: u64,
238 }
239 
240 impl From<kvm_userspace_memory_region> for UserMemoryRegion {
241     fn from(region: kvm_userspace_memory_region) -> Self {
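        // KVM guest memory is always readable; writability is indicated by the
        // absence of KVM_MEM_READONLY.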
242         let mut flags = USER_MEMORY_REGION_READ;
243         if region.flags & KVM_MEM_READONLY == 0 {
244             flags |= USER_MEMORY_REGION_WRITE;
245         }
246         if region.flags & KVM_MEM_LOG_DIRTY_PAGES != 0 {
247             flags |= USER_MEMORY_REGION_LOG_DIRTY;
248         }
249 
250         UserMemoryRegion {
251             slot: region.slot,
252             guest_phys_addr: region.guest_phys_addr,
253             memory_size: region.memory_size,
254             userspace_addr: region.userspace_addr,
255             flags,
256         }
257     }
258 }
259 
260 impl From<UserMemoryRegion> for kvm_userspace_memory_region {
261     fn from(region: UserMemoryRegion) -> Self {
262         assert!(
263             region.flags & USER_MEMORY_REGION_READ != 0,
264             "KVM mapped memory is always readable"
265         );
266 
267         let mut flags = 0;
268         if region.flags & USER_MEMORY_REGION_WRITE == 0 {
269             flags |= KVM_MEM_READONLY;
270         }
271         if region.flags & USER_MEMORY_REGION_LOG_DIRTY != 0 {
272             flags |= KVM_MEM_LOG_DIRTY_PAGES;
273         }
274 
275         kvm_userspace_memory_region {
276             slot: region.slot,
277             guest_phys_addr: region.guest_phys_addr,
278             memory_size: region.memory_size,
279             userspace_addr: region.userspace_addr,
280             flags,
281         }
282     }
283 }
284 
285 impl From<kvm_mp_state> for MpState {
286     fn from(s: kvm_mp_state) -> Self {
287         MpState::Kvm(s)
288     }
289 }
290 
291 impl From<MpState> for kvm_mp_state {
292     fn from(ms: MpState) -> Self {
293         match ms {
294             MpState::Kvm(s) => s,
295             /* Needed in case other hypervisors are enabled */
296             #[allow(unreachable_patterns)]
297             _ => panic!("MpState is not valid"),
298         }
299     }
300 }
301 
302 impl From<kvm_ioctls::IoEventAddress> for IoEventAddress {
303     fn from(a: kvm_ioctls::IoEventAddress) -> Self {
304         match a {
305             kvm_ioctls::IoEventAddress::Pio(x) => Self::Pio(x),
306             kvm_ioctls::IoEventAddress::Mmio(x) => Self::Mmio(x),
307         }
308     }
309 }
310 
311 impl From<IoEventAddress> for kvm_ioctls::IoEventAddress {
312     fn from(a: IoEventAddress) -> Self {
313         match a {
314             IoEventAddress::Pio(x) => Self::Pio(x),
315             IoEventAddress::Mmio(x) => Self::Mmio(x),
316         }
317     }
318 }
319 
320 impl From<VcpuKvmState> for CpuState {
321     fn from(s: VcpuKvmState) -> Self {
322         CpuState::Kvm(s)
323     }
324 }
325 
326 impl From<CpuState> for VcpuKvmState {
327     fn from(s: CpuState) -> Self {
328         match s {
329             CpuState::Kvm(s) => s,
330             /* Needed in case other hypervisors are enabled */
331             #[allow(unreachable_patterns)]
332             _ => panic!("CpuState is not valid"),
333         }
334     }
335 }
336 
337 #[cfg(target_arch = "x86_64")]
338 impl From<kvm_clock_data> for ClockData {
339     fn from(d: kvm_clock_data) -> Self {
340         ClockData::Kvm(d)
341     }
342 }
343 
344 #[cfg(target_arch = "x86_64")]
345 impl From<ClockData> for kvm_clock_data {
346     fn from(ms: ClockData) -> Self {
347         match ms {
348             ClockData::Kvm(s) => s,
349             /* Needed in case other hypervisors are enabled */
350             #[allow(unreachable_patterns)]
351             _ => panic!("ClockData is not valid"),
352         }
353     }
354 }
355 
356 #[cfg(not(target_arch = "riscv64"))]
357 impl From<kvm_bindings::kvm_regs> for crate::StandardRegisters {
358     fn from(s: kvm_bindings::kvm_regs) -> Self {
359         crate::StandardRegisters::Kvm(s)
360     }
361 }
362 
363 #[cfg(not(target_arch = "riscv64"))]
364 impl From<crate::StandardRegisters> for kvm_bindings::kvm_regs {
365     fn from(e: crate::StandardRegisters) -> Self {
366         match e {
367             crate::StandardRegisters::Kvm(e) => e,
368             /* Needed in case other hypervisors are enabled */
369             #[allow(unreachable_patterns)]
370             _ => panic!("StandardRegisters are not valid"),
371         }
372     }
373 }
374 
375 #[cfg(target_arch = "riscv64")]
376 impl From<kvm_bindings::kvm_riscv_core> for crate::StandardRegisters {
377     fn from(s: kvm_bindings::kvm_riscv_core) -> Self {
378         crate::StandardRegisters::Kvm(s)
379     }
380 }
381 
382 #[cfg(target_arch = "riscv64")]
383 impl From<crate::StandardRegisters> for kvm_bindings::kvm_riscv_core {
384     fn from(e: crate::StandardRegisters) -> Self {
385         match e {
386             crate::StandardRegisters::Kvm(e) => e,
387             /* Needed in case other hypervisors are enabled */
388             #[allow(unreachable_patterns)]
389             _ => panic!("StandardRegisters are not valid"),
390         }
391     }
392 }
393 
394 impl From<kvm_irq_routing_entry> for IrqRoutingEntry {
395     fn from(s: kvm_irq_routing_entry) -> Self {
396         IrqRoutingEntry::Kvm(s)
397     }
398 }
399 
400 impl From<IrqRoutingEntry> for kvm_irq_routing_entry {
401     fn from(e: IrqRoutingEntry) -> Self {
402         match e {
403             IrqRoutingEntry::Kvm(e) => e,
404             /* Needed in case other hypervisors are enabled */
405             #[allow(unreachable_patterns)]
406             _ => panic!("IrqRoutingEntry is not valid"),
407         }
408     }
409 }
410 
411 struct KvmDirtyLogSlot {
412     slot: u32,
413     guest_phys_addr: u64,
414     memory_size: u64,
415     userspace_addr: u64,
416 }
417 
418 /// Wrapper over KVM VM ioctls.
419 pub struct KvmVm {
420     fd: Arc<VmFd>,
421     #[cfg(target_arch = "x86_64")]
422     msrs: Vec<MsrEntry>,
423     dirty_log_slots: Arc<RwLock<HashMap<u32, KvmDirtyLogSlot>>>,
424 }
425 
426 impl KvmVm {
427     ///
428     /// Creates an emulated device in the kernel.
429     ///
430     /// See the documentation for `KVM_CREATE_DEVICE`.
431     fn create_device(&self, device: &mut CreateDevice) -> vm::Result<vfio_ioctls::VfioDeviceFd> {
432         let device_fd = self
433             .fd
434             .create_device(device)
435             .map_err(|e| vm::HypervisorVmError::CreateDevice(e.into()))?;
436         Ok(VfioDeviceFd::new_from_kvm(device_fd))
437     }
438     /// Checks if a particular `Cap` is available.
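    ///
    /// A minimal usage sketch (assumes a KVM-capable host; the `KvmVm` is
    /// recovered by downcasting the generic `Vm` trait object):
    ///
    /// ```
    /// # use hypervisor::kvm::{Cap, KvmHypervisor, KvmVm};
    /// let hypervisor = KvmHypervisor::new().unwrap();
    /// let vm = hypervisor.create_vm().unwrap();
    /// let kvm_vm = vm.as_any().downcast_ref::<KvmVm>().unwrap();
    /// let has_irqfd = kvm_vm.check_extension(Cap::Irqfd);
    /// ```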
439     pub fn check_extension(&self, c: Cap) -> bool {
440         self.fd.check_extension(c)
441     }
442 }
443 
444 /// Implementation of Vm trait for KVM
445 ///
446 /// # Examples
447 ///
448 /// ```
449 /// # use hypervisor::kvm::KvmHypervisor;
450 /// # use std::sync::Arc;
451 /// let kvm = KvmHypervisor::new().unwrap();
452 /// let hypervisor = Arc::new(kvm);
453 /// let vm = hypervisor.create_vm().expect("new VM fd creation failed");
454 /// ```
455 impl vm::Vm for KvmVm {
456     #[cfg(target_arch = "x86_64")]
457     ///
458     /// Sets the address of the one-page region in the VM's address space.
459     ///
460     fn set_identity_map_address(&self, address: u64) -> vm::Result<()> {
461         self.fd
462             .set_identity_map_address(address)
463             .map_err(|e| vm::HypervisorVmError::SetIdentityMapAddress(e.into()))
464     }
465 
466     #[cfg(target_arch = "x86_64")]
467     ///
468     /// Sets the address of the three-page region in the VM's address space.
469     ///
470     fn set_tss_address(&self, offset: usize) -> vm::Result<()> {
471         self.fd
472             .set_tss_address(offset)
473             .map_err(|e| vm::HypervisorVmError::SetTssAddress(e.into()))
474     }
475 
476     #[cfg(any(target_arch = "x86_64", target_arch = "aarch64"))]
477     ///
478     /// Creates an in-kernel interrupt controller.
479     ///
480     fn create_irq_chip(&self) -> vm::Result<()> {
481         self.fd
482             .create_irq_chip()
483             .map_err(|e| vm::HypervisorVmError::CreateIrq(e.into()))
484     }
485 
486     ///
487     /// Registers an event that will, when signaled, trigger the `gsi` IRQ.
488     ///
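    /// A minimal sketch (the GSI number is illustrative; an in-kernel irqchip
    /// must exist before an irqfd can be registered):
    ///
    /// ```
    /// # use hypervisor::kvm::KvmHypervisor;
    /// # use vmm_sys_util::eventfd::EventFd;
    /// let hypervisor = KvmHypervisor::new().unwrap();
    /// let vm = hypervisor.create_vm().unwrap();
    /// vm.create_irq_chip().unwrap();
    /// let fd = EventFd::new(0).unwrap();
    /// vm.register_irqfd(&fd, 30).unwrap();
    /// ```
    ///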
489     fn register_irqfd(&self, fd: &EventFd, gsi: u32) -> vm::Result<()> {
490         self.fd
491             .register_irqfd(fd, gsi)
492             .map_err(|e| vm::HypervisorVmError::RegisterIrqFd(e.into()))
493     }
494 
495     ///
496     /// Unregisters an event that will, when signaled, trigger the `gsi` IRQ.
497     ///
498     fn unregister_irqfd(&self, fd: &EventFd, gsi: u32) -> vm::Result<()> {
499         self.fd
500             .unregister_irqfd(fd, gsi)
501             .map_err(|e| vm::HypervisorVmError::UnregisterIrqFd(e.into()))
502     }
503 
504     ///
505     /// Creates a VcpuFd object from a vcpu RawFd.
506     ///
507     fn create_vcpu(
508         &self,
509         id: u8,
510         vm_ops: Option<Arc<dyn VmOps>>,
511     ) -> vm::Result<Arc<dyn cpu::Vcpu>> {
512         let fd = self
513             .fd
514             .create_vcpu(id as u64)
515             .map_err(|e| vm::HypervisorVmError::CreateVcpu(e.into()))?;
516         let vcpu = KvmVcpu {
517             fd: Arc::new(Mutex::new(fd)),
518             #[cfg(target_arch = "x86_64")]
519             msrs: self.msrs.clone(),
520             vm_ops,
521             #[cfg(target_arch = "x86_64")]
522             hyperv_synic: AtomicBool::new(false),
523         };
524         Ok(Arc::new(vcpu))
525     }
526 
527     #[cfg(target_arch = "aarch64")]
528     ///
529     /// Creates a virtual GIC device.
530     ///
531     fn create_vgic(&self, config: VgicConfig) -> vm::Result<Arc<Mutex<dyn Vgic>>> {
532         let gic_device = KvmGicV3Its::new(self, config)
533             .map_err(|e| vm::HypervisorVmError::CreateVgic(anyhow!("Vgic error {:?}", e)))?;
534         Ok(Arc::new(Mutex::new(gic_device)))
535     }
536 
537     #[cfg(target_arch = "riscv64")]
538     ///
539     /// Creates a virtual AIA device.
540     ///
541     fn create_vaia(&self, config: VaiaConfig) -> vm::Result<Arc<Mutex<dyn Vaia>>> {
542         let aia_device = KvmAiaImsics::new(self, config)
543             .map_err(|e| vm::HypervisorVmError::CreateVaia(anyhow!("Vaia error {:?}", e)))?;
544         Ok(Arc::new(Mutex::new(aia_device)))
545     }
546 
547     ///
548     /// Registers an event to be signaled whenever a certain address is written to.
549     ///
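    /// A minimal sketch (the MMIO address is illustrative; passing a
    /// `DataMatch` instead of `None` would make the eventfd fire only for
    /// writes of that exact value):
    ///
    /// ```
    /// # use hypervisor::kvm::KvmHypervisor;
    /// # use hypervisor::IoEventAddress;
    /// # use vmm_sys_util::eventfd::EventFd;
    /// let hypervisor = KvmHypervisor::new().unwrap();
    /// let vm = hypervisor.create_vm().unwrap();
    /// let fd = EventFd::new(0).unwrap();
    /// let addr = IoEventAddress::Mmio(0xd000_0000);
    /// vm.register_ioevent(&fd, &addr, None).unwrap();
    /// ```
    ///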
550     fn register_ioevent(
551         &self,
552         fd: &EventFd,
553         addr: &IoEventAddress,
554         datamatch: Option<vm::DataMatch>,
555     ) -> vm::Result<()> {
556         let addr = &kvm_ioctls::IoEventAddress::from(*addr);
557         if let Some(dm) = datamatch {
558             match dm {
559                 vm::DataMatch::DataMatch32(kvm_dm32) => self
560                     .fd
561                     .register_ioevent(fd, addr, kvm_dm32)
562                     .map_err(|e| vm::HypervisorVmError::RegisterIoEvent(e.into())),
563                 vm::DataMatch::DataMatch64(kvm_dm64) => self
564                     .fd
565                     .register_ioevent(fd, addr, kvm_dm64)
566                     .map_err(|e| vm::HypervisorVmError::RegisterIoEvent(e.into())),
567             }
568         } else {
569             self.fd
570                 .register_ioevent(fd, addr, NoDatamatch)
571                 .map_err(|e| vm::HypervisorVmError::RegisterIoEvent(e.into()))
572         }
573     }
574 
575     ///
576     /// Unregisters an event from a certain address it has been previously registered to.
577     ///
578     fn unregister_ioevent(&self, fd: &EventFd, addr: &IoEventAddress) -> vm::Result<()> {
579         let addr = &kvm_ioctls::IoEventAddress::from(*addr);
580         self.fd
581             .unregister_ioevent(fd, addr, NoDatamatch)
582             .map_err(|e| vm::HypervisorVmError::UnregisterIoEvent(e.into()))
583     }
584 
585     ///
586     /// Constructs a routing entry
587     ///
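    /// A minimal sketch (GSI and pin are illustrative, and it assumes
    /// `LegacyIrqSourceConfig` is reachable through `hypervisor::vm`):
    ///
    /// ```
    /// # use hypervisor::kvm::KvmHypervisor;
    /// # use hypervisor::vm::{InterruptSourceConfig, LegacyIrqSourceConfig};
    /// let hypervisor = KvmHypervisor::new().unwrap();
    /// let vm = hypervisor.create_vm().unwrap();
    /// vm.create_irq_chip().unwrap();
    /// let config = InterruptSourceConfig::LegacyIrq(LegacyIrqSourceConfig {
    ///     irqchip: 0,
    ///     pin: 4,
    /// });
    /// let entry = vm.make_routing_entry(4, &config);
    /// vm.set_gsi_routing(&[entry]).unwrap();
    /// ```
    ///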
588     fn make_routing_entry(&self, gsi: u32, config: &InterruptSourceConfig) -> IrqRoutingEntry {
589         match &config {
590             InterruptSourceConfig::MsiIrq(cfg) => {
591                 let mut kvm_route = kvm_irq_routing_entry {
592                     gsi,
593                     type_: KVM_IRQ_ROUTING_MSI,
594                     ..Default::default()
595                 };
596 
597                 kvm_route.u.msi.address_lo = cfg.low_addr;
598                 kvm_route.u.msi.address_hi = cfg.high_addr;
599                 kvm_route.u.msi.data = cfg.data;
600 
601                 if self.check_extension(crate::kvm::Cap::MsiDevid) {
602                     // On AArch64, there is a limitation on the range of the
603                     // 'devid': it cannot be larger than 65535 (the max of a u16).
604                     //
605                     // The BDF cannot be used directly, because the 'segment'
606                     // sits in the high 16 bits. The layout of the u32 BDF is:
607                     // |---- 16 bits ----|-- 8 bits --|-- 5 bits --|-- 3 bits --|
608                     // |      segment    |     bus    |   device   |  function  |
609                     //
610                     // Since we support only one bus per segment, we can build
611                     // a 'devid' by replacing the 'bus' bits with the low 8
612                     // bits of the 'segment' data.
613                     // This resolves the range problem while still giving every
614                     // device a distinct `devid`. The trade-off is that at most
615                     // 256 segments can be supported.
616                     //
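                    // Worked example (hypothetical values): segment 0x0001,
                    // bus 0x00, device 0x02, function 0 gives BDF 0x0001_0010;
                    // the transform below yields
                    // (0x0001_0000 >> 8) | 0x10 = 0x0110.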
617                     let modified_devid = (cfg.devid & 0x00ff_0000) >> 8 | cfg.devid & 0xff;
618 
619                     kvm_route.flags = KVM_MSI_VALID_DEVID;
620                     kvm_route.u.msi.__bindgen_anon_1.devid = modified_devid;
621                 }
622                 kvm_route.into()
623             }
624             InterruptSourceConfig::LegacyIrq(cfg) => {
625                 let mut kvm_route = kvm_irq_routing_entry {
626                     gsi,
627                     type_: KVM_IRQ_ROUTING_IRQCHIP,
628                     ..Default::default()
629                 };
630                 kvm_route.u.irqchip.irqchip = cfg.irqchip;
631                 kvm_route.u.irqchip.pin = cfg.pin;
632 
633                 kvm_route.into()
634             }
635         }
636     }
637 
638     ///
639     /// Sets the GSI routing table entries, overwriting any previously set
640     /// entries, as per the `KVM_SET_GSI_ROUTING` ioctl.
641     ///
642     fn set_gsi_routing(&self, entries: &[IrqRoutingEntry]) -> vm::Result<()> {
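        // kvm_irq_routing ends in a flexible array member, so allocate one
        // header struct plus enough trailing space to hold entries.len()
        // kvm_irq_routing_entry items.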
643         let mut irq_routing =
644             vec_with_array_field::<kvm_irq_routing, kvm_irq_routing_entry>(entries.len());
645         irq_routing[0].nr = entries.len() as u32;
646         irq_routing[0].flags = 0;
647         let entries: Vec<kvm_irq_routing_entry> = entries
648             .iter()
649             .map(|entry| match entry {
650                 IrqRoutingEntry::Kvm(e) => *e,
651                 #[allow(unreachable_patterns)]
652                 _ => panic!("IrqRoutingEntry type is wrong"),
653             })
654             .collect();
655 
656         // SAFETY: irq_routing initialized with entries.len() and now it is being turned into
657         // entries_slice with entries.len() again. It is guaranteed to be large enough to hold
658         // everything from entries.
659         unsafe {
660             let entries_slice: &mut [kvm_irq_routing_entry] =
661                 irq_routing[0].entries.as_mut_slice(entries.len());
662             entries_slice.copy_from_slice(&entries);
663         }
664 
665         self.fd
666             .set_gsi_routing(&irq_routing[0])
667             .map_err(|e| vm::HypervisorVmError::SetGsiRouting(e.into()))
668     }
669 
670     ///
671     /// Creates a memory region structure that can be used with {create/remove}_user_memory_region
672     ///
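    /// A minimal sketch (slot, addresses and size are illustrative; the host
    /// address must point at a live mapping before the region is registered
    /// with `create_user_memory_region`):
    ///
    /// ```
    /// # use hypervisor::kvm::KvmHypervisor;
    /// let hypervisor = KvmHypervisor::new().unwrap();
    /// let vm = hypervisor.create_vm().unwrap();
    /// // Slot 0: 64 KiB of guest RAM at GPA 0, writable, no dirty-page logging.
    /// let _region = vm.make_user_memory_region(0, 0, 0x1_0000, 0x7f00_dead_b000, false, false);
    /// ```
    ///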
673     fn make_user_memory_region(
674         &self,
675         slot: u32,
676         guest_phys_addr: u64,
677         memory_size: u64,
678         userspace_addr: u64,
679         readonly: bool,
680         log_dirty_pages: bool,
681     ) -> UserMemoryRegion {
682         kvm_userspace_memory_region {
683             slot,
684             guest_phys_addr,
685             memory_size,
686             userspace_addr,
687             flags: if readonly { KVM_MEM_READONLY } else { 0 }
688                 | if log_dirty_pages {
689                     KVM_MEM_LOG_DIRTY_PAGES
690                 } else {
691                     0
692                 },
693         }
694         .into()
695     }
696 
697     ///
698     /// Creates a guest physical memory region.
699     ///
700     fn create_user_memory_region(&self, user_memory_region: UserMemoryRegion) -> vm::Result<()> {
701         let mut region: kvm_userspace_memory_region = user_memory_region.into();
702 
703         if (region.flags & KVM_MEM_LOG_DIRTY_PAGES) != 0 {
704             if (region.flags & KVM_MEM_READONLY) != 0 {
705                 return Err(vm::HypervisorVmError::CreateUserMemory(anyhow!(
706                     "Error creating regions with both 'dirty-pages-log' and 'read-only'."
707                 )));
708             }
709 
710             // Keep track of the regions that need dirty-page logging
711             self.dirty_log_slots.write().unwrap().insert(
712                 region.slot,
713                 KvmDirtyLogSlot {
714                     slot: region.slot,
715                     guest_phys_addr: region.guest_phys_addr,
716                     memory_size: region.memory_size,
717                     userspace_addr: region.userspace_addr,
718                 },
719             );
720 
721             // Always create the guest physical memory region without `KVM_MEM_LOG_DIRTY_PAGES`.
722             // For regions that need this flag, dirty-page logging is turned on in `start_dirty_log`.
723             region.flags = 0;
724         }
725 
726         // SAFETY: Safe because guest regions are guaranteed not to overlap.
727         unsafe {
728             self.fd
729                 .set_user_memory_region(region)
730                 .map_err(|e| vm::HypervisorVmError::CreateUserMemory(e.into()))
731         }
732     }
733 
734     ///
735     /// Removes a guest physical memory region.
736     ///
737     fn remove_user_memory_region(&self, user_memory_region: UserMemoryRegion) -> vm::Result<()> {
738         let mut region: kvm_userspace_memory_region = user_memory_region.into();
739 
740         // Remove the corresponding entry from "self.dirty_log_slots" if needed
741         self.dirty_log_slots.write().unwrap().remove(&region.slot);
742 
743         // Setting the size to 0 means "remove"
744         region.memory_size = 0;
745         // SAFETY: Safe because guest regions are guaranteed not to overlap.
746         unsafe {
747             self.fd
748                 .set_user_memory_region(region)
749                 .map_err(|e| vm::HypervisorVmError::RemoveUserMemory(e.into()))
750         }
751     }
752 
753     ///
754     /// Returns the preferred CPU target type which can be emulated by KVM on the underlying host.
755     ///
756     #[cfg(target_arch = "aarch64")]
757     fn get_preferred_target(&self, kvi: &mut VcpuInit) -> vm::Result<()> {
758         self.fd
759             .get_preferred_target(kvi)
760             .map_err(|e| vm::HypervisorVmError::GetPreferredTarget(e.into()))
761     }
762 
763     #[cfg(target_arch = "x86_64")]
764     fn enable_split_irq(&self) -> vm::Result<()> {
765         // Create split irqchip
766         // Only the local APIC is emulated in the kernel; the PICs and the
767         // IOAPIC are not.
768         let mut cap = kvm_enable_cap {
769             cap: KVM_CAP_SPLIT_IRQCHIP,
770             ..Default::default()
771         };
772         cap.args[0] = NUM_IOAPIC_PINS as u64;
773         self.fd
774             .enable_cap(&cap)
775             .map_err(|e| vm::HypervisorVmError::EnableSplitIrq(e.into()))?;
776         Ok(())
777     }
778 
779     #[cfg(target_arch = "x86_64")]
780     fn enable_sgx_attribute(&self, file: File) -> vm::Result<()> {
781         let mut cap = kvm_enable_cap {
782             cap: KVM_CAP_SGX_ATTRIBUTE,
783             ..Default::default()
784         };
785         cap.args[0] = file.as_raw_fd() as u64;
786         self.fd
787             .enable_cap(&cap)
788             .map_err(|e| vm::HypervisorVmError::EnableSgxAttribute(e.into()))?;
789         Ok(())
790     }
791 
792     /// Retrieve guest clock.
793     #[cfg(target_arch = "x86_64")]
794     fn get_clock(&self) -> vm::Result<ClockData> {
795         Ok(self
796             .fd
797             .get_clock()
798             .map_err(|e| vm::HypervisorVmError::GetClock(e.into()))?
799             .into())
800     }
801 
802     /// Set guest clock.
803     #[cfg(target_arch = "x86_64")]
804     fn set_clock(&self, data: &ClockData) -> vm::Result<()> {
805         let data = (*data).into();
806         self.fd
807             .set_clock(&data)
808             .map_err(|e| vm::HypervisorVmError::SetClock(e.into()))
809     }
810 
811     /// Create a device that is used for passthrough
812     fn create_passthrough_device(&self) -> vm::Result<VfioDeviceFd> {
813         let mut vfio_dev = kvm_create_device {
814             type_: kvm_device_type_KVM_DEV_TYPE_VFIO,
815             fd: 0,
816             flags: 0,
817         };
818 
819         self.create_device(&mut vfio_dev)
820             .map_err(|e| vm::HypervisorVmError::CreatePassthroughDevice(e.into()))
821     }
822 
823     ///
824     /// Start logging dirty pages
825     ///
826     fn start_dirty_log(&self) -> vm::Result<()> {
827         let dirty_log_slots = self.dirty_log_slots.read().unwrap();
828         for s in dirty_log_slots.values() {
829             let region = kvm_userspace_memory_region {
830                 slot: s.slot,
831                 guest_phys_addr: s.guest_phys_addr,
832                 memory_size: s.memory_size,
833                 userspace_addr: s.userspace_addr,
834                 flags: KVM_MEM_LOG_DIRTY_PAGES,
835             };
836             // SAFETY: Safe because guest regions are guaranteed not to overlap.
837             unsafe {
838                 self.fd
839                     .set_user_memory_region(region)
840                     .map_err(|e| vm::HypervisorVmError::StartDirtyLog(e.into()))?;
841             }
842         }
843 
844         Ok(())
845     }
846 
847     ///
848     /// Stop logging dirty pages
849     ///
850     fn stop_dirty_log(&self) -> vm::Result<()> {
851         let dirty_log_slots = self.dirty_log_slots.read().unwrap();
852         for s in dirty_log_slots.values() {
853             let region = kvm_userspace_memory_region {
854                 slot: s.slot,
855                 guest_phys_addr: s.guest_phys_addr,
856                 memory_size: s.memory_size,
857                 userspace_addr: s.userspace_addr,
858                 flags: 0,
859             };
860             // SAFETY: Safe because guest regions are guaranteed not to overlap.
861             unsafe {
862                 self.fd
863                     .set_user_memory_region(region)
864                     .map_err(|e| vm::HypervisorVmError::StopDirtyLog(e.into()))?;
865             }
866         }
867 
868         Ok(())
869     }
870 
871     ///
872     /// Get dirty pages bitmap (one bit per page)
873     ///
874     fn get_dirty_log(&self, slot: u32, _base_gpa: u64, memory_size: u64) -> vm::Result<Vec<u64>> {
875         self.fd
876             .get_dirty_log(slot, memory_size as usize)
877             .map_err(|e| vm::HypervisorVmError::GetDirtyLog(e.into()))
878     }
879 
880     ///
881     /// Initialize TDX for this VM
882     ///
883     #[cfg(feature = "tdx")]
884     fn tdx_init(&self, cpuid: &[CpuIdEntry], max_vcpus: u32) -> vm::Result<()> {
885         const TDX_ATTR_SEPT_VE_DISABLE: usize = 28;
886 
887         let mut cpuid: Vec<kvm_bindings::kvm_cpuid_entry2> =
888             cpuid.iter().map(|e| (*e).into()).collect();
889         cpuid.resize(256, kvm_bindings::kvm_cpuid_entry2::default());
890 
891         #[repr(C)]
892         struct TdxInitVm {
893             attributes: u64,
894             max_vcpus: u32,
895             padding: u32,
896             mrconfigid: [u64; 6],
897             mrowner: [u64; 6],
898             mrownerconfig: [u64; 6],
899             cpuid_nent: u32,
900             cpuid_padding: u32,
901             cpuid_entries: [kvm_bindings::kvm_cpuid_entry2; 256],
902         }
903         let data = TdxInitVm {
904             attributes: 1 << TDX_ATTR_SEPT_VE_DISABLE,
905             max_vcpus,
906             padding: 0,
907             mrconfigid: [0; 6],
908             mrowner: [0; 6],
909             mrownerconfig: [0; 6],
910             cpuid_nent: cpuid.len() as u32,
911             cpuid_padding: 0,
912             cpuid_entries: cpuid.as_slice().try_into().unwrap(),
913         };
914 
915         tdx_command(
916             &self.fd.as_raw_fd(),
917             TdxCommand::InitVm,
918             0,
919             &data as *const _ as u64,
920         )
921         .map_err(vm::HypervisorVmError::InitializeTdx)
922     }
923 
924     ///
925     /// Finalize the TDX setup for this VM
926     ///
927     #[cfg(feature = "tdx")]
928     fn tdx_finalize(&self) -> vm::Result<()> {
929         tdx_command(&self.fd.as_raw_fd(), TdxCommand::Finalize, 0, 0)
930             .map_err(vm::HypervisorVmError::FinalizeTdx)
931     }
932 
933     ///
934     /// Initialize memory regions for the TDX VM
935     ///
936     #[cfg(feature = "tdx")]
937     fn tdx_init_memory_region(
938         &self,
939         host_address: u64,
940         guest_address: u64,
941         size: u64,
942         measure: bool,
943     ) -> vm::Result<()> {
944         #[repr(C)]
945         struct TdxInitMemRegion {
946             host_address: u64,
947             guest_address: u64,
948             pages: u64,
949         }
950         let data = TdxInitMemRegion {
951             host_address,
952             guest_address,
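            // The command takes a page count rather than a byte size; 4 KiB
            // pages are assumed.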
953             pages: size / 4096,
954         };
955 
956         tdx_command(
957             &self.fd.as_raw_fd(),
958             TdxCommand::InitMemRegion,
959             u32::from(measure),
960             &data as *const _ as u64,
961         )
962         .map_err(vm::HypervisorVmError::InitMemRegionTdx)
963     }
964 
965     /// Downcast to the underlying KvmVm type
966     fn as_any(&self) -> &dyn Any {
967         self
968     }
969 }
970 
971 #[cfg(feature = "tdx")]
972 fn tdx_command(
973     fd: &RawFd,
974     command: TdxCommand,
975     flags: u32,
976     data: u64,
977 ) -> std::result::Result<(), std::io::Error> {
978     #[repr(C)]
979     struct TdxIoctlCmd {
980         command: TdxCommand,
981         flags: u32,
982         data: u64,
983         error: u64,
984         unused: u64,
985     }
986     let cmd = TdxIoctlCmd {
987         command,
988         flags,
989         data,
990         error: 0,
991         unused: 0,
992     };
993     // SAFETY: FFI call. All input parameters are valid.
994     let ret = unsafe {
995         ioctl_with_val(
996             fd,
997             KVM_MEMORY_ENCRYPT_OP(),
998             &cmd as *const TdxIoctlCmd as std::os::raw::c_ulong,
999         )
1000     };
1001 
1002     if ret < 0 {
1003         return Err(std::io::Error::last_os_error());
1004     }
1005     Ok(())
1006 }
1007 
1008 /// Wrapper over KVM system ioctls.
1009 pub struct KvmHypervisor {
1010     kvm: Kvm,
1011 }
1012 
1013 impl KvmHypervisor {
1014     #[cfg(target_arch = "x86_64")]
1015     ///
1016     /// Retrieve the list of MSRs supported by the hypervisor.
1017     ///
1018     fn get_msr_list(&self) -> hypervisor::Result<MsrList> {
1019         self.kvm
1020             .get_msr_index_list()
1021             .map_err(|e| hypervisor::HypervisorError::GetMsrList(e.into()))
1022     }
1023 }
1024 
1025 /// Enum for KVM-related errors
1026 #[derive(Debug, Error)]
1027 pub enum KvmError {
1028     #[error("Capability missing: {0:?}")]
1029     CapabilityMissing(Cap),
1030 }
1031 
1032 pub type KvmResult<T> = result::Result<T, KvmError>;
1033 
1034 impl KvmHypervisor {
1035     /// Create a hypervisor based on Kvm
1036     #[allow(clippy::new_ret_no_self)]
1037     pub fn new() -> hypervisor::Result<Arc<dyn hypervisor::Hypervisor>> {
1038         let kvm_obj = Kvm::new().map_err(|e| hypervisor::HypervisorError::VmCreate(e.into()))?;
1039         let api_version = kvm_obj.get_api_version();
1040 
1041         if api_version != kvm_bindings::KVM_API_VERSION as i32 {
1042             return Err(hypervisor::HypervisorError::IncompatibleApiVersion);
1043         }
1044 
1045         Ok(Arc::new(KvmHypervisor { kvm: kvm_obj }))
1046     }
1047 
1048     /// Check if the hypervisor is available
1049     pub fn is_available() -> hypervisor::Result<bool> {
1050         match std::fs::metadata("/dev/kvm") {
1051             Ok(_) => Ok(true),
1052             Err(err) if err.kind() == std::io::ErrorKind::NotFound => Ok(false),
1053             Err(err) => Err(hypervisor::HypervisorError::HypervisorAvailableCheck(
1054                 err.into(),
1055             )),
1056         }
1057     }
1058 }
1059 
1060 /// Implementation of Hypervisor trait for KVM
1061 ///
1062 /// # Examples
1063 ///
1064 /// ```
1065 /// # use hypervisor::kvm::KvmHypervisor;
1066 /// # use std::sync::Arc;
1067 /// let kvm = KvmHypervisor::new().unwrap();
1068 /// let hypervisor = Arc::new(kvm);
1069 /// let vm = hypervisor.create_vm().expect("new VM fd creation failed");
1070 /// ```
1071 impl hypervisor::Hypervisor for KvmHypervisor {
1072     ///
1073     /// Returns the type of the hypervisor
1074     ///
1075     fn hypervisor_type(&self) -> HypervisorType {
1076         HypervisorType::Kvm
1077     }
1078 
1079     ///
1080     /// Create a Vm of a specific type using the underlying hypervisor, passing the memory size.
1081     /// Returns a hypervisor-agnostic Vm trait object.
1082     ///
1083     /// # Examples
1084     ///
1085     /// ```
1086     /// # use hypervisor::kvm::KvmHypervisor;
1087     /// use hypervisor::kvm::KvmVm;
1088     /// let hypervisor = KvmHypervisor::new().unwrap();
1089     /// let vm = hypervisor.create_vm_with_type_and_memory(0).unwrap();
1090     /// ```
1091     fn create_vm_with_type_and_memory(
1092         &self,
1093         vm_type: u64,
1094         #[cfg(feature = "sev_snp")] _mem_size: u64,
1095     ) -> hypervisor::Result<Arc<dyn vm::Vm>> {
1096         self.create_vm_with_type(vm_type)
1097     }
1098 
1099     /// Create a KVM vm object of a specific VM type and return it as a Vm trait object
1100     ///
1101     /// # Examples
1102     ///
1103     /// ```
1104     /// # use hypervisor::kvm::KvmHypervisor;
1105     /// use hypervisor::kvm::KvmVm;
1106     /// let hypervisor = KvmHypervisor::new().unwrap();
1107     /// let vm = hypervisor.create_vm_with_type(0).unwrap();
1108     /// ```
1109     fn create_vm_with_type(&self, vm_type: u64) -> hypervisor::Result<Arc<dyn vm::Vm>> {
1110         let fd: VmFd;
1111         loop {
1112             match self.kvm.create_vm_with_type(vm_type) {
1113                 Ok(res) => fd = res,
1114                 Err(e) => {
1115                     if e.errno() == libc::EINTR {
1116                         // If the error returned is EINTR, the ioctl was
1117                         // interrupted by a signal and we have to retry, as
1118                         // this can't be considered a regular error.
1119                         continue;
1120                     } else {
1121                         return Err(hypervisor::HypervisorError::VmCreate(e.into()));
1122                     }
1123                 }
1124             }
1125             break;
1126         }
1127 
1128         let vm_fd = Arc::new(fd);
1129 
1130         #[cfg(target_arch = "x86_64")]
1131         {
1132             let msr_list = self.get_msr_list()?;
1133             let num_msrs = msr_list.as_fam_struct_ref().nmsrs as usize;
1134             let mut msrs: Vec<MsrEntry> = vec![
1135                 MsrEntry {
1136                     ..Default::default()
1137                 };
1138                 num_msrs
1139             ];
1140             let indices = msr_list.as_slice();
1141             for (pos, index) in indices.iter().enumerate() {
1142                 msrs[pos].index = *index;
1143             }
1144 
1145             Ok(Arc::new(KvmVm {
1146                 fd: vm_fd,
1147                 msrs,
1148                 dirty_log_slots: Arc::new(RwLock::new(HashMap::new())),
1149             }))
1150         }
1151 
1152         #[cfg(any(target_arch = "aarch64", target_arch = "riscv64"))]
1153         {
1154             Ok(Arc::new(KvmVm {
1155                 fd: vm_fd,
1156                 dirty_log_slots: Arc::new(RwLock::new(HashMap::new())),
1157             }))
1158         }
1159     }
1160 
1161     /// Create a KVM vm object and return it as a Vm trait object
1162     ///
1163     /// # Examples
1164     ///
1165     /// ```
1166     /// # use hypervisor::kvm::KvmHypervisor;
1167     /// use hypervisor::kvm::KvmVm;
1168     /// let hypervisor = KvmHypervisor::new().unwrap();
1169     /// let vm = hypervisor.create_vm().unwrap();
1170     /// ```
1171     fn create_vm(&self) -> hypervisor::Result<Arc<dyn vm::Vm>> {
1172         #[allow(unused_mut)]
1173         let mut vm_type: u64 = 0; // Create with default platform type
1174 
1175         // When KVM supports Cap::ArmVmIPASize, it is better to get the IPA
1176         // size from the host and use that when creating the VM, which may
1177         // avoid unnecessary VM creation failures.
1178         #[cfg(target_arch = "aarch64")]
1179         if self.kvm.check_extension(Cap::ArmVmIPASize) {
1180             vm_type = self.kvm.get_host_ipa_limit().try_into().unwrap();
1181         }
1182 
1183         self.create_vm_with_type(vm_type)
1184     }
1185 
1186     fn check_required_extensions(&self) -> hypervisor::Result<()> {
1187         check_required_kvm_extensions(&self.kvm)
1188             .map_err(|e| hypervisor::HypervisorError::CheckExtensions(e.into()))
1189     }
1190 
1191     #[cfg(target_arch = "x86_64")]
1192     ///
1193     /// X86 specific call to get the system supported CPUID values.
1194     ///
1195     fn get_supported_cpuid(&self) -> hypervisor::Result<Vec<CpuIdEntry>> {
1196         let kvm_cpuid = self
1197             .kvm
1198             .get_supported_cpuid(kvm_bindings::KVM_MAX_CPUID_ENTRIES)
1199             .map_err(|e| hypervisor::HypervisorError::GetCpuId(e.into()))?;
1200 
1201         let v = kvm_cpuid.as_slice().iter().map(|e| (*e).into()).collect();
1202 
1203         Ok(v)
1204     }
1205 
1206     #[cfg(target_arch = "aarch64")]
1207     ///
1208     /// Retrieve AArch64 host maximum IPA size supported by KVM.
1209     ///
1210     fn get_host_ipa_limit(&self) -> i32 {
1211         self.kvm.get_host_ipa_limit()
1212     }
1213 
1214     ///
1215     /// Retrieve TDX capabilities
1216     ///
1217     #[cfg(feature = "tdx")]
1218     fn tdx_capabilities(&self) -> hypervisor::Result<TdxCapabilities> {
1219         let data = TdxCapabilities {
1220             nr_cpuid_configs: TDX_MAX_NR_CPUID_CONFIGS as u32,
1221             ..Default::default()
1222         };
1223 
1224         tdx_command(
1225             &self.kvm.as_raw_fd(),
1226             TdxCommand::Capabilities,
1227             0,
1228             &data as *const _ as u64,
1229         )
1230         .map_err(|e| hypervisor::HypervisorError::TdxCapabilities(e.into()))?;
1231 
1232         Ok(data)
1233     }
1234 
1235     #[cfg(any(target_arch = "x86_64", target_arch = "aarch64"))]
1236     ///
1237     /// Get the number of supported hardware breakpoints
1238     ///
1239     fn get_guest_debug_hw_bps(&self) -> usize {
1240         #[cfg(target_arch = "x86_64")]
1241         {
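            // x86 provides an architecturally fixed set of 4 hardware
            // breakpoints (debug registers DR0-DR3).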
1242             4
1243         }
1244         #[cfg(target_arch = "aarch64")]
1245         {
1246             self.kvm.get_guest_debug_hw_bps() as usize
1247         }
1248     }
1249 
1250     /// Get maximum number of vCPUs
1251     fn get_max_vcpus(&self) -> u32 {
1252         self.kvm.get_max_vcpus().min(u32::MAX as usize) as u32
1253     }
1254 }
1255 
1256 /// Vcpu struct for KVM
1257 pub struct KvmVcpu {
1258     fd: Arc<Mutex<VcpuFd>>,
1259     #[cfg(target_arch = "x86_64")]
1260     msrs: Vec<MsrEntry>,
1261     vm_ops: Option<Arc<dyn vm::VmOps>>,
1262     #[cfg(target_arch = "x86_64")]
1263     hyperv_synic: AtomicBool,
1264 }
1265 
1266 /// Implementation of Vcpu trait for KVM
1267 ///
1268 /// # Examples
1269 ///
1270 /// ```
1271 /// # use hypervisor::kvm::KvmHypervisor;
1272 /// # use std::sync::Arc;
1273 /// let kvm = KvmHypervisor::new().unwrap();
1274 /// let hypervisor = Arc::new(kvm);
1275 /// let vm = hypervisor.create_vm().expect("new VM fd creation failed");
1276 /// let vcpu = vm.create_vcpu(0, None).unwrap();
1277 /// ```
1278 impl cpu::Vcpu for KvmVcpu {
1279     ///
1280     /// Returns StandardRegisters with default values set
1281     ///
1282     fn create_standard_regs(&self) -> StandardRegisters {
1283         #[cfg(any(target_arch = "x86_64", target_arch = "aarch64"))]
1284         {
1285             kvm_bindings::kvm_regs::default().into()
1286         }
1287         #[cfg(target_arch = "riscv64")]
1288         {
1289             kvm_bindings::kvm_riscv_core::default().into()
1290         }
1291     }
1292     #[cfg(target_arch = "x86_64")]
1293     ///
1294     /// Returns the vCPU general purpose registers.
1295     ///
1296     fn get_regs(&self) -> cpu::Result<StandardRegisters> {
1297         Ok(self
1298             .fd
1299             .lock()
1300             .unwrap()
1301             .get_regs()
1302             .map_err(|e| cpu::HypervisorCpuError::GetStandardRegs(e.into()))?
1303             .into())
1304     }
1305 
1306     ///
1307     /// Returns the vCPU general purpose registers.
1308     /// The `KVM_GET_REGS` ioctl is not available on AArch64, so `KVM_GET_ONE_REG`
1309     /// is used to read the registers one by one.
1310     ///
1311     #[cfg(target_arch = "aarch64")]
1312     fn get_regs(&self) -> cpu::Result<StandardRegisters> {
1313         let mut state = kvm_regs::default();
1314         let mut off = offset_of!(user_pt_regs, regs);
1315         // The `regs` array of `user_pt_regs` holds 31 registers:
1316         // https://elixir.free-electrons.com/linux/v4.14.174/source/arch/arm64/include/uapi/asm/ptrace.h#L72
1317         // These are the general-purpose registers of the Armv8-a architecture
1318         // (i.e. x0-x30 as 64-bit registers, or w0-w30 as their 32-bit halves).
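        // `arm64_core_reg_id!` builds the KVM_GET_ONE_REG id from the
        // register size and the byte offset of the field within `kvm_regs`.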
1319         for i in 0..31 {
1320             let mut bytes = [0_u8; 8];
1321             self.fd
1322                 .lock()
1323                 .unwrap()
1324                 .get_one_reg(arm64_core_reg_id!(KVM_REG_SIZE_U64, off), &mut bytes)
1325                 .map_err(|e| cpu::HypervisorCpuError::GetAarchCoreRegister(e.into()))?;
1326             state.regs.regs[i] = u64::from_le_bytes(bytes);
1327             off += std::mem::size_of::<u64>();
1328         }
1329 
1330         // We are now entering the "Other register" section of the ARMv8-a architecture.
1331         // First one, stack pointer.
1332         let off = offset_of!(user_pt_regs, sp);
1333         let mut bytes = [0_u8; 8];
1334         self.fd
1335             .lock()
1336             .unwrap()
1337             .get_one_reg(arm64_core_reg_id!(KVM_REG_SIZE_U64, off), &mut bytes)
1338             .map_err(|e| cpu::HypervisorCpuError::GetAarchCoreRegister(e.into()))?;
1339         state.regs.sp = u64::from_le_bytes(bytes);
1340 
1341         // Second one, the program counter.
1342         let off = offset_of!(user_pt_regs, pc);
1343         let mut bytes = [0_u8; 8];
1344         self.fd
1345             .lock()
1346             .unwrap()
1347             .get_one_reg(arm64_core_reg_id!(KVM_REG_SIZE_U64, off), &mut bytes)
1348             .map_err(|e| cpu::HypervisorCpuError::GetAarchCoreRegister(e.into()))?;
1349         state.regs.pc = u64::from_le_bytes(bytes);
1350 
1351         // Next is the processor state.
1352         let off = offset_of!(user_pt_regs, pstate);
1353         let mut bytes = [0_u8; 8];
1354         self.fd
1355             .lock()
1356             .unwrap()
1357             .get_one_reg(arm64_core_reg_id!(KVM_REG_SIZE_U64, off), &mut bytes)
1358             .map_err(|e| cpu::HypervisorCpuError::GetAarchCoreRegister(e.into()))?;
1359         state.regs.pstate = u64::from_le_bytes(bytes);
1360 
1361         // The stack pointer associated with EL1
1362         let off = offset_of!(kvm_regs, sp_el1);
1363         let mut bytes = [0_u8; 8];
1364         self.fd
1365             .lock()
1366             .unwrap()
1367             .get_one_reg(arm64_core_reg_id!(KVM_REG_SIZE_U64, off), &mut bytes)
1368             .map_err(|e| cpu::HypervisorCpuError::GetAarchCoreRegister(e.into()))?;
1369         state.sp_el1 = u64::from_le_bytes(bytes);
1370 
1371         // Exception Link Register for EL1. When taking an exception to EL1,
1372         // this register holds the address to return to afterwards.
1373         let off = offset_of!(kvm_regs, elr_el1);
1374         let mut bytes = [0_u8; 8];
1375         self.fd
1376             .lock()
1377             .unwrap()
1378             .get_one_reg(arm64_core_reg_id!(KVM_REG_SIZE_U64, off), &mut bytes)
1379             .map_err(|e| cpu::HypervisorCpuError::GetAarchCoreRegister(e.into()))?;
1380         state.elr_el1 = u64::from_le_bytes(bytes);
1381 
1382         // Saved Program Status Registers; the kernel uses 5 of them (KVM_NR_SPSR).
1383         let mut off = offset_of!(kvm_regs, spsr);
1384         for i in 0..KVM_NR_SPSR as usize {
1385             let mut bytes = [0_u8; 8];
1386             self.fd
1387                 .lock()
1388                 .unwrap()
1389                 .get_one_reg(arm64_core_reg_id!(KVM_REG_SIZE_U64, off), &mut bytes)
1390                 .map_err(|e| cpu::HypervisorCpuError::GetAarchCoreRegister(e.into()))?;
1391             state.spsr[i] = u64::from_le_bytes(bytes);
1392             off += std::mem::size_of::<u64>();
1393         }
1394 
1395         // Now moving on to floating point registers which are stored in the user_fpsimd_state in the kernel:
1396         // https://elixir.free-electrons.com/linux/v4.9.62/source/arch/arm64/include/uapi/asm/kvm.h#L53
1397         let mut off = offset_of!(kvm_regs, fp_regs) + offset_of!(user_fpsimd_state, vregs);
1398         for i in 0..32 {
1399             let mut bytes = [0_u8; 16];
1400             self.fd
1401                 .lock()
1402                 .unwrap()
1403                 .get_one_reg(arm64_core_reg_id!(KVM_REG_SIZE_U128, off), &mut bytes)
1404                 .map_err(|e| cpu::HypervisorCpuError::GetAarchCoreRegister(e.into()))?;
1405             state.fp_regs.vregs[i] = u128::from_le_bytes(bytes);
1406             off += mem::size_of::<u128>();
1407         }
1408 
1409         // Floating-point Status Register
1410         let off = offset_of!(kvm_regs, fp_regs) + offset_of!(user_fpsimd_state, fpsr);
1411         let mut bytes = [0_u8; 4];
1412         self.fd
1413             .lock()
1414             .unwrap()
1415             .get_one_reg(arm64_core_reg_id!(KVM_REG_SIZE_U32, off), &mut bytes)
1416             .map_err(|e| cpu::HypervisorCpuError::GetAarchCoreRegister(e.into()))?;
1417         state.fp_regs.fpsr = u32::from_le_bytes(bytes);
1418 
1419         // Floating-point Control Register
1420         let off = offset_of!(kvm_regs, fp_regs) + offset_of!(user_fpsimd_state, fpcr);
1421         let mut bytes = [0_u8; 4];
1422         self.fd
1423             .lock()
1424             .unwrap()
1425             .get_one_reg(arm64_core_reg_id!(KVM_REG_SIZE_U32, off), &mut bytes)
1426             .map_err(|e| cpu::HypervisorCpuError::GetAarchCoreRegister(e.into()))?;
1427         state.fp_regs.fpcr = u32::from_le_bytes(bytes);
1428         Ok(state.into())
1429     }
1430 
1431     #[cfg(target_arch = "riscv64")]
1432     ///
1433     /// Returns the RISC-V vCPU core registers.
1434     /// The `KVM_GET_REGS` ioctl is not available on RISC-V 64-bit, so
1435     /// `KVM_GET_ONE_REG` is used to read the registers one by one.
1436     ///
1437     fn get_regs(&self) -> cpu::Result<StandardRegisters> {
1438         let mut state = kvm_riscv_core::default();
1439 
1440         let off = offset_of!(kvm_riscv_core, regs, user_regs_struct, pc);
1441         let mut bytes = [0_u8; 8];
1442         self.fd
1443             .lock()
1444             .unwrap()
1445             .get_one_reg(riscv64_reg_id!(KVM_REG_RISCV_CORE, off), &mut bytes)
1446             .map_err(|e| cpu::HypervisorCpuError::GetRiscvCoreRegister(e.into()))?;
1447         state.regs.pc = u64::from_le_bytes(bytes);
1448 
1449         let off = offset_of!(kvm_riscv_core, regs, user_regs_struct, ra);
1450         let mut bytes = [0_u8; 8];
1451         self.fd
1452             .lock()
1453             .unwrap()
1454             .get_one_reg(riscv64_reg_id!(KVM_REG_RISCV_CORE, off), &mut bytes)
1455             .map_err(|e| cpu::HypervisorCpuError::GetRiscvCoreRegister(e.into()))?;
1456         state.regs.ra = u64::from_le_bytes(bytes);
1457 
1458         let off = offset_of!(kvm_riscv_core, regs, user_regs_struct, sp);
1459         let mut bytes = [0_u8; 8];
1460         self.fd
1461             .lock()
1462             .unwrap()
1463             .get_one_reg(riscv64_reg_id!(KVM_REG_RISCV_CORE, off), &mut bytes)
1464             .map_err(|e| cpu::HypervisorCpuError::GetRiscvCoreRegister(e.into()))?;
1465         state.regs.sp = u64::from_le_bytes(bytes);
1466 
1467         let off = offset_of!(kvm_riscv_core, regs, user_regs_struct, gp);
1468         let mut bytes = [0_u8; 8];
1469         self.fd
1470             .lock()
1471             .unwrap()
1472             .get_one_reg(riscv64_reg_id!(KVM_REG_RISCV_CORE, off), &mut bytes)
1473             .map_err(|e| cpu::HypervisorCpuError::GetRiscvCoreRegister(e.into()))?;
1474         state.regs.gp = u64::from_le_bytes(bytes);
1475 
1476         let off = offset_of!(kvm_riscv_core, regs, user_regs_struct, tp);
1477         let mut bytes = [0_u8; 8];
1478         self.fd
1479             .lock()
1480             .unwrap()
1481             .get_one_reg(riscv64_reg_id!(KVM_REG_RISCV_CORE, off), &mut bytes)
1482             .map_err(|e| cpu::HypervisorCpuError::GetRiscvCoreRegister(e.into()))?;
1483         state.regs.tp = u64::from_le_bytes(bytes);
1484 
1485         let off = offset_of!(kvm_riscv_core, regs, user_regs_struct, t0);
1486         let mut bytes = [0_u8; 8];
1487         self.fd
1488             .lock()
1489             .unwrap()
1490             .get_one_reg(riscv64_reg_id!(KVM_REG_RISCV_CORE, off), &mut bytes)
1491             .map_err(|e| cpu::HypervisorCpuError::GetRiscvCoreRegister(e.into()))?;
1492         state.regs.t0 = u64::from_le_bytes(bytes);
1493 
1494         let off = offset_of!(kvm_riscv_core, regs, user_regs_struct, t1);
1495         let mut bytes = [0_u8; 8];
1496         self.fd
1497             .lock()
1498             .unwrap()
1499             .get_one_reg(riscv64_reg_id!(KVM_REG_RISCV_CORE, off), &mut bytes)
1500             .map_err(|e| cpu::HypervisorCpuError::GetRiscvCoreRegister(e.into()))?;
1501         state.regs.t1 = u64::from_le_bytes(bytes);
1502 
1503         let off = offset_of!(kvm_riscv_core, regs, user_regs_struct, t2);
1504         let mut bytes = [0_u8; 8];
1505         self.fd
1506             .lock()
1507             .unwrap()
1508             .get_one_reg(riscv64_reg_id!(KVM_REG_RISCV_CORE, off), &mut bytes)
1509             .map_err(|e| cpu::HypervisorCpuError::GetRiscvCoreRegister(e.into()))?;
1510         state.regs.t2 = u64::from_le_bytes(bytes);
1511 
1512         let off = offset_of!(kvm_riscv_core, regs, user_regs_struct, s0);
1513         let mut bytes = [0_u8; 8];
1514         self.fd
1515             .lock()
1516             .unwrap()
1517             .get_one_reg(riscv64_reg_id!(KVM_REG_RISCV_CORE, off), &mut bytes)
1518             .map_err(|e| cpu::HypervisorCpuError::GetRiscvCoreRegister(e.into()))?;
1519         state.regs.s0 = u64::from_le_bytes(bytes);
1520 
1521         let off = offset_of!(kvm_riscv_core, regs, user_regs_struct, s1);
1522         let mut bytes = [0_u8; 8];
1523         self.fd
1524             .lock()
1525             .unwrap()
1526             .get_one_reg(riscv64_reg_id!(KVM_REG_RISCV_CORE, off), &mut bytes)
1527             .map_err(|e| cpu::HypervisorCpuError::GetRiscvCoreRegister(e.into()))?;
1528         state.regs.s1 = u64::from_le_bytes(bytes);
1529 
1530         let off = offset_of!(kvm_riscv_core, regs, user_regs_struct, a0);
1531         let mut bytes = [0_u8; 8];
1532         self.fd
1533             .lock()
1534             .unwrap()
1535             .get_one_reg(riscv64_reg_id!(KVM_REG_RISCV_CORE, off), &mut bytes)
1536             .map_err(|e| cpu::HypervisorCpuError::GetRiscvCoreRegister(e.into()))?;
1537         state.regs.a0 = u64::from_le_bytes(bytes);
1538 
1539         let off = offset_of!(kvm_riscv_core, regs, user_regs_struct, a1);
1540         let mut bytes = [0_u8; 8];
1541         self.fd
1542             .lock()
1543             .unwrap()
1544             .get_one_reg(riscv64_reg_id!(KVM_REG_RISCV_CORE, off), &mut bytes)
1545             .map_err(|e| cpu::HypervisorCpuError::GetRiscvCoreRegister(e.into()))?;
1546         state.regs.a1 = u64::from_le_bytes(bytes);
1547 
1548         let off = offset_of!(kvm_riscv_core, regs, user_regs_struct, a2);
1549         let mut bytes = [0_u8; 8];
1550         self.fd
1551             .lock()
1552             .unwrap()
1553             .get_one_reg(riscv64_reg_id!(KVM_REG_RISCV_CORE, off), &mut bytes)
1554             .map_err(|e| cpu::HypervisorCpuError::GetRiscvCoreRegister(e.into()))?;
1555         state.regs.a2 = u64::from_le_bytes(bytes);
1556 
1557         let off = offset_of!(kvm_riscv_core, regs, user_regs_struct, a3);
1558         let mut bytes = [0_u8; 8];
1559         self.fd
1560             .lock()
1561             .unwrap()
1562             .get_one_reg(riscv64_reg_id!(KVM_REG_RISCV_CORE, off), &mut bytes)
1563             .map_err(|e| cpu::HypervisorCpuError::GetRiscvCoreRegister(e.into()))?;
1564         state.regs.a3 = u64::from_le_bytes(bytes);
1565 
1566         let off = offset_of!(kvm_riscv_core, regs, user_regs_struct, a4);
1567         let mut bytes = [0_u8; 8];
1568         self.fd
1569             .lock()
1570             .unwrap()
1571             .get_one_reg(riscv64_reg_id!(KVM_REG_RISCV_CORE, off), &mut bytes)
1572             .map_err(|e| cpu::HypervisorCpuError::GetRiscvCoreRegister(e.into()))?;
1573         state.regs.a4 = u64::from_le_bytes(bytes);
1574 
1575         let off = offset_of!(kvm_riscv_core, regs, user_regs_struct, a5);
1576         let mut bytes = [0_u8; 8];
1577         self.fd
1578             .lock()
1579             .unwrap()
1580             .get_one_reg(riscv64_reg_id!(KVM_REG_RISCV_CORE, off), &mut bytes)
1581             .map_err(|e| cpu::HypervisorCpuError::GetRiscvCoreRegister(e.into()))?;
1582         state.regs.a5 = u64::from_le_bytes(bytes);
1583 
1584         let off = offset_of!(kvm_riscv_core, regs, user_regs_struct, a6);
1585         let mut bytes = [0_u8; 8];
1586         self.fd
1587             .lock()
1588             .unwrap()
1589             .get_one_reg(riscv64_reg_id!(KVM_REG_RISCV_CORE, off), &mut bytes)
1590             .map_err(|e| cpu::HypervisorCpuError::GetRiscvCoreRegister(e.into()))?;
1591         state.regs.a6 = u64::from_le_bytes(bytes);
1592 
1593         let off = offset_of!(kvm_riscv_core, regs, user_regs_struct, a7);
1594         let mut bytes = [0_u8; 8];
1595         self.fd
1596             .lock()
1597             .unwrap()
1598             .get_one_reg(riscv64_reg_id!(KVM_REG_RISCV_CORE, off), &mut bytes)
1599             .map_err(|e| cpu::HypervisorCpuError::GetRiscvCoreRegister(e.into()))?;
1600         state.regs.a7 = u64::from_le_bytes(bytes);
1601 
1602         let off = offset_of!(kvm_riscv_core, regs, user_regs_struct, s2);
1603         let mut bytes = [0_u8; 8];
1604         self.fd
1605             .lock()
1606             .unwrap()
1607             .get_one_reg(riscv64_reg_id!(KVM_REG_RISCV_CORE, off), &mut bytes)
1608             .map_err(|e| cpu::HypervisorCpuError::GetRiscvCoreRegister(e.into()))?;
1609         state.regs.s2 = u64::from_le_bytes(bytes);
1610 
1611         let off = offset_of!(kvm_riscv_core, regs, user_regs_struct, s3);
1612         let mut bytes = [0_u8; 8];
1613         self.fd
1614             .lock()
1615             .unwrap()
1616             .get_one_reg(riscv64_reg_id!(KVM_REG_RISCV_CORE, off), &mut bytes)
1617             .map_err(|e| cpu::HypervisorCpuError::GetRiscvCoreRegister(e.into()))?;
1618         state.regs.s3 = u64::from_le_bytes(bytes);
1619 
1620         let off = offset_of!(kvm_riscv_core, regs, user_regs_struct, s4);
1621         let mut bytes = [0_u8; 8];
1622         self.fd
1623             .lock()
1624             .unwrap()
1625             .get_one_reg(riscv64_reg_id!(KVM_REG_RISCV_CORE, off), &mut bytes)
1626             .map_err(|e| cpu::HypervisorCpuError::GetRiscvCoreRegister(e.into()))?;
1627         state.regs.s4 = u64::from_le_bytes(bytes);
1628 
1629         let off = offset_of!(kvm_riscv_core, regs, user_regs_struct, s5);
1630         let mut bytes = [0_u8; 8];
1631         self.fd
1632             .lock()
1633             .unwrap()
1634             .get_one_reg(riscv64_reg_id!(KVM_REG_RISCV_CORE, off), &mut bytes)
1635             .map_err(|e| cpu::HypervisorCpuError::GetRiscvCoreRegister(e.into()))?;
1636         state.regs.s5 = u64::from_le_bytes(bytes);
1637 
1638         let off = offset_of!(kvm_riscv_core, regs, user_regs_struct, s6);
1639         let mut bytes = [0_u8; 8];
1640         self.fd
1641             .lock()
1642             .unwrap()
1643             .get_one_reg(riscv64_reg_id!(KVM_REG_RISCV_CORE, off), &mut bytes)
1644             .map_err(|e| cpu::HypervisorCpuError::GetRiscvCoreRegister(e.into()))?;
1645         state.regs.s6 = u64::from_le_bytes(bytes);
1646 
1647         let off = offset_of!(kvm_riscv_core, regs, user_regs_struct, s7);
1648         let mut bytes = [0_u8; 8];
1649         self.fd
1650             .lock()
1651             .unwrap()
1652             .get_one_reg(riscv64_reg_id!(KVM_REG_RISCV_CORE, off), &mut bytes)
1653             .map_err(|e| cpu::HypervisorCpuError::GetRiscvCoreRegister(e.into()))?;
1654         state.regs.s7 = u64::from_le_bytes(bytes);
1655 
1656         let off = offset_of!(kvm_riscv_core, regs, user_regs_struct, s8);
1657         let mut bytes = [0_u8; 8];
1658         self.fd
1659             .lock()
1660             .unwrap()
1661             .get_one_reg(riscv64_reg_id!(KVM_REG_RISCV_CORE, off), &mut bytes)
1662             .map_err(|e| cpu::HypervisorCpuError::GetRiscvCoreRegister(e.into()))?;
1663         state.regs.s8 = u64::from_le_bytes(bytes);
1664 
1665         let off = offset_of!(kvm_riscv_core, regs, user_regs_struct, s9);
1666         let mut bytes = [0_u8; 8];
1667         self.fd
1668             .lock()
1669             .unwrap()
1670             .get_one_reg(riscv64_reg_id!(KVM_REG_RISCV_CORE, off), &mut bytes)
1671             .map_err(|e| cpu::HypervisorCpuError::GetRiscvCoreRegister(e.into()))?;
1672         state.regs.s9 = u64::from_le_bytes(bytes);
1673 
1674         let off = offset_of!(kvm_riscv_core, regs, user_regs_struct, s10);
1675         let mut bytes = [0_u8; 8];
1676         self.fd
1677             .lock()
1678             .unwrap()
1679             .get_one_reg(riscv64_reg_id!(KVM_REG_RISCV_CORE, off), &mut bytes)
1680             .map_err(|e| cpu::HypervisorCpuError::GetRiscvCoreRegister(e.into()))?;
1681         state.regs.s10 = u64::from_le_bytes(bytes);
1682 
1683         let off = offset_of!(kvm_riscv_core, regs, user_regs_struct, s11);
1684         let mut bytes = [0_u8; 8];
1685         self.fd
1686             .lock()
1687             .unwrap()
1688             .get_one_reg(riscv64_reg_id!(KVM_REG_RISCV_CORE, off), &mut bytes)
1689             .map_err(|e| cpu::HypervisorCpuError::GetRiscvCoreRegister(e.into()))?;
1690         state.regs.s11 = u64::from_le_bytes(bytes);
1691 
1692         let off = offset_of!(kvm_riscv_core, regs, user_regs_struct, t3);
1693         let mut bytes = [0_u8; 8];
1694         self.fd
1695             .lock()
1696             .unwrap()
1697             .get_one_reg(riscv64_reg_id!(KVM_REG_RISCV_CORE, off), &mut bytes)
1698             .map_err(|e| cpu::HypervisorCpuError::GetRiscvCoreRegister(e.into()))?;
1699         state.regs.t3 = u64::from_le_bytes(bytes);
1700 
1701         let off = offset_of!(kvm_riscv_core, regs, user_regs_struct, t4);
1702         let mut bytes = [0_u8; 8];
1703         self.fd
1704             .lock()
1705             .unwrap()
1706             .get_one_reg(riscv64_reg_id!(KVM_REG_RISCV_CORE, off), &mut bytes)
1707             .map_err(|e| cpu::HypervisorCpuError::GetRiscvCoreRegister(e.into()))?;
1708         state.regs.t4 = u64::from_le_bytes(bytes);
1709 
1710         let off = offset_of!(kvm_riscv_core, regs, user_regs_struct, t5);
1711         let mut bytes = [0_u8; 8];
1712         self.fd
1713             .lock()
1714             .unwrap()
1715             .get_one_reg(riscv64_reg_id!(KVM_REG_RISCV_CORE, off), &mut bytes)
1716             .map_err(|e| cpu::HypervisorCpuError::GetRiscvCoreRegister(e.into()))?;
1717         state.regs.t5 = u64::from_le_bytes(bytes);
1718 
1719         let off = offset_of!(kvm_riscv_core, regs, user_regs_struct, t6);
1720         let mut bytes = [0_u8; 8];
1721         self.fd
1722             .lock()
1723             .unwrap()
1724             .get_one_reg(riscv64_reg_id!(KVM_REG_RISCV_CORE, off), &mut bytes)
1725             .map_err(|e| cpu::HypervisorCpuError::GetRiscvCoreRegister(e.into()))?;
1726         state.regs.t6 = u64::from_le_bytes(bytes);
1727 
1728         let off = offset_of!(kvm_riscv_core, mode);
1729         let mut bytes = [0_u8; 8];
1730         self.fd
1731             .lock()
1732             .unwrap()
1733             .get_one_reg(riscv64_reg_id!(KVM_REG_RISCV_CORE, off), &mut bytes)
1734             .map_err(|e| cpu::HypervisorCpuError::GetRiscvCoreRegister(e.into()))?;
1735         state.mode = u64::from_le_bytes(bytes);
1736 
1737         Ok(state.into())
1738     }
1739 
1740     #[cfg(target_arch = "x86_64")]
1741     ///
1742     /// Sets the vCPU general purpose registers using the `KVM_SET_REGS` ioctl.
1743     ///
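    /// # Example
    ///
    /// A minimal sketch (requires access to /dev/kvm): read the registers
    /// back and write them unchanged.
    ///
    /// ```rust
    /// # use hypervisor::kvm::KvmHypervisor;
    /// # use std::sync::Arc;
    /// let kvm = KvmHypervisor::new().unwrap();
    /// let hv = Arc::new(kvm);
    /// let vm = hv.create_vm().expect("new VM fd creation failed");
    /// vm.enable_split_irq().unwrap();
    /// let vcpu = vm.create_vcpu(0, None).unwrap();
    /// let regs = vcpu.get_regs().unwrap();
    /// vcpu.set_regs(&regs).unwrap();
    /// ```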
1744     fn set_regs(&self, regs: &StandardRegisters) -> cpu::Result<()> {
1745         let regs = (*regs).into();
1746         self.fd
1747             .lock()
1748             .unwrap()
1749             .set_regs(&regs)
1750             .map_err(|e| cpu::HypervisorCpuError::SetStandardRegs(e.into()))
1751     }
1752 
1753     ///
1754     /// Sets the vCPU general purpose registers.
1755     /// The `KVM_SET_REGS` ioctl is not available on AArch64; `KVM_SET_ONE_REG`
1756     /// is used instead to set the registers one by one.
1757     ///
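    /// # Example
    ///
    /// A minimal sketch of how a single core register is addressed: the id
    /// combines the register size with the offset of the field inside
    /// `kvm_regs` (`vcpu_fd` and `boot_ip` are hypothetical here).
    ///
    /// ```ignore
    /// let off = offset_of!(user_pt_regs, pc);
    /// let id = arm64_core_reg_id!(KVM_REG_SIZE_U64, off);
    /// vcpu_fd.set_one_reg(id, &boot_ip.to_le_bytes())?;
    /// ```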
1758     #[cfg(target_arch = "aarch64")]
1759     fn set_regs(&self, state: &StandardRegisters) -> cpu::Result<()> {
1760         // The function follows the exact order of the registers defined in `state`.
1761         // Look there for additional info on the registers.
1762         let kvm_regs_state: kvm_regs = (*state).into();
1763         let mut off = offset_of!(user_pt_regs, regs);
1764         for i in 0..31 {
1765             self.fd
1766                 .lock()
1767                 .unwrap()
1768                 .set_one_reg(
1769                     arm64_core_reg_id!(KVM_REG_SIZE_U64, off),
1770                     &kvm_regs_state.regs.regs[i].to_le_bytes(),
1771                 )
1772                 .map_err(|e| cpu::HypervisorCpuError::SetAarchCoreRegister(e.into()))?;
1773             off += std::mem::size_of::<u64>();
1774         }
1775 
1776         let off = offset_of!(user_pt_regs, sp);
1777         self.fd
1778             .lock()
1779             .unwrap()
1780             .set_one_reg(
1781                 arm64_core_reg_id!(KVM_REG_SIZE_U64, off),
1782                 &kvm_regs_state.regs.sp.to_le_bytes(),
1783             )
1784             .map_err(|e| cpu::HypervisorCpuError::SetAarchCoreRegister(e.into()))?;
1785 
1786         let off = offset_of!(user_pt_regs, pc);
1787         self.fd
1788             .lock()
1789             .unwrap()
1790             .set_one_reg(
1791                 arm64_core_reg_id!(KVM_REG_SIZE_U64, off),
1792                 &kvm_regs_state.regs.pc.to_le_bytes(),
1793             )
1794             .map_err(|e| cpu::HypervisorCpuError::SetAarchCoreRegister(e.into()))?;
1795 
1796         let off = offset_of!(user_pt_regs, pstate);
1797         self.fd
1798             .lock()
1799             .unwrap()
1800             .set_one_reg(
1801                 arm64_core_reg_id!(KVM_REG_SIZE_U64, off),
1802                 &kvm_regs_state.regs.pstate.to_le_bytes(),
1803             )
1804             .map_err(|e| cpu::HypervisorCpuError::SetAarchCoreRegister(e.into()))?;
1805 
1806         let off = offset_of!(kvm_regs, sp_el1);
1807         self.fd
1808             .lock()
1809             .unwrap()
1810             .set_one_reg(
1811                 arm64_core_reg_id!(KVM_REG_SIZE_U64, off),
1812                 &kvm_regs_state.sp_el1.to_le_bytes(),
1813             )
1814             .map_err(|e| cpu::HypervisorCpuError::SetAarchCoreRegister(e.into()))?;
1815 
1816         let off = offset_of!(kvm_regs, elr_el1);
1817         self.fd
1818             .lock()
1819             .unwrap()
1820             .set_one_reg(
1821                 arm64_core_reg_id!(KVM_REG_SIZE_U64, off),
1822                 &kvm_regs_state.elr_el1.to_le_bytes(),
1823             )
1824             .map_err(|e| cpu::HypervisorCpuError::SetAarchCoreRegister(e.into()))?;
1825 
1826         let mut off = offset_of!(kvm_regs, spsr);
1827         for i in 0..KVM_NR_SPSR as usize {
1828             self.fd
1829                 .lock()
1830                 .unwrap()
1831                 .set_one_reg(
1832                     arm64_core_reg_id!(KVM_REG_SIZE_U64, off),
1833                     &kvm_regs_state.spsr[i].to_le_bytes(),
1834                 )
1835                 .map_err(|e| cpu::HypervisorCpuError::SetAarchCoreRegister(e.into()))?;
1836             off += std::mem::size_of::<u64>();
1837         }
1838 
1839         let mut off = offset_of!(kvm_regs, fp_regs) + offset_of!(user_fpsimd_state, vregs);
1840         for i in 0..32 {
1841             self.fd
1842                 .lock()
1843                 .unwrap()
1844                 .set_one_reg(
1845                     arm64_core_reg_id!(KVM_REG_SIZE_U128, off),
1846                     &kvm_regs_state.fp_regs.vregs[i].to_le_bytes(),
1847                 )
1848                 .map_err(|e| cpu::HypervisorCpuError::SetAarchCoreRegister(e.into()))?;
1849             off += mem::size_of::<u128>();
1850         }
1851 
1852         let off = offset_of!(kvm_regs, fp_regs) + offset_of!(user_fpsimd_state, fpsr);
1853         self.fd
1854             .lock()
1855             .unwrap()
1856             .set_one_reg(
1857                 arm64_core_reg_id!(KVM_REG_SIZE_U32, off),
1858                 &kvm_regs_state.fp_regs.fpsr.to_le_bytes(),
1859             )
1860             .map_err(|e| cpu::HypervisorCpuError::SetAarchCoreRegister(e.into()))?;
1861 
1862         let off = offset_of!(kvm_regs, fp_regs) + offset_of!(user_fpsimd_state, fpcr);
1863         self.fd
1864             .lock()
1865             .unwrap()
1866             .set_one_reg(
1867                 arm64_core_reg_id!(KVM_REG_SIZE_U32, off),
1868                 &kvm_regs_state.fp_regs.fpcr.to_le_bytes(),
1869             )
1870             .map_err(|e| cpu::HypervisorCpuError::SetAarchCoreRegister(e.into()))?;
1871         Ok(())
1872     }
1873 
1874     #[cfg(target_arch = "riscv64")]
1875     ///
1876     /// Sets the RISC-V vCPU core registers.
1877     /// The `KVM_SET_REGS` ioctl is not available on RISC-V 64-bit;
1878     /// `KVM_SET_ONE_REG` is used instead to set the registers one by one.
1879     ///
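    /// # Example
    ///
    /// A minimal sketch of how a single core register is addressed
    /// (`vcpu_fd` and `boot_ip` are hypothetical here).
    ///
    /// ```ignore
    /// let off = offset_of!(kvm_riscv_core, regs, user_regs_struct, pc);
    /// let id = riscv64_reg_id!(KVM_REG_RISCV_CORE, off);
    /// vcpu_fd.set_one_reg(id, &boot_ip.to_le_bytes())?;
    /// ```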
1880     fn set_regs(&self, state: &StandardRegisters) -> cpu::Result<()> {
1881         // The function follows the exact order of the registers defined in `state`.
1882         // Look there for additional info on the registers.
1883         let kvm_regs_state: kvm_riscv_core = (*state).into();
1884 
1885         let off = offset_of!(kvm_riscv_core, regs, user_regs_struct, pc);
1886         self.fd
1887             .lock()
1888             .unwrap()
1889             .set_one_reg(
1890                 riscv64_reg_id!(KVM_REG_RISCV_CORE, off),
1891                 &kvm_regs_state.regs.pc.to_le_bytes(),
1892             )
1893             .map_err(|e| cpu::HypervisorCpuError::SetRiscvCoreRegister(e.into()))?;
1894 
1895         let off = offset_of!(kvm_riscv_core, regs, user_regs_struct, ra);
1896         self.fd
1897             .lock()
1898             .unwrap()
1899             .set_one_reg(
1900                 riscv64_reg_id!(KVM_REG_RISCV_CORE, off),
1901                 &kvm_regs_state.regs.ra.to_le_bytes(),
1902             )
1903             .map_err(|e| cpu::HypervisorCpuError::SetRiscvCoreRegister(e.into()))?;
1904 
1905         let off = offset_of!(kvm_riscv_core, regs, user_regs_struct, sp);
1906         self.fd
1907             .lock()
1908             .unwrap()
1909             .set_one_reg(
1910                 riscv64_reg_id!(KVM_REG_RISCV_CORE, off),
1911                 &kvm_regs_state.regs.sp.to_le_bytes(),
1912             )
1913             .map_err(|e| cpu::HypervisorCpuError::SetRiscvCoreRegister(e.into()))?;
1914 
1915         let off = offset_of!(kvm_riscv_core, regs, user_regs_struct, gp);
1916         self.fd
1917             .lock()
1918             .unwrap()
1919             .set_one_reg(
1920                 riscv64_reg_id!(KVM_REG_RISCV_CORE, off),
1921                 &kvm_regs_state.regs.gp.to_le_bytes(),
1922             )
1923             .map_err(|e| cpu::HypervisorCpuError::SetRiscvCoreRegister(e.into()))?;
1924 
1925         let off = offset_of!(kvm_riscv_core, regs, user_regs_struct, tp);
1926         self.fd
1927             .lock()
1928             .unwrap()
1929             .set_one_reg(
1930                 riscv64_reg_id!(KVM_REG_RISCV_CORE, off),
1931                 &kvm_regs_state.regs.tp.to_le_bytes(),
1932             )
1933             .map_err(|e| cpu::HypervisorCpuError::SetRiscvCoreRegister(e.into()))?;
1934 
1935         let off = offset_of!(kvm_riscv_core, regs, user_regs_struct, t0);
1936         self.fd
1937             .lock()
1938             .unwrap()
1939             .set_one_reg(
1940                 riscv64_reg_id!(KVM_REG_RISCV_CORE, off),
1941                 &kvm_regs_state.regs.t0.to_le_bytes(),
1942             )
1943             .map_err(|e| cpu::HypervisorCpuError::SetRiscvCoreRegister(e.into()))?;
1944 
1945         let off = offset_of!(kvm_riscv_core, regs, user_regs_struct, t1);
1946         self.fd
1947             .lock()
1948             .unwrap()
1949             .set_one_reg(
1950                 riscv64_reg_id!(KVM_REG_RISCV_CORE, off),
1951                 &kvm_regs_state.regs.t1.to_le_bytes(),
1952             )
1953             .map_err(|e| cpu::HypervisorCpuError::SetRiscvCoreRegister(e.into()))?;
1954 
1955         let off = offset_of!(kvm_riscv_core, regs, user_regs_struct, t2);
1956         self.fd
1957             .lock()
1958             .unwrap()
1959             .set_one_reg(
1960                 riscv64_reg_id!(KVM_REG_RISCV_CORE, off),
1961                 &kvm_regs_state.regs.t2.to_le_bytes(),
1962             )
1963             .map_err(|e| cpu::HypervisorCpuError::SetRiscvCoreRegister(e.into()))?;
1964 
1965         let off = offset_of!(kvm_riscv_core, regs, user_regs_struct, s0);
1966         self.fd
1967             .lock()
1968             .unwrap()
1969             .set_one_reg(
1970                 riscv64_reg_id!(KVM_REG_RISCV_CORE, off),
1971                 &kvm_regs_state.regs.s0.to_le_bytes(),
1972             )
1973             .map_err(|e| cpu::HypervisorCpuError::SetRiscvCoreRegister(e.into()))?;
1974 
1975         let off = offset_of!(kvm_riscv_core, regs, user_regs_struct, s1);
1976         self.fd
1977             .lock()
1978             .unwrap()
1979             .set_one_reg(
1980                 riscv64_reg_id!(KVM_REG_RISCV_CORE, off),
1981                 &kvm_regs_state.regs.s1.to_le_bytes(),
1982             )
1983             .map_err(|e| cpu::HypervisorCpuError::SetRiscvCoreRegister(e.into()))?;
1984 
1985         let off = offset_of!(kvm_riscv_core, regs, user_regs_struct, a0);
1986         self.fd
1987             .lock()
1988             .unwrap()
1989             .set_one_reg(
1990                 riscv64_reg_id!(KVM_REG_RISCV_CORE, off),
1991                 &kvm_regs_state.regs.a0.to_le_bytes(),
1992             )
1993             .map_err(|e| cpu::HypervisorCpuError::SetRiscvCoreRegister(e.into()))?;
1994 
1995         let off = offset_of!(kvm_riscv_core, regs, user_regs_struct, a1);
1996         self.fd
1997             .lock()
1998             .unwrap()
1999             .set_one_reg(
2000                 riscv64_reg_id!(KVM_REG_RISCV_CORE, off),
2001                 &kvm_regs_state.regs.a1.to_le_bytes(),
2002             )
2003             .map_err(|e| cpu::HypervisorCpuError::SetRiscvCoreRegister(e.into()))?;
2004 
2005         let off = offset_of!(kvm_riscv_core, regs, user_regs_struct, a2);
2006         self.fd
2007             .lock()
2008             .unwrap()
2009             .set_one_reg(
2010                 riscv64_reg_id!(KVM_REG_RISCV_CORE, off),
2011                 &kvm_regs_state.regs.a2.to_le_bytes(),
2012             )
2013             .map_err(|e| cpu::HypervisorCpuError::SetRiscvCoreRegister(e.into()))?;
2014 
2015         let off = offset_of!(kvm_riscv_core, regs, user_regs_struct, a3);
2016         self.fd
2017             .lock()
2018             .unwrap()
2019             .set_one_reg(
2020                 riscv64_reg_id!(KVM_REG_RISCV_CORE, off),
2021                 &kvm_regs_state.regs.a3.to_le_bytes(),
2022             )
2023             .map_err(|e| cpu::HypervisorCpuError::SetRiscvCoreRegister(e.into()))?;
2024 
2025         let off = offset_of!(kvm_riscv_core, regs, user_regs_struct, a4);
2026         self.fd
2027             .lock()
2028             .unwrap()
2029             .set_one_reg(
2030                 riscv64_reg_id!(KVM_REG_RISCV_CORE, off),
2031                 &kvm_regs_state.regs.a4.to_le_bytes(),
2032             )
2033             .map_err(|e| cpu::HypervisorCpuError::SetRiscvCoreRegister(e.into()))?;
2034 
2035         let off = offset_of!(kvm_riscv_core, regs, user_regs_struct, a5);
2036         self.fd
2037             .lock()
2038             .unwrap()
2039             .set_one_reg(
2040                 riscv64_reg_id!(KVM_REG_RISCV_CORE, off),
2041                 &kvm_regs_state.regs.a5.to_le_bytes(),
2042             )
2043             .map_err(|e| cpu::HypervisorCpuError::SetRiscvCoreRegister(e.into()))?;
2044 
2045         let off = offset_of!(kvm_riscv_core, regs, user_regs_struct, a6);
2046         self.fd
2047             .lock()
2048             .unwrap()
2049             .set_one_reg(
2050                 riscv64_reg_id!(KVM_REG_RISCV_CORE, off),
2051                 &kvm_regs_state.regs.a6.to_le_bytes(),
2052             )
2053             .map_err(|e| cpu::HypervisorCpuError::SetRiscvCoreRegister(e.into()))?;
2054 
2055         let off = offset_of!(kvm_riscv_core, regs, user_regs_struct, a7);
2056         self.fd
2057             .lock()
2058             .unwrap()
2059             .set_one_reg(
2060                 riscv64_reg_id!(KVM_REG_RISCV_CORE, off),
2061                 &kvm_regs_state.regs.a7.to_le_bytes(),
2062             )
2063             .map_err(|e| cpu::HypervisorCpuError::SetRiscvCoreRegister(e.into()))?;
2064 
2065         let off = offset_of!(kvm_riscv_core, regs, user_regs_struct, s2);
2066         self.fd
2067             .lock()
2068             .unwrap()
2069             .set_one_reg(
2070                 riscv64_reg_id!(KVM_REG_RISCV_CORE, off),
2071                 &kvm_regs_state.regs.s2.to_le_bytes(),
2072             )
2073             .map_err(|e| cpu::HypervisorCpuError::SetRiscvCoreRegister(e.into()))?;
2074 
2075         let off = offset_of!(kvm_riscv_core, regs, user_regs_struct, s3);
2076         self.fd
2077             .lock()
2078             .unwrap()
2079             .set_one_reg(
2080                 riscv64_reg_id!(KVM_REG_RISCV_CORE, off),
2081                 &kvm_regs_state.regs.s3.to_le_bytes(),
2082             )
2083             .map_err(|e| cpu::HypervisorCpuError::SetRiscvCoreRegister(e.into()))?;
2084 
2085         let off = offset_of!(kvm_riscv_core, regs, user_regs_struct, s4);
2086         self.fd
2087             .lock()
2088             .unwrap()
2089             .set_one_reg(
2090                 riscv64_reg_id!(KVM_REG_RISCV_CORE, off),
2091                 &kvm_regs_state.regs.s4.to_le_bytes(),
2092             )
2093             .map_err(|e| cpu::HypervisorCpuError::SetRiscvCoreRegister(e.into()))?;
2094 
2095         let off = offset_of!(kvm_riscv_core, regs, user_regs_struct, s5);
2096         self.fd
2097             .lock()
2098             .unwrap()
2099             .set_one_reg(
2100                 riscv64_reg_id!(KVM_REG_RISCV_CORE, off),
2101                 &kvm_regs_state.regs.s5.to_le_bytes(),
2102             )
2103             .map_err(|e| cpu::HypervisorCpuError::SetRiscvCoreRegister(e.into()))?;
2104 
2105         let off = offset_of!(kvm_riscv_core, regs, user_regs_struct, s6);
2106         self.fd
2107             .lock()
2108             .unwrap()
2109             .set_one_reg(
2110                 riscv64_reg_id!(KVM_REG_RISCV_CORE, off),
2111                 &kvm_regs_state.regs.s6.to_le_bytes(),
2112             )
2113             .map_err(|e| cpu::HypervisorCpuError::SetRiscvCoreRegister(e.into()))?;
2114 
2115         let off = offset_of!(kvm_riscv_core, regs, user_regs_struct, s7);
2116         self.fd
2117             .lock()
2118             .unwrap()
2119             .set_one_reg(
2120                 riscv64_reg_id!(KVM_REG_RISCV_CORE, off),
2121                 &kvm_regs_state.regs.s7.to_le_bytes(),
2122             )
2123             .map_err(|e| cpu::HypervisorCpuError::SetRiscvCoreRegister(e.into()))?;
2124 
2125         let off = offset_of!(kvm_riscv_core, regs, user_regs_struct, s8);
2126         self.fd
2127             .lock()
2128             .unwrap()
2129             .set_one_reg(
2130                 riscv64_reg_id!(KVM_REG_RISCV_CORE, off),
2131                 &kvm_regs_state.regs.s8.to_le_bytes(),
2132             )
2133             .map_err(|e| cpu::HypervisorCpuError::SetRiscvCoreRegister(e.into()))?;
2134 
2135         let off = offset_of!(kvm_riscv_core, regs, user_regs_struct, s9);
2136         self.fd
2137             .lock()
2138             .unwrap()
2139             .set_one_reg(
2140                 riscv64_reg_id!(KVM_REG_RISCV_CORE, off),
2141                 &kvm_regs_state.regs.s9.to_le_bytes(),
2142             )
2143             .map_err(|e| cpu::HypervisorCpuError::SetRiscvCoreRegister(e.into()))?;
2144 
2145         let off = offset_of!(kvm_riscv_core, regs, user_regs_struct, s10);
2146         self.fd
2147             .lock()
2148             .unwrap()
2149             .set_one_reg(
2150                 riscv64_reg_id!(KVM_REG_RISCV_CORE, off),
2151                 &kvm_regs_state.regs.s10.to_le_bytes(),
2152             )
2153             .map_err(|e| cpu::HypervisorCpuError::SetRiscvCoreRegister(e.into()))?;
2154 
2155         let off = offset_of!(kvm_riscv_core, regs, user_regs_struct, s11);
2156         self.fd
2157             .lock()
2158             .unwrap()
2159             .set_one_reg(
2160                 riscv64_reg_id!(KVM_REG_RISCV_CORE, off),
2161                 &kvm_regs_state.regs.s11.to_le_bytes(),
2162             )
2163             .map_err(|e| cpu::HypervisorCpuError::SetRiscvCoreRegister(e.into()))?;
2164 
2165         let off = offset_of!(kvm_riscv_core, regs, user_regs_struct, t3);
2166         self.fd
2167             .lock()
2168             .unwrap()
2169             .set_one_reg(
2170                 riscv64_reg_id!(KVM_REG_RISCV_CORE, off),
2171                 &kvm_regs_state.regs.t3.to_le_bytes(),
2172             )
2173             .map_err(|e| cpu::HypervisorCpuError::SetRiscvCoreRegister(e.into()))?;
2174 
2175         let off = offset_of!(kvm_riscv_core, regs, user_regs_struct, t4);
2176         self.fd
2177             .lock()
2178             .unwrap()
2179             .set_one_reg(
2180                 riscv64_reg_id!(KVM_REG_RISCV_CORE, off),
2181                 &kvm_regs_state.regs.t4.to_le_bytes(),
2182             )
2183             .map_err(|e| cpu::HypervisorCpuError::SetRiscvCoreRegister(e.into()))?;
2184 
2185         let off = offset_of!(kvm_riscv_core, regs, user_regs_struct, t5);
2186         self.fd
2187             .lock()
2188             .unwrap()
2189             .set_one_reg(
2190                 riscv64_reg_id!(KVM_REG_RISCV_CORE, off),
2191                 &kvm_regs_state.regs.t5.to_le_bytes(),
2192             )
2193             .map_err(|e| cpu::HypervisorCpuError::SetRiscvCoreRegister(e.into()))?;
2194 
2195         let off = offset_of!(kvm_riscv_core, regs, user_regs_struct, t6);
2196         self.fd
2197             .lock()
2198             .unwrap()
2199             .set_one_reg(
2200                 riscv64_reg_id!(KVM_REG_RISCV_CORE, off),
2201                 &kvm_regs_state.regs.t6.to_le_bytes(),
2202             )
2203             .map_err(|e| cpu::HypervisorCpuError::SetRiscvCoreRegister(e.into()))?;
2204 
2205         let off = offset_of!(kvm_riscv_core, mode);
2206         self.fd
2207             .lock()
2208             .unwrap()
2209             .set_one_reg(
2210                 riscv64_reg_id!(KVM_REG_RISCV_CORE, off),
2211                 &kvm_regs_state.mode.to_le_bytes(),
2212             )
2213             .map_err(|e| cpu::HypervisorCpuError::SetRiscvCoreRegister(e.into()))?;
2214 
2215         Ok(())
2216     }
2217 
2218     #[cfg(target_arch = "x86_64")]
2219     ///
2220     /// Returns the vCPU special registers.
2221     ///
2222     fn get_sregs(&self) -> cpu::Result<SpecialRegisters> {
2223         Ok(self
2224             .fd
2225             .lock()
2226             .unwrap()
2227             .get_sregs()
2228             .map_err(|e| cpu::HypervisorCpuError::GetSpecialRegs(e.into()))?
2229             .into())
2230     }
2231 
2232     #[cfg(target_arch = "x86_64")]
2233     ///
2234     /// Sets the vCPU special registers using the `KVM_SET_SREGS` ioctl.
2235     ///
2236     fn set_sregs(&self, sregs: &SpecialRegisters) -> cpu::Result<()> {
2237         let sregs = (*sregs).into();
2238         self.fd
2239             .lock()
2240             .unwrap()
2241             .set_sregs(&sregs)
2242             .map_err(|e| cpu::HypervisorCpuError::SetSpecialRegs(e.into()))
2243     }
2244 
2245     #[cfg(target_arch = "x86_64")]
2246     ///
2247     /// Returns the floating point state (FPU) from the vCPU.
2248     ///
2249     fn get_fpu(&self) -> cpu::Result<FpuState> {
2250         Ok(self
2251             .fd
2252             .lock()
2253             .unwrap()
2254             .get_fpu()
2255             .map_err(|e| cpu::HypervisorCpuError::GetFloatingPointRegs(e.into()))?
2256             .into())
2257     }
2258 
2259     #[cfg(target_arch = "x86_64")]
2260     ///
2261     /// Set the floating point state (FPU) of a vCPU using the `KVM_SET_FPU` ioctl.
2262     ///
2263     fn set_fpu(&self, fpu: &FpuState) -> cpu::Result<()> {
2264         let fpu: kvm_bindings::kvm_fpu = (*fpu).clone().into();
2265         self.fd
2266             .lock()
2267             .unwrap()
2268             .set_fpu(&fpu)
2269             .map_err(|e| cpu::HypervisorCpuError::SetFloatingPointRegs(e.into()))
2270     }
2271 
2272     #[cfg(target_arch = "x86_64")]
2273     ///
2274     /// X86 specific call to set up the CPUID registers.
2275     ///
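    /// # Example
    ///
    /// A sketch, assuming a `vcpu` created as in the `state()` example; the
    /// patched leaf and value are hypothetical.
    ///
    /// ```ignore
    /// let mut cpuid = vcpu.get_cpuid2(kvm_bindings::KVM_MAX_CPUID_ENTRIES)?;
    /// for entry in cpuid.iter_mut() {
    ///     if entry.function == 0x4000_0000 {
    ///         entry.eax = 0x4000_0001;
    ///     }
    /// }
    /// vcpu.set_cpuid2(&cpuid)?;
    /// ```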
2276     fn set_cpuid2(&self, cpuid: &[CpuIdEntry]) -> cpu::Result<()> {
2277         let cpuid: Vec<kvm_bindings::kvm_cpuid_entry2> =
2278             cpuid.iter().map(|e| (*e).into()).collect();
2279         let kvm_cpuid = <CpuId>::from_entries(&cpuid)
2280             .map_err(|_| cpu::HypervisorCpuError::SetCpuid(anyhow!("failed to create CpuId")))?;
2281 
2282         self.fd
2283             .lock()
2284             .unwrap()
2285             .set_cpuid2(&kvm_cpuid)
2286             .map_err(|e| cpu::HypervisorCpuError::SetCpuid(e.into()))
2287     }
2288 
2289     #[cfg(target_arch = "x86_64")]
2290     ///
2291     /// X86 specific call to enable Hyper-V SynIC.
2292     ///
2293     fn enable_hyperv_synic(&self) -> cpu::Result<()> {
2294         // Record that Hyper-V SynIC is enabled and emulated, as this will
2295         // later influence which MSRs should be saved.
2296         self.hyperv_synic.store(true, Ordering::Release);
2297 
2298         let cap = kvm_enable_cap {
2299             cap: KVM_CAP_HYPERV_SYNIC,
2300             ..Default::default()
2301         };
2302         self.fd
2303             .lock()
2304             .unwrap()
2305             .enable_cap(&cap)
2306             .map_err(|e| cpu::HypervisorCpuError::EnableHyperVSyncIc(e.into()))
2307     }
2308 
2309     ///
2310     /// X86 specific call to retrieve the CPUID registers.
2311     ///
2312     #[cfg(target_arch = "x86_64")]
2313     fn get_cpuid2(&self, num_entries: usize) -> cpu::Result<Vec<CpuIdEntry>> {
2314         let kvm_cpuid = self
2315             .fd
2316             .lock()
2317             .unwrap()
2318             .get_cpuid2(num_entries)
2319             .map_err(|e| cpu::HypervisorCpuError::GetCpuid(e.into()))?;
2320 
2321         let v = kvm_cpuid.as_slice().iter().map(|e| (*e).into()).collect();
2322 
2323         Ok(v)
2324     }
2325 
2326     #[cfg(target_arch = "x86_64")]
2327     ///
2328     /// Returns the state of the LAPIC (Local Advanced Programmable Interrupt Controller).
2329     ///
2330     fn get_lapic(&self) -> cpu::Result<LapicState> {
2331         Ok(self
2332             .fd
2333             .lock()
2334             .unwrap()
2335             .get_lapic()
2336             .map_err(|e| cpu::HypervisorCpuError::GetlapicState(e.into()))?
2337             .into())
2338     }
2339 
2340     #[cfg(target_arch = "x86_64")]
2341     ///
2342     /// Sets the state of the LAPIC (Local Advanced Programmable Interrupt Controller).
2343     ///
2344     fn set_lapic(&self, klapic: &LapicState) -> cpu::Result<()> {
2345         let klapic: kvm_bindings::kvm_lapic_state = (*klapic).clone().into();
2346         self.fd
2347             .lock()
2348             .unwrap()
2349             .set_lapic(&klapic)
2350             .map_err(|e| cpu::HypervisorCpuError::SetLapicState(e.into()))
2351     }
2352 
2353     #[cfg(target_arch = "x86_64")]
2354     ///
2355     /// Returns the model-specific registers (MSR) for this vCPU.
2356     ///
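    /// # Example
    ///
    /// The entries must be prepopulated with the MSR indices of interest.
    /// A sketch using the TSC MSR (index 0x10), with `vcpu` assumed:
    ///
    /// ```ignore
    /// let mut msrs = vec![MsrEntry {
    ///     index: 0x10,
    ///     ..Default::default()
    /// }];
    /// let read = vcpu.get_msrs(&mut msrs)?;
    /// assert_eq!(read, 1);
    /// let tsc = msrs[0].data;
    /// ```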
2357     fn get_msrs(&self, msrs: &mut Vec<MsrEntry>) -> cpu::Result<usize> {
2358         let kvm_msrs: Vec<kvm_msr_entry> = msrs.iter().map(|e| (*e).into()).collect();
2359         let mut kvm_msrs = MsrEntries::from_entries(&kvm_msrs).unwrap();
2360         let succ = self
2361             .fd
2362             .lock()
2363             .unwrap()
2364             .get_msrs(&mut kvm_msrs)
2365             .map_err(|e| cpu::HypervisorCpuError::GetMsrEntries(e.into()))?;
2366 
2367         msrs[..succ].copy_from_slice(
2368             &kvm_msrs.as_slice()[..succ]
2369                 .iter()
2370                 .map(|e| (*e).into())
2371                 .collect::<Vec<MsrEntry>>(),
2372         );
2373 
2374         Ok(succ)
2375     }
2376 
2377     #[cfg(target_arch = "x86_64")]
2378     ///
2379     /// Set up the model-specific registers (MSR) for this vCPU.
2380     /// Returns the number of MSR entries actually written.
2381     ///
2382     fn set_msrs(&self, msrs: &[MsrEntry]) -> cpu::Result<usize> {
2383         let kvm_msrs: Vec<kvm_msr_entry> = msrs.iter().map(|e| (*e).into()).collect();
2384         let kvm_msrs = MsrEntries::from_entries(&kvm_msrs).unwrap();
2385         self.fd
2386             .lock()
2387             .unwrap()
2388             .set_msrs(&kvm_msrs)
2389             .map_err(|e| cpu::HypervisorCpuError::SetMsrEntries(e.into()))
2390     }
2391 
2392     ///
2393     /// Returns the vcpu's current "multiprocessing state".
2394     ///
2395     fn get_mp_state(&self) -> cpu::Result<MpState> {
2396         Ok(self
2397             .fd
2398             .lock()
2399             .unwrap()
2400             .get_mp_state()
2401             .map_err(|e| cpu::HypervisorCpuError::GetMpState(e.into()))?
2402             .into())
2403     }
2404 
2405     ///
2406     /// Sets the vcpu's current "multiprocessing state".
2407     ///
2408     fn set_mp_state(&self, mp_state: MpState) -> cpu::Result<()> {
2409         self.fd
2410             .lock()
2411             .unwrap()
2412             .set_mp_state(mp_state.into())
2413             .map_err(|e| cpu::HypervisorCpuError::SetMpState(e.into()))
2414     }
2415 
2416     #[cfg(target_arch = "x86_64")]
2417     ///
2418     /// Translates a guest virtual address to a guest physical address using the `KVM_TRANSLATE` ioctl.
2419     ///
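    /// # Example
    ///
    /// A sketch with a hypothetical guest virtual address; the flags
    /// argument is currently unused by this backend.
    ///
    /// ```ignore
    /// let (gpa, _) = vcpu.translate_gva(0xffff_8000_0000_0000, 0)?;
    /// ```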
2420     fn translate_gva(&self, gva: u64, _flags: u64) -> cpu::Result<(u64, u32)> {
2421         let tr = self
2422             .fd
2423             .lock()
2424             .unwrap()
2425             .translate_gva(gva)
2426             .map_err(|e| cpu::HypervisorCpuError::TranslateVirtualAddress(e.into()))?;
2427         // tr.valid is set if the GVA is mapped to a valid GPA.
2428         match tr.valid {
2429             0 => Err(cpu::HypervisorCpuError::TranslateVirtualAddress(anyhow!(
2430                 "Invalid GVA: {:#x}",
2431                 gva
2432             ))),
2433             _ => Ok((tr.physical_address, 0)),
2434         }
2435     }
2436 
2437     ///
2438     /// Triggers a run of the current virtual CPU, returning an exit reason.
2439     ///
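    /// # Example
    ///
    /// A sketch of a minimal dispatch loop over the translated exit
    /// reasons, with `vcpu` assumed:
    ///
    /// ```ignore
    /// loop {
    ///     match vcpu.run()? {
    ///         cpu::VmExit::Reset => break,     // guest requested a reset
    ///         cpu::VmExit::Shutdown => break,  // guest powered off
    ///         cpu::VmExit::Ignore => continue, // PIO/MMIO already handled
    ///         other => panic!("unhandled exit: {:?}", other),
    ///     }
    /// }
    /// ```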
2440     fn run(&self) -> std::result::Result<cpu::VmExit, cpu::HypervisorCpuError> {
2441         match self.fd.lock().unwrap().run() {
2442             Ok(run) => match run {
2443                 #[cfg(target_arch = "x86_64")]
2444                 VcpuExit::IoIn(addr, data) => {
2445                     if let Some(vm_ops) = &self.vm_ops {
2446                         return vm_ops
2447                             .pio_read(addr.into(), data)
2448                             .map(|_| cpu::VmExit::Ignore)
2449                             .map_err(|e| cpu::HypervisorCpuError::RunVcpu(e.into()));
2450                     }
2451 
2452                     Ok(cpu::VmExit::Ignore)
2453                 }
2454                 #[cfg(target_arch = "x86_64")]
2455                 VcpuExit::IoOut(addr, data) => {
2456                     if let Some(vm_ops) = &self.vm_ops {
2457                         return vm_ops
2458                             .pio_write(addr.into(), data)
2459                             .map(|_| cpu::VmExit::Ignore)
2460                             .map_err(|e| cpu::HypervisorCpuError::RunVcpu(e.into()));
2461                     }
2462 
2463                     Ok(cpu::VmExit::Ignore)
2464                 }
2465                 #[cfg(target_arch = "x86_64")]
2466                 VcpuExit::IoapicEoi(vector) => Ok(cpu::VmExit::IoapicEoi(vector)),
2467                 #[cfg(target_arch = "x86_64")]
2468                 VcpuExit::Shutdown | VcpuExit::Hlt => Ok(cpu::VmExit::Reset),
2469 
2470                 #[cfg(target_arch = "aarch64")]
2471                 VcpuExit::SystemEvent(event_type, flags) => {
2472                     use kvm_bindings::{KVM_SYSTEM_EVENT_RESET, KVM_SYSTEM_EVENT_SHUTDOWN};
2473                     // On AArch64, when the VM is shut down, run() returns
2474                     // VcpuExit::SystemEvent with reason KVM_SYSTEM_EVENT_SHUTDOWN.
2475                     if event_type == KVM_SYSTEM_EVENT_RESET {
2476                         Ok(cpu::VmExit::Reset)
2477                     } else if event_type == KVM_SYSTEM_EVENT_SHUTDOWN {
2478                         Ok(cpu::VmExit::Shutdown)
2479                     } else {
2480                         Err(cpu::HypervisorCpuError::RunVcpu(anyhow!(
2481                             "Unexpected system event with type 0x{:x}, flags 0x{:x?}",
2482                             event_type,
2483                             flags
2484                         )))
2485                     }
2486                 }
2487 
2488                 VcpuExit::MmioRead(addr, data) => {
2489                     if let Some(vm_ops) = &self.vm_ops {
2490                         return vm_ops
2491                             .mmio_read(addr, data)
2492                             .map(|_| cpu::VmExit::Ignore)
2493                             .map_err(|e| cpu::HypervisorCpuError::RunVcpu(e.into()));
2494                     }
2495 
2496                     Ok(cpu::VmExit::Ignore)
2497                 }
2498                 VcpuExit::MmioWrite(addr, data) => {
2499                     if let Some(vm_ops) = &self.vm_ops {
2500                         return vm_ops
2501                             .mmio_write(addr, data)
2502                             .map(|_| cpu::VmExit::Ignore)
2503                             .map_err(|e| cpu::HypervisorCpuError::RunVcpu(e.into()));
2504                     }
2505 
2506                     Ok(cpu::VmExit::Ignore)
2507                 }
2508                 VcpuExit::Hyperv => Ok(cpu::VmExit::Hyperv),
2509                 #[cfg(feature = "tdx")]
2510                 VcpuExit::Unsupported(KVM_EXIT_TDX) => Ok(cpu::VmExit::Tdx),
2511                 VcpuExit::Debug(_) => Ok(cpu::VmExit::Debug),
2512 
2513                 r => Err(cpu::HypervisorCpuError::RunVcpu(anyhow!(
2514                     "Unexpected exit reason on vcpu run: {:?}",
2515                     r
2516                 ))),
2517             },
2518 
2519             Err(ref e) => match e.errno() {
2520                 libc::EAGAIN | libc::EINTR => Ok(cpu::VmExit::Ignore),
2521                 _ => Err(cpu::HypervisorCpuError::RunVcpu(anyhow!(
2522                     "VCPU error {:?}",
2523                     e
2524                 ))),
2525             },
2526         }
2527     }
2528 
2529     #[cfg(target_arch = "x86_64")]
2530     ///
2531     /// Let the guest know that it has been paused, which prevents
2532     /// potential soft lockups when it is resumed.
2533     ///
2534     fn notify_guest_clock_paused(&self) -> cpu::Result<()> {
2535         if let Err(e) = self.fd.lock().unwrap().kvmclock_ctrl() {
2536             // The Linux kernel returns -EINVAL if the PV clock isn't yet initialised,
2537             // which could be because we're still in firmware or the guest doesn't
2538             // use the KVM clock.
2539             if e.errno() != libc::EINVAL {
2540                 return Err(cpu::HypervisorCpuError::NotifyGuestClockPaused(e.into()));
2541             }
2542         }
2543 
2544         Ok(())
2545     }
2546 
2547     #[cfg(not(target_arch = "riscv64"))]
2548     ///
2549     /// Sets debug registers to set hardware breakpoints and/or enable single step.
2550     ///
2551     fn set_guest_debug(
2552         &self,
2553         addrs: &[vm_memory::GuestAddress],
2554         singlestep: bool,
2555     ) -> cpu::Result<()> {
2556         let mut dbg = kvm_guest_debug {
2557             #[cfg(target_arch = "x86_64")]
2558             control: KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP,
2559             #[cfg(target_arch = "aarch64")]
2560             control: KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW,
2561             ..Default::default()
2562         };
2563         if singlestep {
2564             dbg.control |= KVM_GUESTDBG_SINGLESTEP;
2565         }
2566 
2567         // Set the debug registers.
2568         // Here we assume that the number of addresses does not exceed what
2569         // `Hypervisor::get_guest_debug_hw_bps()` specifies.
2570         #[cfg(target_arch = "x86_64")]
2571         {
2572             // Set bits 9 and 10.
2573             // bit 9: GE (global exact breakpoint enable) flag.
2574             // bit 10: always 1.
2575             dbg.arch.debugreg[7] = 0x0600;
2576 
2577             for (i, addr) in addrs.iter().enumerate() {
2578                 dbg.arch.debugreg[i] = addr.0;
2579                 // Set global breakpoint enable flag
2580                 dbg.arch.debugreg[7] |= 2 << (i * 2);
2581             }
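            // Worked example (addresses hypothetical): with two breakpoints the
            // loop above ORs in 2 << 0 and 2 << 2, so DR7 ends up as
            // 0x0600 | 0x2 | 0x8 = 0x060a (G0, G1, GE and bit 10 set).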
2582         }
2583         #[cfg(target_arch = "aarch64")]
2584         {
2585             for (i, addr) in addrs.iter().enumerate() {
2586                 // DBGBCR_EL1 (Debug Breakpoint Control Registers, D13.3.2):
2587                 // bit 0: 1 (Enabled)
2588                 // bit 1~2: 0b11 (PMC = EL1/EL0)
2589                 // bit 5~8: 0b1111 (BAS = AArch64)
2590                 // others: 0
2591                 dbg.arch.dbg_bcr[i] = 0b1u64 | 0b110u64 | 0b1_1110_0000u64;
2592                 // DBGBVR_EL1 (Debug Breakpoint Value Registers, D13.3.3):
2593                 // bit 2~52: VA[2:52]
2594                 dbg.arch.dbg_bvr[i] = (!0u64 >> 11) & addr.0;
2595             }
2596         }
2597         self.fd
2598             .lock()
2599             .unwrap()
2600             .set_guest_debug(&dbg)
2601             .map_err(|e| cpu::HypervisorCpuError::SetDebugRegs(e.into()))
2602     }
2603 
2604     #[cfg(target_arch = "aarch64")]
2605     fn vcpu_init(&self, kvi: &VcpuInit) -> cpu::Result<()> {
2606         self.fd
2607             .lock()
2608             .unwrap()
2609             .vcpu_init(kvi)
2610             .map_err(|e| cpu::HypervisorCpuError::VcpuInit(e.into()))
2611     }
2612 
2613     #[cfg(target_arch = "aarch64")]
2614     fn vcpu_finalize(&self, feature: i32) -> cpu::Result<()> {
2615         self.fd
2616             .lock()
2617             .unwrap()
2618             .vcpu_finalize(&feature)
2619             .map_err(|e| cpu::HypervisorCpuError::VcpuFinalize(e.into()))
2620     }
2621 
2622     #[cfg(any(target_arch = "aarch64", target_arch = "riscv64"))]
2623     ///
2624     /// Gets a list of the guest registers that are supported for the
2625     /// KVM_GET_ONE_REG/KVM_SET_ONE_REG calls.
2626     ///
2627     fn get_reg_list(&self, reg_list: &mut RegList) -> cpu::Result<()> {
2628         self.fd
2629             .lock()
2630             .unwrap()
2631             .get_reg_list(reg_list)
2632             .map_err(|e| cpu::HypervisorCpuError::GetRegList(e.into()))
2633     }
2634 
2635     ///
2636     /// Gets the value of a system register
2637     ///
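    /// # Example
    ///
    /// A sketch reading MPIDR_EL1, whose Arm-standard u32 encoding
    /// (op0=3, op1=0, CRn=0, CRm=0, op2=5) is assumed to be available as a
    /// constant named `MPIDR_EL1`:
    ///
    /// ```ignore
    /// let mpidr = vcpu.get_sys_reg(MPIDR_EL1)?;
    /// ```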
2638     #[cfg(target_arch = "aarch64")]
2639     fn get_sys_reg(&self, sys_reg: u32) -> cpu::Result<u64> {
2640         //
2641         // The Arm Architecture Reference Manual defines the encoding of
2642         // AArch64 system registers, see
2643         // https://developer.arm.com/documentation/ddi0487 (chapter D12).
2644         // KVM, however, defines its own ID for each AArch64 system register,
2645         // which is used when calling `KVM_GET/SET_ONE_REG` to access a
2646         // system register of a guest.
2647         // A mapping exists between the Arm standard encoding and the KVM ID.
2648         // This function takes the standard u32 ID as input parameter, converts
2649         // it to the corresponding KVM ID, and calls the `KVM_GET_ONE_REG` API
2650         // to get the value of the system register.
2651         //
2652         let id: u64 = KVM_REG_ARM64
2653             | KVM_REG_SIZE_U64
2654             | KVM_REG_ARM64_SYSREG as u64
2655             | ((((sys_reg) >> 5)
2656                 & (KVM_REG_ARM64_SYSREG_OP0_MASK
2657                     | KVM_REG_ARM64_SYSREG_OP1_MASK
2658                     | KVM_REG_ARM64_SYSREG_CRN_MASK
2659                     | KVM_REG_ARM64_SYSREG_CRM_MASK
2660                     | KVM_REG_ARM64_SYSREG_OP2_MASK)) as u64);
2661         let mut bytes = [0_u8; 8];
2662         self.fd
2663             .lock()
2664             .unwrap()
2665             .get_one_reg(id, &mut bytes)
2666             .map_err(|e| cpu::HypervisorCpuError::GetSysRegister(e.into()))?;
2667         Ok(u64::from_le_bytes(bytes))
2668     }
2669 
2670     ///
2671     /// Gets the value of a non-core register
2672     ///
2673     #[cfg(target_arch = "riscv64")]
2674     fn get_non_core_reg(&self, _non_core_reg: u32) -> cpu::Result<u64> {
2675         unimplemented!()
2676     }
2677 
2678     ///
2679     /// Configure core registers for a given CPU.
2680     ///
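    /// # Example
    ///
    /// A sketch of the boot contract this establishes (`kernel_entry` and
    /// `fdt_addr` are hypothetical): vCPU 0 starts at the kernel entry point
    /// with x0 holding the FDT address, while secondary vCPUs only get their
    /// PSTATE set and await a PSCI wake-up.
    ///
    /// ```ignore
    /// vcpu.setup_regs(0, kernel_entry, fdt_addr)?;
    /// ```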
2681     #[cfg(target_arch = "aarch64")]
2682     fn setup_regs(&self, cpu_id: u8, boot_ip: u64, fdt_start: u64) -> cpu::Result<()> {
2683         #[allow(non_upper_case_globals)]
2684         // PSR (Processor State Register) bits.
2685         // Taken from arch/arm64/include/uapi/asm/ptrace.h.
2686         const PSR_MODE_EL1h: u64 = 0x0000_0005;
2687         const PSR_F_BIT: u64 = 0x0000_0040;
2688         const PSR_I_BIT: u64 = 0x0000_0080;
2689         const PSR_A_BIT: u64 = 0x0000_0100;
2690         const PSR_D_BIT: u64 = 0x0000_0200;
2691         // Taken from arch/arm64/kvm/inject_fault.c.
2692         const PSTATE_FAULT_BITS_64: u64 =
2693             PSR_MODE_EL1h | PSR_A_BIT | PSR_F_BIT | PSR_I_BIT | PSR_D_BIT;
2694 
2695         let kreg_off = offset_of!(kvm_regs, regs);
2696 
2697         // Get the offset of the PSTATE (Processor State) register.
2698         let pstate = offset_of!(user_pt_regs, pstate) + kreg_off;
2699         self.fd
2700             .lock()
2701             .unwrap()
2702             .set_one_reg(
2703                 arm64_core_reg_id!(KVM_REG_SIZE_U64, pstate),
2704                 &PSTATE_FAULT_BITS_64.to_le_bytes(),
2705             )
2706             .map_err(|e| cpu::HypervisorCpuError::SetAarchCoreRegister(e.into()))?;
2707 
2708         // Other vCPUs are powered off initially awaiting PSCI wakeup.
2709         if cpu_id == 0 {
2710             // Setting the PC (Program Counter) to the current program address (kernel address).
2711             let pc = offset_of!(user_pt_regs, pc) + kreg_off;
2712             self.fd
2713                 .lock()
2714                 .unwrap()
2715                 .set_one_reg(
2716                     arm64_core_reg_id!(KVM_REG_SIZE_U64, pc),
2717                     &boot_ip.to_le_bytes(),
2718                 )
2719                 .map_err(|e| cpu::HypervisorCpuError::SetAarchCoreRegister(e.into()))?;
2720 
2721             // Last mandatory thing to set -> the address pointing to the FDT (also called DTB).
2722             // "The device tree blob (dtb) must be placed on an 8-byte boundary and must
2723             // not exceed 2 megabytes in size." -> https://www.kernel.org/doc/Documentation/arm64/booting.txt.
2724             // We are choosing to place it at the end of DRAM. See `get_fdt_addr`.
2725             let regs0 = offset_of!(user_pt_regs, regs) + kreg_off;
2726             self.fd
2727                 .lock()
2728                 .unwrap()
2729                 .set_one_reg(
2730                     arm64_core_reg_id!(KVM_REG_SIZE_U64, regs0),
2731                     &fdt_start.to_le_bytes(),
2732                 )
2733                 .map_err(|e| cpu::HypervisorCpuError::SetAarchCoreRegister(e.into()))?;
2734         }
2735         Ok(())
2736     }
2737 
2738     #[cfg(target_arch = "riscv64")]
2739     ///
2740     /// Configure registers for a given RISC-V CPU.
2741     ///
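    /// # Example
    ///
    /// A sketch (`hart_id`, `kernel_entry` and `fdt_addr` are hypothetical);
    /// per the RISC-V boot protocol, A0 receives the hartid and A1 the FDT
    /// address.
    ///
    /// ```ignore
    /// vcpu.setup_regs(hart_id, kernel_entry, fdt_addr)?;
    /// ```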
2742     fn setup_regs(&self, cpu_id: u8, boot_ip: u64, fdt_start: u64) -> cpu::Result<()> {
2743         // Setting the A0 register to the hartid of this CPU.
2744         let a0 = offset_of!(kvm_riscv_core, regs, user_regs_struct, a0);
2745         self.fd
2746             .lock()
2747             .unwrap()
2748             .set_one_reg(
2749                 riscv64_reg_id!(KVM_REG_RISCV_CORE, a0),
2750                 &u64::from(cpu_id).to_le_bytes(),
2751             )
2752             .map_err(|e| cpu::HypervisorCpuError::SetRiscvCoreRegister(e.into()))?;
2753 
2754         // Setting the PC (Program Counter) to the current program address (kernel address).
2755         let pc = offset_of!(kvm_riscv_core, regs, user_regs_struct, pc);
2756         self.fd
2757             .lock()
2758             .unwrap()
2759             .set_one_reg(
2760                 riscv64_reg_id!(KVM_REG_RISCV_CORE, pc),
2761                 &boot_ip.to_le_bytes(),
2762             )
2763             .map_err(|e| cpu::HypervisorCpuError::SetRiscvCoreRegister(e.into()))?;
2764 
2765         // Last mandatory thing to set -> the address pointing to the FDT (also called DTB).
2766         // "The device tree blob (dtb) must be placed on an 8-byte boundary and must
2767         // not exceed 64 kilobytes in size." -> https://www.kernel.org/doc/Documentation/arch/riscv/boot.txt.
2768         let a1 = offset_of!(kvm_riscv_core, regs, user_regs_struct, a1);
2769         self.fd
2770             .lock()
2771             .unwrap()
2772             .set_one_reg(
2773                 riscv64_reg_id!(KVM_REG_RISCV_CORE, a1),
2774                 &fdt_start.to_le_bytes(),
2775             )
2776             .map_err(|e| cpu::HypervisorCpuError::SetRiscvCoreRegister(e.into()))?;
2777 
2778         Ok(())
2779     }
2780 
2781     #[cfg(target_arch = "x86_64")]
2782     ///
2783     /// Get the current CPU state
2784     ///
2785     /// Ordering requirements:
2786     ///
2787     /// KVM_GET_MP_STATE calls kvm_apic_accept_events(), which might modify
2788     /// vCPU/LAPIC state. As such, it must be done before almost everything
2789     /// else; otherwise we cannot restore everything and expect it to work.
2790     ///
2791     /// KVM_GET_VCPU_EVENTS/KVM_SET_VCPU_EVENTS is unsafe if other vCPUs are
2792     /// still running.
2793     ///
2794     /// KVM_GET_LAPIC may change state of LAPIC before returning it.
2795     ///
2796     /// GET_VCPU_EVENTS should probably be saved last. It looks as if it
2797     /// might well be affected by internal state modifications of the other
2798     /// GET ioctls.
2799     ///
2800     /// SREGS saves/restores a pending interrupt, similar to what
2801     /// VCPU_EVENTS also does.
2802     ///
2803     /// GET_MSRS requires a prepopulated data structure to do something
2804     /// meaningful. For SET_MSRS it will then contain good data.
2805     ///
2806     /// # Example
2807     ///
2808     /// ```rust
2809     /// # use hypervisor::kvm::KvmHypervisor;
2810     /// # use std::sync::Arc;
2811     /// let kvm = KvmHypervisor::new().unwrap();
2812     /// let hv = Arc::new(kvm);
2813     /// let vm = hv.create_vm().expect("new VM fd creation failed");
2814     /// vm.enable_split_irq().unwrap();
2815     /// let vcpu = vm.create_vcpu(0, None).unwrap();
2816     /// let state = vcpu.state().unwrap();
2817     /// ```
2818     fn state(&self) -> cpu::Result<CpuState> {
2819         let cpuid = self.get_cpuid2(kvm_bindings::KVM_MAX_CPUID_ENTRIES)?;
2820         let mp_state = self.get_mp_state()?.into();
2821         let regs = self.get_regs()?;
2822         let sregs = self.get_sregs()?;
2823         let xsave = self.get_xsave()?;
2824         let xcrs = self.get_xcrs()?;
2825         let lapic_state = self.get_lapic()?;
2826         let fpu = self.get_fpu()?;
2827 
2828         // Try to get all MSRs based on the list previously retrieved from KVM.
2829         // If the number of MSRs obtained from GET_MSRS is different from the
2830         // expected amount, we fall back to a slower method of getting the
2831         // MSRs in chunks. This is the only way to make sure we try to get as many
2832         // MSRs as possible, even if some MSRs are not supported.
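        // For example, given entries [A, B, C, D] where only B is unsupported,
        // GET_MSRS stops after reading A; we then retry from C onwards and end
        // up saving [A, C, D].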
2833         let mut msr_entries = self.msrs.clone();
2834 
2835         // Save extra MSRs if the Hyper-V synthetic interrupt controller is
2836         // emulated.
2837         if self.hyperv_synic.load(Ordering::Acquire) {
2838             let hyperv_synic_msrs = vec![
2839                 0x40000020, 0x40000021, 0x40000080, 0x40000081, 0x40000082, 0x40000083, 0x40000084,
2840                 0x40000090, 0x40000091, 0x40000092, 0x40000093, 0x40000094, 0x40000095, 0x40000096,
2841                 0x40000097, 0x40000098, 0x40000099, 0x4000009a, 0x4000009b, 0x4000009c, 0x4000009d,
2842                 0x4000009e, 0x4000009f, 0x400000b0, 0x400000b1, 0x400000b2, 0x400000b3, 0x400000b4,
2843                 0x400000b5, 0x400000b6, 0x400000b7,
2844             ];
2845             for index in hyperv_synic_msrs {
2846                 let msr = kvm_msr_entry {
2847                     index,
2848                     ..Default::default()
2849                 };
2850                 msr_entries.push(msr.into());
2851             }
2852         }
2853 
2854         let expected_num_msrs = msr_entries.len();
2855         let num_msrs = self.get_msrs(&mut msr_entries)?;
2856         let msrs = if num_msrs != expected_num_msrs {
2857             let mut faulty_msr_index = num_msrs;
2858             let mut msr_entries_tmp = msr_entries[..faulty_msr_index].to_vec();
2859 
2860             loop {
2861                 warn!(
2862                     "Detected faulty MSR 0x{:x} while getting MSRs",
2863                     msr_entries[faulty_msr_index].index
2864                 );
2865 
2866                 // Skip the first bad MSR
2867                 let start_pos = faulty_msr_index + 1;
2868 
2869                 let mut sub_msr_entries = msr_entries[start_pos..].to_vec();
2870                 let num_msrs = self.get_msrs(&mut sub_msr_entries)?;
2871 
2872                 msr_entries_tmp.extend(&sub_msr_entries[..num_msrs]);
2873 
2874                 if num_msrs == sub_msr_entries.len() {
2875                     break;
2876                 }
2877 
2878                 faulty_msr_index = start_pos + num_msrs;
2879             }
2880 
2881             msr_entries_tmp
2882         } else {
2883             msr_entries
2884         };
2885 
2886         let vcpu_events = self.get_vcpu_events()?;
2887         let tsc_khz = self.tsc_khz()?;
2888 
2889         Ok(VcpuKvmState {
2890             cpuid,
2891             msrs,
2892             vcpu_events,
2893             regs: regs.into(),
2894             sregs: sregs.into(),
2895             fpu,
2896             lapic_state,
2897             xsave,
2898             xcrs,
2899             mp_state,
2900             tsc_khz,
2901         }
2902         .into())
2903     }
2904 
2905     ///
2906     /// Get the current AArch64 CPU state
2907     ///
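    /// # Example
    ///
    /// A minimal sketch (not compiled as a doc test), assuming an aarch64
    /// KVM host; the vCPU must have been initialized with KVM_ARM_VCPU_INIT
    /// before its register list can be read:
    ///
    /// ```ignore
    /// # use hypervisor::kvm::KvmHypervisor;
    /// # use std::sync::Arc;
    /// let kvm = KvmHypervisor::new().unwrap();
    /// let hv = Arc::new(kvm);
    /// let vm = hv.create_vm().expect("new VM fd creation failed");
    /// let vcpu = vm.create_vcpu(0, None).unwrap();
    /// // ... initialize the vCPU (KVM_ARM_VCPU_INIT) ...
    /// let state = vcpu.state().unwrap();
    /// ```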
2908     #[cfg(target_arch = "aarch64")]
2909     fn state(&self) -> cpu::Result<CpuState> {
2910         let mut state = VcpuKvmState {
2911             mp_state: self.get_mp_state()?.into(),
2912             ..Default::default()
2913         };
2914         // Get core registers
2915         state.core_regs = self.get_regs()?.into();
2916 
2917         // Get system registers
2918         // Call KVM_GET_REG_LIST to get all registers available to the guest.
2919         // For Armv8 there are around 500 registers.
2920         let mut sys_regs: Vec<Register> = Vec::new();
2921         let mut reg_list = RegList::new(500).unwrap();
2922         self.fd
2923             .lock()
2924             .unwrap()
2925             .get_reg_list(&mut reg_list)
2926             .map_err(|e| cpu::HypervisorCpuError::GetRegList(e.into()))?;
2927 
2928         // At this point reg_list should contain: core registers and system
2929         // registers.
2930         // The register list contains the number of registers and their ids. We
2931         // will need to call KVM_GET_ONE_REG on each id in order to save all of
2932         // them. We carve out from the list the core registers, which are
2933         // represented in the kernel by the kvm_regs structure and for which we
2934         // can calculate the id based on the offset in the structure.
2935         reg_list.retain(|regid| is_system_register(*regid));
2936 
2937         // Now, for the rest of the registers left in the previously fetched
2938         // register list, we are simply calling KVM_GET_ONE_REG.
2939         let indices = reg_list.as_slice();
2940         for index in indices.iter() {
2941             let mut bytes = [0_u8; 8];
2942             self.fd
2943                 .lock()
2944                 .unwrap()
2945                 .get_one_reg(*index, &mut bytes)
2946                 .map_err(|e| cpu::HypervisorCpuError::GetSysRegister(e.into()))?;
2947             sys_regs.push(kvm_bindings::kvm_one_reg {
2948                 id: *index,
2949                 addr: u64::from_le_bytes(bytes),
2950             });
2951         }
2952 
2953         state.sys_regs = sys_regs;
2954 
2955         Ok(state.into())
2956     }
2957 
2958     #[cfg(target_arch = "riscv64")]
2959     ///
2960     /// Get the current RISC-V 64-bit CPU state
2961     ///
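    /// # Example
    ///
    /// A minimal sketch (not compiled as a doc test), mirroring the x86_64
    /// example above and assuming a RISC-V 64-bit KVM host:
    ///
    /// ```ignore
    /// # use hypervisor::kvm::KvmHypervisor;
    /// # use std::sync::Arc;
    /// let kvm = KvmHypervisor::new().unwrap();
    /// let hv = Arc::new(kvm);
    /// let vm = hv.create_vm().expect("new VM fd creation failed");
    /// let vcpu = vm.create_vcpu(0, None).unwrap();
    /// let state = vcpu.state().unwrap();
    /// ```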
2962     fn state(&self) -> cpu::Result<CpuState> {
2963         let mut state = VcpuKvmState {
2964             mp_state: self.get_mp_state()?.into(),
2965             ..Default::default()
2966         };
2967         // Get core registers
2968         state.core_regs = self.get_regs()?.into();
2969 
2970         // Get non-core registers
2971         // Call KVM_GET_REG_LIST to get all registers available to the guest.
2972         // For RISC-V 64-bit there are around 200 registers.
2973         let mut sys_regs: Vec<Register> = Vec::new();
2974         let mut reg_list = RegList::new(200).unwrap();
2975         self.fd
2976             .lock()
2977             .unwrap()
2978             .get_reg_list(&mut reg_list)
2979             .map_err(|e| cpu::HypervisorCpuError::GetRegList(e.into()))?;
2980 
2981         // At this point reg_list should contain:
2982         // - core registers
2983         // - config registers
2984         // - timer registers
2985         // - control and status registers
2986         // - AIA control and status registers
2987         // - smstateen control and status registers
2988         // - sbi_sta control and status registers.
2989         //
2990         // The register list contains the number of registers and their ids. We
2991         // will need to call KVM_GET_ONE_REG on each id in order to save all of
2992         // them. We carve out from the list the core registers, which are
2993         // represented in the kernel by the `kvm_riscv_core` structure and for
2994         // which we can calculate the id based on the offset in the structure.
2995         reg_list.retain(|regid| is_non_core_register(*regid));
2996 
2997         // Now, for the rest of the registers left in the previously fetched
2998         // register list, we are simply calling KVM_GET_ONE_REG.
2999         let indices = reg_list.as_slice();
3000         for index in indices.iter() {
3001             let mut bytes = [0_u8; 8];
3002             self.fd
3003                 .lock()
3004                 .unwrap()
3005                 .get_one_reg(*index, &mut bytes)
3006                 .map_err(|e| cpu::HypervisorCpuError::GetSysRegister(e.into()))?;
3007             sys_regs.push(kvm_bindings::kvm_one_reg {
3008                 id: *index,
3009                 addr: u64::from_le_bytes(bytes),
3010             });
3011         }
3012 
3013         state.non_core_regs = sys_regs;
3014 
3015         Ok(state.into())
3016     }
3017 
3018     #[cfg(target_arch = "x86_64")]
3019     ///
3020     /// Restore the previously saved CPU state
3021     ///
3022     /// Ordering requirements:
3023     ///
3024     /// KVM_GET_VCPU_EVENTS/KVM_SET_VCPU_EVENTS is unsafe if other vCPUs are
3025     /// still running.
3026     ///
3027     /// Some SET ioctls (like set_mp_state) depend on kvm_vcpu_is_bsp(), so
3028     /// if we ever change the BSP, we have to do that before restoring anything.
3029     /// The same seems to be true for CPUID stuff.
3030     ///
3031     /// SREGS saves/restores a pending interrupt, similar to what
3032     /// VCPU_EVENTS also does.
3033     ///
3034     /// SET_REGS clears pending exceptions unconditionally; thus, it must be
3035     /// done before SET_VCPU_EVENTS, which restores them.
3036     ///
3037     /// SET_LAPIC must come after SET_SREGS, because the latter restores
3038     /// the APIC base MSR.
3039     ///
3040     /// SET_LAPIC must come before SET_MSRS, because the TSC deadline MSR
3041     /// only restores successfully when the LAPIC is correctly configured.
3042     ///
3043     /// Arguments: CpuState
3044     /// # Example
3045     ///
3046     /// ```rust
3047     /// # use hypervisor::kvm::KvmHypervisor;
3048     /// # use std::sync::Arc;
3049     /// let kvm = KvmHypervisor::new().unwrap();
3050     /// let hv = Arc::new(kvm);
3051     /// let vm = hv.create_vm().expect("new VM fd creation failed");
3052     /// vm.enable_split_irq().unwrap();
3053     /// let vcpu = vm.create_vcpu(0, None).unwrap();
3054     /// let state = vcpu.state().unwrap();
3055     /// vcpu.set_state(&state).unwrap();
3056     /// ```
3057     fn set_state(&self, state: &CpuState) -> cpu::Result<()> {
3058         let state: VcpuKvmState = state.clone().into();
3059         self.set_cpuid2(&state.cpuid)?;
3060         self.set_mp_state(state.mp_state.into())?;
3061         self.set_regs(&state.regs.into())?;
3062         self.set_sregs(&state.sregs.into())?;
3063         self.set_xsave(&state.xsave)?;
3064         self.set_xcrs(&state.xcrs)?;
3065         self.set_lapic(&state.lapic_state)?;
3066         self.set_fpu(&state.fpu)?;
3067 
3068         if let Some(freq) = state.tsc_khz {
3069             self.set_tsc_khz(freq)?;
3070         }
3071 
3072         // Try to set all MSRs previously stored.
3073         // If the number of MSRs set from SET_MSRS is different from the
3074         // expected amount, we fall back on a slower method, setting MSRs
3075         // in chunks. This is the only way to make sure we try to set as many
3076         // MSRs as possible, even if some MSRs are not supported.
3077         let expected_num_msrs = state.msrs.len();
3078         let num_msrs = self.set_msrs(&state.msrs)?;
3079         if num_msrs != expected_num_msrs {
3080             let mut faulty_msr_index = num_msrs;
3081 
3082             loop {
3083                 warn!(
3084                     "Detected faulty MSR 0x{:x} while setting MSRs",
3085                     state.msrs[faulty_msr_index].index
3086                 );
3087 
3088                 // Skip the first bad MSR
3089                 let start_pos = faulty_msr_index + 1;
3090 
3091                 let sub_msr_entries = state.msrs[start_pos..].to_vec();
3092 
3093                 let num_msrs = self.set_msrs(&sub_msr_entries)?;
3094 
3095                 if num_msrs == sub_msr_entries.len() {
3096                     break;
3097                 }
3098 
3099                 faulty_msr_index = start_pos + num_msrs;
3100             }
3101         }
3102 
3103         self.set_vcpu_events(&state.vcpu_events)?;
3104 
3105         Ok(())
3106     }
3107 
3108     ///
3109     /// Restore the previously saved AArch64 CPU state
3110     ///
3111     #[cfg(target_arch = "aarch64")]
3112     fn set_state(&self, state: &CpuState) -> cpu::Result<()> {
3113         let state: VcpuKvmState = state.clone().into();
3114         // Set core registers
3115         self.set_regs(&state.core_regs.into())?;
3116         // Set system registers
3117         for reg in &state.sys_regs {
3118             self.fd
3119                 .lock()
3120                 .unwrap()
3121                 .set_one_reg(reg.id, &reg.addr.to_le_bytes())
3122                 .map_err(|e| cpu::HypervisorCpuError::SetSysRegister(e.into()))?;
3123         }
3124 
3125         self.set_mp_state(state.mp_state.into())?;
3126 
3127         Ok(())
3128     }
3129 
3130     #[cfg(target_arch = "riscv64")]
3131     ///
3132     /// Restore the previously saved RISC-V 64-bit CPU state
3133     ///
3134     fn set_state(&self, state: &CpuState) -> cpu::Result<()> {
3135         let state: VcpuKvmState = state.clone().into();
3136         // Set core registers
3137         self.set_regs(&state.core_regs.into())?;
3138         // Set non-core registers
3139         for reg in &state.non_core_regs {
3140             self.fd
3141                 .lock()
3142                 .unwrap()
3143                 .set_one_reg(reg.id, &reg.addr.to_le_bytes())
3144                 .map_err(|e| cpu::HypervisorCpuError::SetSysRegister(e.into()))?;
3145         }
3146 
3147         self.set_mp_state(state.mp_state.into())?;
3148 
3149         Ok(())
3150     }
3151 
3152     ///
3153     /// Initialize TDX for this CPU
3154     ///
3155     #[cfg(feature = "tdx")]
3156     fn tdx_init(&self, hob_address: u64) -> cpu::Result<()> {
3157         tdx_command(
3158             &self.fd.lock().unwrap().as_raw_fd(),
3159             TdxCommand::InitVcpu,
3160             0,
3161             hob_address,
3162         )
3163         .map_err(cpu::HypervisorCpuError::InitializeTdx)
3164     }
3165 
3166     ///
3167     /// Set the "immediate_exit" state
3168     ///
3169     fn set_immediate_exit(&self, exit: bool) {
3170         self.fd.lock().unwrap().set_kvm_immediate_exit(exit.into());
3171     }
3172 
3173     ///
3174     /// Returns the details about the TDX exit reason
3175     ///
3176     #[cfg(feature = "tdx")]
3177     fn get_tdx_exit_details(&mut self) -> cpu::Result<TdxExitDetails> {
3178         let mut fd = self.fd.as_ref().lock().unwrap();
3179         let kvm_run = fd.get_kvm_run();
3180         // SAFETY: accessing a union field in a valid structure
3181         let tdx_vmcall = unsafe {
3182             &mut (*((&mut kvm_run.__bindgen_anon_1) as *mut kvm_run__bindgen_ty_1
3183                 as *mut KvmTdxExit))
3184                 .u
3185                 .vmcall
3186         };
3187 
3188         tdx_vmcall.status_code = TDG_VP_VMCALL_INVALID_OPERAND;
3189 
3190         if tdx_vmcall.type_ != 0 {
3191             return Err(cpu::HypervisorCpuError::UnknownTdxVmCall);
3192         }
3193 
3194         match tdx_vmcall.subfunction {
3195             TDG_VP_VMCALL_GET_QUOTE => Ok(TdxExitDetails::GetQuote),
3196             TDG_VP_VMCALL_SETUP_EVENT_NOTIFY_INTERRUPT => {
3197                 Ok(TdxExitDetails::SetupEventNotifyInterrupt)
3198             }
3199             _ => Err(cpu::HypervisorCpuError::UnknownTdxVmCall),
3200         }
3201     }
3202 
3203     ///
3204     /// Set the status code for TDX exit
3205     ///
3206     #[cfg(feature = "tdx")]
3207     fn set_tdx_status(&mut self, status: TdxExitStatus) {
3208         let mut fd = self.fd.as_ref().lock().unwrap();
3209         let kvm_run = fd.get_kvm_run();
3210         // SAFETY: accessing a union field in a valid structure
3211         let tdx_vmcall = unsafe {
3212             &mut (*((&mut kvm_run.__bindgen_anon_1) as *mut kvm_run__bindgen_ty_1
3213                 as *mut KvmTdxExit))
3214                 .u
3215                 .vmcall
3216         };
3217 
3218         tdx_vmcall.status_code = match status {
3219             TdxExitStatus::Success => TDG_VP_VMCALL_SUCCESS,
3220             TdxExitStatus::InvalidOperand => TDG_VP_VMCALL_INVALID_OPERAND,
3221         };
3222     }
3223 
3224     #[cfg(target_arch = "x86_64")]
3225     ///
3226     /// Return the list of initial MSR entries for a VCPU
3227     ///
3228     fn boot_msr_entries(&self) -> Vec<MsrEntry> {
3229         use crate::arch::x86::{msr_index, MTRR_ENABLE, MTRR_MEM_TYPE_WB};
3230 
3231         [
3232             msr!(msr_index::MSR_IA32_SYSENTER_CS),
3233             msr!(msr_index::MSR_IA32_SYSENTER_ESP),
3234             msr!(msr_index::MSR_IA32_SYSENTER_EIP),
3235             msr!(msr_index::MSR_STAR),
3236             msr!(msr_index::MSR_CSTAR),
3237             msr!(msr_index::MSR_LSTAR),
3238             msr!(msr_index::MSR_KERNEL_GS_BASE),
3239             msr!(msr_index::MSR_SYSCALL_MASK),
3240             msr!(msr_index::MSR_IA32_TSC),
3241             msr_data!(
3242                 msr_index::MSR_IA32_MISC_ENABLE,
3243                 msr_index::MSR_IA32_MISC_ENABLE_FAST_STRING as u64
3244             ),
3245             msr_data!(msr_index::MSR_MTRRdefType, MTRR_ENABLE | MTRR_MEM_TYPE_WB),
3246         ]
3247         .to_vec()
3248     }
3249 
3250     #[cfg(target_arch = "aarch64")]
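    ///
    /// Check whether KVM exposes the PMUv3 device attribute
    /// (KVM_ARM_VCPU_PMU_V3_CTRL) for this vCPU, i.e. whether a guest PMU
    /// can be configured.
    ///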
3251     fn has_pmu_support(&self) -> bool {
3252         let cpu_attr = kvm_bindings::kvm_device_attr {
3253             group: kvm_bindings::KVM_ARM_VCPU_PMU_V3_CTRL,
3254             attr: u64::from(kvm_bindings::KVM_ARM_VCPU_PMU_V3_INIT),
3255             addr: 0x0,
3256             flags: 0,
3257         };
3258         self.fd.lock().unwrap().has_device_attr(&cpu_attr).is_ok()
3259     }
3260 
3261     #[cfg(target_arch = "aarch64")]
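    ///
    /// Initialize the vCPU's PMUv3. The overflow interrupt line is set first
    /// (KVM_ARM_VCPU_PMU_V3_IRQ) and the PMU is initialized afterwards
    /// (KVM_ARM_VCPU_PMU_V3_INIT), in the order KVM requires.
    ///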
3262     fn init_pmu(&self, irq: u32) -> cpu::Result<()> {
3263         let cpu_attr = kvm_bindings::kvm_device_attr {
3264             group: kvm_bindings::KVM_ARM_VCPU_PMU_V3_CTRL,
3265             attr: u64::from(kvm_bindings::KVM_ARM_VCPU_PMU_V3_INIT),
3266             addr: 0x0,
3267             flags: 0,
3268         };
3269         let cpu_attr_irq = kvm_bindings::kvm_device_attr {
3270             group: kvm_bindings::KVM_ARM_VCPU_PMU_V3_CTRL,
3271             attr: u64::from(kvm_bindings::KVM_ARM_VCPU_PMU_V3_IRQ),
3272             addr: &irq as *const u32 as u64,
3273             flags: 0,
3274         };
3275         self.fd
3276             .lock()
3277             .unwrap()
3278             .set_device_attr(&cpu_attr_irq)
3279             .map_err(|_| cpu::HypervisorCpuError::InitializePmu)?;
3280         self.fd
3281             .lock()
3282             .unwrap()
3283             .set_device_attr(&cpu_attr)
3284             .map_err(|_| cpu::HypervisorCpuError::InitializePmu)
3285     }
3286 
3287     #[cfg(target_arch = "x86_64")]
3288     ///
3289     /// Get the frequency of the TSC if available
3290     ///
3291     fn tsc_khz(&self) -> cpu::Result<Option<u32>> {
3292         match self.fd.lock().unwrap().get_tsc_khz() {
3293             Err(e) => {
3294                 if e.errno() == libc::EIO {
3295                     Ok(None)
3296                 } else {
3297                     Err(cpu::HypervisorCpuError::GetTscKhz(e.into()))
3298                 }
3299             }
3300             Ok(v) => Ok(Some(v)),
3301         }
3302     }
3303 
3304     #[cfg(target_arch = "x86_64")]
3305     ///
3306     /// Set the frequency of the TSC if available
3307     ///
3308     fn set_tsc_khz(&self, freq: u32) -> cpu::Result<()> {
3309         match self.fd.lock().unwrap().set_tsc_khz(freq) {
3310             Err(e) => {
3311                 if e.errno() == libc::EIO {
3312                     Ok(())
3313                 } else {
3314                     Err(cpu::HypervisorCpuError::SetTscKhz(e.into()))
3315                 }
3316             }
3317             Ok(_) => Ok(()),
3318         }
3319     }
3320 
3321     #[cfg(target_arch = "x86_64")]
3322     ///
3323     /// Trigger NMI interrupt
3324     ///
3325     fn nmi(&self) -> cpu::Result<()> {
3326         match self.fd.lock().unwrap().nmi() {
3327             Err(e) => {
3328                 if e.errno() == libc::EIO {
3329                     Ok(())
3330                 } else {
3331                     Err(cpu::HypervisorCpuError::Nmi(e.into()))
3332                 }
3333             }
3334             Ok(_) => Ok(()),
3335         }
3336     }
3337 }
3338 
3339 impl KvmVcpu {
3340     #[cfg(target_arch = "x86_64")]
3341     ///
3342     /// X86 specific call that returns the vcpu's current "xsave struct".
3343     ///
3344     fn get_xsave(&self) -> cpu::Result<XsaveState> {
3345         Ok(self
3346             .fd
3347             .lock()
3348             .unwrap()
3349             .get_xsave()
3350             .map_err(|e| cpu::HypervisorCpuError::GetXsaveState(e.into()))?
3351             .into())
3352     }
3353 
3354     #[cfg(target_arch = "x86_64")]
3355     ///
3356     /// X86 specific call that sets the vcpu's current "xsave struct".
3357     ///
3358     fn set_xsave(&self, xsave: &XsaveState) -> cpu::Result<()> {
3359         let xsave: kvm_bindings::kvm_xsave = (*xsave).clone().into();
3360         self.fd
3361             .lock()
3362             .unwrap()
3363             .set_xsave(&xsave)
3364             .map_err(|e| cpu::HypervisorCpuError::SetXsaveState(e.into()))
3365     }
3366 
3367     #[cfg(target_arch = "x86_64")]
3368     ///
3369     /// X86 specific call that returns the vcpu's current "xcrs".
3370     ///
3371     fn get_xcrs(&self) -> cpu::Result<ExtendedControlRegisters> {
3372         self.fd
3373             .lock()
3374             .unwrap()
3375             .get_xcrs()
3376             .map_err(|e| cpu::HypervisorCpuError::GetXcsr(e.into()))
3377     }
3378 
3379     #[cfg(target_arch = "x86_64")]
3380     ///
3381     /// X86 specific call that sets the vcpu's current "xcrs".
3382     ///
3383     fn set_xcrs(&self, xcrs: &ExtendedControlRegisters) -> cpu::Result<()> {
3384         self.fd
3385             .lock()
3386             .unwrap()
3387             .set_xcrs(xcrs)
3388             .map_err(|e| cpu::HypervisorCpuError::SetXcsr(e.into()))
3389     }
3390 
3391     #[cfg(target_arch = "x86_64")]
3392     ///
3393     /// Returns currently pending exceptions, interrupts, and NMIs as well as related
3394     /// states of the vcpu.
3395     ///
3396     fn get_vcpu_events(&self) -> cpu::Result<VcpuEvents> {
3397         self.fd
3398             .lock()
3399             .unwrap()
3400             .get_vcpu_events()
3401             .map_err(|e| cpu::HypervisorCpuError::GetVcpuEvents(e.into()))
3402     }
3403 
3404     #[cfg(target_arch = "x86_64")]
3405     ///
3406     /// Sets pending exceptions, interrupts, and NMIs as well as related states
3407     /// of the vcpu.
3408     ///
3409     fn set_vcpu_events(&self, events: &VcpuEvents) -> cpu::Result<()> {
3410         self.fd
3411             .lock()
3412             .unwrap()
3413             .set_vcpu_events(events)
3414             .map_err(|e| cpu::HypervisorCpuError::SetVcpuEvents(e.into()))
3415     }
3416 }
3417 
3418 #[cfg(test)]
3419 mod tests {
3420     #[test]
3421     #[cfg(target_arch = "riscv64")]
3422     fn test_get_and_set_regs() {
3423         use super::*;
3424 
3425         let kvm = KvmHypervisor::new().unwrap();
3426         let hypervisor = Arc::new(kvm);
3427         let vm = hypervisor.create_vm().expect("new VM fd creation failed");
3428         let vcpu0 = vm.create_vcpu(0, None).unwrap();
3429 
3430         let core_regs = StandardRegisters::from(kvm_riscv_core {
3431             regs: user_regs_struct {
3432                 pc: 0x00,
3433                 ra: 0x01,
3434                 sp: 0x02,
3435                 gp: 0x03,
3436                 tp: 0x04,
3437                 t0: 0x05,
3438                 t1: 0x06,
3439                 t2: 0x07,
3440                 s0: 0x08,
3441                 s1: 0x09,
3442                 a0: 0x0a,
3443                 a1: 0x0b,
3444                 a2: 0x0c,
3445                 a3: 0x0d,
3446                 a4: 0x0e,
3447                 a5: 0x0f,
3448                 a6: 0x10,
3449                 a7: 0x11,
3450                 s2: 0x12,
3451                 s3: 0x13,
3452                 s4: 0x14,
3453                 s5: 0x15,
3454                 s6: 0x16,
3455                 s7: 0x17,
3456                 s8: 0x18,
3457                 s9: 0x19,
3458                 s10: 0x1a,
3459                 s11: 0x1b,
3460                 t3: 0x1c,
3461                 t4: 0x1d,
3462                 t5: 0x1e,
3463                 t6: 0x1f,
3464             },
3465             mode: 0x00,
3466         });
3467 
3468         vcpu0.set_regs(&core_regs).unwrap();
3469         assert_eq!(vcpu0.get_regs().unwrap(), core_regs);
3470     }
3471 }
3472