// Copyright © 2019 Intel Corporation
//
// SPDX-License-Identifier: Apache-2.0 OR BSD-3-Clause
//
// Copyright © 2020, Microsoft Corporation
//
// Copyright 2018-2019 CrowdStrike, Inc.
//
//

use kvm_ioctls::{NoDatamatch, VcpuFd, VmFd};
use std::result;
use std::sync::Arc;
#[cfg(target_arch = "x86_64")]
use vm_memory::Address;
use vmm_sys_util::eventfd::EventFd;

#[cfg(target_arch = "aarch64")]
pub use crate::aarch64::{check_required_kvm_extensions, VcpuInit, VcpuKvmState as CpuState};
use crate::cpu;
use crate::hypervisor;
use crate::vm;
// x86_64 dependencies
#[cfg(target_arch = "x86_64")]
pub mod x86_64;

#[cfg(target_arch = "x86_64")]
use x86_64::{
    check_required_kvm_extensions, FpuState, SpecialRegisters, StandardRegisters, KVM_TSS_ADDRESS,
};

#[cfg(target_arch = "x86_64")]
pub use x86_64::{
    CpuId, ExtendedControlRegisters, LapicState, MsrEntries, VcpuKvmState as CpuState, Xsave,
};

#[cfg(target_arch = "x86_64")]
use kvm_bindings::{kvm_enable_cap, MsrList, KVM_CAP_SPLIT_IRQCHIP};

#[cfg(target_arch = "x86_64")]
use crate::arch::x86::NUM_IOAPIC_PINS;

// aarch64 dependencies
#[cfg(target_arch = "aarch64")]
pub mod aarch64;

pub use kvm_bindings;
pub use kvm_bindings::{
    kvm_create_device, kvm_device_type_KVM_DEV_TYPE_VFIO, kvm_irq_routing, kvm_irq_routing_entry,
    kvm_userspace_memory_region, KVM_IRQ_ROUTING_MSI, KVM_MEM_READONLY,
};
pub use kvm_ioctls;
pub use kvm_ioctls::{Cap, Kvm};

///
/// Export generically-named wrappers of kvm-bindings for Unix-based platforms
///
pub use {
    kvm_bindings::kvm_clock_data as ClockData, kvm_bindings::kvm_create_device as CreateDevice,
    kvm_bindings::kvm_irq_routing as IrqRouting, kvm_bindings::kvm_mp_state as MpState,
    kvm_bindings::kvm_userspace_memory_region as MemoryRegion,
    kvm_bindings::kvm_vcpu_events as VcpuEvents, kvm_ioctls::DeviceFd, kvm_ioctls::IoEventAddress,
    kvm_ioctls::VcpuExit,
};

/// Wrapper over KVM VM ioctls.
pub struct KvmVm {
    fd: Arc<VmFd>,
    #[cfg(target_arch = "x86_64")]
    msrs: MsrEntries,
}
///
/// Implementation of the Vm trait for KVM.
/// Example:
/// #[cfg(feature = "kvm")]
/// extern crate hypervisor;
/// let kvm = hypervisor::kvm::KvmHypervisor::new().unwrap();
/// let hypervisor: Arc<dyn hypervisor::Hypervisor> = Arc::new(kvm);
/// let vm = hypervisor.create_vm().expect("new VM fd creation failed");
/// // Call the Vm trait's getters/setters on `vm` as needed.
///
impl vm::Vm for KvmVm {
    #[cfg(target_arch = "x86_64")]
    ///
    /// Sets the address of the three-page region in the VM's address space.
    ///
    fn set_tss_address(&self, offset: usize) -> vm::Result<()> {
        self.fd
            .set_tss_address(offset)
            .map_err(|e| vm::HypervisorVmError::SetTssAddress(e.into()))
    }
    ///
    /// Creates an in-kernel interrupt controller.
    ///
    fn create_irq_chip(&self) -> vm::Result<()> {
        self.fd
            .create_irq_chip()
            .map_err(|e| vm::HypervisorVmError::CreateIrq(e.into()))
    }
    ///
    /// Registers an event that will, when signaled, trigger the `gsi` IRQ.
    ///
    fn register_irqfd(&self, fd: &EventFd, gsi: u32) -> vm::Result<()> {
        self.fd
            .register_irqfd(fd, gsi)
            .map_err(|e| vm::HypervisorVmError::RegisterIrqFd(e.into()))
    }
    ///
    /// Unregisters an event that was previously registered to trigger the `gsi` IRQ.
    ///
    fn unregister_irqfd(&self, fd: &EventFd, gsi: u32) -> vm::Result<()> {
        self.fd
            .unregister_irqfd(fd, gsi)
            .map_err(|e| vm::HypervisorVmError::UnregisterIrqFd(e.into()))
    }
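    // Usage sketch (not from this crate's tests; the GSI number below is an
    // arbitrary illustrative value): an EventFd owned by the VMM can be wired
    // to a guest GSI so that writing to the eventfd injects that interrupt.
    //
    //     use vmm_sys_util::eventfd::{EventFd, EFD_NONBLOCK};
    //
    //     let irq_fd = EventFd::new(EFD_NONBLOCK).unwrap();
    //     vm.register_irqfd(&irq_fd, 5).unwrap();
    //     irq_fd.write(1).unwrap(); // injects GSI 5 into the guest
    //     vm.unregister_irqfd(&irq_fd, 5).unwrap();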
    ///
    /// Creates a new KVM vCPU and returns it wrapped in a `Vcpu` trait object.
    ///
    fn create_vcpu(&self, id: u8) -> vm::Result<Arc<dyn cpu::Vcpu>> {
        let vc = self
            .fd
            .create_vcpu(id)
            .map_err(|e| vm::HypervisorVmError::CreateVcpu(e.into()))?;
        let vcpu = KvmVcpu {
            fd: vc,
            #[cfg(target_arch = "x86_64")]
            msrs: self.msrs.clone(),
        };
        Ok(Arc::new(vcpu))
    }
    ///
    /// Registers an event to be signaled whenever a certain address is written to.
    ///
    fn register_ioevent(
        &self,
        fd: &EventFd,
        addr: &IoEventAddress,
        datamatch: Option<vm::DataMatch>,
    ) -> vm::Result<()> {
        if let Some(dm) = datamatch {
            match dm {
                vm::DataMatch::DataMatch32(kvm_dm32) => self
                    .fd
                    .register_ioevent(fd, addr, kvm_dm32)
                    .map_err(|e| vm::HypervisorVmError::RegisterIoEvent(e.into())),
                vm::DataMatch::DataMatch64(kvm_dm64) => self
                    .fd
                    .register_ioevent(fd, addr, kvm_dm64)
                    .map_err(|e| vm::HypervisorVmError::RegisterIoEvent(e.into())),
            }
        } else {
            self.fd
                .register_ioevent(fd, addr, NoDatamatch)
                .map_err(|e| vm::HypervisorVmError::RegisterIoEvent(e.into()))
        }
    }
    ///
    /// Unregisters an event from the address it was previously registered to.
    ///
    fn unregister_ioevent(&self, fd: &EventFd, addr: &IoEventAddress) -> vm::Result<()> {
        self.fd
            .unregister_ioevent(fd, addr, NoDatamatch)
            .map_err(|e| vm::HypervisorVmError::UnregisterIoEvent(e.into()))
    }
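    // Usage sketch (illustrative address and value, not from this crate's
    // tests): a device model can be notified through an eventfd whenever the
    // guest writes the 32-bit value 0x1 to MMIO address 0x1000.
    //
    //     use hypervisor::vm::DataMatch;
    //     use vmm_sys_util::eventfd::{EventFd, EFD_NONBLOCK};
    //
    //     let evt = EventFd::new(EFD_NONBLOCK).unwrap();
    //     let addr = IoEventAddress::Mmio(0x1000);
    //     vm.register_ioevent(&evt, &addr, Some(DataMatch::DataMatch32(0x1)))
    //         .unwrap();
    //     // ... and when the device is torn down:
    //     vm.unregister_ioevent(&evt, &addr).unwrap();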
    ///
    /// Sets the GSI routing table entries, overwriting any previously set
    /// entries, as per the `KVM_SET_GSI_ROUTING` ioctl.
    ///
    fn set_gsi_routing(&self, irq_routing: &IrqRouting) -> vm::Result<()> {
        self.fd
            .set_gsi_routing(irq_routing)
            .map_err(|e| vm::HypervisorVmError::SetGsiRouting(e.into()))
    }
    ///
    /// Creates a memory region structure that can be used with `set_user_memory_region`.
    ///
    fn make_user_memory_region(
        &self,
        slot: u32,
        guest_phys_addr: u64,
        memory_size: u64,
        userspace_addr: u64,
        readonly: bool,
    ) -> MemoryRegion {
        MemoryRegion {
            slot,
            guest_phys_addr,
            memory_size,
            userspace_addr,
            flags: if readonly { KVM_MEM_READONLY } else { 0 },
        }
    }
    ///
    /// Creates/modifies a guest physical memory slot.
    ///
    fn set_user_memory_region(&self, user_memory_region: MemoryRegion) -> vm::Result<()> {
        // Safe because guest regions are guaranteed not to overlap.
        unsafe {
            self.fd
                .set_user_memory_region(user_memory_region)
                .map_err(|e| vm::HypervisorVmError::SetUserMemory(e.into()))
        }
    }
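    // Usage sketch (illustrative values): map 1 GiB of host memory at guest
    // physical address 0 using slot 0. `host_addr` stands for the address of a
    // suitably mmap'ed, page-aligned host buffer and is assumed here.
    //
    //     let region = vm.make_user_memory_region(
    //         0,                // slot
    //         0,                // guest_phys_addr
    //         1 << 30,          // memory_size: 1 GiB
    //         host_addr as u64, // userspace_addr
    //         false,            // readonly
    //     );
    //     vm.set_user_memory_region(region).unwrap();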
    ///
    /// Creates an emulated device in the kernel.
    ///
    /// See the documentation for `KVM_CREATE_DEVICE`.
    fn create_device(&self, device: &mut CreateDevice) -> vm::Result<DeviceFd> {
        self.fd
            .create_device(device)
            .map_err(|e| vm::HypervisorVmError::CreateDevice(e.into()))
    }
    ///
    /// Returns the preferred CPU target type which can be emulated by KVM on the underlying host.
    ///
    #[cfg(any(target_arch = "arm", target_arch = "aarch64"))]
    fn get_preferred_target(&self, kvi: &mut VcpuInit) -> vm::Result<()> {
        self.fd
            .get_preferred_target(kvi)
            .map_err(|e| vm::HypervisorVmError::GetPreferredTarget(e.into()))
    }
    #[cfg(target_arch = "x86_64")]
    fn enable_split_irq(&self) -> vm::Result<()> {
        // Set TSS
        self.fd
            .set_tss_address(KVM_TSS_ADDRESS.raw_value() as usize)
            .map_err(|e| vm::HypervisorVmError::EnableSplitIrq(e.into()))?;
        // Create the split irqchip: only the local APIC is emulated in the
        // kernel; the PICs and the IOAPIC are left to userspace.
        let mut cap = kvm_enable_cap {
            cap: KVM_CAP_SPLIT_IRQCHIP,
            ..Default::default()
        };
        cap.args[0] = NUM_IOAPIC_PINS as u64;
        self.fd
            .enable_cap(&cap)
            .map_err(|e| vm::HypervisorVmError::EnableSplitIrq(e.into()))?;
        Ok(())
    }
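    // Usage sketch: a VMM built on this crate would typically enable the split
    // irqchip right after creating the VM and before creating any vCPU, so the
    // IOAPIC can be emulated in user space (same order as the doc examples
    // further down in this file).
    //
    //     let vm = hypervisor.create_vm().unwrap();
    //     vm.enable_split_irq().unwrap();
    //     let vcpu = vm.create_vcpu(0).unwrap();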
    /// Retrieve guest clock.
    #[cfg(target_arch = "x86_64")]
    fn get_clock(&self) -> vm::Result<ClockData> {
        self.fd
            .get_clock()
            .map_err(|e| vm::HypervisorVmError::GetClock(e.into()))
    }
    /// Set guest clock.
    #[cfg(target_arch = "x86_64")]
    fn set_clock(&self, data: &ClockData) -> vm::Result<()> {
        self.fd
            .set_clock(data)
            .map_err(|e| vm::HypervisorVmError::SetClock(e.into()))
    }
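    // Usage sketch: the guest clock is normally captured when the VM is paused
    // or snapshotted and written back on resume/restore, so the guest does not
    // observe a jump in kvmclock time.
    //
    //     let clock = vm.get_clock().unwrap(); // on pause/snapshot
    //     // ...
    //     vm.set_clock(&clock).unwrap();       // on resume/restore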
}
/// Wrapper over KVM system ioctls.
pub struct KvmHypervisor {
    kvm: Kvm,
}
/// Enum for KVM-related errors
#[derive(Debug)]
pub enum KvmError {
    CapabilityMissing(Cap),
}
pub type KvmResult<T> = result::Result<T, KvmError>;
impl KvmHypervisor {
    /// Create a hypervisor based on Kvm
    pub fn new() -> hypervisor::Result<KvmHypervisor> {
        let kvm_obj = Kvm::new().map_err(|e| hypervisor::HypervisorError::VmCreate(e.into()))?;
        Ok(KvmHypervisor { kvm: kvm_obj })
    }
}
/// Implementation of the Hypervisor trait for KVM
/// Example:
/// #[cfg(feature = "kvm")]
/// extern crate hypervisor;
/// let kvm = hypervisor::kvm::KvmHypervisor::new().unwrap();
/// let hypervisor: Arc<dyn hypervisor::Hypervisor> = Arc::new(kvm);
/// let vm = hypervisor.create_vm().expect("new VM fd creation failed");
///
impl hypervisor::Hypervisor for KvmHypervisor {
    /// Creates a KVM VM object and returns it as a Vm trait object
    /// Example
    /// # extern crate hypervisor;
    /// # use hypervisor::KvmHypervisor;
    /// use hypervisor::KvmVm;
    /// let hypervisor = KvmHypervisor::new().unwrap();
    /// let vm = hypervisor.create_vm().unwrap();
    ///
    fn create_vm(&self) -> hypervisor::Result<Arc<dyn vm::Vm>> {
        let fd: VmFd;
        loop {
            match self.kvm.create_vm() {
                Ok(res) => fd = res,
                Err(e) => {
                    if e.errno() == libc::EINTR {
                        // If the error returned is EINTR, the ioctl was
                        // interrupted and must be retried; this is not a
                        // regular error.
                        continue;
                    } else {
                        return Err(hypervisor::HypervisorError::VmCreate(e.into()));
                    }
                }
            }
            break;
        }

        let vm_fd = Arc::new(fd);

        #[cfg(target_arch = "x86_64")]
        {
            let msr_list = self.get_msr_list()?;
            let num_msrs = msr_list.as_fam_struct_ref().nmsrs as usize;
            let mut msrs = MsrEntries::new(num_msrs);
            let indices = msr_list.as_slice();
            let msr_entries = msrs.as_mut_slice();
            for (pos, index) in indices.iter().enumerate() {
                msr_entries[pos].index = *index;
            }

            Ok(Arc::new(KvmVm { fd: vm_fd, msrs }))
        }

        #[cfg(any(target_arch = "arm", target_arch = "aarch64"))]
        {
            Ok(Arc::new(KvmVm { fd: vm_fd }))
        }
    }

    fn check_required_extensions(&self) -> hypervisor::Result<()> {
        check_required_kvm_extensions(&self.kvm).expect("Missing KVM capabilities");
        Ok(())
    }

    ///
    /// Returns the KVM API version.
    ///
    fn get_api_version(&self) -> i32 {
        self.kvm.get_api_version()
    }
    ///
    /// Returns the size of the memory mapping required to use the vcpu's `kvm_run` structure.
    ///
    fn get_vcpu_mmap_size(&self) -> hypervisor::Result<usize> {
        self.kvm
            .get_vcpu_mmap_size()
            .map_err(|e| hypervisor::HypervisorError::GetVcpuMmap(e.into()))
    }
    ///
    /// Gets the maximum number of VCPUs supported per VM.
    ///
    fn get_max_vcpus(&self) -> hypervisor::Result<usize> {
        Ok(self.kvm.get_max_vcpus())
    }
    ///
    /// Gets the recommended number of VCPUs per VM.
    ///
    fn get_nr_vcpus(&self) -> hypervisor::Result<usize> {
        Ok(self.kvm.get_nr_vcpus())
    }
    #[cfg(target_arch = "x86_64")]
    ///
    /// Checks if a particular `Cap` is available.
    ///
    fn check_capability(&self, c: Cap) -> bool {
        self.kvm.check_extension(c)
    }
    #[cfg(target_arch = "x86_64")]
    ///
    /// X86 specific call to get the system supported CPUID values.
    ///
    fn get_cpuid(&self) -> hypervisor::Result<CpuId> {
        self.kvm
            .get_supported_cpuid(kvm_bindings::KVM_MAX_CPUID_ENTRIES)
            .map_err(|e| hypervisor::HypervisorError::GetCpuId(e.into()))
    }
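    // Usage sketch: the supported CPUID table returned here is typically
    // filtered/patched by the VMM and then applied to each vCPU with
    // `set_cpuid2()` before the vCPU first runs.
    //
    //     let cpuid = hypervisor.get_cpuid().unwrap();
    //     // ... adjust the `cpuid` entries as needed ...
    //     vcpu.set_cpuid2(&cpuid).unwrap();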
    #[cfg(target_arch = "x86_64")]
    ///
    /// Retrieve the list of MSRs supported by KVM.
    ///
    fn get_msr_list(&self) -> hypervisor::Result<MsrList> {
        self.kvm
            .get_msr_index_list()
            .map_err(|e| hypervisor::HypervisorError::GetMsrList(e.into()))
    }
}
/// Vcpu struct for KVM
pub struct KvmVcpu {
    fd: VcpuFd,
    #[cfg(target_arch = "x86_64")]
    msrs: MsrEntries,
}
/// Implementation of the Vcpu trait for KVM
/// Example:
/// #[cfg(feature = "kvm")]
/// extern crate hypervisor;
/// let kvm = hypervisor::kvm::KvmHypervisor::new().unwrap();
/// let hypervisor: Arc<dyn hypervisor::Hypervisor> = Arc::new(kvm);
/// let vm = hypervisor.create_vm().expect("new VM fd creation failed");
/// let vcpu = vm.create_vcpu(0).unwrap();
/// // Call the Vcpu trait's getters/setters on `vcpu` as needed.
///
impl cpu::Vcpu for KvmVcpu {
    #[cfg(target_arch = "x86_64")]
    ///
    /// Returns the vCPU general purpose registers.
    ///
    fn get_regs(&self) -> cpu::Result<StandardRegisters> {
        self.fd
            .get_regs()
            .map_err(|e| cpu::HypervisorCpuError::GetStandardRegs(e.into()))
    }
    #[cfg(target_arch = "x86_64")]
    ///
    /// Sets the vCPU general purpose registers using the `KVM_SET_REGS` ioctl.
    ///
    fn set_regs(&self, regs: &StandardRegisters) -> cpu::Result<()> {
        self.fd
            .set_regs(regs)
            .map_err(|e| cpu::HypervisorCpuError::SetStandardRegs(e.into()))
    }
    #[cfg(target_arch = "x86_64")]
    ///
    /// Returns the vCPU special registers.
    ///
    fn get_sregs(&self) -> cpu::Result<SpecialRegisters> {
        self.fd
            .get_sregs()
            .map_err(|e| cpu::HypervisorCpuError::GetSpecialRegs(e.into()))
    }
    #[cfg(target_arch = "x86_64")]
    ///
    /// Sets the vCPU special registers using the `KVM_SET_SREGS` ioctl.
    ///
    fn set_sregs(&self, sregs: &SpecialRegisters) -> cpu::Result<()> {
        self.fd
            .set_sregs(sregs)
            .map_err(|e| cpu::HypervisorCpuError::SetSpecialRegs(e.into()))
    }
    #[cfg(target_arch = "x86_64")]
    ///
    /// Returns the floating point state (FPU) from the vCPU.
    ///
    fn get_fpu(&self) -> cpu::Result<FpuState> {
        self.fd
            .get_fpu()
            .map_err(|e| cpu::HypervisorCpuError::GetFloatingPointRegs(e.into()))
    }
    #[cfg(target_arch = "x86_64")]
    ///
    /// Sets the floating point state (FPU) of a vCPU using the `KVM_SET_FPU` ioctl.
    ///
    fn set_fpu(&self, fpu: &FpuState) -> cpu::Result<()> {
        self.fd
            .set_fpu(fpu)
            .map_err(|e| cpu::HypervisorCpuError::SetFloatingPointRegs(e.into()))
    }
    #[cfg(target_arch = "x86_64")]
    ///
    /// X86 specific call to set up the CPUID registers.
    ///
    fn set_cpuid2(&self, cpuid: &CpuId) -> cpu::Result<()> {
        self.fd
            .set_cpuid2(cpuid)
            .map_err(|e| cpu::HypervisorCpuError::SetCpuid(e.into()))
    }
    ///
    /// X86 specific call to retrieve the CPUID registers.
    ///
    #[cfg(target_arch = "x86_64")]
    fn get_cpuid2(&self, num_entries: usize) -> cpu::Result<CpuId> {
        self.fd
            .get_cpuid2(num_entries)
            .map_err(|e| cpu::HypervisorCpuError::GetCpuid(e.into()))
    }
    #[cfg(target_arch = "x86_64")]
    ///
    /// Returns the state of the LAPIC (Local Advanced Programmable Interrupt Controller).
    ///
    fn get_lapic(&self) -> cpu::Result<LapicState> {
        self.fd
            .get_lapic()
            .map_err(|e| cpu::HypervisorCpuError::GetlapicState(e.into()))
    }
    #[cfg(target_arch = "x86_64")]
    ///
    /// Sets the state of the LAPIC (Local Advanced Programmable Interrupt Controller).
    ///
    fn set_lapic(&self, klapic: &LapicState) -> cpu::Result<()> {
        self.fd
            .set_lapic(klapic)
            .map_err(|e| cpu::HypervisorCpuError::SetLapicState(e.into()))
    }
    #[cfg(target_arch = "x86_64")]
    ///
    /// Returns the model-specific registers (MSR) for this vCPU.
    ///
    fn get_msrs(&self, msrs: &mut MsrEntries) -> cpu::Result<usize> {
        self.fd
            .get_msrs(msrs)
            .map_err(|e| cpu::HypervisorCpuError::GetMsrEntries(e.into()))
    }
    #[cfg(target_arch = "x86_64")]
    ///
    /// Sets up the model-specific registers (MSR) for this vCPU.
    /// Returns the number of MSR entries actually written.
    ///
    fn set_msrs(&self, msrs: &MsrEntries) -> cpu::Result<usize> {
        self.fd
            .set_msrs(msrs)
            .map_err(|e| cpu::HypervisorCpuError::SetMsrEntries(e.into()))
    }
    ///
    /// Returns the vcpu's current "multiprocessing state".
    ///
    fn get_mp_state(&self) -> cpu::Result<MpState> {
        self.fd
            .get_mp_state()
            .map_err(|e| cpu::HypervisorCpuError::GetMpState(e.into()))
    }
    ///
    /// Sets the vcpu's current "multiprocessing state".
    ///
    fn set_mp_state(&self, mp_state: MpState) -> cpu::Result<()> {
        self.fd
            .set_mp_state(mp_state)
            .map_err(|e| cpu::HypervisorCpuError::SetMpState(e.into()))
    }
    #[cfg(target_arch = "x86_64")]
    ///
    /// X86 specific call that returns the vcpu's current "xsave struct".
    ///
    fn get_xsave(&self) -> cpu::Result<Xsave> {
        self.fd
            .get_xsave()
            .map_err(|e| cpu::HypervisorCpuError::GetXsaveState(e.into()))
    }
    #[cfg(target_arch = "x86_64")]
    ///
    /// X86 specific call that sets the vcpu's current "xsave struct".
    ///
    fn set_xsave(&self, xsave: &Xsave) -> cpu::Result<()> {
        self.fd
            .set_xsave(xsave)
            .map_err(|e| cpu::HypervisorCpuError::SetXsaveState(e.into()))
    }
    #[cfg(target_arch = "x86_64")]
    ///
    /// X86 specific call that returns the vcpu's current "xcrs".
    ///
    fn get_xcrs(&self) -> cpu::Result<ExtendedControlRegisters> {
        self.fd
            .get_xcrs()
            .map_err(|e| cpu::HypervisorCpuError::GetXcsr(e.into()))
    }
    #[cfg(target_arch = "x86_64")]
    ///
    /// X86 specific call that sets the vcpu's current "xcrs".
    ///
    fn set_xcrs(&self, xcrs: &ExtendedControlRegisters) -> cpu::Result<()> {
        self.fd
            .set_xcrs(xcrs)
            .map_err(|e| cpu::HypervisorCpuError::SetXcsr(e.into()))
    }
    ///
    /// Runs the vCPU until it exits, returning the exit reason.
    ///
    fn run(&self) -> std::result::Result<VcpuExit, vmm_sys_util::errno::Error> {
        self.fd.run()
    }
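    // Usage sketch: a vCPU thread typically loops on `run()` and dispatches on
    // the returned exit reason. Only a few `VcpuExit` variants are shown and
    // error handling is elided.
    //
    //     loop {
    //         match vcpu.run() {
    //             Ok(VcpuExit::IoOut(port, data)) => { /* emulate PIO write */ }
    //             Ok(VcpuExit::MmioWrite(addr, data)) => { /* emulate MMIO write */ }
    //             Ok(VcpuExit::Hlt) => break,
    //             Ok(_) => { /* other exit reasons */ }
    //             Err(e) => { /* EINTR/EAGAIN are usually retried */ }
    //         }
    //     }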
    #[cfg(target_arch = "x86_64")]
    ///
    /// Returns currently pending exceptions, interrupts, and NMIs as well as related
    /// states of the vcpu.
    ///
    fn get_vcpu_events(&self) -> cpu::Result<VcpuEvents> {
        self.fd
            .get_vcpu_events()
            .map_err(|e| cpu::HypervisorCpuError::GetVcpuEvents(e.into()))
    }
    #[cfg(target_arch = "x86_64")]
    ///
    /// Sets pending exceptions, interrupts, and NMIs as well as related states
    /// of the vcpu.
    ///
    fn set_vcpu_events(&self, events: &VcpuEvents) -> cpu::Result<()> {
        self.fd
            .set_vcpu_events(events)
            .map_err(|e| cpu::HypervisorCpuError::SetVcpuEvents(e.into()))
    }
    #[cfg(target_arch = "x86_64")]
    ///
    /// Lets the guest know that it has been paused, which prevents potential
    /// soft lockups when it is resumed.
    ///
    fn notify_guest_clock_paused(&self) -> cpu::Result<()> {
        self.fd
            .kvmclock_ctrl()
            .map_err(|e| cpu::HypervisorCpuError::NotifyGuestClockPaused(e.into()))
    }
    #[cfg(any(target_arch = "arm", target_arch = "aarch64"))]
    fn vcpu_init(&self, kvi: &VcpuInit) -> cpu::Result<()> {
        self.fd
            .vcpu_init(kvi)
            .map_err(|e| cpu::HypervisorCpuError::VcpuInit(e.into()))
    }
    ///
    /// Sets the value of one register for this vCPU.
    ///
    #[cfg(any(target_arch = "arm", target_arch = "aarch64"))]
    fn set_one_reg(&self, reg_id: u64, data: u64) -> cpu::Result<()> {
        self.fd
            .set_one_reg(reg_id, data)
            .map_err(|e| cpu::HypervisorCpuError::SetOneReg(e.into()))
    }
    ///
    /// Gets the value of one register for this vCPU.
    ///
    #[cfg(any(target_arch = "arm", target_arch = "aarch64"))]
    fn get_one_reg(&self, reg_id: u64) -> cpu::Result<u64> {
        self.fd
            .get_one_reg(reg_id)
            .map_err(|e| cpu::HypervisorCpuError::GetOneReg(e.into()))
    }
    #[cfg(target_arch = "x86_64")]
    ///
    /// Gets the current CPU state.
    ///
    /// Ordering requirements:
    ///
    /// KVM_GET_MP_STATE calls kvm_apic_accept_events(), which might modify
    /// vCPU/LAPIC state. As such, it must be done before most everything
    /// else, otherwise we cannot restore everything and expect it to work.
    ///
    /// KVM_GET_VCPU_EVENTS/KVM_SET_VCPU_EVENTS is unsafe if other vCPUs are
    /// still running.
    ///
    /// KVM_GET_LAPIC may change the state of the LAPIC before returning it.
    ///
    /// GET_VCPU_EVENTS should probably be last to save, since it may be
    /// affected by internal state modifications of the other GET ioctls.
    ///
    /// SREGS saves/restores a pending interrupt, similar to what
    /// VCPU_EVENTS also does.
    ///
    /// GET_MSRS requires a pre-populated data structure to do something
    /// meaningful. For SET_MSRS it will then contain good data.
    ///
    /// # Example
    ///
    /// ```rust
    /// # extern crate hypervisor;
    /// # use hypervisor::KvmHypervisor;
    /// # use std::sync::Arc;
    /// let kvm = hypervisor::kvm::KvmHypervisor::new().unwrap();
    /// let hv: Arc<dyn hypervisor::Hypervisor> = Arc::new(kvm);
    /// let vm = hv.create_vm().expect("new VM fd creation failed");
    /// vm.enable_split_irq().unwrap();
    /// let vcpu = vm.create_vcpu(0).unwrap();
    /// let state = vcpu.state().unwrap();
    /// ```
    fn state(&self) -> cpu::Result<CpuState> {
        let mp_state = self.get_mp_state()?;
        let regs = self.get_regs()?;
        let sregs = self.get_sregs()?;
        let xsave = self.get_xsave()?;
        let xcrs = self.get_xcrs()?;
        let lapic_state = self.get_lapic()?;
        let fpu = self.get_fpu()?;
        let mut msrs = self.msrs.clone();
        self.get_msrs(&mut msrs)?;
        let vcpu_events = self.get_vcpu_events()?;

        Ok(CpuState {
            msrs,
            vcpu_events,
            regs,
            sregs,
            fpu,
            lapic_state,
            xsave,
            xcrs,
            mp_state,
        })
    }
    #[cfg(target_arch = "aarch64")]
    fn state(&self) -> cpu::Result<CpuState> {
        unimplemented!();
    }
    #[cfg(target_arch = "x86_64")]
    ///
    /// Restores the previously saved CPU state.
    ///
    /// Ordering requirements:
    ///
    /// KVM_GET_VCPU_EVENTS/KVM_SET_VCPU_EVENTS is unsafe if other vCPUs are
    /// still running.
    ///
    /// Some SET ioctls (like set_mp_state) depend on kvm_vcpu_is_bsp(), so
    /// if we ever change the BSP, we have to do that before restoring anything.
    /// The same seems to be true for CPUID stuff.
    ///
    /// SREGS saves/restores a pending interrupt, similar to what
    /// VCPU_EVENTS also does.
    ///
    /// SET_REGS clears pending exceptions unconditionally, thus, it must be
    /// done before SET_VCPU_EVENTS, which restores it.
    ///
    /// SET_LAPIC must come after SET_SREGS, because the latter restores
    /// the apic base msr.
    ///
    /// SET_LAPIC must come before SET_MSRS, because the TSC deadline MSR
    /// only restores successfully when the LAPIC is correctly configured.
    ///
    /// Arguments: CpuState
    /// # Example
    ///
    /// ```rust
    /// # extern crate hypervisor;
    /// # use hypervisor::KvmHypervisor;
    /// # use std::sync::Arc;
    /// let kvm = hypervisor::kvm::KvmHypervisor::new().unwrap();
    /// let hv: Arc<dyn hypervisor::Hypervisor> = Arc::new(kvm);
    /// let vm = hv.create_vm().expect("new VM fd creation failed");
    /// vm.enable_split_irq().unwrap();
    /// let vcpu = vm.create_vcpu(0).unwrap();
    /// let state = vcpu.state().unwrap();
    /// vcpu.set_state(&state).unwrap();
    /// ```
    fn set_state(&self, state: &CpuState) -> cpu::Result<()> {
        self.set_mp_state(state.mp_state)?;
        self.set_regs(&state.regs)?;
        self.set_sregs(&state.sregs)?;
        self.set_xsave(&state.xsave)?;
        self.set_xcrs(&state.xcrs)?;
        self.set_lapic(&state.lapic_state)?;
        self.set_fpu(&state.fpu)?;
        self.set_msrs(&state.msrs)?;
        self.set_vcpu_events(&state.vcpu_events)?;

        Ok(())
    }
    #[allow(unused_variables)]
    #[cfg(target_arch = "aarch64")]
    fn set_state(&self, state: &CpuState) -> cpu::Result<()> {
        Ok(())
    }
}
749