xref: /cloud-hypervisor/vmm/src/lib.rs (revision eea9bcea38e0c5649f444c829f3a4f9c22aa486c)
1 // Copyright © 2019 Intel Corporation
2 //
3 // SPDX-License-Identifier: Apache-2.0
4 //
5 
6 #![allow(clippy::significant_drop_in_scrutinee)]
7 
8 #[macro_use]
9 extern crate event_monitor;
10 #[macro_use]
11 extern crate log;
12 
13 use crate::api::{
14     ApiError, ApiRequest, ApiResponse, ApiResponsePayload, VmInfo, VmReceiveMigrationData,
15     VmSendMigrationData, VmmPingResponse,
16 };
17 use crate::config::{
18     add_to_config, DeviceConfig, DiskConfig, FsConfig, NetConfig, PmemConfig, RestoreConfig,
19     UserDeviceConfig, VdpaConfig, VmConfig, VsockConfig,
20 };
21 #[cfg(feature = "guest_debug")]
22 use crate::coredump::GuestDebuggable;
23 #[cfg(all(feature = "kvm", target_arch = "x86_64"))]
24 use crate::migration::get_vm_snapshot;
25 use crate::migration::{recv_vm_config, recv_vm_state};
26 use crate::seccomp_filters::{get_seccomp_filter, Thread};
27 use crate::vm::{Error as VmError, Vm, VmState};
28 use anyhow::anyhow;
29 use libc::{EFD_NONBLOCK, SIGINT, SIGTERM};
30 use memory_manager::MemoryManagerSnapshotData;
31 use pci::PciBdf;
32 use seccompiler::{apply_filter, SeccompAction};
33 use serde::ser::{SerializeStruct, Serializer};
34 use serde::{Deserialize, Serialize};
35 use signal_hook::iterator::{Handle, Signals};
36 use std::collections::HashMap;
37 use std::fs::File;
38 use std::io;
39 use std::io::{Read, Write};
40 use std::os::unix::io::{AsRawFd, FromRawFd, RawFd};
41 use std::os::unix::net::UnixListener;
42 use std::os::unix::net::UnixStream;
43 use std::panic::AssertUnwindSafe;
44 use std::path::PathBuf;
45 use std::sync::mpsc::{Receiver, RecvError, SendError, Sender};
46 use std::sync::{Arc, Mutex};
47 use std::{result, thread};
48 use thiserror::Error;
49 use tracer::trace_scoped;
50 use vm_memory::bitmap::AtomicBitmap;
51 use vm_migration::{protocol::*, Migratable};
52 use vm_migration::{MigratableError, Pausable, Snapshot, Snapshottable, Transportable};
53 use vmm_sys_util::eventfd::EventFd;
54 use vmm_sys_util::signal::unblock_signal;
55 use vmm_sys_util::sock_ctrl_msg::ScmSocket;
56 use vmm_sys_util::terminal::Terminal;
57 
58 mod acpi;
59 pub mod api;
60 mod clone3;
61 pub mod config;
62 #[cfg(feature = "guest_debug")]
63 mod coredump;
64 pub mod cpu;
65 pub mod device_manager;
66 pub mod device_tree;
67 #[cfg(feature = "guest_debug")]
68 mod gdb;
69 pub mod interrupt;
70 pub mod memory_manager;
71 pub mod migration;
72 mod pci_segment;
73 pub mod seccomp_filters;
74 mod serial_manager;
75 mod sigwinch_listener;
76 pub mod vm;
77 
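// Guest memory types parameterized with an `AtomicBitmap`, so that writes to
// guest RAM can be tracked, e.g. for dirty-page logging during live migration.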
78 type GuestMemoryMmap = vm_memory::GuestMemoryMmap<AtomicBitmap>;
79 type GuestRegionMmap = vm_memory::GuestRegionMmap<AtomicBitmap>;
80 
81 /// Errors associated with VMM management
82 #[derive(Debug, Error)]
83 #[allow(clippy::large_enum_variant)]
84 pub enum Error {
85     /// API request receive error
86     #[error("Error receiving API request: {0}")]
87     ApiRequestRecv(#[source] RecvError),
88 
89     /// API response send error
90     #[error("Error sending API request: {0}")]
91     ApiResponseSend(#[source] SendError<ApiResponse>),
92 
93     /// Cannot bind to the UNIX domain socket path
94     #[error("Error binding to UNIX domain socket: {0}")]
95     Bind(#[source] io::Error),
96 
97     /// Cannot clone EventFd.
98     #[error("Error cloning EventFd: {0}")]
99     EventFdClone(#[source] io::Error),
100 
101     /// Cannot create EventFd.
102     #[error("Error creating EventFd: {0}")]
103     EventFdCreate(#[source] io::Error),
104 
105     /// Cannot read from EventFd.
106     #[error("Error reading from EventFd: {0}")]
107     EventFdRead(#[source] io::Error),
108 
109     /// Cannot create epoll context.
110     #[error("Error creating epoll context: {0}")]
111     Epoll(#[source] io::Error),
112 
113     /// Cannot create HTTP thread
114     #[error("Error spawning HTTP thread: {0}")]
115     HttpThreadSpawn(#[source] io::Error),
116 
117     /// Cannot handle the VM STDIN stream
118     #[error("Error handling VM stdin: {0:?}")]
119     Stdin(VmError),
120 
121     /// Cannot handle the VM pty stream
122     #[error("Error handling VM pty: {0:?}")]
123     Pty(VmError),
124 
125     /// Cannot reboot the VM
126     #[error("Error rebooting VM: {0:?}")]
127     VmReboot(VmError),
128 
129     /// Cannot create VMM thread
130     #[error("Error spawning VMM thread {0:?}")]
131     VmmThreadSpawn(#[source] io::Error),
132 
133     /// Cannot shut the VMM down
134     #[error("Error shutting down VMM: {0:?}")]
135     VmmShutdown(VmError),
136 
137     /// Cannot create seccomp filter
138     #[error("Error creating seccomp filter: {0}")]
139     CreateSeccompFilter(seccompiler::Error),
140 
141     /// Cannot apply seccomp filter
142     #[error("Error applying seccomp filter: {0}")]
143     ApplySeccompFilter(seccompiler::Error),
144 
145     /// Error activating virtio devices
146     #[error("Error activating virtio devices: {0:?}")]
147     ActivateVirtioDevices(VmError),
148 
149     /// Error creating API server
150     #[error("Error creating API server {0:?}")]
151     CreateApiServer(micro_http::ServerError),
152 
153     /// Error binding API server socket
154     #[error("Error creation API server's socket {0:?}")]
155     CreateApiServerSocket(#[source] io::Error),
156 
157     #[cfg(feature = "guest_debug")]
158     #[error("Failed to start the GDB thread: {0}")]
159     GdbThreadSpawn(io::Error),
160 
161     /// GDB request receive error
162     #[cfg(feature = "guest_debug")]
163     #[error("Error receiving GDB request: {0}")]
164     GdbRequestRecv(#[source] RecvError),
165 
166     /// GDB response send error
167     #[cfg(feature = "guest_debug")]
168     #[error("Error sending GDB request: {0}")]
169     GdbResponseSend(#[source] SendError<gdb::GdbResponse>),
170 
171     #[error("Cannot spawn a signal handler thread: {0}")]
172     SignalHandlerSpawn(#[source] io::Error),
173 
174     #[error("Failed to join on threads: {0:?}")]
175     ThreadCleanup(std::boxed::Box<dyn std::any::Any + std::marker::Send>),
176 }
177 pub type Result<T> = result::Result<T, Error>;
178 
179 #[derive(Debug, Clone, Copy, PartialEq, Eq)]
180 #[repr(u64)]
181 pub enum EpollDispatch {
182     Exit = 0,
183     Reset = 1,
184     Api = 2,
185     ActivateVirtioDevices = 3,
186     Debug = 4,
187     Unknown,
188 }
189 
190 impl From<u64> for EpollDispatch {
191     fn from(v: u64) -> Self {
192         use EpollDispatch::*;
193         match v {
194             0 => Exit,
195             1 => Reset,
196             2 => Api,
197             3 => ActivateVirtioDevices,
198             4 => Debug,
199             _ => Unknown,
200         }
201     }
202 }
203 
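/// Wrapper around the epoll file descriptor used by the VMM control loop. The
/// fd is owned as a `File` so it is closed automatically when dropped.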
204 pub struct EpollContext {
205     epoll_file: File,
206 }
207 
208 impl EpollContext {
209     pub fn new() -> result::Result<EpollContext, io::Error> {
210         let epoll_fd = epoll::create(true)?;
211         // Use 'File' so that 'epoll_fd' is closed automatically when dropped
212         // SAFETY: the epoll_fd returned by epoll::create is valid and owned by us.
213         let epoll_file = unsafe { File::from_raw_fd(epoll_fd) };
214 
215         Ok(EpollContext { epoll_file })
216     }
217 
218     pub fn add_event<T>(&mut self, fd: &T, token: EpollDispatch) -> result::Result<(), io::Error>
219     where
220         T: AsRawFd,
221     {
222         let dispatch_index = token as u64;
223         epoll::ctl(
224             self.epoll_file.as_raw_fd(),
225             epoll::ControlOptions::EPOLL_CTL_ADD,
226             fd.as_raw_fd(),
227             epoll::Event::new(epoll::Events::EPOLLIN, dispatch_index),
228         )?;
229 
230         Ok(())
231     }
232 }
233 
234 impl AsRawFd for EpollContext {
235     fn as_raw_fd(&self) -> RawFd {
236         self.epoll_file.as_raw_fd()
237     }
238 }
239 
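/// Identifier and PCI address (BDF) of a device, as reported back by the
/// device hot-plug operations and serialized with the BDF rendered as a string.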
240 pub struct PciDeviceInfo {
241     pub id: String,
242     pub bdf: PciBdf,
243 }
244 
245 impl Serialize for PciDeviceInfo {
246     fn serialize<S>(&self, serializer: S) -> std::result::Result<S::Ok, S::Error>
247     where
248         S: Serializer,
249     {
250         let bdf_str = self.bdf.to_string();
251 
252         // Serialize the structure.
253         let mut state = serializer.serialize_struct("PciDeviceInfo", 2)?;
254         state.serialize_field("id", &self.id)?;
255         state.serialize_field("bdf", &bdf_str)?;
256         state.end()
257     }
258 }
259 
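/// Spawns the VMM control-loop thread, then starts the HTTP API server (from a
/// socket path or an existing fd) and, when the `guest_debug` feature is enabled
/// and a debug path is provided, the GDB stub thread. Returns the join handle
/// of the VMM thread.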
260 #[allow(unused_variables)]
261 #[allow(clippy::too_many_arguments)]
262 pub fn start_vmm_thread(
263     vmm_version: String,
264     http_path: &Option<String>,
265     http_fd: Option<RawFd>,
266     api_event: EventFd,
267     api_sender: Sender<ApiRequest>,
268     api_receiver: Receiver<ApiRequest>,
269     #[cfg(feature = "guest_debug")] debug_path: Option<PathBuf>,
270     #[cfg(feature = "guest_debug")] debug_event: EventFd,
271     #[cfg(feature = "guest_debug")] vm_debug_event: EventFd,
272     seccomp_action: &SeccompAction,
273     hypervisor: Arc<dyn hypervisor::Hypervisor>,
274 ) -> Result<thread::JoinHandle<Result<()>>> {
275     #[cfg(feature = "guest_debug")]
276     let gdb_hw_breakpoints = hypervisor.get_guest_debug_hw_bps();
277     #[cfg(feature = "guest_debug")]
278     let (gdb_sender, gdb_receiver) = std::sync::mpsc::channel();
279     #[cfg(feature = "guest_debug")]
280     let gdb_debug_event = debug_event.try_clone().map_err(Error::EventFdClone)?;
281     #[cfg(feature = "guest_debug")]
282     let gdb_vm_debug_event = vm_debug_event.try_clone().map_err(Error::EventFdClone)?;
283 
284     let http_api_event = api_event.try_clone().map_err(Error::EventFdClone)?;
285     let hypervisor_type = hypervisor.hypervisor_type();
286 
287     // Retrieve seccomp filter
288     let vmm_seccomp_filter = get_seccomp_filter(seccomp_action, Thread::Vmm, hypervisor_type)
289         .map_err(Error::CreateSeccompFilter)?;
290 
291     let vmm_seccomp_action = seccomp_action.clone();
292     let exit_evt = EventFd::new(EFD_NONBLOCK).map_err(Error::EventFdCreate)?;
293     let thread = {
294         let exit_evt = exit_evt.try_clone().map_err(Error::EventFdClone)?;
295         thread::Builder::new()
296             .name("vmm".to_string())
297             .spawn(move || {
298                 // Apply seccomp filter for VMM thread.
299                 if !vmm_seccomp_filter.is_empty() {
300                     apply_filter(&vmm_seccomp_filter).map_err(Error::ApplySeccompFilter)?;
301                 }
302 
303                 let mut vmm = Vmm::new(
304                     vmm_version.to_string(),
305                     api_event,
306                     #[cfg(feature = "guest_debug")]
307                     debug_event,
308                     #[cfg(feature = "guest_debug")]
309                     vm_debug_event,
310                     vmm_seccomp_action,
311                     hypervisor,
312                     exit_evt,
313                 )?;
314 
315                 vmm.setup_signal_handler()?;
316 
317                 vmm.control_loop(
318                     Arc::new(api_receiver),
319                     #[cfg(feature = "guest_debug")]
320                     Arc::new(gdb_receiver),
321                 )
322             })
323             .map_err(Error::VmmThreadSpawn)?
324     };
325 
326     // The VMM thread is started, so we can start serving HTTP requests
327     if let Some(http_path) = http_path {
328         api::start_http_path_thread(
329             http_path,
330             http_api_event,
331             api_sender,
332             seccomp_action,
333             exit_evt,
334             hypervisor_type,
335         )?;
336     } else if let Some(http_fd) = http_fd {
337         api::start_http_fd_thread(
338             http_fd,
339             http_api_event,
340             api_sender,
341             seccomp_action,
342             exit_evt,
343             hypervisor_type,
344         )?;
345     }
346 
347     #[cfg(feature = "guest_debug")]
348     if let Some(debug_path) = debug_path {
349         let target = gdb::GdbStub::new(
350             gdb_sender,
351             gdb_debug_event,
352             gdb_vm_debug_event,
353             gdb_hw_breakpoints,
354         );
355         thread::Builder::new()
356             .name("gdb".to_owned())
357             .spawn(move || gdb::gdb_thread(target, &debug_path))
358             .map_err(Error::GdbThreadSpawn)?;
359     }
360 
361     Ok(thread)
362 }
363 
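/// Data sent to the destination VMM during a migration: the VM configuration,
/// the common CPUID (KVM on x86_64 only) and the memory manager snapshot data.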
364 #[derive(Clone, Deserialize, Serialize)]
365 struct VmMigrationConfig {
366     vm_config: Arc<Mutex<VmConfig>>,
367     #[cfg(all(feature = "kvm", target_arch = "x86_64"))]
368     common_cpuid: Vec<hypervisor::arch::x86::CpuIdEntry>,
369     memory_manager_data: MemoryManagerSnapshotData,
370 }
371 
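/// The VMM itself: owns the epoll context and control event fds, the currently
/// managed VM (if any) and its configuration, plus the seccomp action, the
/// hypervisor handle and the helper threads it spawns.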
372 pub struct Vmm {
373     epoll: EpollContext,
374     exit_evt: EventFd,
375     reset_evt: EventFd,
376     api_evt: EventFd,
377     #[cfg(feature = "guest_debug")]
378     debug_evt: EventFd,
379     #[cfg(feature = "guest_debug")]
380     vm_debug_evt: EventFd,
381     version: String,
382     vm: Option<Vm>,
383     vm_config: Option<Arc<Mutex<VmConfig>>>,
384     seccomp_action: SeccompAction,
385     hypervisor: Arc<dyn hypervisor::Hypervisor>,
386     activate_evt: EventFd,
387     signals: Option<Handle>,
388     threads: Vec<thread::JoinHandle<()>>,
389 }
390 
391 impl Vmm {
392     pub const HANDLED_SIGNALS: [i32; 2] = [SIGTERM, SIGINT];
393 
394     fn signal_handler(mut signals: Signals, on_tty: bool, exit_evt: &EventFd) {
395         for sig in &Self::HANDLED_SIGNALS {
396             unblock_signal(*sig).unwrap();
397         }
398 
399         for signal in signals.forever() {
400             match signal {
401                 SIGTERM | SIGINT => {
402                     if exit_evt.write(1).is_err() {
403                         // Resetting the terminal is usually done as the VMM exits
404                         if on_tty {
405                             io::stdin()
406                                 .lock()
407                                 .set_canon_mode()
408                                 .expect("failed to restore terminal mode");
409                         }
410                         std::process::exit(1);
411                     }
412                 }
413                 _ => (),
414             }
415         }
416     }
417 
418     fn setup_signal_handler(&mut self) -> Result<()> {
419         let signals = Signals::new(Self::HANDLED_SIGNALS);
420         match signals {
421             Ok(signals) => {
422                 self.signals = Some(signals.handle());
423                 let exit_evt = self.exit_evt.try_clone().map_err(Error::EventFdClone)?;
424                 let on_tty = unsafe { libc::isatty(libc::STDIN_FILENO as i32) } != 0;
425 
426                 let signal_handler_seccomp_filter = get_seccomp_filter(
427                     &self.seccomp_action,
428                     Thread::SignalHandler,
429                     self.hypervisor.hypervisor_type(),
430                 )
431                 .map_err(Error::CreateSeccompFilter)?;
432                 self.threads.push(
433                     thread::Builder::new()
434                         .name("vmm_signal_handler".to_string())
435                         .spawn(move || {
436                             if !signal_handler_seccomp_filter.is_empty() {
437                                 if let Err(e) = apply_filter(&signal_handler_seccomp_filter)
438                                     .map_err(Error::ApplySeccompFilter)
439                                 {
440                                     error!("Error applying seccomp filter: {:?}", e);
441                                     exit_evt.write(1).ok();
442                                     return;
443                                 }
444                             }
445                             std::panic::catch_unwind(AssertUnwindSafe(|| {
446                                 Vmm::signal_handler(signals, on_tty, &exit_evt);
447                             }))
448                             .map_err(|_| {
449                                 error!("signal_handler thead panicked");
450                                 exit_evt.write(1).ok()
451                             })
452                             .ok();
453                         })
454                         .map_err(Error::SignalHandlerSpawn)?,
455                 );
456             }
457             Err(e) => error!("Signal not found {}", e),
458         }
459         Ok(())
460     }
461 
462     fn new(
463         vmm_version: String,
464         api_evt: EventFd,
465         #[cfg(feature = "guest_debug")] debug_evt: EventFd,
466         #[cfg(feature = "guest_debug")] vm_debug_evt: EventFd,
467         seccomp_action: SeccompAction,
468         hypervisor: Arc<dyn hypervisor::Hypervisor>,
469         exit_evt: EventFd,
470     ) -> Result<Self> {
471         let mut epoll = EpollContext::new().map_err(Error::Epoll)?;
472         let reset_evt = EventFd::new(EFD_NONBLOCK).map_err(Error::EventFdCreate)?;
473         let activate_evt = EventFd::new(EFD_NONBLOCK).map_err(Error::EventFdCreate)?;
474 
475         epoll
476             .add_event(&exit_evt, EpollDispatch::Exit)
477             .map_err(Error::Epoll)?;
478 
479         epoll
480             .add_event(&reset_evt, EpollDispatch::Reset)
481             .map_err(Error::Epoll)?;
482 
483         epoll
484             .add_event(&activate_evt, EpollDispatch::ActivateVirtioDevices)
485             .map_err(Error::Epoll)?;
486 
487         epoll
488             .add_event(&api_evt, EpollDispatch::Api)
489             .map_err(Error::Epoll)?;
490 
491         #[cfg(feature = "guest_debug")]
492         epoll
493             .add_event(&debug_evt, EpollDispatch::Debug)
494             .map_err(Error::Epoll)?;
495 
496         Ok(Vmm {
497             epoll,
498             exit_evt,
499             reset_evt,
500             api_evt,
501             #[cfg(feature = "guest_debug")]
502             debug_evt,
503             #[cfg(feature = "guest_debug")]
504             vm_debug_evt,
505             version: vmm_version,
506             vm: None,
507             vm_config: None,
508             seccomp_action,
509             hypervisor,
510             activate_evt,
511             signals: None,
512             threads: vec![],
513         })
514     }
515 
516     fn vm_create(&mut self, config: Arc<Mutex<VmConfig>>) -> result::Result<(), VmError> {
517         // We only store the passed VM config.
518         // The VM will be created when being asked to boot it.
519         if self.vm_config.is_none() {
520             self.vm_config = Some(config);
521             Ok(())
522         } else {
523             Err(VmError::VmAlreadyCreated)
524         }
525     }
526 
527     fn vm_boot(&mut self) -> result::Result<(), VmError> {
528         tracer::start();
529         let r = {
530             trace_scoped!("vm_boot");
531             // If we don't have a config, we cannot boot a VM.
532             if self.vm_config.is_none() {
533                 return Err(VmError::VmMissingConfig);
534             };
535 
536             // Create a new VM if we don't have one yet.
537             if self.vm.is_none() {
538                 let exit_evt = self.exit_evt.try_clone().map_err(VmError::EventFdClone)?;
539                 let reset_evt = self.reset_evt.try_clone().map_err(VmError::EventFdClone)?;
540                 #[cfg(feature = "guest_debug")]
541                 let vm_debug_evt = self
542                     .vm_debug_evt
543                     .try_clone()
544                     .map_err(VmError::EventFdClone)?;
545                 let activate_evt = self
546                     .activate_evt
547                     .try_clone()
548                     .map_err(VmError::EventFdClone)?;
549 
550                 if let Some(ref vm_config) = self.vm_config {
551                     let vm = Vm::new(
552                         Arc::clone(vm_config),
553                         exit_evt,
554                         reset_evt,
555                         #[cfg(feature = "guest_debug")]
556                         vm_debug_evt,
557                         &self.seccomp_action,
558                         self.hypervisor.clone(),
559                         activate_evt,
560                         None,
561                         None,
562                         None,
563                     )?;
564 
565                     self.vm = Some(vm);
566                 }
567             }
568 
569             // Now we can boot the VM.
570             if let Some(ref mut vm) = self.vm {
571                 vm.boot()
572             } else {
573                 Err(VmError::VmNotCreated)
574             }
575         };
576         tracer::end();
577         r
578     }
579 
580     fn vm_pause(&mut self) -> result::Result<(), VmError> {
581         if let Some(ref mut vm) = self.vm {
582             vm.pause().map_err(VmError::Pause)
583         } else {
584             Err(VmError::VmNotRunning)
585         }
586     }
587 
588     fn vm_resume(&mut self) -> result::Result<(), VmError> {
589         if let Some(ref mut vm) = self.vm {
590             vm.resume().map_err(VmError::Resume)
591         } else {
592             Err(VmError::VmNotRunning)
593         }
594     }
595 
596     fn vm_snapshot(&mut self, destination_url: &str) -> result::Result<(), VmError> {
597         if let Some(ref mut vm) = self.vm {
598             vm.snapshot()
599                 .map_err(VmError::Snapshot)
600                 .and_then(|snapshot| {
601                     vm.send(&snapshot, destination_url)
602                         .map_err(VmError::SnapshotSend)
603                 })
604         } else {
605             Err(VmError::VmNotRunning)
606         }
607     }
608 
609     fn vm_restore(&mut self, restore_cfg: RestoreConfig) -> result::Result<(), VmError> {
610         if self.vm.is_some() || self.vm_config.is_some() {
611             return Err(VmError::VmAlreadyCreated);
612         }
613 
614         let source_url = restore_cfg.source_url.as_path().to_str();
615         if source_url.is_none() {
616             return Err(VmError::InvalidRestoreSourceUrl);
617         }
618         // Safe to unwrap as we checked it was Some(&str).
619         let source_url = source_url.unwrap();
620 
621         let vm_config = Arc::new(Mutex::new(
622             recv_vm_config(source_url).map_err(VmError::Restore)?,
623         ));
624         let snapshot = recv_vm_state(source_url).map_err(VmError::Restore)?;
625         #[cfg(all(feature = "kvm", target_arch = "x86_64"))]
626         let vm_snapshot = get_vm_snapshot(&snapshot).map_err(VmError::Restore)?;
627 
628         #[cfg(all(feature = "kvm", target_arch = "x86_64"))]
629         self.vm_check_cpuid_compatibility(&vm_config, &vm_snapshot.common_cpuid)
630             .map_err(VmError::Restore)?;
631 
632         self.vm_config = Some(Arc::clone(&vm_config));
633 
634         let exit_evt = self.exit_evt.try_clone().map_err(VmError::EventFdClone)?;
635         let reset_evt = self.reset_evt.try_clone().map_err(VmError::EventFdClone)?;
636         #[cfg(feature = "guest_debug")]
637         let debug_evt = self
638             .vm_debug_evt
639             .try_clone()
640             .map_err(VmError::EventFdClone)?;
641         let activate_evt = self
642             .activate_evt
643             .try_clone()
644             .map_err(VmError::EventFdClone)?;
645 
646         let vm = Vm::new_from_snapshot(
647             &snapshot,
648             vm_config,
649             exit_evt,
650             reset_evt,
651             #[cfg(feature = "guest_debug")]
652             debug_evt,
653             Some(source_url),
654             restore_cfg.prefault,
655             &self.seccomp_action,
656             self.hypervisor.clone(),
657             activate_evt,
658         )?;
659         self.vm = Some(vm);
660 
661         // Now we can restore the rest of the VM.
662         if let Some(ref mut vm) = self.vm {
663             vm.restore(snapshot).map_err(VmError::Restore)
664         } else {
665             Err(VmError::VmNotCreated)
666         }
667     }
668 
669     #[cfg(feature = "guest_debug")]
670     fn vm_coredump(&mut self, destination_url: &str) -> result::Result<(), VmError> {
671         if let Some(ref mut vm) = self.vm {
672             vm.coredump(destination_url).map_err(VmError::Coredump)
673         } else {
674             Err(VmError::VmNotRunning)
675         }
676     }
677 
678     fn vm_shutdown(&mut self) -> result::Result<(), VmError> {
679         if let Some(ref mut vm) = self.vm.take() {
680             vm.shutdown()
681         } else {
682             Err(VmError::VmNotRunning)
683         }
684     }
685 
686     fn vm_reboot(&mut self) -> result::Result<(), VmError> {
687         // First we stop the current VM
688         let (config, serial_pty, console_pty, console_resize_pipe) =
689             if let Some(mut vm) = self.vm.take() {
690                 let config = vm.get_config();
691                 let serial_pty = vm.serial_pty();
692                 let console_pty = vm.console_pty();
693                 let console_resize_pipe = vm
694                     .console_resize_pipe()
695                     .as_ref()
696                     .map(|pipe| pipe.try_clone().unwrap());
697                 vm.shutdown()?;
698                 (config, serial_pty, console_pty, console_resize_pipe)
699             } else {
700                 return Err(VmError::VmNotCreated);
701             };
702 
703         let exit_evt = self.exit_evt.try_clone().map_err(VmError::EventFdClone)?;
704         let reset_evt = self.reset_evt.try_clone().map_err(VmError::EventFdClone)?;
705         #[cfg(feature = "guest_debug")]
706         let debug_evt = self
707             .vm_debug_evt
708             .try_clone()
709             .map_err(VmError::EventFdClone)?;
710         let activate_evt = self
711             .activate_evt
712             .try_clone()
713             .map_err(VmError::EventFdClone)?;
714 
715         // The Linux kernel fires off an i8042 reset after doing the ACPI reset so there may be
716         // an event sitting in the shared reset_evt. Without doing this we get very early reboots
717         // during the boot process.
718         if self.reset_evt.read().is_ok() {
719             warn!("Spurious second reset event received. Ignoring.");
720         }
721 
722         // Then we create the new VM
723         let mut vm = Vm::new(
724             config,
725             exit_evt,
726             reset_evt,
727             #[cfg(feature = "guest_debug")]
728             debug_evt,
729             &self.seccomp_action,
730             self.hypervisor.clone(),
731             activate_evt,
732             serial_pty,
733             console_pty,
734             console_resize_pipe,
735         )?;
736 
737         // And we boot it
738         vm.boot()?;
739 
740         self.vm = Some(vm);
741 
742         Ok(())
743     }
744 
745     fn vm_info(&self) -> result::Result<VmInfo, VmError> {
746         match &self.vm_config {
747             Some(config) => {
748                 let state = match &self.vm {
749                     Some(vm) => vm.get_state()?,
750                     None => VmState::Created,
751                 };
752 
753                 let config = Arc::clone(config);
754 
755                 let mut memory_actual_size = config.lock().unwrap().memory.total_size();
756                 if let Some(vm) = &self.vm {
757                     memory_actual_size -= vm.balloon_size();
758                 }
759 
760                 let device_tree = self.vm.as_ref().map(|vm| vm.device_tree());
761 
762                 Ok(VmInfo {
763                     config,
764                     state,
765                     memory_actual_size,
766                     device_tree,
767                 })
768             }
769             None => Err(VmError::VmNotCreated),
770         }
771     }
772 
773     fn vmm_ping(&self) -> VmmPingResponse {
774         VmmPingResponse {
775             version: self.version.clone(),
776         }
777     }
778 
779     fn vm_delete(&mut self) -> result::Result<(), VmError> {
780         if self.vm_config.is_none() {
781             return Ok(());
782         }
783 
784         // If a VM is booted, we first try to shut it down.
785         if self.vm.is_some() {
786             self.vm_shutdown()?;
787         }
788 
789         self.vm_config = None;
790 
791         event!("vm", "deleted");
792 
793         Ok(())
794     }
795 
796     fn vmm_shutdown(&mut self) -> result::Result<(), VmError> {
797         self.vm_delete()?;
798         event!("vmm", "shutdown");
799         Ok(())
800     }
801 
802     fn vm_resize(
803         &mut self,
804         desired_vcpus: Option<u8>,
805         desired_ram: Option<u64>,
806         desired_balloon: Option<u64>,
807     ) -> result::Result<(), VmError> {
808         self.vm_config.as_ref().ok_or(VmError::VmNotCreated)?;
809 
810         if let Some(ref mut vm) = self.vm {
811             if let Err(e) = vm.resize(desired_vcpus, desired_ram, desired_balloon) {
812                 error!("Error when resizing VM: {:?}", e);
813                 Err(e)
814             } else {
815                 Ok(())
816             }
817         } else {
818             let mut config = self.vm_config.as_ref().unwrap().lock().unwrap();
819             if let Some(desired_vcpus) = desired_vcpus {
820                 config.cpus.boot_vcpus = desired_vcpus;
821             }
822             if let Some(desired_ram) = desired_ram {
823                 config.memory.size = desired_ram;
824             }
825             if let Some(desired_balloon) = desired_balloon {
826                 if let Some(balloon_config) = &mut config.balloon {
827                     balloon_config.size = desired_balloon;
828                 }
829             }
830             Ok(())
831         }
832     }
833 
834     fn vm_resize_zone(&mut self, id: String, desired_ram: u64) -> result::Result<(), VmError> {
835         self.vm_config.as_ref().ok_or(VmError::VmNotCreated)?;
836 
837         if let Some(ref mut vm) = self.vm {
838             if let Err(e) = vm.resize_zone(id, desired_ram) {
839                 error!("Error when resizing VM: {:?}", e);
840                 Err(e)
841             } else {
842                 Ok(())
843             }
844         } else {
845             // Update VmConfig by setting the new desired ram.
846             let memory_config = &mut self.vm_config.as_ref().unwrap().lock().unwrap().memory;
847 
848             if let Some(zones) = &mut memory_config.zones {
849                 for zone in zones.iter_mut() {
850                     if zone.id == id {
851                         zone.size = desired_ram;
852                         return Ok(());
853                     }
854                 }
855             }
856 
857             error!("Could not find the memory zone {} for the resize", id);
858             Err(VmError::ResizeZone)
859         }
860     }
861 
862     fn vm_add_device(
863         &mut self,
864         device_cfg: DeviceConfig,
865     ) -> result::Result<Option<Vec<u8>>, VmError> {
866         self.vm_config.as_ref().ok_or(VmError::VmNotCreated)?;
867 
868         {
869             // Validate the configuration change in a cloned configuration
870             let mut config = self.vm_config.as_ref().unwrap().lock().unwrap().clone();
871             add_to_config(&mut config.devices, device_cfg.clone());
872             config.validate().map_err(VmError::ConfigValidation)?;
873         }
874 
875         if let Some(ref mut vm) = self.vm {
876             let info = vm.add_device(device_cfg).map_err(|e| {
877                 error!("Error when adding new device to the VM: {:?}", e);
878                 e
879             })?;
880             serde_json::to_vec(&info)
881                 .map(Some)
882                 .map_err(VmError::SerializeJson)
883         } else {
884             // Update VmConfig by adding the new device.
885             let mut config = self.vm_config.as_ref().unwrap().lock().unwrap();
886             add_to_config(&mut config.devices, device_cfg);
887             Ok(None)
888         }
889     }
890 
891     fn vm_add_user_device(
892         &mut self,
893         device_cfg: UserDeviceConfig,
894     ) -> result::Result<Option<Vec<u8>>, VmError> {
895         self.vm_config.as_ref().ok_or(VmError::VmNotCreated)?;
896 
897         {
898             // Validate the configuration change in a cloned configuration
899             let mut config = self.vm_config.as_ref().unwrap().lock().unwrap().clone();
900             add_to_config(&mut config.user_devices, device_cfg.clone());
901             config.validate().map_err(VmError::ConfigValidation)?;
902         }
903 
904         if let Some(ref mut vm) = self.vm {
905             let info = vm.add_user_device(device_cfg).map_err(|e| {
906                 error!("Error when adding new user device to the VM: {:?}", e);
907                 e
908             })?;
909             serde_json::to_vec(&info)
910                 .map(Some)
911                 .map_err(VmError::SerializeJson)
912         } else {
913             // Update VmConfig by adding the new device.
914             let mut config = self.vm_config.as_ref().unwrap().lock().unwrap();
915             add_to_config(&mut config.user_devices, device_cfg);
916             Ok(None)
917         }
918     }
919 
920     fn vm_remove_device(&mut self, id: String) -> result::Result<(), VmError> {
921         if let Some(ref mut vm) = self.vm {
922             if let Err(e) = vm.remove_device(id) {
923                 error!("Error when removing new device to the VM: {:?}", e);
924                 Err(e)
925             } else {
926                 Ok(())
927             }
928         } else {
929             Err(VmError::VmNotRunning)
930         }
931     }
932 
933     fn vm_add_disk(&mut self, disk_cfg: DiskConfig) -> result::Result<Option<Vec<u8>>, VmError> {
934         self.vm_config.as_ref().ok_or(VmError::VmNotCreated)?;
935 
936         {
937             // Validate the configuration change in a cloned configuration
938             let mut config = self.vm_config.as_ref().unwrap().lock().unwrap().clone();
939             add_to_config(&mut config.disks, disk_cfg.clone());
940             config.validate().map_err(VmError::ConfigValidation)?;
941         }
942 
943         if let Some(ref mut vm) = self.vm {
944             let info = vm.add_disk(disk_cfg).map_err(|e| {
945                 error!("Error when adding new disk to the VM: {:?}", e);
946                 e
947             })?;
948             serde_json::to_vec(&info)
949                 .map(Some)
950                 .map_err(VmError::SerializeJson)
951         } else {
952             // Update VmConfig by adding the new device.
953             let mut config = self.vm_config.as_ref().unwrap().lock().unwrap();
954             add_to_config(&mut config.disks, disk_cfg);
955             Ok(None)
956         }
957     }
958 
959     fn vm_add_fs(&mut self, fs_cfg: FsConfig) -> result::Result<Option<Vec<u8>>, VmError> {
960         self.vm_config.as_ref().ok_or(VmError::VmNotCreated)?;
961 
962         {
963             // Validate the configuration change in a cloned configuration
964             let mut config = self.vm_config.as_ref().unwrap().lock().unwrap().clone();
965             add_to_config(&mut config.fs, fs_cfg.clone());
966             config.validate().map_err(VmError::ConfigValidation)?;
967         }
968 
969         if let Some(ref mut vm) = self.vm {
970             let info = vm.add_fs(fs_cfg).map_err(|e| {
971                 error!("Error when adding new fs to the VM: {:?}", e);
972                 e
973             })?;
974             serde_json::to_vec(&info)
975                 .map(Some)
976                 .map_err(VmError::SerializeJson)
977         } else {
978             // Update VmConfig by adding the new device.
979             let mut config = self.vm_config.as_ref().unwrap().lock().unwrap();
980             add_to_config(&mut config.fs, fs_cfg);
981             Ok(None)
982         }
983     }
984 
985     fn vm_add_pmem(&mut self, pmem_cfg: PmemConfig) -> result::Result<Option<Vec<u8>>, VmError> {
986         self.vm_config.as_ref().ok_or(VmError::VmNotCreated)?;
987 
988         {
989             // Validate the configuration change in a cloned configuration
990             let mut config = self.vm_config.as_ref().unwrap().lock().unwrap().clone();
991             add_to_config(&mut config.pmem, pmem_cfg.clone());
992             config.validate().map_err(VmError::ConfigValidation)?;
993         }
994 
995         if let Some(ref mut vm) = self.vm {
996             let info = vm.add_pmem(pmem_cfg).map_err(|e| {
997                 error!("Error when adding new pmem device to the VM: {:?}", e);
998                 e
999             })?;
1000             serde_json::to_vec(&info)
1001                 .map(Some)
1002                 .map_err(VmError::SerializeJson)
1003         } else {
1004             // Update VmConfig by adding the new device.
1005             let mut config = self.vm_config.as_ref().unwrap().lock().unwrap();
1006             add_to_config(&mut config.pmem, pmem_cfg);
1007             Ok(None)
1008         }
1009     }
1010 
1011     fn vm_add_net(&mut self, net_cfg: NetConfig) -> result::Result<Option<Vec<u8>>, VmError> {
1012         self.vm_config.as_ref().ok_or(VmError::VmNotCreated)?;
1013 
1014         {
1015             // Validate the configuration change in a cloned configuration
1016             let mut config = self.vm_config.as_ref().unwrap().lock().unwrap().clone();
1017             add_to_config(&mut config.net, net_cfg.clone());
1018             config.validate().map_err(VmError::ConfigValidation)?;
1019         }
1020 
1021         if let Some(ref mut vm) = self.vm {
1022             let info = vm.add_net(net_cfg).map_err(|e| {
1023                 error!("Error when adding new network device to the VM: {:?}", e);
1024                 e
1025             })?;
1026             serde_json::to_vec(&info)
1027                 .map(Some)
1028                 .map_err(VmError::SerializeJson)
1029         } else {
1030             // Update VmConfig by adding the new device.
1031             let mut config = self.vm_config.as_ref().unwrap().lock().unwrap();
1032             add_to_config(&mut config.net, net_cfg);
1033             Ok(None)
1034         }
1035     }
1036 
1037     fn vm_add_vdpa(&mut self, vdpa_cfg: VdpaConfig) -> result::Result<Option<Vec<u8>>, VmError> {
1038         self.vm_config.as_ref().ok_or(VmError::VmNotCreated)?;
1039 
1040         {
1041             // Validate the configuration change in a cloned configuration
1042             let mut config = self.vm_config.as_ref().unwrap().lock().unwrap().clone();
1043             add_to_config(&mut config.vdpa, vdpa_cfg.clone());
1044             config.validate().map_err(VmError::ConfigValidation)?;
1045         }
1046 
1047         if let Some(ref mut vm) = self.vm {
1048             let info = vm.add_vdpa(vdpa_cfg).map_err(|e| {
1049                 error!("Error when adding new vDPA device to the VM: {:?}", e);
1050                 e
1051             })?;
1052             serde_json::to_vec(&info)
1053                 .map(Some)
1054                 .map_err(VmError::SerializeJson)
1055         } else {
1056             // Update VmConfig by adding the new device.
1057             let mut config = self.vm_config.as_ref().unwrap().lock().unwrap();
1058             add_to_config(&mut config.vdpa, vdpa_cfg);
1059             Ok(None)
1060         }
1061     }
1062 
1063     fn vm_add_vsock(&mut self, vsock_cfg: VsockConfig) -> result::Result<Option<Vec<u8>>, VmError> {
1064         self.vm_config.as_ref().ok_or(VmError::VmNotCreated)?;
1065 
1066         {
1067             // Validate the configuration change in a cloned configuration
1068             let mut config = self.vm_config.as_ref().unwrap().lock().unwrap().clone();
1069 
1070             if config.vsock.is_some() {
1071                 return Err(VmError::TooManyVsockDevices);
1072             }
1073 
1074             config.vsock = Some(vsock_cfg.clone());
1075             config.validate().map_err(VmError::ConfigValidation)?;
1076         }
1077 
1078         if let Some(ref mut vm) = self.vm {
1079             let info = vm.add_vsock(vsock_cfg).map_err(|e| {
1080                 error!("Error when adding new vsock device to the VM: {:?}", e);
1081                 e
1082             })?;
1083             serde_json::to_vec(&info)
1084                 .map(Some)
1085                 .map_err(VmError::SerializeJson)
1086         } else {
1087             // Update VmConfig by adding the new device.
1088             let mut config = self.vm_config.as_ref().unwrap().lock().unwrap();
1089             config.vsock = Some(vsock_cfg);
1090             Ok(None)
1091         }
1092     }
1093 
1094     fn vm_counters(&mut self) -> result::Result<Option<Vec<u8>>, VmError> {
1095         if let Some(ref mut vm) = self.vm {
1096             let info = vm.counters().map_err(|e| {
1097                 error!("Error when getting counters from the VM: {:?}", e);
1098                 e
1099             })?;
1100             serde_json::to_vec(&info)
1101                 .map(Some)
1102                 .map_err(VmError::SerializeJson)
1103         } else {
1104             Err(VmError::VmNotRunning)
1105         }
1106     }
1107 
1108     fn vm_power_button(&mut self) -> result::Result<(), VmError> {
1109         if let Some(ref mut vm) = self.vm {
1110             vm.power_button()
1111         } else {
1112             Err(VmError::VmNotRunning)
1113         }
1114     }
1115 
1116     fn vm_receive_config<T>(
1117         &mut self,
1118         req: &Request,
1119         socket: &mut T,
1120         existing_memory_files: Option<HashMap<u32, File>>,
1121     ) -> std::result::Result<Vm, MigratableError>
1122     where
1123         T: Read + Write,
1124     {
1125         // Read in config data along with memory manager data
1126         let mut data: Vec<u8> = Vec::new();
1127         data.resize_with(req.length() as usize, Default::default);
1128         socket
1129             .read_exact(&mut data)
1130             .map_err(MigratableError::MigrateSocket)?;
1131 
1132         let vm_migration_config: VmMigrationConfig =
1133             serde_json::from_slice(&data).map_err(|e| {
1134                 MigratableError::MigrateReceive(anyhow!("Error deserialising config: {}", e))
1135             })?;
1136 
1137         #[cfg(all(feature = "kvm", target_arch = "x86_64"))]
1138         self.vm_check_cpuid_compatibility(
1139             &vm_migration_config.vm_config,
1140             &vm_migration_config.common_cpuid,
1141         )?;
1142 
1143         let exit_evt = self.exit_evt.try_clone().map_err(|e| {
1144             MigratableError::MigrateReceive(anyhow!("Error cloning exit EventFd: {}", e))
1145         })?;
1146         let reset_evt = self.reset_evt.try_clone().map_err(|e| {
1147             MigratableError::MigrateReceive(anyhow!("Error cloning reset EventFd: {}", e))
1148         })?;
1149         #[cfg(feature = "guest_debug")]
1150         let debug_evt = self.vm_debug_evt.try_clone().map_err(|e| {
1151             MigratableError::MigrateReceive(anyhow!("Error cloning debug EventFd: {}", e))
1152         })?;
1153         let activate_evt = self.activate_evt.try_clone().map_err(|e| {
1154             MigratableError::MigrateReceive(anyhow!("Error cloning activate EventFd: {}", e))
1155         })?;
1156 
1157         self.vm_config = Some(vm_migration_config.vm_config);
1158         let vm = Vm::new_from_migration(
1159             self.vm_config.clone().unwrap(),
1160             exit_evt,
1161             reset_evt,
1162             #[cfg(feature = "guest_debug")]
1163             debug_evt,
1164             &self.seccomp_action,
1165             self.hypervisor.clone(),
1166             activate_evt,
1167             &vm_migration_config.memory_manager_data,
1168             existing_memory_files,
1169         )
1170         .map_err(|e| {
1171             MigratableError::MigrateReceive(anyhow!("Error creating VM from snapshot: {:?}", e))
1172         })?;
1173 
1174         Response::ok().write_to(socket)?;
1175 
1176         Ok(vm)
1177     }
1178 
1179     fn vm_receive_state<T>(
1180         &mut self,
1181         req: &Request,
1182         socket: &mut T,
1183         mut vm: Vm,
1184     ) -> std::result::Result<(), MigratableError>
1185     where
1186         T: Read + Write,
1187     {
1188         // Read in state data
1189         let mut data: Vec<u8> = Vec::new();
1190         data.resize_with(req.length() as usize, Default::default);
1191         socket
1192             .read_exact(&mut data)
1193             .map_err(MigratableError::MigrateSocket)?;
1194         let snapshot: Snapshot = serde_json::from_slice(&data).map_err(|e| {
1195             MigratableError::MigrateReceive(anyhow!("Error deserialising snapshot: {}", e))
1196         })?;
1197 
1198         // Restore the VM from the received snapshot
1199         vm.restore(snapshot).map_err(|e| {
1200             Response::error().write_to(socket).ok();
1201             e
1202         })?;
1203         self.vm = Some(vm);
1204 
1205         Response::ok().write_to(socket)?;
1206 
1207         Ok(())
1208     }
1209 
1210     fn vm_receive_memory<T>(
1211         &mut self,
1212         req: &Request,
1213         socket: &mut T,
1214         vm: &mut Vm,
1215     ) -> std::result::Result<(), MigratableError>
1216     where
1217         T: Read + Write,
1218     {
1219         // Read table
1220         let table = MemoryRangeTable::read_from(socket, req.length())?;
1221 
1222         // And then read the memory itself
1223         vm.receive_memory_regions(&table, socket).map_err(|e| {
1224             Response::error().write_to(socket).ok();
1225             e
1226         })?;
1227         Response::ok().write_to(socket)?;
1228         Ok(())
1229     }
1230 
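    /// Extracts the filesystem path from a "unix:<path>" migration URL.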
1231     fn socket_url_to_path(url: &str) -> result::Result<PathBuf, MigratableError> {
1232         url.strip_prefix("unix:")
1233             .ok_or_else(|| {
1234                 MigratableError::MigrateSend(anyhow!("Could not extract path from URL: {}", url))
1235             })
1236             .map(|s| s.into())
1237     }
1238 
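    /// Receiving side of a live migration: binds a UNIX socket at the receiver
    /// URL and processes incoming commands (Start, Config, State, Memory,
    /// MemoryFd, Complete, Abandon) until the migration completes or is abandoned.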
1239     fn vm_receive_migration(
1240         &mut self,
1241         receive_data_migration: VmReceiveMigrationData,
1242     ) -> result::Result<(), MigratableError> {
1243         info!(
1244             "Receiving migration: receiver_url = {}",
1245             receive_data_migration.receiver_url
1246         );
1247 
1248         let path = Self::socket_url_to_path(&receive_data_migration.receiver_url)?;
1249         let listener = UnixListener::bind(&path).map_err(|e| {
1250             MigratableError::MigrateReceive(anyhow!("Error binding to UNIX socket: {}", e))
1251         })?;
1252         let (mut socket, _addr) = listener.accept().map_err(|e| {
1253             MigratableError::MigrateReceive(anyhow!("Error accepting on UNIX socket: {}", e))
1254         })?;
1255         std::fs::remove_file(&path).map_err(|e| {
1256             MigratableError::MigrateReceive(anyhow!("Error unlinking UNIX socket: {}", e))
1257         })?;
1258 
1259         let mut started = false;
1260         let mut vm: Option<Vm> = None;
1261         let mut existing_memory_files = None;
1262         loop {
1263             let req = Request::read_from(&mut socket)?;
1264             match req.command() {
1265                 Command::Invalid => info!("Invalid Command Received"),
1266                 Command::Start => {
1267                     info!("Start Command Received");
1268                     started = true;
1269 
1270                     Response::ok().write_to(&mut socket)?;
1271                 }
1272                 Command::Config => {
1273                     info!("Config Command Received");
1274 
1275                     if !started {
1276                         warn!("Migration not started yet");
1277                         Response::error().write_to(&mut socket)?;
1278                         continue;
1279                     }
1280                     vm = Some(self.vm_receive_config(
1281                         &req,
1282                         &mut socket,
1283                         existing_memory_files.take(),
1284                     )?);
1285                 }
1286                 Command::State => {
1287                     info!("State Command Received");
1288 
1289                     if !started {
1290                         warn!("Migration not started yet");
1291                         Response::error().write_to(&mut socket)?;
1292                         continue;
1293                     }
1294                     if let Some(vm) = vm.take() {
1295                         self.vm_receive_state(&req, &mut socket, vm)?;
1296                     } else {
1297                         warn!("Configuration not sent yet");
1298                         Response::error().write_to(&mut socket)?;
1299                     }
1300                 }
1301                 Command::Memory => {
1302                     info!("Memory Command Received");
1303 
1304                     if !started {
1305                         warn!("Migration not started yet");
1306                         Response::error().write_to(&mut socket)?;
1307                         continue;
1308                     }
1309                     if let Some(ref mut vm) = vm.as_mut() {
1310                         self.vm_receive_memory(&req, &mut socket, vm)?;
1311                     } else {
1312                         warn!("Configuration not sent yet");
1313                         Response::error().write_to(&mut socket)?;
1314                     }
1315                 }
1316                 Command::MemoryFd => {
1317                     info!("MemoryFd Command Received");
1318 
1319                     if !started {
1320                         warn!("Migration not started yet");
1321                         Response::error().write_to(&mut socket)?;
1322                         continue;
1323                     }
1324 
1325                     let mut buf = [0u8; 4];
1326                     let (_, file) = socket.recv_with_fd(&mut buf).map_err(|e| {
1327                         MigratableError::MigrateReceive(anyhow!(
1328                             "Error receiving slot from socket: {}",
1329                             e
1330                         ))
1331                     })?;
1332 
1333                     if existing_memory_files.is_none() {
1334                         existing_memory_files = Some(HashMap::default())
1335                     }
1336 
1337                     if let Some(ref mut existing_memory_files) = existing_memory_files {
1338                         let slot = u32::from_le_bytes(buf);
1339                         existing_memory_files.insert(slot, file.unwrap());
1340                     }
1341 
1342                     Response::ok().write_to(&mut socket)?;
1343                 }
1344                 Command::Complete => {
1345                     info!("Complete Command Received");
1346                     if let Some(ref mut vm) = self.vm.as_mut() {
1347                         vm.resume()?;
1348                         Response::ok().write_to(&mut socket)?;
1349                     } else {
1350                         warn!("VM not created yet");
1351                         Response::error().write_to(&mut socket)?;
1352                     }
1353                     break;
1354                 }
1355                 Command::Abandon => {
1356                     info!("Abandon Command Received");
1357                     self.vm = None;
1358                     self.vm_config = None;
1359                     Response::ok().write_to(&mut socket).ok();
1360                     break;
1361                 }
1362             }
1363         }
1364 
1365         Ok(())
1366     }
1367 
1368     // Returns true if there were dirty pages to send
1369     fn vm_maybe_send_dirty_pages<T>(
1370         vm: &mut Vm,
1371         socket: &mut T,
1372     ) -> result::Result<bool, MigratableError>
1373     where
1374         T: Read + Write,
1375     {
1376         // Send (dirty) memory table
1377         let table = vm.dirty_log()?;
1378 
1379         // But if there are no regions go straight to pause
1380         if table.regions().is_empty() {
1381             return Ok(false);
1382         }
1383 
1384         Request::memory(table.length()).write_to(socket).unwrap();
1385         table.write_to(socket)?;
1386         // And then the memory itself
1387         vm.send_memory_regions(&table, socket)?;
1388         let res = Response::read_from(socket)?;
1389         if res.status() != Status::Ok {
1390             warn!("Error during dirty memory migration");
1391             Request::abandon().write_to(socket)?;
1392             Response::read_from(socket).ok();
1393             return Err(MigratableError::MigrateSend(anyhow!(
1394                 "Error during dirty memory migration"
1395             )));
1396         }
1397 
1398         Ok(true)
1399     }
1400 
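    /// Sending side of a live migration: negotiates with the destination over a
    /// UNIX socket, transfers the VM configuration and guest memory (memory fds
    /// for a local migration, otherwise iterative dirty-page passes), then pauses
    /// the VM, sends its state snapshot and completes the migration.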
1401     fn send_migration(
1402         vm: &mut Vm,
1403         #[cfg(all(feature = "kvm", target_arch = "x86_64"))] hypervisor: Arc<
1404             dyn hypervisor::Hypervisor,
1405         >,
1406         send_data_migration: VmSendMigrationData,
1407     ) -> result::Result<(), MigratableError> {
1408         let path = Self::socket_url_to_path(&send_data_migration.destination_url)?;
1409         let mut socket = UnixStream::connect(&path).map_err(|e| {
1410             MigratableError::MigrateSend(anyhow!("Error connecting to UNIX socket: {}", e))
1411         })?;
1412 
1413         // Start the migration
1414         Request::start().write_to(&mut socket)?;
1415         let res = Response::read_from(&mut socket)?;
1416         if res.status() != Status::Ok {
1417             warn!("Error starting migration");
1418             Request::abandon().write_to(&mut socket)?;
1419             Response::read_from(&mut socket).ok();
1420             return Err(MigratableError::MigrateSend(anyhow!(
1421                 "Error starting migration"
1422             )));
1423         }
1424 
1425         // Send config
1426         let vm_config = vm.get_config();
1427         #[cfg(all(feature = "kvm", target_arch = "x86_64"))]
1428         let common_cpuid = {
1429             let phys_bits = vm::physical_bits(vm_config.lock().unwrap().cpus.max_phys_bits);
1430             arch::generate_common_cpuid(
1431                 hypervisor,
1432                 None,
1433                 None,
1434                 phys_bits,
1435                 vm_config.lock().unwrap().cpus.kvm_hyperv,
1436                 #[cfg(feature = "tdx")]
1437                 vm_config.lock().unwrap().is_tdx_enabled(),
1438             )
1439             .map_err(|e| {
1440                 MigratableError::MigrateReceive(anyhow!("Error generating common cpuid': {:?}", e))
1441             })?
1442         };
1443 
1444         if send_data_migration.local {
1445             vm.send_memory_fds(&mut socket)?;
1446         }
1447 
1448         let vm_migration_config = VmMigrationConfig {
1449             vm_config,
1450             #[cfg(all(feature = "kvm", target_arch = "x86_64"))]
1451             common_cpuid,
1452             memory_manager_data: vm.memory_manager_data(),
1453         };
1454         let config_data = serde_json::to_vec(&vm_migration_config).unwrap();
1455         Request::config(config_data.len() as u64).write_to(&mut socket)?;
1456         socket
1457             .write_all(&config_data)
1458             .map_err(MigratableError::MigrateSocket)?;
1459         let res = Response::read_from(&mut socket)?;
1460         if res.status() != Status::Ok {
1461             warn!("Error during config migration");
1462             Request::abandon().write_to(&mut socket)?;
1463             Response::read_from(&mut socket).ok();
1464             return Err(MigratableError::MigrateSend(anyhow!(
1465                 "Error during config migration"
1466             )));
1467         }
1468 
1469         // Let every Migratable object know about the migration being started.
1470         vm.start_migration()?;
1471 
1472         if send_data_migration.local {
1473             // Now pause VM
1474             vm.pause()?;
1475         } else {
1476             // Start logging dirty pages
1477             vm.start_dirty_log()?;
1478 
1479             // Send memory table
1480             let table = vm.memory_range_table()?;
1481             Request::memory(table.length())
1482                 .write_to(&mut socket)?;
1484             table.write_to(&mut socket)?;
1485             // And then the memory itself
1486             vm.send_memory_regions(&table, &mut socket)?;
1487             let res = Response::read_from(&mut socket)?;
1488             if res.status() != Status::Ok {
1489                 warn!("Error during memory migration");
1490                 Request::abandon().write_to(&mut socket)?;
1491                 Response::read_from(&mut socket).ok();
1492                 return Err(MigratableError::MigrateSend(anyhow!(
1493                     "Error during memory migration"
1494                 )));
1495             }
1496 
1497             // Try at most 5 passes of dirty memory sending
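            // Each pass is intended to send only the pages dirtied since the
            // previous one while the guest keeps running; the loop stops early
            // if a pass finds nothing left to send.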
1498             const MAX_DIRTY_MIGRATIONS: usize = 5;
1499             for i in 0..MAX_DIRTY_MIGRATIONS {
1500                 info!("Dirty memory migration {} of {}", i + 1, MAX_DIRTY_MIGRATIONS);
1501                 if !Self::vm_maybe_send_dirty_pages(vm, &mut socket)? {
1502                     break;
1503                 }
1504             }
1505 
1506             // Now pause VM
1507             vm.pause()?;
1508 
1509             // Send last batch of dirty pages
1510             Self::vm_maybe_send_dirty_pages(vm, &mut socket)?;
1511 
1512             // Stop logging dirty pages
1513             vm.stop_dirty_log()?;
1514         }
1515         // Capture snapshot and send it
1516         let vm_snapshot = vm.snapshot()?;
1517         let snapshot_data = serde_json::to_vec(&vm_snapshot).unwrap();
1518         Request::state(snapshot_data.len() as u64).write_to(&mut socket)?;
1519         socket
1520             .write_all(&snapshot_data)
1521             .map_err(MigratableError::MigrateSocket)?;
1522         let res = Response::read_from(&mut socket)?;
1523         if res.status() != Status::Ok {
1524             warn!("Error during state migration");
1525             Request::abandon().write_to(&mut socket)?;
1526             Response::read_from(&mut socket).ok();
1527             return Err(MigratableError::MigrateSend(anyhow!(
1528                 "Error during state migration"
1529             )));
1530         }
1531 
1532         // Complete the migration
1533         Request::complete().write_to(&mut socket)?;
1534         let res = Response::read_from(&mut socket)?;
1535         if res.status() != Status::Ok {
1536             warn!("Error completing migration");
1537             Request::abandon().write_to(&mut socket)?;
1538             Response::read_from(&mut socket).ok();
1539             return Err(MigratableError::MigrateSend(anyhow!(
1540                 "Error completing migration"
1541             )));
1542         }
1543         info!("Migration complete");
1544 
1545         // Let every Migratable object know about the migration being complete
1546         vm.complete_migration()
1547     }
1548 
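    // API entry point for sending a migration. It checks that a local migration
    // is only requested with shared memory, runs send_migration(), undoes dirty
    // logging and resumes the VM if the migration fails, and signals the exit
    // event so this instance shuts down once the migration has succeeded.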
1549     fn vm_send_migration(
1550         &mut self,
1551         send_data_migration: VmSendMigrationData,
1552     ) -> result::Result<(), MigratableError> {
1553         info!(
1554             "Sending migration: destination_url = {}, local = {}",
1555             send_data_migration.destination_url, send_data_migration.local
1556         );
1557 
1558         if !self
1559             .vm_config
1560             .as_ref()
1561             .unwrap()
1562             .lock()
1563             .unwrap()
1564             .memory
1565             .shared
1566             && send_data_migration.local
1567         {
1568             return Err(MigratableError::MigrateSend(anyhow!(
1569                 "Local migration requires shared memory to be enabled"
1570             )));
1571         }
1572 
1573         if let Some(vm) = self.vm.as_mut() {
1574             Self::send_migration(
1575                 vm,
1576                 #[cfg(all(feature = "kvm", target_arch = "x86_64"))]
1577                 self.hypervisor.clone(),
1578                 send_data_migration,
1579             )
1580             .map_err(|migration_err| {
1581                 error!("Migration failed: {:?}", migration_err);
1582 
1583                 // Stop logging dirty pages
1584                 if let Err(e) = vm.stop_dirty_log() {
1585                     return e;
1586                 }
1587 
1588                 if vm.get_state().unwrap() == VmState::Paused {
1589                     if let Err(e) = vm.resume() {
1590                         return e;
1591                     }
1592                 }
1593 
1594                 migration_err
1595             })?;
1596 
1597             // Shut down the VM after the migration succeeded
1598             self.exit_evt.write(1).map_err(|e| {
1599                 MigratableError::MigrateSend(anyhow!(
1600                     "Failed shutting down the VM after migration: {:?}",
1601                     e
1602                 ))
1603             })
1604         } else {
1605             Err(MigratableError::MigrateSend(anyhow!("VM is not running")))
1606         }
1607     }
1608 
1609     #[cfg(all(feature = "kvm", target_arch = "x86_64"))]
1610     fn vm_check_cpuid_compatibility(
1611         &self,
1612         src_vm_config: &Arc<Mutex<VmConfig>>,
1613         src_vm_cpuid: &[hypervisor::arch::x86::CpuIdEntry],
1614     ) -> result::Result<(), MigratableError> {
1615         // We check the `CPUID` compatibility between the source and destination VMs. This is
1616         // mostly about feature compatibility; the "topology/sgx" leaves are not relevant here.
1617         let dest_cpuid = &{
1618             let vm_config = &src_vm_config.lock().unwrap();
1619 
1620             let phys_bits = vm::physical_bits(vm_config.cpus.max_phys_bits);
1621             arch::generate_common_cpuid(
1622                 self.hypervisor.clone(),
1623                 None,
1624                 None,
1625                 phys_bits,
1626                 vm_config.cpus.kvm_hyperv,
1627                 #[cfg(feature = "tdx")]
1628                 vm_config.is_tdx_enabled(),
1629             )
1630             .map_err(|e| {
1631                 MigratableError::MigrateReceive(anyhow!("Error generating common cpuid: {:?}", e))
1632             })?
1633         };
1634         arch::CpuidFeatureEntry::check_cpuid_compatibility(src_vm_cpuid, dest_cpuid).map_err(|e| {
1635             MigratableError::MigrateReceive(anyhow!(
1636                 "Error checking cpu feature compatibility: {:?}",
1637                 e
1638             ))
1639         })
1640     }
1641 
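    // Main VMM event loop: wait on the epoll fd and dispatch exit, reset,
    // virtio device activation, API and (optionally) GDB debug events. The loop
    // only terminates on a VM exit event or a VmmShutdown API request, after
    // which the signal handler is closed and the worker threads are joined.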
1642     fn control_loop(
1643         &mut self,
1644         api_receiver: Arc<Receiver<ApiRequest>>,
1645         #[cfg(feature = "guest_debug")] gdb_receiver: Arc<Receiver<gdb::GdbRequest>>,
1646     ) -> Result<()> {
1647         const EPOLL_EVENTS_LEN: usize = 100;
1648 
1649         let mut events = vec![epoll::Event::new(epoll::Events::empty(), 0); EPOLL_EVENTS_LEN];
1650         let epoll_fd = self.epoll.as_raw_fd();
1651 
1652         'outer: loop {
1653             let num_events = match epoll::wait(epoll_fd, -1, &mut events[..]) {
1654                 Ok(res) => res,
1655                 Err(e) => {
1656                     if e.kind() == io::ErrorKind::Interrupted {
1657                         // The epoll_wait() syscall documentation makes it
1658                         // clear that the wait can be interrupted before any
1659                         // of the requested events occurred or before the
1660                         // timeout expired. In both cases epoll_wait()
1661                         // returns EINTR, which should not be treated as a
1662                         // regular error. The appropriate response is simply
1663                         // to retry the epoll_wait() call.
1664                         continue;
1665                     }
1666                     return Err(Error::Epoll(e));
1667                 }
1668             };
1669 
1670             for event in events.iter().take(num_events) {
1671                 let dispatch_event: EpollDispatch = event.data.into();
1672                 match dispatch_event {
1673                     EpollDispatch::Unknown => {
1674                         let event = event.data;
1675                         warn!("Unknown VMM loop event: {}", event);
1676                     }
1677                     EpollDispatch::Exit => {
1678                         info!("VM exit event");
1679                         // Consume the event.
1680                         self.exit_evt.read().map_err(Error::EventFdRead)?;
1681                         self.vmm_shutdown().map_err(Error::VmmShutdown)?;
1682 
1683                         break 'outer;
1684                     }
1685                     EpollDispatch::Reset => {
1686                         info!("VM reset event");
1687                         // Consume the event.
1688                         self.reset_evt.read().map_err(Error::EventFdRead)?;
1689                         self.vm_reboot().map_err(Error::VmReboot)?;
1690                     }
1691                     EpollDispatch::ActivateVirtioDevices => {
1692                         if let Some(ref vm) = self.vm {
1693                             let count = self.activate_evt.read().map_err(Error::EventFdRead)?;
1694                             info!(
1695                                 "Trying to activate pending virtio devices: count = {}",
1696                                 count
1697                             );
1698                             vm.activate_virtio_devices()
1699                                 .map_err(Error::ActivateVirtioDevices)?;
1700                         }
1701                     }
1702                     EpollDispatch::Api => {
1703                         // Consume the events.
1704                         for _ in 0..self.api_evt.read().map_err(Error::EventFdRead)? {
1705                             // Read from the API receiver channel
1706                             let api_request = api_receiver.recv().map_err(Error::ApiRequestRecv)?;
1707 
1708                             info!("API request event: {:?}", api_request);
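                            // Every ApiRequest variant carries a response
                            // sender: map the handler result into an
                            // ApiResponsePayload (or an ApiError) and send it
                            // back to the API caller.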
1709                             match api_request {
1710                                 ApiRequest::VmCreate(config, sender) => {
1711                                     let response = self
1712                                         .vm_create(config)
1713                                         .map_err(ApiError::VmCreate)
1714                                         .map(|_| ApiResponsePayload::Empty);
1715 
1716                                     sender.send(response).map_err(Error::ApiResponseSend)?;
1717                                 }
1718                                 ApiRequest::VmDelete(sender) => {
1719                                     let response = self
1720                                         .vm_delete()
1721                                         .map_err(ApiError::VmDelete)
1722                                         .map(|_| ApiResponsePayload::Empty);
1723 
1724                                     sender.send(response).map_err(Error::ApiResponseSend)?;
1725                                 }
1726                                 ApiRequest::VmBoot(sender) => {
1727                                     let response = self
1728                                         .vm_boot()
1729                                         .map_err(ApiError::VmBoot)
1730                                         .map(|_| ApiResponsePayload::Empty);
1731 
1732                                     sender.send(response).map_err(Error::ApiResponseSend)?;
1733                                 }
1734                                 ApiRequest::VmShutdown(sender) => {
1735                                     let response = self
1736                                         .vm_shutdown()
1737                                         .map_err(ApiError::VmShutdown)
1738                                         .map(|_| ApiResponsePayload::Empty);
1739 
1740                                     sender.send(response).map_err(Error::ApiResponseSend)?;
1741                                 }
1742                                 ApiRequest::VmReboot(sender) => {
1743                                     let response = self
1744                                         .vm_reboot()
1745                                         .map_err(ApiError::VmReboot)
1746                                         .map(|_| ApiResponsePayload::Empty);
1747 
1748                                     sender.send(response).map_err(Error::ApiResponseSend)?;
1749                                 }
1750                                 ApiRequest::VmInfo(sender) => {
1751                                     let response = self
1752                                         .vm_info()
1753                                         .map_err(ApiError::VmInfo)
1754                                         .map(ApiResponsePayload::VmInfo);
1755 
1756                                     sender.send(response).map_err(Error::ApiResponseSend)?;
1757                                 }
1758                                 ApiRequest::VmmPing(sender) => {
1759                                     let response = ApiResponsePayload::VmmPing(self.vmm_ping());
1760 
1761                                     sender.send(Ok(response)).map_err(Error::ApiResponseSend)?;
1762                                 }
1763                                 ApiRequest::VmPause(sender) => {
1764                                     let response = self
1765                                         .vm_pause()
1766                                         .map_err(ApiError::VmPause)
1767                                         .map(|_| ApiResponsePayload::Empty);
1768 
1769                                     sender.send(response).map_err(Error::ApiResponseSend)?;
1770                                 }
1771                                 ApiRequest::VmResume(sender) => {
1772                                     let response = self
1773                                         .vm_resume()
1774                                         .map_err(ApiError::VmResume)
1775                                         .map(|_| ApiResponsePayload::Empty);
1776 
1777                                     sender.send(response).map_err(Error::ApiResponseSend)?;
1778                                 }
1779                                 ApiRequest::VmSnapshot(snapshot_data, sender) => {
1780                                     let response = self
1781                                         .vm_snapshot(&snapshot_data.destination_url)
1782                                         .map_err(ApiError::VmSnapshot)
1783                                         .map(|_| ApiResponsePayload::Empty);
1784 
1785                                     sender.send(response).map_err(Error::ApiResponseSend)?;
1786                                 }
1787                                 ApiRequest::VmRestore(restore_data, sender) => {
1788                                     let response = self
1789                                         .vm_restore(restore_data.as_ref().clone())
1790                                         .map_err(ApiError::VmRestore)
1791                                         .map(|_| ApiResponsePayload::Empty);
1792 
1793                                     sender.send(response).map_err(Error::ApiResponseSend)?;
1794                                 }
1795                                 #[cfg(feature = "guest_debug")]
1796                                 ApiRequest::VmCoredump(coredump_data, sender) => {
1797                                     let response = self
1798                                         .vm_coredump(&coredump_data.destination_url)
1799                                         .map_err(ApiError::VmCoredump)
1800                                         .map(|_| ApiResponsePayload::Empty);
1801 
1802                                     sender.send(response).map_err(Error::ApiResponseSend)?;
1803                                 }
1804                                 ApiRequest::VmmShutdown(sender) => {
1805                                     let response = self
1806                                         .vmm_shutdown()
1807                                         .map_err(ApiError::VmmShutdown)
1808                                         .map(|_| ApiResponsePayload::Empty);
1809 
1810                                     sender.send(response).map_err(Error::ApiResponseSend)?;
1811 
1812                                     break 'outer;
1813                                 }
1814                                 ApiRequest::VmResize(resize_data, sender) => {
1815                                     let response = self
1816                                         .vm_resize(
1817                                             resize_data.desired_vcpus,
1818                                             resize_data.desired_ram,
1819                                             resize_data.desired_balloon,
1820                                         )
1821                                         .map_err(ApiError::VmResize)
1822                                         .map(|_| ApiResponsePayload::Empty);
1823                                     sender.send(response).map_err(Error::ApiResponseSend)?;
1824                                 }
1825                                 ApiRequest::VmResizeZone(resize_zone_data, sender) => {
1826                                     let response = self
1827                                         .vm_resize_zone(
1828                                             resize_zone_data.id.clone(),
1829                                             resize_zone_data.desired_ram,
1830                                         )
1831                                         .map_err(ApiError::VmResizeZone)
1832                                         .map(|_| ApiResponsePayload::Empty);
1833                                     sender.send(response).map_err(Error::ApiResponseSend)?;
1834                                 }
1835                                 ApiRequest::VmAddDevice(add_device_data, sender) => {
1836                                     let response = self
1837                                         .vm_add_device(add_device_data.as_ref().clone())
1838                                         .map_err(ApiError::VmAddDevice)
1839                                         .map(ApiResponsePayload::VmAction);
1840                                     sender.send(response).map_err(Error::ApiResponseSend)?;
1841                                 }
1842                                 ApiRequest::VmAddUserDevice(add_device_data, sender) => {
1843                                     let response = self
1844                                         .vm_add_user_device(add_device_data.as_ref().clone())
1845                                         .map_err(ApiError::VmAddUserDevice)
1846                                         .map(ApiResponsePayload::VmAction);
1847                                     sender.send(response).map_err(Error::ApiResponseSend)?;
1848                                 }
1849                                 ApiRequest::VmRemoveDevice(remove_device_data, sender) => {
1850                                     let response = self
1851                                         .vm_remove_device(remove_device_data.id.clone())
1852                                         .map_err(ApiError::VmRemoveDevice)
1853                                         .map(|_| ApiResponsePayload::Empty);
1854                                     sender.send(response).map_err(Error::ApiResponseSend)?;
1855                                 }
1856                                 ApiRequest::VmAddDisk(add_disk_data, sender) => {
1857                                     let response = self
1858                                         .vm_add_disk(add_disk_data.as_ref().clone())
1859                                         .map_err(ApiError::VmAddDisk)
1860                                         .map(ApiResponsePayload::VmAction);
1861                                     sender.send(response).map_err(Error::ApiResponseSend)?;
1862                                 }
1863                                 ApiRequest::VmAddFs(add_fs_data, sender) => {
1864                                     let response = self
1865                                         .vm_add_fs(add_fs_data.as_ref().clone())
1866                                         .map_err(ApiError::VmAddFs)
1867                                         .map(ApiResponsePayload::VmAction);
1868                                     sender.send(response).map_err(Error::ApiResponseSend)?;
1869                                 }
1870                                 ApiRequest::VmAddPmem(add_pmem_data, sender) => {
1871                                     let response = self
1872                                         .vm_add_pmem(add_pmem_data.as_ref().clone())
1873                                         .map_err(ApiError::VmAddPmem)
1874                                         .map(ApiResponsePayload::VmAction);
1875                                     sender.send(response).map_err(Error::ApiResponseSend)?;
1876                                 }
1877                                 ApiRequest::VmAddNet(add_net_data, sender) => {
1878                                     let response = self
1879                                         .vm_add_net(add_net_data.as_ref().clone())
1880                                         .map_err(ApiError::VmAddNet)
1881                                         .map(ApiResponsePayload::VmAction);
1882                                     sender.send(response).map_err(Error::ApiResponseSend)?;
1883                                 }
1884                                 ApiRequest::VmAddVdpa(add_vdpa_data, sender) => {
1885                                     let response = self
1886                                         .vm_add_vdpa(add_vdpa_data.as_ref().clone())
1887                                         .map_err(ApiError::VmAddVdpa)
1888                                         .map(ApiResponsePayload::VmAction);
1889                                     sender.send(response).map_err(Error::ApiResponseSend)?;
1890                                 }
1891                                 ApiRequest::VmAddVsock(add_vsock_data, sender) => {
1892                                     let response = self
1893                                         .vm_add_vsock(add_vsock_data.as_ref().clone())
1894                                         .map_err(ApiError::VmAddVsock)
1895                                         .map(ApiResponsePayload::VmAction);
1896                                     sender.send(response).map_err(Error::ApiResponseSend)?;
1897                                 }
1898                                 ApiRequest::VmCounters(sender) => {
1899                                     let response = self
1900                                         .vm_counters()
1901                                         .map_err(ApiError::VmInfo)
1902                                         .map(ApiResponsePayload::VmAction);
1903                                     sender.send(response).map_err(Error::ApiResponseSend)?;
1904                                 }
1905                                 ApiRequest::VmReceiveMigration(receive_migration_data, sender) => {
1906                                     let response = self
1907                                         .vm_receive_migration(
1908                                             receive_migration_data.as_ref().clone(),
1909                                         )
1910                                         .map_err(ApiError::VmReceiveMigration)
1911                                         .map(|_| ApiResponsePayload::Empty);
1912                                     sender.send(response).map_err(Error::ApiResponseSend)?;
1913                                 }
1914                                 ApiRequest::VmSendMigration(send_migration_data, sender) => {
1915                                     let response = self
1916                                         .vm_send_migration(send_migration_data.as_ref().clone())
1917                                         .map_err(ApiError::VmSendMigration)
1918                                         .map(|_| ApiResponsePayload::Empty);
1919                                     sender.send(response).map_err(Error::ApiResponseSend)?;
1920                                 }
1921                                 ApiRequest::VmPowerButton(sender) => {
1922                                     let response = self
1923                                         .vm_power_button()
1924                                         .map_err(ApiError::VmPowerButton)
1925                                         .map(|_| ApiResponsePayload::Empty);
1926 
1927                                     sender.send(response).map_err(Error::ApiResponseSend)?;
1928                                 }
1929                             }
1930                         }
1931                     }
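                    // Forward GDB stub requests to the running VM and return
                    // the result on the per-request response channel.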
1932                     #[cfg(feature = "guest_debug")]
1933                     EpollDispatch::Debug => {
1934                         // Consume the events.
1935                         for _ in 0..self.debug_evt.read().map_err(Error::EventFdRead)? {
1936                             // Read from the GDB receiver channel
1937                             let gdb_request = gdb_receiver.recv().map_err(Error::GdbRequestRecv)?;
1938 
1939                             let response = if let Some(ref mut vm) = self.vm {
1940                                 vm.debug_request(&gdb_request.payload, gdb_request.cpu_id)
1941                             } else {
1942                                 Err(VmError::VmNotRunning)
1943                             }
1944                             .map_err(gdb::Error::Vm);
1945 
1946                             gdb_request
1947                                 .sender
1948                                 .send(response)
1949                                 .map_err(Error::GdbResponseSend)?;
1950                         }
1951                     }
1952                     #[cfg(not(feature = "guest_debug"))]
1953                     EpollDispatch::Debug => {}
1954                 }
1955             }
1956         }
1957 
1958         // Trigger the termination of the signal_handler thread
1959         if let Some(signals) = self.signals.take() {
1960             signals.close();
1961         }
1962 
1963         // Wait for all the threads to finish
1964         for thread in self.threads.drain(..) {
1965             thread.join().map_err(Error::ThreadCleanup)?
1966         }
1967 
1968         Ok(())
1969     }
1970 }
1971 
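// Identifiers of the per-component sections of a VM snapshot (the same IDs are
// presumably looked up again on restore).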
1972 const CPU_MANAGER_SNAPSHOT_ID: &str = "cpu-manager";
1973 const MEMORY_MANAGER_SNAPSHOT_ID: &str = "memory-manager";
1974 const DEVICE_MANAGER_SNAPSHOT_ID: &str = "device-manager";
1975 
1976 #[cfg(test)]
1977 mod unit_tests {
1978     use super::*;
1979     use config::{
1980         CmdlineConfig, ConsoleConfig, ConsoleOutputMode, CpusConfig, HotplugMethod, MemoryConfig,
1981         PayloadConfig, RngConfig, VmConfig,
1982     };
1983 
1984     fn create_dummy_vmm() -> Vmm {
1985         Vmm::new(
1986             "dummy".to_string(),
1987             EventFd::new(EFD_NONBLOCK).unwrap(),
1988             #[cfg(feature = "guest_debug")]
1989             EventFd::new(EFD_NONBLOCK).unwrap(),
1990             #[cfg(feature = "guest_debug")]
1991             EventFd::new(EFD_NONBLOCK).unwrap(),
1992             SeccompAction::Allow,
1993             hypervisor::new().unwrap(),
1994             EventFd::new(EFD_NONBLOCK).unwrap(),
1995         )
1996         .unwrap()
1997     }
1998 
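    // Minimal VmConfig used by the tests below: a single vCPU, 512 MiB of
    // shared memory, a kernel payload and no optional devices.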
1999     fn create_dummy_vm_config() -> Arc<Mutex<VmConfig>> {
2000         Arc::new(Mutex::new(VmConfig {
2001             cpus: CpusConfig {
2002                 boot_vcpus: 1,
2003                 max_vcpus: 1,
2004                 topology: None,
2005                 kvm_hyperv: false,
2006                 max_phys_bits: 46,
2007                 affinity: None,
2008                 features: config::CpuFeatures::default(),
2009             },
2010             memory: MemoryConfig {
2011                 size: 536_870_912,
2012                 mergeable: false,
2013                 hotplug_method: HotplugMethod::Acpi,
2014                 hotplug_size: None,
2015                 hotplugged_size: None,
2016                 shared: true,
2017                 hugepages: false,
2018                 hugepage_size: None,
2019                 prefault: false,
2020                 zones: None,
2021             },
2022             kernel: None,
2023             cmdline: CmdlineConfig {
2024                 args: String::default(),
2025             },
2026             initramfs: None,
2027             payload: Some(PayloadConfig {
2028                 kernel: Some(PathBuf::from("/path/to/kernel")),
2029                 ..Default::default()
2030             }),
2031             disks: None,
2032             net: None,
2033             rng: RngConfig {
2034                 src: PathBuf::from("/dev/urandom"),
2035                 iommu: false,
2036             },
2037             balloon: None,
2038             fs: None,
2039             pmem: None,
2040             serial: ConsoleConfig {
2041                 file: None,
2042                 mode: ConsoleOutputMode::Null,
2043                 iommu: false,
2044             },
2045             console: ConsoleConfig {
2046                 file: None,
2047                 mode: ConsoleOutputMode::Tty,
2048                 iommu: false,
2049             },
2050             devices: None,
2051             user_devices: None,
2052             vdpa: None,
2053             vsock: None,
2054             iommu: false,
2055             #[cfg(target_arch = "x86_64")]
2056             sgx_epc: None,
2057             numa: None,
2058             watchdog: false,
2059             #[cfg(feature = "guest_debug")]
2060             gdb: false,
2061             platform: None,
2062         }))
2063     }
2064 
2065     #[test]
2066     fn test_vmm_vm_create() {
2067         let mut vmm = create_dummy_vmm();
2068         let config = create_dummy_vm_config();
2069 
2070         assert!(matches!(vmm.vm_create(config.clone()), Ok(())));
2071         assert!(matches!(
2072             vmm.vm_create(config),
2073             Err(VmError::VmAlreadyCreated)
2074         ));
2075     }
2076 
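    // The cold-plug tests below all follow the same pattern: adding a device
    // before vm_create() fails with VmNotCreated, while adding it afterwards
    // succeeds and the stored VmConfig then contains the added entry.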
2077     #[test]
2078     fn test_vmm_vm_cold_add_device() {
2079         let mut vmm = create_dummy_vmm();
2080         let device_config = DeviceConfig::parse("path=/path/to/device").unwrap();
2081 
2082         assert!(matches!(
2083             vmm.vm_add_device(device_config.clone()),
2084             Err(VmError::VmNotCreated)
2085         ));
2086 
2087         let _ = vmm.vm_create(create_dummy_vm_config());
2088         assert!(vmm
2089             .vm_config
2090             .as_ref()
2091             .unwrap()
2092             .lock()
2093             .unwrap()
2094             .devices
2095             .is_none());
2096 
2097         let result = vmm.vm_add_device(device_config.clone());
2098         assert!(result.is_ok());
2099         assert!(result.unwrap().is_none());
2100         assert_eq!(
2101             vmm.vm_config
2102                 .as_ref()
2103                 .unwrap()
2104                 .lock()
2105                 .unwrap()
2106                 .devices
2107                 .clone()
2108                 .unwrap()
2109                 .len(),
2110             1
2111         );
2112         assert_eq!(
2113             vmm.vm_config
2114                 .as_ref()
2115                 .unwrap()
2116                 .lock()
2117                 .unwrap()
2118                 .devices
2119                 .clone()
2120                 .unwrap()[0],
2121             device_config
2122         );
2123     }
2124 
2125     #[test]
2126     fn test_vmm_vm_cold_add_user_device() {
2127         let mut vmm = create_dummy_vmm();
2128         let user_device_config =
2129             UserDeviceConfig::parse("socket=/path/to/socket,id=8,pci_segment=2").unwrap();
2130 
2131         assert!(matches!(
2132             vmm.vm_add_user_device(user_device_config.clone()),
2133             Err(VmError::VmNotCreated)
2134         ));
2135 
2136         let _ = vmm.vm_create(create_dummy_vm_config());
2137         assert!(vmm
2138             .vm_config
2139             .as_ref()
2140             .unwrap()
2141             .lock()
2142             .unwrap()
2143             .user_devices
2144             .is_none());
2145 
2146         let result = vmm.vm_add_user_device(user_device_config.clone());
2147         assert!(result.is_ok());
2148         assert!(result.unwrap().is_none());
2149         assert_eq!(
2150             vmm.vm_config
2151                 .as_ref()
2152                 .unwrap()
2153                 .lock()
2154                 .unwrap()
2155                 .user_devices
2156                 .clone()
2157                 .unwrap()
2158                 .len(),
2159             1
2160         );
2161         assert_eq!(
2162             vmm.vm_config
2163                 .as_ref()
2164                 .unwrap()
2165                 .lock()
2166                 .unwrap()
2167                 .user_devices
2168                 .clone()
2169                 .unwrap()[0],
2170             user_device_config
2171         );
2172     }
2173 
2174     #[test]
2175     fn test_vmm_vm_cold_add_disk() {
2176         let mut vmm = create_dummy_vmm();
2177         let disk_config = DiskConfig::parse("path=/path/to_file").unwrap();
2178 
2179         assert!(matches!(
2180             vmm.vm_add_disk(disk_config.clone()),
2181             Err(VmError::VmNotCreated)
2182         ));
2183 
2184         let _ = vmm.vm_create(create_dummy_vm_config());
2185         assert!(vmm
2186             .vm_config
2187             .as_ref()
2188             .unwrap()
2189             .lock()
2190             .unwrap()
2191             .disks
2192             .is_none());
2193 
2194         let result = vmm.vm_add_disk(disk_config.clone());
2195         assert!(result.is_ok());
2196         assert!(result.unwrap().is_none());
2197         assert_eq!(
2198             vmm.vm_config
2199                 .as_ref()
2200                 .unwrap()
2201                 .lock()
2202                 .unwrap()
2203                 .disks
2204                 .clone()
2205                 .unwrap()
2206                 .len(),
2207             1
2208         );
2209         assert_eq!(
2210             vmm.vm_config
2211                 .as_ref()
2212                 .unwrap()
2213                 .lock()
2214                 .unwrap()
2215                 .disks
2216                 .clone()
2217                 .unwrap()[0],
2218             disk_config
2219         );
2220     }
2221 
2222     #[test]
2223     fn test_vmm_vm_cold_add_fs() {
2224         let mut vmm = create_dummy_vmm();
2225         let fs_config = FsConfig::parse("tag=mytag,socket=/tmp/sock").unwrap();
2226 
2227         assert!(matches!(
2228             vmm.vm_add_fs(fs_config.clone()),
2229             Err(VmError::VmNotCreated)
2230         ));
2231 
2232         let _ = vmm.vm_create(create_dummy_vm_config());
2233         assert!(vmm.vm_config.as_ref().unwrap().lock().unwrap().fs.is_none());
2234 
2235         let result = vmm.vm_add_fs(fs_config.clone());
2236         assert!(result.is_ok());
2237         assert!(result.unwrap().is_none());
2238         assert_eq!(
2239             vmm.vm_config
2240                 .as_ref()
2241                 .unwrap()
2242                 .lock()
2243                 .unwrap()
2244                 .fs
2245                 .clone()
2246                 .unwrap()
2247                 .len(),
2248             1
2249         );
2250         assert_eq!(
2251             vmm.vm_config
2252                 .as_ref()
2253                 .unwrap()
2254                 .lock()
2255                 .unwrap()
2256                 .fs
2257                 .clone()
2258                 .unwrap()[0],
2259             fs_config
2260         );
2261     }
2262 
2263     #[test]
2264     fn test_vmm_vm_cold_add_pmem() {
2265         let mut vmm = create_dummy_vmm();
2266         let pmem_config = PmemConfig::parse("file=/tmp/pmem,size=128M").unwrap();
2267 
2268         assert!(matches!(
2269             vmm.vm_add_pmem(pmem_config.clone()),
2270             Err(VmError::VmNotCreated)
2271         ));
2272 
2273         let _ = vmm.vm_create(create_dummy_vm_config());
2274         assert!(vmm
2275             .vm_config
2276             .as_ref()
2277             .unwrap()
2278             .lock()
2279             .unwrap()
2280             .pmem
2281             .is_none());
2282 
2283         let result = vmm.vm_add_pmem(pmem_config.clone());
2284         assert!(result.is_ok());
2285         assert!(result.unwrap().is_none());
2286         assert_eq!(
2287             vmm.vm_config
2288                 .as_ref()
2289                 .unwrap()
2290                 .lock()
2291                 .unwrap()
2292                 .pmem
2293                 .clone()
2294                 .unwrap()
2295                 .len(),
2296             1
2297         );
2298         assert_eq!(
2299             vmm.vm_config
2300                 .as_ref()
2301                 .unwrap()
2302                 .lock()
2303                 .unwrap()
2304                 .pmem
2305                 .clone()
2306                 .unwrap()[0],
2307             pmem_config
2308         );
2309     }
2310 
2311     #[test]
2312     fn test_vmm_vm_cold_add_net() {
2313         let mut vmm = create_dummy_vmm();
2314         let net_config = NetConfig::parse(
2315             "mac=de:ad:be:ef:12:34,host_mac=12:34:de:ad:be:ef,vhost_user=true,socket=/tmp/sock",
2316         )
2317         .unwrap();
2318 
2319         assert!(matches!(
2320             vmm.vm_add_net(net_config.clone()),
2321             Err(VmError::VmNotCreated)
2322         ));
2323 
2324         let _ = vmm.vm_create(create_dummy_vm_config());
2325         assert!(vmm
2326             .vm_config
2327             .as_ref()
2328             .unwrap()
2329             .lock()
2330             .unwrap()
2331             .net
2332             .is_none());
2333 
2334         let result = vmm.vm_add_net(net_config.clone());
2335         assert!(result.is_ok());
2336         assert!(result.unwrap().is_none());
2337         assert_eq!(
2338             vmm.vm_config
2339                 .as_ref()
2340                 .unwrap()
2341                 .lock()
2342                 .unwrap()
2343                 .net
2344                 .clone()
2345                 .unwrap()
2346                 .len(),
2347             1
2348         );
2349         assert_eq!(
2350             vmm.vm_config
2351                 .as_ref()
2352                 .unwrap()
2353                 .lock()
2354                 .unwrap()
2355                 .net
2356                 .clone()
2357                 .unwrap()[0],
2358             net_config
2359         );
2360     }
2361 
2362     #[test]
2363     fn test_vmm_vm_cold_add_vdpa() {
2364         let mut vmm = create_dummy_vmm();
2365         let vdpa_config = VdpaConfig::parse("path=/dev/vhost-vdpa,num_queues=2").unwrap();
2366 
2367         assert!(matches!(
2368             vmm.vm_add_vdpa(vdpa_config.clone()),
2369             Err(VmError::VmNotCreated)
2370         ));
2371 
2372         let _ = vmm.vm_create(create_dummy_vm_config());
2373         assert!(vmm
2374             .vm_config
2375             .as_ref()
2376             .unwrap()
2377             .lock()
2378             .unwrap()
2379             .vdpa
2380             .is_none());
2381 
2382         let result = vmm.vm_add_vdpa(vdpa_config.clone());
2383         assert!(result.is_ok());
2384         assert!(result.unwrap().is_none());
2385         assert_eq!(
2386             vmm.vm_config
2387                 .as_ref()
2388                 .unwrap()
2389                 .lock()
2390                 .unwrap()
2391                 .vdpa
2392                 .clone()
2393                 .unwrap()
2394                 .len(),
2395             1
2396         );
2397         assert_eq!(
2398             vmm.vm_config
2399                 .as_ref()
2400                 .unwrap()
2401                 .lock()
2402                 .unwrap()
2403                 .vdpa
2404                 .clone()
2405                 .unwrap()[0],
2406             vdpa_config
2407         );
2408     }
2409 
2410     #[test]
2411     fn test_vmm_vm_cold_add_vsock() {
2412         let mut vmm = create_dummy_vmm();
2413         let vsock_config = VsockConfig::parse("socket=/tmp/sock,cid=1,iommu=on").unwrap();
2414 
2415         assert!(matches!(
2416             vmm.vm_add_vsock(vsock_config.clone()),
2417             Err(VmError::VmNotCreated)
2418         ));
2419 
2420         let _ = vmm.vm_create(create_dummy_vm_config());
2421         assert!(vmm
2422             .vm_config
2423             .as_ref()
2424             .unwrap()
2425             .lock()
2426             .unwrap()
2427             .vsock
2428             .is_none());
2429 
2430         let result = vmm.vm_add_vsock(vsock_config.clone());
2431         assert!(result.is_ok());
2432         assert!(result.unwrap().is_none());
2433         assert_eq!(
2434             vmm.vm_config
2435                 .as_ref()
2436                 .unwrap()
2437                 .lock()
2438                 .unwrap()
2439                 .vsock
2440                 .clone()
2441                 .unwrap(),
2442             vsock_config
2443         );
2444     }
2445 }
2446