xref: /cloud-hypervisor/vmm/src/lib.rs (revision 7d7bfb2034001d4cb15df2ddc56d2d350c8da30f)
1 // Copyright © 2019 Intel Corporation
2 //
3 // SPDX-License-Identifier: Apache-2.0
4 //
5 
6 #[macro_use]
7 extern crate event_monitor;
8 #[macro_use]
9 extern crate lazy_static;
10 #[macro_use]
11 extern crate log;
12 #[macro_use]
13 extern crate serde_derive;
14 
15 use crate::api::{
16     ApiError, ApiRequest, ApiResponse, ApiResponsePayload, VmInfo, VmReceiveMigrationData,
17     VmSendMigrationData, VmmPingResponse,
18 };
19 use crate::config::{
20     add_to_config, DeviceConfig, DiskConfig, FsConfig, NetConfig, PmemConfig, RestoreConfig,
21     UserDeviceConfig, VdpaConfig, VmConfig, VsockConfig,
22 };
23 #[cfg(all(feature = "kvm", target_arch = "x86_64"))]
24 use crate::migration::get_vm_snapshot;
25 use crate::migration::{recv_vm_config, recv_vm_state};
26 use crate::seccomp_filters::{get_seccomp_filter, Thread};
27 use crate::vm::{Error as VmError, Vm, VmState};
28 use anyhow::anyhow;
29 use libc::EFD_NONBLOCK;
30 use memory_manager::MemoryManagerSnapshotData;
31 use pci::PciBdf;
32 use seccompiler::{apply_filter, SeccompAction};
33 use serde::ser::{Serialize, SerializeStruct, Serializer};
34 use std::collections::HashMap;
35 use std::fs::File;
36 use std::io;
37 use std::io::{Read, Write};
38 use std::os::unix::io::{AsRawFd, FromRawFd, RawFd};
39 use std::os::unix::net::UnixListener;
40 use std::os::unix::net::UnixStream;
41 use std::path::PathBuf;
42 use std::sync::mpsc::{Receiver, RecvError, SendError, Sender};
43 use std::sync::{Arc, Mutex};
44 use std::{result, thread};
45 use thiserror::Error;
46 use vm_memory::bitmap::AtomicBitmap;
47 use vm_migration::{protocol::*, Migratable};
48 use vm_migration::{MigratableError, Pausable, Snapshot, Snapshottable, Transportable};
49 use vmm_sys_util::eventfd::EventFd;
50 use vmm_sys_util::sock_ctrl_msg::ScmSocket;
51 
52 mod acpi;
53 pub mod api;
54 mod clone3;
55 pub mod config;
56 pub mod cpu;
57 pub mod device_manager;
58 pub mod device_tree;
59 #[cfg(feature = "gdb")]
60 mod gdb;
61 pub mod interrupt;
62 pub mod memory_manager;
63 pub mod migration;
64 mod pci_segment;
65 pub mod seccomp_filters;
66 mod serial_buffer;
67 mod serial_manager;
68 mod sigwinch_listener;
69 pub mod vm;
70 
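// The guest memory types below carry an `AtomicBitmap` so that writes going
// through them can be tracked per page (dirty-page logging, used by live
// migration).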
71 type GuestMemoryMmap = vm_memory::GuestMemoryMmap<AtomicBitmap>;
72 type GuestRegionMmap = vm_memory::GuestRegionMmap<AtomicBitmap>;
73 
74 /// Errors associated with VMM management
75 #[derive(Debug, Error)]
76 #[allow(clippy::large_enum_variant)]
77 pub enum Error {
78     /// API request receive error
79     #[error("Error receiving API request: {0}")]
80     ApiRequestRecv(#[source] RecvError),
81 
82     /// API response send error
83     #[error("Error sending API response: {0}")]
84     ApiResponseSend(#[source] SendError<ApiResponse>),
85 
86     /// Cannot bind to the UNIX domain socket path
87     #[error("Error binding to UNIX domain socket: {0}")]
88     Bind(#[source] io::Error),
89 
90     /// Cannot clone EventFd.
91     #[error("Error cloning EventFd: {0}")]
92     EventFdClone(#[source] io::Error),
93 
94     /// Cannot create EventFd.
95     #[error("Error creating EventFd: {0}")]
96     EventFdCreate(#[source] io::Error),
97 
98     /// Cannot read from EventFd.
99     #[error("Error reading from EventFd: {0}")]
100     EventFdRead(#[source] io::Error),
101 
102     /// Cannot create epoll context.
103     #[error("Error creating epoll context: {0}")]
104     Epoll(#[source] io::Error),
105 
106     /// Cannot create HTTP thread
107     #[error("Error spawning HTTP thread: {0}")]
108     HttpThreadSpawn(#[source] io::Error),
109 
110     /// Cannot handle the VM STDIN stream
111     #[error("Error handling VM stdin: {0:?}")]
112     Stdin(VmError),
113 
114     /// Cannot handle the VM pty stream
115     #[error("Error handling VM pty: {0:?}")]
116     Pty(VmError),
117 
118     /// Cannot reboot the VM
119     #[error("Error rebooting VM: {0:?}")]
120     VmReboot(VmError),
121 
122     /// Cannot create VMM thread
123     #[error("Error spawning VMM thread: {0:?}")]
124     VmmThreadSpawn(#[source] io::Error),
125 
126     /// Cannot shut the VMM down
127     #[error("Error shutting down VMM: {0:?}")]
128     VmmShutdown(VmError),
129 
130     /// Cannot create seccomp filter
131     #[error("Error creating seccomp filter: {0}")]
132     CreateSeccompFilter(seccompiler::Error),
133 
134     /// Cannot apply seccomp filter
135     #[error("Error applying seccomp filter: {0}")]
136     ApplySeccompFilter(seccompiler::Error),
137 
138     /// Error activating virtio devices
139     #[error("Error activating virtio devices: {0:?}")]
140     ActivateVirtioDevices(VmError),
141 
142     /// Error creating API server
143     #[error("Error creating API server: {0:?}")]
144     CreateApiServer(micro_http::ServerError),
145 
146     /// Error binding API server socket
147     #[error("Error creating API server socket: {0:?}")]
148     CreateApiServerSocket(#[source] io::Error),
149 
150     #[cfg(feature = "gdb")]
151     #[error("Failed to start the GDB thread: {0}")]
152     GdbThreadSpawn(io::Error),
153 
154     /// GDB request receive error
155     #[cfg(feature = "gdb")]
156     #[error("Error receiving GDB request: {0}")]
157     GdbRequestRecv(#[source] RecvError),
158 
159     /// GDB response send error
160     #[cfg(feature = "gdb")]
161     #[error("Error sending GDB response: {0}")]
162     GdbResponseSend(#[source] SendError<gdb::GdbResponse>),
163 }
164 pub type Result<T> = result::Result<T, Error>;
165 
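/// Events the VMM control loop can be woken up for. The discriminant is used
/// as the `u64` token registered with epoll and is converted back through the
/// `From<u64>` implementation below.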
166 #[derive(Debug, Clone, Copy, PartialEq)]
167 #[repr(u64)]
168 pub enum EpollDispatch {
169     Exit = 0,
170     Reset = 1,
171     Api = 2,
172     ActivateVirtioDevices = 3,
173     Debug = 4,
174     Unknown,
175 }
176 
177 impl From<u64> for EpollDispatch {
178     fn from(v: u64) -> Self {
179         use EpollDispatch::*;
180         match v {
181             0 => Exit,
182             1 => Reset,
183             2 => Api,
184             3 => ActivateVirtioDevices,
185             4 => Debug,
186             _ => Unknown,
187         }
188     }
189 }
190 
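/// Wrapper around the epoll file descriptor driving the VMM control loop.
/// Holding it as a `File` guarantees the fd is closed when the context is
/// dropped.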
191 pub struct EpollContext {
192     epoll_file: File,
193 }
194 
195 impl EpollContext {
196     pub fn new() -> result::Result<EpollContext, io::Error> {
197         let epoll_fd = epoll::create(true)?;
198         // Use 'File' to enforce closing on 'epoll_fd'
199         // SAFETY: the epoll_fd returned by epoll::create is valid and owned by us.
200         let epoll_file = unsafe { File::from_raw_fd(epoll_fd) };
201 
202         Ok(EpollContext { epoll_file })
203     }
204 
205     fn add_event<T>(&mut self, fd: &T, token: EpollDispatch) -> result::Result<(), io::Error>
206     where
207         T: AsRawFd,
208     {
209         let dispatch_index = token as u64;
210         epoll::ctl(
211             self.epoll_file.as_raw_fd(),
212             epoll::ControlOptions::EPOLL_CTL_ADD,
213             fd.as_raw_fd(),
214             epoll::Event::new(epoll::Events::EPOLLIN, dispatch_index),
215         )?;
216 
217         Ok(())
218     }
219 }
220 
221 impl AsRawFd for EpollContext {
222     fn as_raw_fd(&self) -> RawFd {
223         self.epoll_file.as_raw_fd()
224     }
225 }
226 
227 pub struct PciDeviceInfo {
228     pub id: String,
229     pub bdf: PciBdf,
230 }
231 
232 impl Serialize for PciDeviceInfo {
233     fn serialize<S>(&self, serializer: S) -> std::result::Result<S::Ok, S::Error>
234     where
235         S: Serializer,
236     {
237         let bdf_str = self.bdf.to_string();
238 
239         // Serialize the structure.
240         let mut state = serializer.serialize_struct("PciDeviceInfo", 2)?;
241         state.serialize_field("id", &self.id)?;
242         state.serialize_field("bdf", &bdf_str)?;
243         state.end()
244     }
245 }
246 
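/// Spawns the "vmm" control thread, then, if requested, the HTTP API thread
/// (from either a socket path or an already opened fd) and, with the "gdb"
/// feature, the GDB stub thread. Returns the join handle of the VMM thread.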
247 #[allow(unused_variables)]
248 #[allow(clippy::too_many_arguments)]
249 pub fn start_vmm_thread(
250     vmm_version: String,
251     http_path: &Option<String>,
252     http_fd: Option<RawFd>,
253     api_event: EventFd,
254     api_sender: Sender<ApiRequest>,
255     api_receiver: Receiver<ApiRequest>,
256     #[cfg(feature = "gdb")] debug_path: Option<PathBuf>,
257     #[cfg(feature = "gdb")] debug_event: EventFd,
258     #[cfg(feature = "gdb")] vm_debug_event: EventFd,
259     seccomp_action: &SeccompAction,
260     hypervisor: Arc<dyn hypervisor::Hypervisor>,
261 ) -> Result<thread::JoinHandle<Result<()>>> {
262     #[cfg(feature = "gdb")]
263     let (gdb_sender, gdb_receiver) = std::sync::mpsc::channel();
264     #[cfg(feature = "gdb")]
265     let gdb_debug_event = debug_event.try_clone().map_err(Error::EventFdClone)?;
266     #[cfg(feature = "gdb")]
267     let gdb_vm_debug_event = vm_debug_event.try_clone().map_err(Error::EventFdClone)?;
268 
269     let http_api_event = api_event.try_clone().map_err(Error::EventFdClone)?;
270 
271     // Retrieve seccomp filter
272     let vmm_seccomp_filter =
273         get_seccomp_filter(seccomp_action, Thread::Vmm).map_err(Error::CreateSeccompFilter)?;
274 
275     let vmm_seccomp_action = seccomp_action.clone();
276     let exit_evt = EventFd::new(EFD_NONBLOCK).map_err(Error::EventFdCreate)?;
277     let thread = {
278         let exit_evt = exit_evt.try_clone().map_err(Error::EventFdClone)?;
279         thread::Builder::new()
280             .name("vmm".to_string())
281             .spawn(move || {
282                 // Apply seccomp filter for VMM thread.
283                 if !vmm_seccomp_filter.is_empty() {
284                     apply_filter(&vmm_seccomp_filter).map_err(Error::ApplySeccompFilter)?;
285                 }
286 
287                 let mut vmm = Vmm::new(
288                     vmm_version.to_string(),
289                     api_event,
290                     #[cfg(feature = "gdb")]
291                     debug_event,
292                     #[cfg(feature = "gdb")]
293                     vm_debug_event,
294                     vmm_seccomp_action,
295                     hypervisor,
296                     exit_evt,
297                 )?;
298 
299                 vmm.control_loop(
300                     Arc::new(api_receiver),
301                     #[cfg(feature = "gdb")]
302                     Arc::new(gdb_receiver),
303                 )
304             })
305             .map_err(Error::VmmThreadSpawn)?
306     };
307 
308     // The VMM thread is started, we can start serving HTTP requests
309     if let Some(http_path) = http_path {
310         api::start_http_path_thread(
311             http_path,
312             http_api_event,
313             api_sender,
314             seccomp_action,
315             exit_evt,
316         )?;
317     } else if let Some(http_fd) = http_fd {
318         api::start_http_fd_thread(
319             http_fd,
320             http_api_event,
321             api_sender,
322             seccomp_action,
323             exit_evt,
324         )?;
325     }
326 
327     #[cfg(feature = "gdb")]
328     if let Some(debug_path) = debug_path {
329         let target = gdb::GdbStub::new(gdb_sender, gdb_debug_event, gdb_vm_debug_event);
330         thread::Builder::new()
331             .name("gdb".to_owned())
332             .spawn(move || gdb::gdb_thread(target, &debug_path))
333             .map_err(Error::GdbThreadSpawn)?;
334     }
335 
336     Ok(thread)
337 }
338 
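// Configuration exchanged at the beginning of a live migration: the VM config,
// the source CPUID (KVM on x86_64 only) and the memory manager layout needed
// to recreate guest memory on the destination.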
339 #[derive(Clone, Deserialize, Serialize)]
340 struct VmMigrationConfig {
341     vm_config: Arc<Mutex<VmConfig>>,
342     #[cfg(all(feature = "kvm", target_arch = "x86_64"))]
343     common_cpuid: hypervisor::CpuId,
344     memory_manager_data: MemoryManagerSnapshotData,
345 }
346 
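/// Central VMM state: the epoll context and control event fds, the optional
/// running `Vm` and its configuration, plus the seccomp action and hypervisor
/// handle shared with every VM that gets created.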
347 pub struct Vmm {
348     epoll: EpollContext,
349     exit_evt: EventFd,
350     reset_evt: EventFd,
351     api_evt: EventFd,
352     #[cfg(feature = "gdb")]
353     debug_evt: EventFd,
354     #[cfg(feature = "gdb")]
355     vm_debug_evt: EventFd,
356     version: String,
357     vm: Option<Vm>,
358     vm_config: Option<Arc<Mutex<VmConfig>>>,
359     seccomp_action: SeccompAction,
360     hypervisor: Arc<dyn hypervisor::Hypervisor>,
361     activate_evt: EventFd,
362 }
363 
364 impl Vmm {
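    // Creates the VMM and registers the exit, reset, activate-virtio-devices,
    // API and (with the "gdb" feature) debug event fds with its epoll context.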
365     fn new(
366         vmm_version: String,
367         api_evt: EventFd,
368         #[cfg(feature = "gdb")] debug_evt: EventFd,
369         #[cfg(feature = "gdb")] vm_debug_evt: EventFd,
370         seccomp_action: SeccompAction,
371         hypervisor: Arc<dyn hypervisor::Hypervisor>,
372         exit_evt: EventFd,
373     ) -> Result<Self> {
374         let mut epoll = EpollContext::new().map_err(Error::Epoll)?;
375         let reset_evt = EventFd::new(EFD_NONBLOCK).map_err(Error::EventFdCreate)?;
376         let activate_evt = EventFd::new(EFD_NONBLOCK).map_err(Error::EventFdCreate)?;
377 
378         epoll
379             .add_event(&exit_evt, EpollDispatch::Exit)
380             .map_err(Error::Epoll)?;
381 
382         epoll
383             .add_event(&reset_evt, EpollDispatch::Reset)
384             .map_err(Error::Epoll)?;
385 
386         epoll
387             .add_event(&activate_evt, EpollDispatch::ActivateVirtioDevices)
388             .map_err(Error::Epoll)?;
389 
390         epoll
391             .add_event(&api_evt, EpollDispatch::Api)
392             .map_err(Error::Epoll)?;
393 
394         #[cfg(feature = "gdb")]
395         epoll
396             .add_event(&debug_evt, EpollDispatch::Debug)
397             .map_err(Error::Epoll)?;
398 
399         Ok(Vmm {
400             epoll,
401             exit_evt,
402             reset_evt,
403             api_evt,
404             #[cfg(feature = "gdb")]
405             debug_evt,
406             #[cfg(feature = "gdb")]
407             vm_debug_evt,
408             version: vmm_version,
409             vm: None,
410             vm_config: None,
411             seccomp_action,
412             hypervisor,
413             activate_evt,
414         })
415     }
416 
417     fn vm_create(&mut self, config: Arc<Mutex<VmConfig>>) -> result::Result<(), VmError> {
418         // We only store the passed VM config.
419         // The VM itself will be created when we are asked to boot it.
420         if self.vm_config.is_none() {
421             self.vm_config = Some(config);
422             Ok(())
423         } else {
424             Err(VmError::VmAlreadyCreated)
425         }
426     }
427 
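    // Boots the VM, creating it first from the stored config if it has not
    // been created yet.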
428     fn vm_boot(&mut self) -> result::Result<(), VmError> {
429         // If we don't have a config, we cannot boot a VM.
430         if self.vm_config.is_none() {
431             return Err(VmError::VmMissingConfig);
432         };
433 
434         // Create a new VM if we don't have one yet.
435         if self.vm.is_none() {
436             let exit_evt = self.exit_evt.try_clone().map_err(VmError::EventFdClone)?;
437             let reset_evt = self.reset_evt.try_clone().map_err(VmError::EventFdClone)?;
438             #[cfg(feature = "gdb")]
439             let vm_debug_evt = self
440                 .vm_debug_evt
441                 .try_clone()
442                 .map_err(VmError::EventFdClone)?;
443             let activate_evt = self
444                 .activate_evt
445                 .try_clone()
446                 .map_err(VmError::EventFdClone)?;
447 
448             if let Some(ref vm_config) = self.vm_config {
449                 let vm = Vm::new(
450                     Arc::clone(vm_config),
451                     exit_evt,
452                     reset_evt,
453                     #[cfg(feature = "gdb")]
454                     vm_debug_evt,
455                     &self.seccomp_action,
456                     self.hypervisor.clone(),
457                     activate_evt,
458                     None,
459                     None,
460                     None,
461                 )?;
462 
463                 self.vm = Some(vm);
464             }
465         }
466 
467         // Now we can boot the VM.
468         if let Some(ref mut vm) = self.vm {
469             vm.boot()
470         } else {
471             Err(VmError::VmNotCreated)
472         }
473     }
474 
475     fn vm_pause(&mut self) -> result::Result<(), VmError> {
476         if let Some(ref mut vm) = self.vm {
477             vm.pause().map_err(VmError::Pause)
478         } else {
479             Err(VmError::VmNotRunning)
480         }
481     }
482 
483     fn vm_resume(&mut self) -> result::Result<(), VmError> {
484         if let Some(ref mut vm) = self.vm {
485             vm.resume().map_err(VmError::Resume)
486         } else {
487             Err(VmError::VmNotRunning)
488         }
489     }
490 
491     fn vm_snapshot(&mut self, destination_url: &str) -> result::Result<(), VmError> {
492         if let Some(ref mut vm) = self.vm {
493             vm.snapshot()
494                 .map_err(VmError::Snapshot)
495                 .and_then(|snapshot| {
496                     vm.send(&snapshot, destination_url)
497                         .map_err(VmError::SnapshotSend)
498                 })
499         } else {
500             Err(VmError::VmNotRunning)
501         }
502     }
503 
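    // Restores a VM from the snapshot referenced by `restore_cfg.source_url`.
    // Fails if a VM or a VM config already exists.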
504     fn vm_restore(&mut self, restore_cfg: RestoreConfig) -> result::Result<(), VmError> {
505         if self.vm.is_some() || self.vm_config.is_some() {
506             return Err(VmError::VmAlreadyCreated);
507         }
508 
509         let source_url = restore_cfg.source_url.as_path().to_str();
510         if source_url.is_none() {
511             return Err(VmError::RestoreSourceUrlPathToStr);
512         }
513         // Safe to unwrap as we checked it was Some(&str).
514         let source_url = source_url.unwrap();
515 
516         let vm_config = Arc::new(Mutex::new(
517             recv_vm_config(source_url).map_err(VmError::Restore)?,
518         ));
519         let snapshot = recv_vm_state(source_url).map_err(VmError::Restore)?;
520         #[cfg(all(feature = "kvm", target_arch = "x86_64"))]
521         let vm_snapshot = get_vm_snapshot(&snapshot).map_err(VmError::Restore)?;
522 
523         #[cfg(all(feature = "kvm", target_arch = "x86_64"))]
524         self.vm_check_cpuid_compatibility(&vm_config, &vm_snapshot.common_cpuid)
525             .map_err(VmError::Restore)?;
526 
527         self.vm_config = Some(Arc::clone(&vm_config));
528 
529         let exit_evt = self.exit_evt.try_clone().map_err(VmError::EventFdClone)?;
530         let reset_evt = self.reset_evt.try_clone().map_err(VmError::EventFdClone)?;
531         #[cfg(feature = "gdb")]
532         let debug_evt = self
533             .vm_debug_evt
534             .try_clone()
535             .map_err(VmError::EventFdClone)?;
536         let activate_evt = self
537             .activate_evt
538             .try_clone()
539             .map_err(VmError::EventFdClone)?;
540 
541         let vm = Vm::new_from_snapshot(
542             &snapshot,
543             vm_config,
544             exit_evt,
545             reset_evt,
546             #[cfg(feature = "gdb")]
547             debug_evt,
548             Some(source_url),
549             restore_cfg.prefault,
550             &self.seccomp_action,
551             self.hypervisor.clone(),
552             activate_evt,
553         )?;
554         self.vm = Some(vm);
555 
556         // Now we can restore the rest of the VM.
557         if let Some(ref mut vm) = self.vm {
558             vm.restore(snapshot).map_err(VmError::Restore)
559         } else {
560             Err(VmError::VmNotCreated)
561         }
562     }
563 
564     fn vm_shutdown(&mut self) -> result::Result<(), VmError> {
565         if let Some(ref mut vm) = self.vm.take() {
566             vm.shutdown()
567         } else {
568             Err(VmError::VmNotRunning)
569         }
570     }
571 
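    // Reboots by shutting down the current VM and creating a new one from the
    // same config, reusing the existing serial/console ptys and resize pipe so
    // an attached terminal survives the reboot.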
572     fn vm_reboot(&mut self) -> result::Result<(), VmError> {
573         // First we stop the current VM
574         let (config, serial_pty, console_pty, console_resize_pipe) =
575             if let Some(mut vm) = self.vm.take() {
576                 let config = vm.get_config();
577                 let serial_pty = vm.serial_pty();
578                 let console_pty = vm.console_pty();
579                 let console_resize_pipe = vm
580                     .console_resize_pipe()
581                     .as_ref()
582                     .map(|pipe| pipe.try_clone().unwrap());
583                 vm.shutdown()?;
584                 (config, serial_pty, console_pty, console_resize_pipe)
585             } else {
586                 return Err(VmError::VmNotCreated);
587             };
588 
589         let exit_evt = self.exit_evt.try_clone().map_err(VmError::EventFdClone)?;
590         let reset_evt = self.reset_evt.try_clone().map_err(VmError::EventFdClone)?;
591         #[cfg(feature = "gdb")]
592         let debug_evt = self
593             .vm_debug_evt
594             .try_clone()
595             .map_err(VmError::EventFdClone)?;
596         let activate_evt = self
597             .activate_evt
598             .try_clone()
599             .map_err(VmError::EventFdClone)?;
600 
601         // The Linux kernel fires off an i8042 reset after doing the ACPI reset so there may be
602         // an event sitting in the shared reset_evt. Without draining it here we would see
603         // spurious reboots very early in the boot process.
604         if self.reset_evt.read().is_ok() {
605             warn!("Spurious second reset event received. Ignoring.");
606         }
607 
608         // Then we create the new VM
609         let mut vm = Vm::new(
610             config,
611             exit_evt,
612             reset_evt,
613             #[cfg(feature = "gdb")]
614             debug_evt,
615             &self.seccomp_action,
616             self.hypervisor.clone(),
617             activate_evt,
618             serial_pty,
619             console_pty,
620             console_resize_pipe,
621         )?;
622 
623         // And we boot it
624         vm.boot()?;
625 
626         self.vm = Some(vm);
627 
628         Ok(())
629     }
630 
631     fn vm_info(&self) -> result::Result<VmInfo, VmError> {
632         match &self.vm_config {
633             Some(config) => {
634                 let state = match &self.vm {
635                     Some(vm) => vm.get_state()?,
636                     None => VmState::Created,
637                 };
638 
639                 let config = Arc::clone(config);
640 
641                 let mut memory_actual_size = config.lock().unwrap().memory.total_size();
642                 if let Some(vm) = &self.vm {
643                     memory_actual_size -= vm.balloon_size();
644                 }
645 
646                 let device_tree = self.vm.as_ref().map(|vm| vm.device_tree());
647 
648                 Ok(VmInfo {
649                     config,
650                     state,
651                     memory_actual_size,
652                     device_tree,
653                 })
654             }
655             None => Err(VmError::VmNotCreated),
656         }
657     }
658 
659     fn vmm_ping(&self) -> VmmPingResponse {
660         VmmPingResponse {
661             version: self.version.clone(),
662         }
663     }
664 
665     fn vm_delete(&mut self) -> result::Result<(), VmError> {
666         if self.vm_config.is_none() {
667             return Ok(());
668         }
669 
670         // If a VM is booted, we first try to shut it down.
671         if self.vm.is_some() {
672             self.vm_shutdown()?;
673         }
674 
675         self.vm_config = None;
676 
677         event!("vm", "deleted");
678 
679         Ok(())
680     }
681 
682     fn vmm_shutdown(&mut self) -> result::Result<(), VmError> {
683         self.vm_delete()?;
684         event!("vmm", "shutdown");
685         Ok(())
686     }
687 
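    // Resizes vCPUs, RAM and/or the balloon. If a VM is running the request is
    // forwarded to it, otherwise only the stored config is updated so the new
    // values apply on the next boot.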
688     fn vm_resize(
689         &mut self,
690         desired_vcpus: Option<u8>,
691         desired_ram: Option<u64>,
692         desired_balloon: Option<u64>,
693     ) -> result::Result<(), VmError> {
694         self.vm_config.as_ref().ok_or(VmError::VmNotCreated)?;
695 
696         if let Some(ref mut vm) = self.vm {
697             if let Err(e) = vm.resize(desired_vcpus, desired_ram, desired_balloon) {
698                 error!("Error when resizing VM: {:?}", e);
699                 Err(e)
700             } else {
701                 Ok(())
702             }
703         } else {
704             let mut config = self.vm_config.as_ref().unwrap().lock().unwrap();
705             if let Some(desired_vcpus) = desired_vcpus {
706                 config.cpus.boot_vcpus = desired_vcpus;
707             }
708             if let Some(desired_ram) = desired_ram {
709                 config.memory.size = desired_ram;
710             }
711             if let Some(desired_balloon) = desired_balloon {
712                 if let Some(balloon_config) = &mut config.balloon {
713                     balloon_config.size = desired_balloon;
714                 }
715             }
716             Ok(())
717         }
718     }
719 
720     fn vm_resize_zone(&mut self, id: String, desired_ram: u64) -> result::Result<(), VmError> {
721         self.vm_config.as_ref().ok_or(VmError::VmNotCreated)?;
722 
723         if let Some(ref mut vm) = self.vm {
724             if let Err(e) = vm.resize_zone(id, desired_ram) {
725                 error!("Error when resizing the memory zone: {:?}", e);
726                 Err(e)
727             } else {
728                 Ok(())
729             }
730         } else {
731             // Update VmConfig by setting the new desired ram.
732             let memory_config = &mut self.vm_config.as_ref().unwrap().lock().unwrap().memory;
733 
734             if let Some(zones) = &mut memory_config.zones {
735                 for zone in zones.iter_mut() {
736                     if zone.id == id {
737                         zone.size = desired_ram;
738                         return Ok(());
739                     }
740                 }
741             }
742 
743             error!("Could not find the memory zone {} for the resize", id);
744             Err(VmError::ResizeZone)
745         }
746     }
747 
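    // The hotplug helpers below all follow the same pattern: validate the
    // change against a clone of the stored config, then either ask the running
    // VM to hotplug the device (returning its PCI info as JSON) or, with no VM
    // running, simply record the new device in the config.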
748     fn vm_add_device(
749         &mut self,
750         device_cfg: DeviceConfig,
751     ) -> result::Result<Option<Vec<u8>>, VmError> {
752         self.vm_config.as_ref().ok_or(VmError::VmNotCreated)?;
753 
754         {
755             // Validate the configuration change in a cloned configuration
756             let mut config = self.vm_config.as_ref().unwrap().lock().unwrap().clone();
757             add_to_config(&mut config.devices, device_cfg.clone());
758             config.validate().map_err(VmError::ConfigValidation)?;
759         }
760 
761         if let Some(ref mut vm) = self.vm {
762             let info = vm.add_device(device_cfg).map_err(|e| {
763                 error!("Error when adding new device to the VM: {:?}", e);
764                 e
765             })?;
766             serde_json::to_vec(&info)
767                 .map(Some)
768                 .map_err(VmError::SerializeJson)
769         } else {
770             // Update VmConfig by adding the new device.
771             let mut config = self.vm_config.as_ref().unwrap().lock().unwrap();
772             add_to_config(&mut config.devices, device_cfg);
773             Ok(None)
774         }
775     }
776 
777     fn vm_add_user_device(
778         &mut self,
779         device_cfg: UserDeviceConfig,
780     ) -> result::Result<Option<Vec<u8>>, VmError> {
781         self.vm_config.as_ref().ok_or(VmError::VmNotCreated)?;
782 
783         {
784             // Validate the configuration change in a cloned configuration
785             let mut config = self.vm_config.as_ref().unwrap().lock().unwrap().clone();
786             add_to_config(&mut config.user_devices, device_cfg.clone());
787             config.validate().map_err(VmError::ConfigValidation)?;
788         }
789 
790         if let Some(ref mut vm) = self.vm {
791             let info = vm.add_user_device(device_cfg).map_err(|e| {
792                 error!("Error when adding new user device to the VM: {:?}", e);
793                 e
794             })?;
795             serde_json::to_vec(&info)
796                 .map(Some)
797                 .map_err(VmError::SerializeJson)
798         } else {
799             // Update VmConfig by adding the new device.
800             let mut config = self.vm_config.as_ref().unwrap().lock().unwrap();
801             add_to_config(&mut config.user_devices, device_cfg);
802             Ok(None)
803         }
804     }
805 
806     fn vm_remove_device(&mut self, id: String) -> result::Result<(), VmError> {
807         if let Some(ref mut vm) = self.vm {
808             if let Err(e) = vm.remove_device(id) {
809                 error!("Error when removing device from the VM: {:?}", e);
810                 Err(e)
811             } else {
812                 Ok(())
813             }
814         } else {
815             Err(VmError::VmNotRunning)
816         }
817     }
818 
819     fn vm_add_disk(&mut self, disk_cfg: DiskConfig) -> result::Result<Option<Vec<u8>>, VmError> {
820         self.vm_config.as_ref().ok_or(VmError::VmNotCreated)?;
821 
822         {
823             // Validate the configuration change in a cloned configuration
824             let mut config = self.vm_config.as_ref().unwrap().lock().unwrap().clone();
825             add_to_config(&mut config.disks, disk_cfg.clone());
826             config.validate().map_err(VmError::ConfigValidation)?;
827         }
828 
829         if let Some(ref mut vm) = self.vm {
830             let info = vm.add_disk(disk_cfg).map_err(|e| {
831                 error!("Error when adding new disk to the VM: {:?}", e);
832                 e
833             })?;
834             serde_json::to_vec(&info)
835                 .map(Some)
836                 .map_err(VmError::SerializeJson)
837         } else {
838             // Update VmConfig by adding the new device.
839             let mut config = self.vm_config.as_ref().unwrap().lock().unwrap();
840             add_to_config(&mut config.disks, disk_cfg);
841             Ok(None)
842         }
843     }
844 
845     fn vm_add_fs(&mut self, fs_cfg: FsConfig) -> result::Result<Option<Vec<u8>>, VmError> {
846         self.vm_config.as_ref().ok_or(VmError::VmNotCreated)?;
847 
848         {
849             // Validate the configuration change in a cloned configuration
850             let mut config = self.vm_config.as_ref().unwrap().lock().unwrap().clone();
851             add_to_config(&mut config.fs, fs_cfg.clone());
852             config.validate().map_err(VmError::ConfigValidation)?;
853         }
854 
855         if let Some(ref mut vm) = self.vm {
856             let info = vm.add_fs(fs_cfg).map_err(|e| {
857                 error!("Error when adding new fs to the VM: {:?}", e);
858                 e
859             })?;
860             serde_json::to_vec(&info)
861                 .map(Some)
862                 .map_err(VmError::SerializeJson)
863         } else {
864             // Update VmConfig by adding the new device.
865             let mut config = self.vm_config.as_ref().unwrap().lock().unwrap();
866             add_to_config(&mut config.fs, fs_cfg);
867             Ok(None)
868         }
869     }
870 
871     fn vm_add_pmem(&mut self, pmem_cfg: PmemConfig) -> result::Result<Option<Vec<u8>>, VmError> {
872         self.vm_config.as_ref().ok_or(VmError::VmNotCreated)?;
873 
874         {
875             // Validate the configuration change in a cloned configuration
876             let mut config = self.vm_config.as_ref().unwrap().lock().unwrap().clone();
877             add_to_config(&mut config.pmem, pmem_cfg.clone());
878             config.validate().map_err(VmError::ConfigValidation)?;
879         }
880 
881         if let Some(ref mut vm) = self.vm {
882             let info = vm.add_pmem(pmem_cfg).map_err(|e| {
883                 error!("Error when adding new pmem device to the VM: {:?}", e);
884                 e
885             })?;
886             serde_json::to_vec(&info)
887                 .map(Some)
888                 .map_err(VmError::SerializeJson)
889         } else {
890             // Update VmConfig by adding the new device.
891             let mut config = self.vm_config.as_ref().unwrap().lock().unwrap();
892             add_to_config(&mut config.pmem, pmem_cfg);
893             Ok(None)
894         }
895     }
896 
897     fn vm_add_net(&mut self, net_cfg: NetConfig) -> result::Result<Option<Vec<u8>>, VmError> {
898         self.vm_config.as_ref().ok_or(VmError::VmNotCreated)?;
899 
900         {
901             // Validate the configuration change in a cloned configuration
902             let mut config = self.vm_config.as_ref().unwrap().lock().unwrap().clone();
903             add_to_config(&mut config.net, net_cfg.clone());
904             config.validate().map_err(VmError::ConfigValidation)?;
905         }
906 
907         if let Some(ref mut vm) = self.vm {
908             let info = vm.add_net(net_cfg).map_err(|e| {
909                 error!("Error when adding new network device to the VM: {:?}", e);
910                 e
911             })?;
912             serde_json::to_vec(&info)
913                 .map(Some)
914                 .map_err(VmError::SerializeJson)
915         } else {
916             // Update VmConfig by adding the new device.
917             let mut config = self.vm_config.as_ref().unwrap().lock().unwrap();
918             add_to_config(&mut config.net, net_cfg);
919             Ok(None)
920         }
921     }
922 
923     fn vm_add_vdpa(&mut self, vdpa_cfg: VdpaConfig) -> result::Result<Option<Vec<u8>>, VmError> {
924         self.vm_config.as_ref().ok_or(VmError::VmNotCreated)?;
925 
926         {
927             // Validate the configuration change in a cloned configuration
928             let mut config = self.vm_config.as_ref().unwrap().lock().unwrap().clone();
929             add_to_config(&mut config.vdpa, vdpa_cfg.clone());
930             config.validate().map_err(VmError::ConfigValidation)?;
931         }
932 
933         if let Some(ref mut vm) = self.vm {
934             let info = vm.add_vdpa(vdpa_cfg).map_err(|e| {
935                 error!("Error when adding new vDPA device to the VM: {:?}", e);
936                 e
937             })?;
938             serde_json::to_vec(&info)
939                 .map(Some)
940                 .map_err(VmError::SerializeJson)
941         } else {
942             // Update VmConfig by adding the new device.
943             let mut config = self.vm_config.as_ref().unwrap().lock().unwrap();
944             add_to_config(&mut config.vdpa, vdpa_cfg);
945             Ok(None)
946         }
947     }
948 
949     fn vm_add_vsock(&mut self, vsock_cfg: VsockConfig) -> result::Result<Option<Vec<u8>>, VmError> {
950         self.vm_config.as_ref().ok_or(VmError::VmNotCreated)?;
951 
952         {
953             // Validate the configuration change in a cloned configuration
954             let mut config = self.vm_config.as_ref().unwrap().lock().unwrap().clone();
955 
956             if config.vsock.is_some() {
957                 return Err(VmError::TooManyVsockDevices);
958             }
959 
960             config.vsock = Some(vsock_cfg.clone());
961             config.validate().map_err(VmError::ConfigValidation)?;
962         }
963 
964         if let Some(ref mut vm) = self.vm {
965             let info = vm.add_vsock(vsock_cfg).map_err(|e| {
966                 error!("Error when adding new vsock device to the VM: {:?}", e);
967                 e
968             })?;
969             serde_json::to_vec(&info)
970                 .map(Some)
971                 .map_err(VmError::SerializeJson)
972         } else {
973             // Update VmConfig by adding the new device.
974             let mut config = self.vm_config.as_ref().unwrap().lock().unwrap();
975             config.vsock = Some(vsock_cfg);
976             Ok(None)
977         }
978     }
979 
980     fn vm_counters(&mut self) -> result::Result<Option<Vec<u8>>, VmError> {
981         if let Some(ref mut vm) = self.vm {
982             let info = vm.counters().map_err(|e| {
983                 error!("Error when getting counters from the VM: {:?}", e);
984                 e
985             })?;
986             serde_json::to_vec(&info)
987                 .map(Some)
988                 .map_err(VmError::SerializeJson)
989         } else {
990             Err(VmError::VmNotRunning)
991         }
992     }
993 
994     fn vm_power_button(&mut self) -> result::Result<(), VmError> {
995         if let Some(ref mut vm) = self.vm {
996             vm.power_button()
997         } else {
998             Err(VmError::VmNotRunning)
999         }
1000     }
1001 
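    // Migration destination: reads the serialized `VmMigrationConfig` from the
    // socket, checks CPUID compatibility (KVM on x86_64) and creates a new,
    // not yet restored, `Vm` from it.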
1002     fn vm_receive_config<T>(
1003         &mut self,
1004         req: &Request,
1005         socket: &mut T,
1006         existing_memory_files: Option<HashMap<u32, File>>,
1007     ) -> std::result::Result<Vm, MigratableError>
1008     where
1009         T: Read + Write,
1010     {
1011         // Read in config data along with memory manager data
1012         let mut data: Vec<u8> = Vec::new();
1013         data.resize_with(req.length() as usize, Default::default);
1014         socket
1015             .read_exact(&mut data)
1016             .map_err(MigratableError::MigrateSocket)?;
1017 
1018         let vm_migration_config: VmMigrationConfig =
1019             serde_json::from_slice(&data).map_err(|e| {
1020                 MigratableError::MigrateReceive(anyhow!("Error deserialising config: {}", e))
1021             })?;
1022 
1023         #[cfg(all(feature = "kvm", target_arch = "x86_64"))]
1024         self.vm_check_cpuid_compatibility(
1025             &vm_migration_config.vm_config,
1026             &vm_migration_config.common_cpuid,
1027         )?;
1028 
1029         let exit_evt = self.exit_evt.try_clone().map_err(|e| {
1030             MigratableError::MigrateReceive(anyhow!("Error cloning exit EventFd: {}", e))
1031         })?;
1032         let reset_evt = self.reset_evt.try_clone().map_err(|e| {
1033             MigratableError::MigrateReceive(anyhow!("Error cloning reset EventFd: {}", e))
1034         })?;
1035         #[cfg(feature = "gdb")]
1036         let debug_evt = self.vm_debug_evt.try_clone().map_err(|e| {
1037             MigratableError::MigrateReceive(anyhow!("Error cloning debug EventFd: {}", e))
1038         })?;
1039         let activate_evt = self.activate_evt.try_clone().map_err(|e| {
1040             MigratableError::MigrateReceive(anyhow!("Error cloning activate EventFd: {}", e))
1041         })?;
1042 
1043         self.vm_config = Some(vm_migration_config.vm_config);
1044         let vm = Vm::new_from_migration(
1045             self.vm_config.clone().unwrap(),
1046             exit_evt,
1047             reset_evt,
1048             #[cfg(feature = "gdb")]
1049             debug_evt,
1050             &self.seccomp_action,
1051             self.hypervisor.clone(),
1052             activate_evt,
1053             &vm_migration_config.memory_manager_data,
1054             existing_memory_files,
1055         )
1056         .map_err(|e| {
1057             MigratableError::MigrateReceive(anyhow!("Error creating VM from snapshot: {:?}", e))
1058         })?;
1059 
1060         Response::ok().write_to(socket)?;
1061 
1062         Ok(vm)
1063     }
1064 
1065     fn vm_receive_state<T>(
1066         &mut self,
1067         req: &Request,
1068         socket: &mut T,
1069         mut vm: Vm,
1070     ) -> std::result::Result<(), MigratableError>
1071     where
1072         T: Read + Write,
1073     {
1074         // Read in state data
1075         let mut data: Vec<u8> = Vec::new();
1076         data.resize_with(req.length() as usize, Default::default);
1077         socket
1078             .read_exact(&mut data)
1079             .map_err(MigratableError::MigrateSocket)?;
1080         let snapshot: Snapshot = serde_json::from_slice(&data).map_err(|e| {
1081             MigratableError::MigrateReceive(anyhow!("Error deserialising snapshot: {}", e))
1082         })?;
1083 
1084         // Restore the VM from the received snapshot
1085         vm.restore(snapshot).map_err(|e| {
1086             Response::error().write_to(socket).ok();
1087             e
1088         })?;
1089         self.vm = Some(vm);
1090 
1091         Response::ok().write_to(socket)?;
1092 
1093         Ok(())
1094     }
1095 
1096     fn vm_receive_memory<T>(
1097         &mut self,
1098         req: &Request,
1099         socket: &mut T,
1100         vm: &mut Vm,
1101     ) -> std::result::Result<(), MigratableError>
1102     where
1103         T: Read + Write,
1104     {
1105         // Read table
1106         let table = MemoryRangeTable::read_from(socket, req.length())?;
1107 
1108         // And then read the memory itself
1109         vm.receive_memory_regions(&table, socket).map_err(|e| {
1110             Response::error().write_to(socket).ok();
1111             e
1112         })?;
1113         Response::ok().write_to(socket)?;
1114         Ok(())
1115     }
1116 
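    // Migration URLs are expected to be of the form "unix:<path>"; strip the
    // scheme to obtain the UNIX socket path.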
1117     fn socket_url_to_path(url: &str) -> result::Result<PathBuf, MigratableError> {
1118         url.strip_prefix("unix:")
1119             .ok_or_else(|| {
1120                 MigratableError::MigrateSend(anyhow!("Could not extract path from URL: {}", url))
1121             })
1122             .map(|s| s.into())
1123     }
1124 
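    // Destination side of a live migration: listen on the UNIX socket and
    // process Start/Config/State/Memory/MemoryFd commands until the source
    // either completes or abandons the migration.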
1125     fn vm_receive_migration(
1126         &mut self,
1127         receive_data_migration: VmReceiveMigrationData,
1128     ) -> result::Result<(), MigratableError> {
1129         info!(
1130             "Receiving migration: receiver_url = {}",
1131             receive_data_migration.receiver_url
1132         );
1133 
1134         let path = Self::socket_url_to_path(&receive_data_migration.receiver_url)?;
1135         let listener = UnixListener::bind(&path).map_err(|e| {
1136             MigratableError::MigrateReceive(anyhow!("Error binding to UNIX socket: {}", e))
1137         })?;
1138         let (mut socket, _addr) = listener.accept().map_err(|e| {
1139             MigratableError::MigrateReceive(anyhow!("Error accepting on UNIX socket: {}", e))
1140         })?;
1141         std::fs::remove_file(&path).map_err(|e| {
1142             MigratableError::MigrateReceive(anyhow!("Error unlinking UNIX socket: {}", e))
1143         })?;
1144 
1145         let mut started = false;
1146         let mut vm: Option<Vm> = None;
1147         let mut existing_memory_files = None;
1148         loop {
1149             let req = Request::read_from(&mut socket)?;
1150             match req.command() {
1151                 Command::Invalid => info!("Invalid Command Received"),
1152                 Command::Start => {
1153                     info!("Start Command Received");
1154                     started = true;
1155 
1156                     Response::ok().write_to(&mut socket)?;
1157                 }
1158                 Command::Config => {
1159                     info!("Config Command Received");
1160 
1161                     if !started {
1162                         warn!("Migration not started yet");
1163                         Response::error().write_to(&mut socket)?;
1164                         continue;
1165                     }
1166                     vm = Some(self.vm_receive_config(
1167                         &req,
1168                         &mut socket,
1169                         existing_memory_files.take(),
1170                     )?);
1171                 }
1172                 Command::State => {
1173                     info!("State Command Received");
1174 
1175                     if !started {
1176                         warn!("Migration not started yet");
1177                         Response::error().write_to(&mut socket)?;
1178                         continue;
1179                     }
1180                     if let Some(vm) = vm.take() {
1181                         self.vm_receive_state(&req, &mut socket, vm)?;
1182                     } else {
1183                         warn!("Configuration not sent yet");
1184                         Response::error().write_to(&mut socket)?;
1185                     }
1186                 }
1187                 Command::Memory => {
1188                     info!("Memory Command Received");
1189 
1190                     if !started {
1191                         warn!("Migration not started yet");
1192                         Response::error().write_to(&mut socket)?;
1193                         continue;
1194                     }
1195                     if let Some(ref mut vm) = vm.as_mut() {
1196                         self.vm_receive_memory(&req, &mut socket, vm)?;
1197                     } else {
1198                         warn!("Configuration not sent yet");
1199                         Response::error().write_to(&mut socket)?;
1200                     }
1201                 }
1202                 Command::MemoryFd => {
1203                     info!("MemoryFd Command Received");
1204 
1205                     if !started {
1206                         warn!("Migration not started yet");
1207                         Response::error().write_to(&mut socket)?;
1208                         continue;
1209                     }
1210 
1211                     let mut buf = [0u8; 4];
1212                     let (_, file) = socket.recv_with_fd(&mut buf).map_err(|e| {
1213                         MigratableError::MigrateReceive(anyhow!(
1214                             "Error receiving slot from socket: {}",
1215                             e
1216                         ))
1217                     })?;
1218 
1219                     if existing_memory_files.is_none() {
1220                         existing_memory_files = Some(HashMap::default())
1221                     }
1222 
1223                     if let Some(ref mut existing_memory_files) = existing_memory_files {
1224                         let slot = u32::from_le_bytes(buf);
1225                         existing_memory_files.insert(slot, file.unwrap());
1226                     }
1227 
1228                     Response::ok().write_to(&mut socket)?;
1229                 }
1230                 Command::Complete => {
1231                     info!("Complete Command Received");
1232                     if let Some(ref mut vm) = self.vm.as_mut() {
1233                         vm.resume()?;
1234                         Response::ok().write_to(&mut socket)?;
1235                     } else {
1236                         warn!("VM not created yet");
1237                         Response::error().write_to(&mut socket)?;
1238                     }
1239                     break;
1240                 }
1241                 Command::Abandon => {
1242                     info!("Abandon Command Received");
1243                     self.vm = None;
1244                     self.vm_config = None;
1245                     Response::ok().write_to(&mut socket).ok();
1246                     break;
1247                 }
1248             }
1249         }
1250 
1251         Ok(())
1252     }
1253 
1254     // Returns true if there were dirty pages to send
1255     fn vm_maybe_send_dirty_pages<T>(
1256         vm: &mut Vm,
1257         socket: &mut T,
1258     ) -> result::Result<bool, MigratableError>
1259     where
1260         T: Read + Write,
1261     {
1262         // Send (dirty) memory table
1263         let table = vm.dirty_log()?;
1264 
1265         // But if there are no regions go straight to pause
1266         if table.regions().is_empty() {
1267             return Ok(false);
1268         }
1269 
1270         Request::memory(table.length()).write_to(socket).unwrap();
1271         table.write_to(socket)?;
1272         // And then the memory itself
1273         vm.send_memory_regions(&table, socket)?;
1274         let res = Response::read_from(socket)?;
1275         if res.status() != Status::Ok {
1276             warn!("Error during dirty memory migration");
1277             Request::abandon().write_to(socket)?;
1278             Response::read_from(socket).ok();
1279             return Err(MigratableError::MigrateSend(anyhow!(
1280                 "Error during dirty memory migration"
1281             )));
1282         }
1283 
1284         Ok(true)
1285     }
1286 
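    // Source side of a live migration. For a local migration the guest memory
    // fds are passed over the socket and the VM is simply paused; otherwise
    // dirty-page logging is enabled and memory is sent in one full pass
    // followed by up to MAX_DIRTY_MIGRATIONS dirty passes before pausing and
    // sending the final state.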
1287     fn send_migration(
1288         vm: &mut Vm,
1289         #[cfg(all(feature = "kvm", target_arch = "x86_64"))] hypervisor: Arc<
1290             dyn hypervisor::Hypervisor,
1291         >,
1292         send_data_migration: VmSendMigrationData,
1293     ) -> result::Result<(), MigratableError> {
1294         let path = Self::socket_url_to_path(&send_data_migration.destination_url)?;
1295         let mut socket = UnixStream::connect(&path).map_err(|e| {
1296             MigratableError::MigrateSend(anyhow!("Error connecting to UNIX socket: {}", e))
1297         })?;
1298 
1299         // Start the migration
1300         Request::start().write_to(&mut socket)?;
1301         let res = Response::read_from(&mut socket)?;
1302         if res.status() != Status::Ok {
1303             warn!("Error starting migration");
1304             Request::abandon().write_to(&mut socket)?;
1305             Response::read_from(&mut socket).ok();
1306             return Err(MigratableError::MigrateSend(anyhow!(
1307                 "Error starting migration"
1308             )));
1309         }
1310 
1311         // Send config
1312         let vm_config = vm.get_config();
1313         #[cfg(all(feature = "kvm", target_arch = "x86_64"))]
1314         let common_cpuid = {
1315             #[cfg(feature = "tdx")]
1316             let tdx_enabled = vm_config.lock().unwrap().tdx.is_some();
1317             let phys_bits = vm::physical_bits(vm_config.lock().unwrap().cpus.max_phys_bits);
1318             arch::generate_common_cpuid(
1319                 hypervisor,
1320                 None,
1321                 None,
1322                 phys_bits,
1323                 vm_config.lock().unwrap().cpus.kvm_hyperv,
1324                 #[cfg(feature = "tdx")]
1325                 tdx_enabled,
1326             )
1327             .map_err(|e| {
1328                 MigratableError::MigrateReceive(anyhow!("Error generating common cpuid: {:?}", e))
1329             })?
1330         };
1331 
1332         if send_data_migration.local {
1333             vm.send_memory_fds(&mut socket)?;
1334         }
1335 
1336         let vm_migration_config = VmMigrationConfig {
1337             vm_config,
1338             #[cfg(all(feature = "kvm", target_arch = "x86_64"))]
1339             common_cpuid,
1340             memory_manager_data: vm.memory_manager_data(),
1341         };
1342         let config_data = serde_json::to_vec(&vm_migration_config).unwrap();
1343         Request::config(config_data.len() as u64).write_to(&mut socket)?;
1344         socket
1345             .write_all(&config_data)
1346             .map_err(MigratableError::MigrateSocket)?;
1347         let res = Response::read_from(&mut socket)?;
1348         if res.status() != Status::Ok {
1349             warn!("Error during config migration");
1350             Request::abandon().write_to(&mut socket)?;
1351             Response::read_from(&mut socket).ok();
1352             return Err(MigratableError::MigrateSend(anyhow!(
1353                 "Error during config migration"
1354             )));
1355         }
1356 
1357         // Let every Migratable object know about the migration being started.
1358         vm.start_migration()?;
1359 
1360         if send_data_migration.local {
1361             // Now pause VM
1362             vm.pause()?;
1363         } else {
1364             // Start logging dirty pages
1365             vm.start_dirty_log()?;
1366 
1367             // Send memory table
1368             let table = vm.memory_range_table()?;
1369             Request::memory(table.length())
1370                 .write_to(&mut socket)
1371                 .unwrap();
1372             table.write_to(&mut socket)?;
1373             // And then the memory itself
1374             vm.send_memory_regions(&table, &mut socket)?;
1375             let res = Response::read_from(&mut socket)?;
1376             if res.status() != Status::Ok {
1377                 warn!("Error during memory migration");
1378                 Request::abandon().write_to(&mut socket)?;
1379                 Response::read_from(&mut socket).ok();
1380                 return Err(MigratableError::MigrateSend(anyhow!(
1381                     "Error during memory migration"
1382                 )));
1383             }
1384 
1385             // Try at most 5 passes of dirty memory sending
1386             const MAX_DIRTY_MIGRATIONS: usize = 5;
1387             for i in 0..MAX_DIRTY_MIGRATIONS {
1388                 info!("Dirty memory migration {} of {}", i + 1, MAX_DIRTY_MIGRATIONS);
1389                 if !Self::vm_maybe_send_dirty_pages(vm, &mut socket)? {
1390                     break;
1391                 }
1392             }
1393 
1394             // Now pause VM
1395             vm.pause()?;
1396 
1397             // Send last batch of dirty pages
1398             Self::vm_maybe_send_dirty_pages(vm, &mut socket)?;
1399 
1400             // Stop logging dirty pages
1401             vm.stop_dirty_log()?;
1402         }
1403         // Capture snapshot and send it
1404         let vm_snapshot = vm.snapshot()?;
1405         let snapshot_data = serde_json::to_vec(&vm_snapshot).unwrap();
1406         Request::state(snapshot_data.len() as u64).write_to(&mut socket)?;
1407         socket
1408             .write_all(&snapshot_data)
1409             .map_err(MigratableError::MigrateSocket)?;
1410         let res = Response::read_from(&mut socket)?;
1411         if res.status() != Status::Ok {
1412             warn!("Error during state migration");
1413             Request::abandon().write_to(&mut socket)?;
1414             Response::read_from(&mut socket).ok();
1415             return Err(MigratableError::MigrateSend(anyhow!(
1416                 "Error during state migration"
1417             )));
1418         }
1419 
1420         // Complete the migration
1421         Request::complete().write_to(&mut socket)?;
1422         let res = Response::read_from(&mut socket)?;
1423         if res.status() != Status::Ok {
1424             warn!("Error completing migration");
1425             Request::abandon().write_to(&mut socket)?;
1426             Response::read_from(&mut socket).ok();
1427             return Err(MigratableError::MigrateSend(anyhow!(
1428                 "Error completing migration"
1429             )));
1430         }
1431         info!("Migration complete");
1432 
1433         // Let every Migratable object know about the migration being complete
1434         vm.complete_migration()
1435     }
1436 
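    // API entry point for an outgoing migration: rejects local migrations
    // without shared memory, runs `send_migration`, stops dirty logging and
    // resumes the VM on failure, and signals the exit event on success so the
    // source VM is shut down.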
1437     fn vm_send_migration(
1438         &mut self,
1439         send_data_migration: VmSendMigrationData,
1440     ) -> result::Result<(), MigratableError> {
1441         info!(
1442             "Sending migration: destination_url = {}, local = {}",
1443             send_data_migration.destination_url, send_data_migration.local
1444         );
1445 
1446         if !self
1447             .vm_config
1448             .as_ref()
1449             .unwrap()
1450             .lock()
1451             .unwrap()
1452             .memory
1453             .shared
1454             && send_data_migration.local
1455         {
1456             return Err(MigratableError::MigrateSend(anyhow!(
1457                 "Local migration requires shared memory enabled"
1458             )));
1459         }
1460 
1461         if let Some(vm) = self.vm.as_mut() {
1462             Self::send_migration(
1463                 vm,
1464                 #[cfg(all(feature = "kvm", target_arch = "x86_64"))]
1465                 self.hypervisor.clone(),
1466                 send_data_migration,
1467             )
1468             .map_err(|migration_err| {
1469                 error!("Migration failed: {:?}", migration_err);
1470 
1471                 // Stop logging dirty pages
1472                 if let Err(e) = vm.stop_dirty_log() {
1473                     return e;
1474                 }
1475 
1476                 if vm.get_state().unwrap() == VmState::Paused {
1477                     if let Err(e) = vm.resume() {
1478                         return e;
1479                     }
1480                 }
1481 
1482                 migration_err
1483             })?;
1484 
1485             // Shutdown the VM after the migration succeeded
1486             self.exit_evt.write(1).map_err(|e| {
1487                 MigratableError::MigrateSend(anyhow!(
1488                     "Failed shutting down the VM after migration: {:?}",
1489                     e
1490                 ))
1491             })
1492         } else {
1493             Err(MigratableError::MigrateSend(anyhow!("VM is not running")))
1494         }
1495     }
1496 
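    // A hypothetical sketch (not in the upstream file) of the rollback performed in the
    // map_err closure above when Self::send_migration() fails: dirty-page logging is
    // stopped and a paused VM is resumed, and the original migration error is preserved
    // unless the rollback itself fails. The function name is an assumption.
    #[allow(dead_code)]
    fn rollback_failed_migration(vm: &mut Vm, migration_err: MigratableError) -> MigratableError {
        if let Err(e) = vm.stop_dirty_log() {
            return e;
        }
        if vm.get_state().unwrap() == VmState::Paused {
            if let Err(e) = vm.resume() {
                return e;
            }
        }
        migration_err
    }
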
1497     #[cfg(all(feature = "kvm", target_arch = "x86_64"))]
1498     fn vm_check_cpuid_compatibility(
1499         &self,
1500         src_vm_config: &Arc<Mutex<VmConfig>>,
1501         src_vm_cpuid: &hypervisor::CpuId,
1502     ) -> result::Result<(), MigratableError> {
1503         // We check the `CPUID` compatibility between the source vm and the destination, which
1504         // is mostly about feature compatibility; the "topology/sgx" leaves are not relevant.
1505         let dest_cpuid = &{
1506             let vm_config = &src_vm_config.lock().unwrap();
1507 
1508             #[cfg(feature = "tdx")]
1509             let tdx_enabled = vm_config.tdx.is_some();
1510             let phys_bits = vm::physical_bits(vm_config.cpus.max_phys_bits);
1511             arch::generate_common_cpuid(
1512                 self.hypervisor.clone(),
1513                 None,
1514                 None,
1515                 phys_bits,
1516                 vm_config.cpus.kvm_hyperv,
1517                 #[cfg(feature = "tdx")]
1518                 tdx_enabled,
1519             )
1520             .map_err(|e| {
1521                 MigratableError::MigrateReceive(anyhow!("Error generating common cpuid: {:?}", e))
1522             })?
1523         };
1524         arch::CpuidFeatureEntry::check_cpuid_compatibility(src_vm_cpuid, dest_cpuid).map_err(|e| {
1525             MigratableError::MigrateReceive(anyhow!(
1526                 "Error checking cpu feature compatibility: {:?}",
1527                 e
1528             ))
1529         })
1530     }
1531 
1532     fn control_loop(
1533         &mut self,
1534         api_receiver: Arc<Receiver<ApiRequest>>,
1535         #[cfg(feature = "gdb")] gdb_receiver: Arc<Receiver<gdb::GdbRequest>>,
1536     ) -> Result<()> {
1537         const EPOLL_EVENTS_LEN: usize = 100;
1538 
1539         let mut events = vec![epoll::Event::new(epoll::Events::empty(), 0); EPOLL_EVENTS_LEN];
1540         let epoll_fd = self.epoll.as_raw_fd();
1541 
1542         'outer: loop {
1543             let num_events = match epoll::wait(epoll_fd, -1, &mut events[..]) {
1544                 Ok(res) => res,
1545                 Err(e) => {
1546                     if e.kind() == io::ErrorKind::Interrupted {
1547                         // The epoll_wait() syscall documentation states that the
1548                         // call can be interrupted by a signal before any of the
1549                         // requested events have occurred or the timeout has
1550                         // expired. In both cases, epoll_wait() fails with EINTR,
1551                         // which should not be treated as a regular error. The
1552                         // appropriate handling is simply to retry by calling
1553                         // epoll_wait() again.
1554                         continue;
1555                     }
1556                     return Err(Error::Epoll(e));
1557                 }
1558             };
1559 
1560             for event in events.iter().take(num_events) {
1561                 let dispatch_event: EpollDispatch = event.data.into();
1562                 match dispatch_event {
1563                     EpollDispatch::Unknown => {
1564                         let event = event.data;
1565                         warn!("Unknown VMM loop event: {}", event);
1566                     }
1567                     EpollDispatch::Exit => {
1568                         info!("VM exit event");
1569                         // Consume the event.
1570                         self.exit_evt.read().map_err(Error::EventFdRead)?;
1571                         self.vmm_shutdown().map_err(Error::VmmShutdown)?;
1572 
1573                         break 'outer;
1574                     }
1575                     EpollDispatch::Reset => {
1576                         info!("VM reset event");
1577                         // Consume the event.
1578                         self.reset_evt.read().map_err(Error::EventFdRead)?;
1579                         self.vm_reboot().map_err(Error::VmReboot)?;
1580                     }
1581                     EpollDispatch::ActivateVirtioDevices => {
1582                         if let Some(ref vm) = self.vm {
1583                             let count = self.activate_evt.read().map_err(Error::EventFdRead)?;
1584                             info!(
1585                                 "Trying to activate pending virtio devices: count = {}",
1586                                 count
1587                             );
1588                             vm.activate_virtio_devices()
1589                                 .map_err(Error::ActivateVirtioDevices)?;
1590                         }
1591                     }
1592                     EpollDispatch::Api => {
1593                         // Consume the event.
1594                         self.api_evt.read().map_err(Error::EventFdRead)?;
1595 
1596                         // Read from the API receiver channel
1597                         let api_request = api_receiver.recv().map_err(Error::ApiRequestRecv)?;
1598 
1599                         info!("API request event: {:?}", api_request);
1600                         match api_request {
1601                             ApiRequest::VmCreate(config, sender) => {
1602                                 let response = self
1603                                     .vm_create(config)
1604                                     .map_err(ApiError::VmCreate)
1605                                     .map(|_| ApiResponsePayload::Empty);
1606 
1607                                 sender.send(response).map_err(Error::ApiResponseSend)?;
1608                             }
1609                             ApiRequest::VmDelete(sender) => {
1610                                 let response = self
1611                                     .vm_delete()
1612                                     .map_err(ApiError::VmDelete)
1613                                     .map(|_| ApiResponsePayload::Empty);
1614 
1615                                 sender.send(response).map_err(Error::ApiResponseSend)?;
1616                             }
1617                             ApiRequest::VmBoot(sender) => {
1618                                 let response = self
1619                                     .vm_boot()
1620                                     .map_err(ApiError::VmBoot)
1621                                     .map(|_| ApiResponsePayload::Empty);
1622 
1623                                 sender.send(response).map_err(Error::ApiResponseSend)?;
1624                             }
1625                             ApiRequest::VmShutdown(sender) => {
1626                                 let response = self
1627                                     .vm_shutdown()
1628                                     .map_err(ApiError::VmShutdown)
1629                                     .map(|_| ApiResponsePayload::Empty);
1630 
1631                                 sender.send(response).map_err(Error::ApiResponseSend)?;
1632                             }
1633                             ApiRequest::VmReboot(sender) => {
1634                                 let response = self
1635                                     .vm_reboot()
1636                                     .map_err(ApiError::VmReboot)
1637                                     .map(|_| ApiResponsePayload::Empty);
1638 
1639                                 sender.send(response).map_err(Error::ApiResponseSend)?;
1640                             }
1641                             ApiRequest::VmInfo(sender) => {
1642                                 let response = self
1643                                     .vm_info()
1644                                     .map_err(ApiError::VmInfo)
1645                                     .map(ApiResponsePayload::VmInfo);
1646 
1647                                 sender.send(response).map_err(Error::ApiResponseSend)?;
1648                             }
1649                             ApiRequest::VmmPing(sender) => {
1650                                 let response = ApiResponsePayload::VmmPing(self.vmm_ping());
1651 
1652                                 sender.send(Ok(response)).map_err(Error::ApiResponseSend)?;
1653                             }
1654                             ApiRequest::VmPause(sender) => {
1655                                 let response = self
1656                                     .vm_pause()
1657                                     .map_err(ApiError::VmPause)
1658                                     .map(|_| ApiResponsePayload::Empty);
1659 
1660                                 sender.send(response).map_err(Error::ApiResponseSend)?;
1661                             }
1662                             ApiRequest::VmResume(sender) => {
1663                                 let response = self
1664                                     .vm_resume()
1665                                     .map_err(ApiError::VmResume)
1666                                     .map(|_| ApiResponsePayload::Empty);
1667 
1668                                 sender.send(response).map_err(Error::ApiResponseSend)?;
1669                             }
1670                             ApiRequest::VmSnapshot(snapshot_data, sender) => {
1671                                 let response = self
1672                                     .vm_snapshot(&snapshot_data.destination_url)
1673                                     .map_err(ApiError::VmSnapshot)
1674                                     .map(|_| ApiResponsePayload::Empty);
1675 
1676                                 sender.send(response).map_err(Error::ApiResponseSend)?;
1677                             }
1678                             ApiRequest::VmRestore(restore_data, sender) => {
1679                                 let response = self
1680                                     .vm_restore(restore_data.as_ref().clone())
1681                                     .map_err(ApiError::VmRestore)
1682                                     .map(|_| ApiResponsePayload::Empty);
1683 
1684                                 sender.send(response).map_err(Error::ApiResponseSend)?;
1685                             }
1686                             ApiRequest::VmmShutdown(sender) => {
1687                                 let response = self
1688                                     .vmm_shutdown()
1689                                     .map_err(ApiError::VmmShutdown)
1690                                     .map(|_| ApiResponsePayload::Empty);
1691 
1692                                 sender.send(response).map_err(Error::ApiResponseSend)?;
1693 
1694                                 break 'outer;
1695                             }
1696                             ApiRequest::VmResize(resize_data, sender) => {
1697                                 let response = self
1698                                     .vm_resize(
1699                                         resize_data.desired_vcpus,
1700                                         resize_data.desired_ram,
1701                                         resize_data.desired_balloon,
1702                                     )
1703                                     .map_err(ApiError::VmResize)
1704                                     .map(|_| ApiResponsePayload::Empty);
1705                                 sender.send(response).map_err(Error::ApiResponseSend)?;
1706                             }
1707                             ApiRequest::VmResizeZone(resize_zone_data, sender) => {
1708                                 let response = self
1709                                     .vm_resize_zone(
1710                                         resize_zone_data.id.clone(),
1711                                         resize_zone_data.desired_ram,
1712                                     )
1713                                     .map_err(ApiError::VmResizeZone)
1714                                     .map(|_| ApiResponsePayload::Empty);
1715                                 sender.send(response).map_err(Error::ApiResponseSend)?;
1716                             }
1717                             ApiRequest::VmAddDevice(add_device_data, sender) => {
1718                                 let response = self
1719                                     .vm_add_device(add_device_data.as_ref().clone())
1720                                     .map_err(ApiError::VmAddDevice)
1721                                     .map(ApiResponsePayload::VmAction);
1722                                 sender.send(response).map_err(Error::ApiResponseSend)?;
1723                             }
1724                             ApiRequest::VmAddUserDevice(add_device_data, sender) => {
1725                                 let response = self
1726                                     .vm_add_user_device(add_device_data.as_ref().clone())
1727                                     .map_err(ApiError::VmAddUserDevice)
1728                                     .map(ApiResponsePayload::VmAction);
1729                                 sender.send(response).map_err(Error::ApiResponseSend)?;
1730                             }
1731                             ApiRequest::VmRemoveDevice(remove_device_data, sender) => {
1732                                 let response = self
1733                                     .vm_remove_device(remove_device_data.id.clone())
1734                                     .map_err(ApiError::VmRemoveDevice)
1735                                     .map(|_| ApiResponsePayload::Empty);
1736                                 sender.send(response).map_err(Error::ApiResponseSend)?;
1737                             }
1738                             ApiRequest::VmAddDisk(add_disk_data, sender) => {
1739                                 let response = self
1740                                     .vm_add_disk(add_disk_data.as_ref().clone())
1741                                     .map_err(ApiError::VmAddDisk)
1742                                     .map(ApiResponsePayload::VmAction);
1743                                 sender.send(response).map_err(Error::ApiResponseSend)?;
1744                             }
1745                             ApiRequest::VmAddFs(add_fs_data, sender) => {
1746                                 let response = self
1747                                     .vm_add_fs(add_fs_data.as_ref().clone())
1748                                     .map_err(ApiError::VmAddFs)
1749                                     .map(ApiResponsePayload::VmAction);
1750                                 sender.send(response).map_err(Error::ApiResponseSend)?;
1751                             }
1752                             ApiRequest::VmAddPmem(add_pmem_data, sender) => {
1753                                 let response = self
1754                                     .vm_add_pmem(add_pmem_data.as_ref().clone())
1755                                     .map_err(ApiError::VmAddPmem)
1756                                     .map(ApiResponsePayload::VmAction);
1757                                 sender.send(response).map_err(Error::ApiResponseSend)?;
1758                             }
1759                             ApiRequest::VmAddNet(add_net_data, sender) => {
1760                                 let response = self
1761                                     .vm_add_net(add_net_data.as_ref().clone())
1762                                     .map_err(ApiError::VmAddNet)
1763                                     .map(ApiResponsePayload::VmAction);
1764                                 sender.send(response).map_err(Error::ApiResponseSend)?;
1765                             }
1766                             ApiRequest::VmAddVdpa(add_vdpa_data, sender) => {
1767                                 let response = self
1768                                     .vm_add_vdpa(add_vdpa_data.as_ref().clone())
1769                                     .map_err(ApiError::VmAddVdpa)
1770                                     .map(ApiResponsePayload::VmAction);
1771                                 sender.send(response).map_err(Error::ApiResponseSend)?;
1772                             }
1773                             ApiRequest::VmAddVsock(add_vsock_data, sender) => {
1774                                 let response = self
1775                                     .vm_add_vsock(add_vsock_data.as_ref().clone())
1776                                     .map_err(ApiError::VmAddVsock)
1777                                     .map(ApiResponsePayload::VmAction);
1778                                 sender.send(response).map_err(Error::ApiResponseSend)?;
1779                             }
1780                             ApiRequest::VmCounters(sender) => {
1781                                 let response = self
1782                                     .vm_counters()
1783                                     .map_err(ApiError::VmInfo)
1784                                     .map(ApiResponsePayload::VmAction);
1785                                 sender.send(response).map_err(Error::ApiResponseSend)?;
1786                             }
1787                             ApiRequest::VmReceiveMigration(receive_migration_data, sender) => {
1788                                 let response = self
1789                                     .vm_receive_migration(receive_migration_data.as_ref().clone())
1790                                     .map_err(ApiError::VmReceiveMigration)
1791                                     .map(|_| ApiResponsePayload::Empty);
1792                                 sender.send(response).map_err(Error::ApiResponseSend)?;
1793                             }
1794                             ApiRequest::VmSendMigration(send_migration_data, sender) => {
1795                                 let response = self
1796                                     .vm_send_migration(send_migration_data.as_ref().clone())
1797                                     .map_err(ApiError::VmSendMigration)
1798                                     .map(|_| ApiResponsePayload::Empty);
1799                                 sender.send(response).map_err(Error::ApiResponseSend)?;
1800                             }
1801                             ApiRequest::VmPowerButton(sender) => {
1802                                 let response = self
1803                                     .vm_power_button()
1804                                     .map_err(ApiError::VmPowerButton)
1805                                     .map(|_| ApiResponsePayload::Empty);
1806 
1807                                 sender.send(response).map_err(Error::ApiResponseSend)?;
1808                             }
1809                         }
1810                     }
1811                     #[cfg(feature = "gdb")]
1812                     EpollDispatch::Debug => {
1813                         // Consume the event.
1814                         self.debug_evt.read().map_err(Error::EventFdRead)?;
1815 
1816                         // Read from the GDB receiver channel
1817                         let gdb_request = gdb_receiver.recv().map_err(Error::GdbRequestRecv)?;
1818 
1819                         let response = if let Some(ref mut vm) = self.vm {
1820                             vm.debug_request(&gdb_request.payload, gdb_request.cpu_id)
1821                         } else {
1822                             Err(VmError::VmNotRunning)
1823                         }
1824                         .map_err(gdb::Error::Vm);
1825 
1826                         gdb_request
1827                             .sender
1828                             .send(response)
1829                             .map_err(Error::GdbResponseSend)?;
1830                     }
1831                     #[cfg(not(feature = "gdb"))]
1832                     EpollDispatch::Debug => {}
1833                 }
1834             }
1835         }
1836 
1837         Ok(())
1838     }
1839 }
1840 
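// An illustrative, hypothetical helper (not in the upstream file) showing the
// EINTR-retry idiom that control_loop() applies around epoll::wait(); a timeout of -1
// blocks until at least one event is ready.
#[allow(dead_code)]
fn epoll_wait_retrying_eintr(epoll_fd: RawFd, events: &mut [epoll::Event]) -> io::Result<usize> {
    loop {
        match epoll::wait(epoll_fd, -1, events) {
            Ok(num_events) => return Ok(num_events),
            // epoll_wait(2) fails with EINTR when a signal arrives before any
            // requested event fires; retrying the call is the expected handling.
            Err(e) if e.kind() == io::ErrorKind::Interrupted => continue,
            Err(e) => return Err(e),
        }
    }
}
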
1841 const CPU_MANAGER_SNAPSHOT_ID: &str = "cpu-manager";
1842 const MEMORY_MANAGER_SNAPSHOT_ID: &str = "memory-manager";
1843 const DEVICE_MANAGER_SNAPSHOT_ID: &str = "device-manager";
1844 
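// A hypothetical requester-side sketch (not in the upstream file) of how a client thread
// would drive control_loop() above. `api_evt` and `api_sender` are assumed to be the
// counterparts of the `self.api_evt` EventFd and the `api_receiver` channel consumed by
// the control loop.
#[allow(dead_code)]
fn request_vm_boot(api_evt: &EventFd, api_sender: &Sender<ApiRequest>) -> ApiResponse {
    let (response_sender, response_receiver) = std::sync::mpsc::channel();

    // Queue the request, then kick the VMM epoll loop through the API EventFd.
    api_sender
        .send(ApiRequest::VmBoot(response_sender))
        .expect("API channel closed");
    api_evt.write(1).expect("failed to signal the API event");

    // Block until control_loop() has processed the request and replied.
    response_receiver.recv().expect("API response channel closed")
}
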
1845 #[cfg(test)]
1846 mod unit_tests {
1847     use super::*;
1848     use config::{
1849         CmdlineConfig, ConsoleConfig, ConsoleOutputMode, CpusConfig, HotplugMethod, KernelConfig,
1850         MemoryConfig, RngConfig, VmConfig,
1851     };
1852 
1853     fn create_dummy_vmm() -> Vmm {
1854         Vmm::new(
1855             "dummy".to_string(),
1856             EventFd::new(EFD_NONBLOCK).unwrap(),
1857             #[cfg(feature = "gdb")]
1858             EventFd::new(EFD_NONBLOCK).unwrap(),
1859             #[cfg(feature = "gdb")]
1860             EventFd::new(EFD_NONBLOCK).unwrap(),
1861             SeccompAction::Allow,
1862             hypervisor::new().unwrap(),
1863             EventFd::new(EFD_NONBLOCK).unwrap(),
1864         )
1865         .unwrap()
1866     }
1867 
1868     fn create_dummy_vm_config() -> Arc<Mutex<VmConfig>> {
1869         Arc::new(Mutex::new(VmConfig {
1870             cpus: CpusConfig {
1871                 boot_vcpus: 1,
1872                 max_vcpus: 1,
1873                 topology: None,
1874                 kvm_hyperv: false,
1875                 max_phys_bits: 46,
1876                 affinity: None,
1877                 features: config::CpuFeatures::default(),
1878             },
1879             memory: MemoryConfig {
1880                 size: 536_870_912,
1881                 mergeable: false,
1882                 hotplug_method: HotplugMethod::Acpi,
1883                 hotplug_size: None,
1884                 hotplugged_size: None,
1885                 shared: true,
1886                 hugepages: false,
1887                 hugepage_size: None,
1888                 prefault: false,
1889                 zones: None,
1890             },
1891             kernel: Some(KernelConfig {
1892                 path: PathBuf::from("/path/to/kernel"),
1893             }),
1894             initramfs: None,
1895             cmdline: CmdlineConfig {
1896                 args: String::from(""),
1897             },
1898             disks: None,
1899             net: None,
1900             rng: RngConfig {
1901                 src: PathBuf::from("/dev/urandom"),
1902                 iommu: false,
1903             },
1904             balloon: None,
1905             fs: None,
1906             pmem: None,
1907             serial: ConsoleConfig {
1908                 file: None,
1909                 mode: ConsoleOutputMode::Null,
1910                 iommu: false,
1911             },
1912             console: ConsoleConfig {
1913                 file: None,
1914                 mode: ConsoleOutputMode::Tty,
1915                 iommu: false,
1916             },
1917             devices: None,
1918             user_devices: None,
1919             vdpa: None,
1920             vsock: None,
1921             iommu: false,
1922             #[cfg(target_arch = "x86_64")]
1923             sgx_epc: None,
1924             numa: None,
1925             watchdog: false,
1926             #[cfg(feature = "tdx")]
1927             tdx: None,
1928             #[cfg(feature = "gdb")]
1929             gdb: false,
1930             platform: None,
1931         }))
1932     }
1933 
1934     #[test]
1935     fn test_vmm_vm_create() {
1936         let mut vmm = create_dummy_vmm();
1937         let config = create_dummy_vm_config();
1938 
1939         assert!(matches!(vmm.vm_create(config.clone()), Ok(())));
1940         assert!(matches!(
1941             vmm.vm_create(config),
1942             Err(VmError::VmAlreadyCreated)
1943         ));
1944     }
1945 
1946     #[test]
1947     fn test_vmm_vm_cold_add_device() {
1948         let mut vmm = create_dummy_vmm();
1949         let device_config = DeviceConfig::parse("path=/path/to/device").unwrap();
1950 
1951         assert!(matches!(
1952             vmm.vm_add_device(device_config.clone()),
1953             Err(VmError::VmNotCreated)
1954         ));
1955 
1956         let _ = vmm.vm_create(create_dummy_vm_config());
1957         assert!(vmm
1958             .vm_config
1959             .as_ref()
1960             .unwrap()
1961             .lock()
1962             .unwrap()
1963             .devices
1964             .is_none());
1965 
1966         let result = vmm.vm_add_device(device_config.clone());
1967         assert!(result.is_ok());
1968         assert!(result.unwrap().is_none());
1969         assert_eq!(
1970             vmm.vm_config
1971                 .as_ref()
1972                 .unwrap()
1973                 .lock()
1974                 .unwrap()
1975                 .devices
1976                 .clone()
1977                 .unwrap()
1978                 .len(),
1979             1
1980         );
1981         assert_eq!(
1982             vmm.vm_config
1983                 .as_ref()
1984                 .unwrap()
1985                 .lock()
1986                 .unwrap()
1987                 .devices
1988                 .clone()
1989                 .unwrap()[0],
1990             device_config
1991         );
1992     }
1993 
1994     #[test]
1995     fn test_vmm_vm_cold_add_user_device() {
1996         let mut vmm = create_dummy_vmm();
1997         let user_device_config =
1998             UserDeviceConfig::parse("socket=/path/to/socket,id=8,pci_segment=2").unwrap();
1999 
2000         assert!(matches!(
2001             vmm.vm_add_user_device(user_device_config.clone()),
2002             Err(VmError::VmNotCreated)
2003         ));
2004 
2005         let _ = vmm.vm_create(create_dummy_vm_config());
2006         assert!(vmm
2007             .vm_config
2008             .as_ref()
2009             .unwrap()
2010             .lock()
2011             .unwrap()
2012             .user_devices
2013             .is_none());
2014 
2015         let result = vmm.vm_add_user_device(user_device_config.clone());
2016         assert!(result.is_ok());
2017         assert!(result.unwrap().is_none());
2018         assert_eq!(
2019             vmm.vm_config
2020                 .as_ref()
2021                 .unwrap()
2022                 .lock()
2023                 .unwrap()
2024                 .user_devices
2025                 .clone()
2026                 .unwrap()
2027                 .len(),
2028             1
2029         );
2030         assert_eq!(
2031             vmm.vm_config
2032                 .as_ref()
2033                 .unwrap()
2034                 .lock()
2035                 .unwrap()
2036                 .user_devices
2037                 .clone()
2038                 .unwrap()[0],
2039             user_device_config
2040         );
2041     }
2042 
2043     #[test]
2044     fn test_vmm_vm_cold_add_disk() {
2045         let mut vmm = create_dummy_vmm();
2046         let disk_config = DiskConfig::parse("path=/path/to_file").unwrap();
2047 
2048         assert!(matches!(
2049             vmm.vm_add_disk(disk_config.clone()),
2050             Err(VmError::VmNotCreated)
2051         ));
2052 
2053         let _ = vmm.vm_create(create_dummy_vm_config());
2054         assert!(vmm
2055             .vm_config
2056             .as_ref()
2057             .unwrap()
2058             .lock()
2059             .unwrap()
2060             .disks
2061             .is_none());
2062 
2063         let result = vmm.vm_add_disk(disk_config.clone());
2064         assert!(result.is_ok());
2065         assert!(result.unwrap().is_none());
2066         assert_eq!(
2067             vmm.vm_config
2068                 .as_ref()
2069                 .unwrap()
2070                 .lock()
2071                 .unwrap()
2072                 .disks
2073                 .clone()
2074                 .unwrap()
2075                 .len(),
2076             1
2077         );
2078         assert_eq!(
2079             vmm.vm_config
2080                 .as_ref()
2081                 .unwrap()
2082                 .lock()
2083                 .unwrap()
2084                 .disks
2085                 .clone()
2086                 .unwrap()[0],
2087             disk_config
2088         );
2089     }
2090 
2091     #[test]
2092     fn test_vmm_vm_cold_add_fs() {
2093         let mut vmm = create_dummy_vmm();
2094         let fs_config = FsConfig::parse("tag=mytag,socket=/tmp/sock").unwrap();
2095 
2096         assert!(matches!(
2097             vmm.vm_add_fs(fs_config.clone()),
2098             Err(VmError::VmNotCreated)
2099         ));
2100 
2101         let _ = vmm.vm_create(create_dummy_vm_config());
2102         assert!(vmm.vm_config.as_ref().unwrap().lock().unwrap().fs.is_none());
2103 
2104         let result = vmm.vm_add_fs(fs_config.clone());
2105         assert!(result.is_ok());
2106         assert!(result.unwrap().is_none());
2107         assert_eq!(
2108             vmm.vm_config
2109                 .as_ref()
2110                 .unwrap()
2111                 .lock()
2112                 .unwrap()
2113                 .fs
2114                 .clone()
2115                 .unwrap()
2116                 .len(),
2117             1
2118         );
2119         assert_eq!(
2120             vmm.vm_config
2121                 .as_ref()
2122                 .unwrap()
2123                 .lock()
2124                 .unwrap()
2125                 .fs
2126                 .clone()
2127                 .unwrap()[0],
2128             fs_config
2129         );
2130     }
2131 
2132     #[test]
2133     fn test_vmm_vm_cold_add_pmem() {
2134         let mut vmm = create_dummy_vmm();
2135         let pmem_config = PmemConfig::parse("file=/tmp/pmem,size=128M").unwrap();
2136 
2137         assert!(matches!(
2138             vmm.vm_add_pmem(pmem_config.clone()),
2139             Err(VmError::VmNotCreated)
2140         ));
2141 
2142         let _ = vmm.vm_create(create_dummy_vm_config());
2143         assert!(vmm
2144             .vm_config
2145             .as_ref()
2146             .unwrap()
2147             .lock()
2148             .unwrap()
2149             .pmem
2150             .is_none());
2151 
2152         let result = vmm.vm_add_pmem(pmem_config.clone());
2153         assert!(result.is_ok());
2154         assert!(result.unwrap().is_none());
2155         assert_eq!(
2156             vmm.vm_config
2157                 .as_ref()
2158                 .unwrap()
2159                 .lock()
2160                 .unwrap()
2161                 .pmem
2162                 .clone()
2163                 .unwrap()
2164                 .len(),
2165             1
2166         );
2167         assert_eq!(
2168             vmm.vm_config
2169                 .as_ref()
2170                 .unwrap()
2171                 .lock()
2172                 .unwrap()
2173                 .pmem
2174                 .clone()
2175                 .unwrap()[0],
2176             pmem_config
2177         );
2178     }
2179 
2180     #[test]
2181     fn test_vmm_vm_cold_add_net() {
2182         let mut vmm = create_dummy_vmm();
2183         let net_config = NetConfig::parse(
2184             "mac=de:ad:be:ef:12:34,host_mac=12:34:de:ad:be:ef,vhost_user=true,socket=/tmp/sock",
2185         )
2186         .unwrap();
2187 
2188         assert!(matches!(
2189             vmm.vm_add_net(net_config.clone()),
2190             Err(VmError::VmNotCreated)
2191         ));
2192 
2193         let _ = vmm.vm_create(create_dummy_vm_config());
2194         assert!(vmm
2195             .vm_config
2196             .as_ref()
2197             .unwrap()
2198             .lock()
2199             .unwrap()
2200             .net
2201             .is_none());
2202 
2203         let result = vmm.vm_add_net(net_config.clone());
2204         assert!(result.is_ok());
2205         assert!(result.unwrap().is_none());
2206         assert_eq!(
2207             vmm.vm_config
2208                 .as_ref()
2209                 .unwrap()
2210                 .lock()
2211                 .unwrap()
2212                 .net
2213                 .clone()
2214                 .unwrap()
2215                 .len(),
2216             1
2217         );
2218         assert_eq!(
2219             vmm.vm_config
2220                 .as_ref()
2221                 .unwrap()
2222                 .lock()
2223                 .unwrap()
2224                 .net
2225                 .clone()
2226                 .unwrap()[0],
2227             net_config
2228         );
2229     }
2230 
2231     #[test]
2232     fn test_vmm_vm_cold_add_vdpa() {
2233         let mut vmm = create_dummy_vmm();
2234         let vdpa_config = VdpaConfig::parse("path=/dev/vhost-vdpa,num_queues=2").unwrap();
2235 
2236         assert!(matches!(
2237             vmm.vm_add_vdpa(vdpa_config.clone()),
2238             Err(VmError::VmNotCreated)
2239         ));
2240 
2241         let _ = vmm.vm_create(create_dummy_vm_config());
2242         assert!(vmm
2243             .vm_config
2244             .as_ref()
2245             .unwrap()
2246             .lock()
2247             .unwrap()
2248             .vdpa
2249             .is_none());
2250 
2251         let result = vmm.vm_add_vdpa(vdpa_config.clone());
2252         assert!(result.is_ok());
2253         assert!(result.unwrap().is_none());
2254         assert_eq!(
2255             vmm.vm_config
2256                 .as_ref()
2257                 .unwrap()
2258                 .lock()
2259                 .unwrap()
2260                 .vdpa
2261                 .clone()
2262                 .unwrap()
2263                 .len(),
2264             1
2265         );
2266         assert_eq!(
2267             vmm.vm_config
2268                 .as_ref()
2269                 .unwrap()
2270                 .lock()
2271                 .unwrap()
2272                 .vdpa
2273                 .clone()
2274                 .unwrap()[0],
2275             vdpa_config
2276         );
2277     }
2278 
2279     #[test]
2280     fn test_vmm_vm_cold_add_vsock() {
2281         let mut vmm = create_dummy_vmm();
2282         let vsock_config = VsockConfig::parse("socket=/tmp/sock,cid=1,iommu=on").unwrap();
2283 
2284         assert!(matches!(
2285             vmm.vm_add_vsock(vsock_config.clone()),
2286             Err(VmError::VmNotCreated)
2287         ));
2288 
2289         let _ = vmm.vm_create(create_dummy_vm_config());
2290         assert!(vmm
2291             .vm_config
2292             .as_ref()
2293             .unwrap()
2294             .lock()
2295             .unwrap()
2296             .vsock
2297             .is_none());
2298 
2299         let result = vmm.vm_add_vsock(vsock_config.clone());
2300         assert!(result.is_ok());
2301         assert!(result.unwrap().is_none());
2302         assert_eq!(
2303             vmm.vm_config
2304                 .as_ref()
2305                 .unwrap()
2306                 .lock()
2307                 .unwrap()
2308                 .vsock
2309                 .clone()
2310                 .unwrap(),
2311             vsock_config
2312         );
2313     }
2314 }
2315