1 // Copyright © 2019 Intel Corporation 2 // 3 // SPDX-License-Identifier: Apache-2.0 4 // 5 6 #[macro_use] 7 extern crate event_monitor; 8 #[macro_use] 9 extern crate log; 10 11 use std::collections::HashMap; 12 use std::fs::File; 13 use std::io::{stdout, Read, Write}; 14 use std::net::{TcpListener, TcpStream}; 15 use std::os::unix::io::{AsRawFd, FromRawFd, RawFd}; 16 use std::os::unix::net::{UnixListener, UnixStream}; 17 use std::panic::AssertUnwindSafe; 18 use std::path::PathBuf; 19 use std::rc::Rc; 20 use std::sync::mpsc::{Receiver, RecvError, SendError, Sender}; 21 use std::sync::{Arc, Mutex}; 22 #[cfg(not(target_arch = "riscv64"))] 23 use std::time::Instant; 24 use std::{io, result, thread}; 25 26 use anyhow::anyhow; 27 #[cfg(feature = "dbus_api")] 28 use api::dbus::{DBusApiOptions, DBusApiShutdownChannels}; 29 use api::http::HttpApiHandle; 30 use console_devices::{pre_create_console_devices, ConsoleInfo}; 31 use landlock::LandlockError; 32 use libc::{tcsetattr, termios, EFD_NONBLOCK, SIGINT, SIGTERM, TCSANOW}; 33 use memory_manager::MemoryManagerSnapshotData; 34 use pci::PciBdf; 35 use seccompiler::{apply_filter, SeccompAction}; 36 use serde::ser::{SerializeStruct, Serializer}; 37 use serde::{Deserialize, Serialize}; 38 use signal_hook::iterator::{Handle, Signals}; 39 use thiserror::Error; 40 use tracer::trace_scoped; 41 use vm_memory::bitmap::{AtomicBitmap, BitmapSlice}; 42 use vm_memory::{ReadVolatile, VolatileMemoryError, VolatileSlice, WriteVolatile}; 43 use vm_migration::protocol::*; 44 use vm_migration::{Migratable, MigratableError, Pausable, Snapshot, Snapshottable, Transportable}; 45 use vmm_sys_util::eventfd::EventFd; 46 use vmm_sys_util::signal::unblock_signal; 47 use vmm_sys_util::sock_ctrl_msg::ScmSocket; 48 49 use crate::api::{ 50 ApiRequest, ApiResponse, RequestHandler, VmInfoResponse, VmReceiveMigrationData, 51 VmSendMigrationData, VmmPingResponse, 52 }; 53 use crate::config::{add_to_config, RestoreConfig}; 54 #[cfg(all(target_arch = "x86_64", feature = "guest_debug"))] 55 use crate::coredump::GuestDebuggable; 56 use crate::landlock::Landlock; 57 use crate::memory_manager::MemoryManager; 58 #[cfg(all(feature = "kvm", target_arch = "x86_64"))] 59 use crate::migration::get_vm_snapshot; 60 use crate::migration::{recv_vm_config, recv_vm_state}; 61 use crate::seccomp_filters::{get_seccomp_filter, Thread}; 62 use crate::vm::{Error as VmError, Vm, VmState}; 63 use crate::vm_config::{ 64 DeviceConfig, DiskConfig, FsConfig, NetConfig, PmemConfig, UserDeviceConfig, VdpaConfig, 65 VmConfig, VsockConfig, 66 }; 67 68 #[cfg(not(target_arch = "riscv64"))] 69 mod acpi; 70 pub mod api; 71 mod clone3; 72 pub mod config; 73 pub mod console_devices; 74 #[cfg(all(target_arch = "x86_64", feature = "guest_debug"))] 75 mod coredump; 76 pub mod cpu; 77 pub mod device_manager; 78 pub mod device_tree; 79 #[cfg(feature = "guest_debug")] 80 mod gdb; 81 #[cfg(feature = "igvm")] 82 mod igvm; 83 pub mod interrupt; 84 pub mod landlock; 85 pub mod memory_manager; 86 pub mod migration; 87 mod pci_segment; 88 pub mod seccomp_filters; 89 mod serial_manager; 90 mod sigwinch_listener; 91 pub mod vm; 92 pub mod vm_config; 93 94 type GuestMemoryMmap = vm_memory::GuestMemoryMmap<AtomicBitmap>; 95 type GuestRegionMmap = vm_memory::GuestRegionMmap<AtomicBitmap>; 96 97 /// Errors associated with VMM management 98 #[derive(Debug, Error)] 99 pub enum Error { 100 /// API request receive error 101 #[error("Error receiving API request: {0}")] 102 ApiRequestRecv(#[source] RecvError), 103 104 /// API response send error 105 #[error("Error sending API request: {0}")] 106 ApiResponseSend(#[source] SendError<ApiResponse>), 107 108 /// Cannot bind to the UNIX domain socket path 109 #[error("Error binding to UNIX domain socket: {0}")] 110 Bind(#[source] io::Error), 111 112 /// Cannot clone EventFd. 113 #[error("Error cloning EventFd: {0}")] 114 EventFdClone(#[source] io::Error), 115 116 /// Cannot create EventFd. 117 #[error("Error creating EventFd: {0}")] 118 EventFdCreate(#[source] io::Error), 119 120 /// Cannot read from EventFd. 121 #[error("Error reading from EventFd: {0}")] 122 EventFdRead(#[source] io::Error), 123 124 /// Cannot create epoll context. 125 #[error("Error creating epoll context: {0}")] 126 Epoll(#[source] io::Error), 127 128 /// Cannot create HTTP thread 129 #[error("Error spawning HTTP thread: {0}")] 130 HttpThreadSpawn(#[source] io::Error), 131 132 /// Cannot create D-Bus thread 133 #[cfg(feature = "dbus_api")] 134 #[error("Error spawning D-Bus thread: {0}")] 135 DBusThreadSpawn(#[source] io::Error), 136 137 /// Cannot start D-Bus session 138 #[cfg(feature = "dbus_api")] 139 #[error("Error starting D-Bus session: {0}")] 140 CreateDBusSession(#[source] zbus::Error), 141 142 /// Cannot create `event-monitor` thread 143 #[error("Error spawning `event-monitor` thread: {0}")] 144 EventMonitorThreadSpawn(#[source] io::Error), 145 146 /// Cannot handle the VM STDIN stream 147 #[error("Error handling VM stdin: {0:?}")] 148 Stdin(#[source] VmError), 149 150 /// Cannot handle the VM pty stream 151 #[error("Error handling VM pty: {0:?}")] 152 Pty(#[source] VmError), 153 154 /// Cannot reboot the VM 155 #[error("Error rebooting VM: {0:?}")] 156 VmReboot(#[source] VmError), 157 158 /// Cannot create VMM thread 159 #[error("Error spawning VMM thread {0:?}")] 160 VmmThreadSpawn(#[source] io::Error), 161 162 /// Cannot shut the VMM down 163 #[error("Error shutting down VMM: {0:?}")] 164 VmmShutdown(#[source] VmError), 165 166 /// Cannot create seccomp filter 167 #[error("Error creating seccomp filter: {0}")] 168 CreateSeccompFilter(#[source] seccompiler::Error), 169 170 /// Cannot apply seccomp filter 171 #[error("Error applying seccomp filter: {0}")] 172 ApplySeccompFilter(#[source] seccompiler::Error), 173 174 /// Error activating virtio devices 175 #[error("Error activating virtio devices: {0:?}")] 176 ActivateVirtioDevices(#[source] VmError), 177 178 /// Error creating API server 179 #[error("Error creating API server {0:?}")] 180 // TODO #[source] once the type implements Error 181 CreateApiServer(micro_http::ServerError), 182 183 /// Error binding API server socket 184 #[error("Error creation API server's socket {0:?}")] 185 CreateApiServerSocket(#[source] io::Error), 186 187 #[cfg(feature = "guest_debug")] 188 #[error("Failed to start the GDB thread: {0}")] 189 GdbThreadSpawn(#[source] io::Error), 190 191 /// GDB request receive error 192 #[cfg(feature = "guest_debug")] 193 #[error("Error receiving GDB request: {0}")] 194 GdbRequestRecv(#[source] RecvError), 195 196 /// GDB response send error 197 #[cfg(feature = "guest_debug")] 198 #[error("Error sending GDB request: {0}")] 199 GdbResponseSend(#[source] SendError<gdb::GdbResponse>), 200 201 #[error("Cannot spawn a signal handler thread: {0}")] 202 SignalHandlerSpawn(#[source] io::Error), 203 204 #[error("Failed to join on threads: {0:?}")] 205 ThreadCleanup(std::boxed::Box<dyn std::any::Any + std::marker::Send>), 206 207 /// Cannot create Landlock object 208 #[error("Error creating landlock object: {0}")] 209 CreateLandlock(#[source] LandlockError), 210 211 /// Cannot apply landlock based sandboxing 212 #[error("Error applying landlock: {0}")] 213 ApplyLandlock(#[source] LandlockError), 214 } 215 pub type Result<T> = result::Result<T, Error>; 216 217 #[derive(Debug, Clone, Copy, PartialEq, Eq)] 218 #[repr(u64)] 219 pub enum EpollDispatch { 220 Exit = 0, 221 Reset = 1, 222 Api = 2, 223 ActivateVirtioDevices = 3, 224 Debug = 4, 225 Unknown, 226 } 227 228 impl From<u64> for EpollDispatch { 229 fn from(v: u64) -> Self { 230 use EpollDispatch::*; 231 match v { 232 0 => Exit, 233 1 => Reset, 234 2 => Api, 235 3 => ActivateVirtioDevices, 236 4 => Debug, 237 _ => Unknown, 238 } 239 } 240 } 241 242 enum SocketStream { 243 Unix(UnixStream), 244 Tcp(TcpStream), 245 } 246 247 impl Read for SocketStream { 248 fn read(&mut self, buf: &mut [u8]) -> std::io::Result<usize> { 249 match self { 250 SocketStream::Unix(stream) => stream.read(buf), 251 SocketStream::Tcp(stream) => stream.read(buf), 252 } 253 } 254 } 255 256 impl Write for SocketStream { 257 fn write(&mut self, buf: &[u8]) -> std::io::Result<usize> { 258 match self { 259 SocketStream::Unix(stream) => stream.write(buf), 260 SocketStream::Tcp(stream) => stream.write(buf), 261 } 262 } 263 264 fn flush(&mut self) -> std::io::Result<()> { 265 match self { 266 SocketStream::Unix(stream) => stream.flush(), 267 SocketStream::Tcp(stream) => stream.flush(), 268 } 269 } 270 } 271 272 impl AsRawFd for SocketStream { 273 fn as_raw_fd(&self) -> RawFd { 274 match self { 275 SocketStream::Unix(s) => s.as_raw_fd(), 276 SocketStream::Tcp(s) => s.as_raw_fd(), 277 } 278 } 279 } 280 281 impl ReadVolatile for SocketStream { 282 fn read_volatile<B: BitmapSlice>( 283 &mut self, 284 buf: &mut VolatileSlice<B>, 285 ) -> std::result::Result<usize, VolatileMemoryError> { 286 match self { 287 SocketStream::Unix(s) => s.read_volatile(buf), 288 SocketStream::Tcp(s) => s.read_volatile(buf), 289 } 290 } 291 292 fn read_exact_volatile<B: BitmapSlice>( 293 &mut self, 294 buf: &mut VolatileSlice<B>, 295 ) -> std::result::Result<(), VolatileMemoryError> { 296 match self { 297 SocketStream::Unix(s) => s.read_exact_volatile(buf), 298 SocketStream::Tcp(s) => s.read_exact_volatile(buf), 299 } 300 } 301 } 302 303 impl WriteVolatile for SocketStream { 304 fn write_volatile<B: BitmapSlice>( 305 &mut self, 306 buf: &VolatileSlice<B>, 307 ) -> std::result::Result<usize, VolatileMemoryError> { 308 match self { 309 SocketStream::Unix(s) => s.write_volatile(buf), 310 SocketStream::Tcp(s) => s.write_volatile(buf), 311 } 312 } 313 314 fn write_all_volatile<B: BitmapSlice>( 315 &mut self, 316 buf: &VolatileSlice<B>, 317 ) -> std::result::Result<(), VolatileMemoryError> { 318 match self { 319 SocketStream::Unix(s) => s.write_all_volatile(buf), 320 SocketStream::Tcp(s) => s.write_all_volatile(buf), 321 } 322 } 323 } 324 325 pub struct EpollContext { 326 epoll_file: File, 327 } 328 329 impl EpollContext { 330 pub fn new() -> result::Result<EpollContext, io::Error> { 331 let epoll_fd = epoll::create(true)?; 332 // Use 'File' to enforce closing on 'epoll_fd' 333 // SAFETY: the epoll_fd returned by epoll::create is valid and owned by us. 334 let epoll_file = unsafe { File::from_raw_fd(epoll_fd) }; 335 336 Ok(EpollContext { epoll_file }) 337 } 338 339 pub fn add_event<T>(&mut self, fd: &T, token: EpollDispatch) -> result::Result<(), io::Error> 340 where 341 T: AsRawFd, 342 { 343 let dispatch_index = token as u64; 344 epoll::ctl( 345 self.epoll_file.as_raw_fd(), 346 epoll::ControlOptions::EPOLL_CTL_ADD, 347 fd.as_raw_fd(), 348 epoll::Event::new(epoll::Events::EPOLLIN, dispatch_index), 349 )?; 350 351 Ok(()) 352 } 353 354 #[cfg(fuzzing)] 355 pub fn add_event_custom<T>( 356 &mut self, 357 fd: &T, 358 id: u64, 359 evts: epoll::Events, 360 ) -> result::Result<(), io::Error> 361 where 362 T: AsRawFd, 363 { 364 epoll::ctl( 365 self.epoll_file.as_raw_fd(), 366 epoll::ControlOptions::EPOLL_CTL_ADD, 367 fd.as_raw_fd(), 368 epoll::Event::new(evts, id), 369 )?; 370 371 Ok(()) 372 } 373 } 374 375 impl AsRawFd for EpollContext { 376 fn as_raw_fd(&self) -> RawFd { 377 self.epoll_file.as_raw_fd() 378 } 379 } 380 381 pub struct PciDeviceInfo { 382 pub id: String, 383 pub bdf: PciBdf, 384 } 385 386 impl Serialize for PciDeviceInfo { 387 fn serialize<S>(&self, serializer: S) -> std::result::Result<S::Ok, S::Error> 388 where 389 S: Serializer, 390 { 391 let bdf_str = self.bdf.to_string(); 392 393 // Serialize the structure. 394 let mut state = serializer.serialize_struct("PciDeviceInfo", 2)?; 395 state.serialize_field("id", &self.id)?; 396 state.serialize_field("bdf", &bdf_str)?; 397 state.end() 398 } 399 } 400 401 pub fn feature_list() -> Vec<String> { 402 vec![ 403 #[cfg(feature = "dbus_api")] 404 "dbus_api".to_string(), 405 #[cfg(feature = "dhat-heap")] 406 "dhat-heap".to_string(), 407 #[cfg(feature = "guest_debug")] 408 "guest_debug".to_string(), 409 #[cfg(feature = "igvm")] 410 "igvm".to_string(), 411 #[cfg(feature = "io_uring")] 412 "io_uring".to_string(), 413 #[cfg(feature = "kvm")] 414 "kvm".to_string(), 415 #[cfg(feature = "mshv")] 416 "mshv".to_string(), 417 #[cfg(feature = "sev_snp")] 418 "sev_snp".to_string(), 419 #[cfg(feature = "tdx")] 420 "tdx".to_string(), 421 #[cfg(feature = "tracing")] 422 "tracing".to_string(), 423 ] 424 } 425 426 pub fn start_event_monitor_thread( 427 mut monitor: event_monitor::Monitor, 428 seccomp_action: &SeccompAction, 429 landlock_enable: bool, 430 hypervisor_type: hypervisor::HypervisorType, 431 exit_event: EventFd, 432 ) -> Result<thread::JoinHandle<Result<()>>> { 433 // Retrieve seccomp filter 434 let seccomp_filter = get_seccomp_filter(seccomp_action, Thread::EventMonitor, hypervisor_type) 435 .map_err(Error::CreateSeccompFilter)?; 436 437 thread::Builder::new() 438 .name("event-monitor".to_owned()) 439 .spawn(move || { 440 // Apply seccomp filter 441 if !seccomp_filter.is_empty() { 442 apply_filter(&seccomp_filter) 443 .map_err(Error::ApplySeccompFilter) 444 .map_err(|e| { 445 error!("Error applying seccomp filter: {:?}", e); 446 exit_event.write(1).ok(); 447 e 448 })?; 449 } 450 if landlock_enable { 451 Landlock::new() 452 .map_err(Error::CreateLandlock)? 453 .restrict_self() 454 .map_err(Error::ApplyLandlock) 455 .map_err(|e| { 456 error!("Error applying landlock to event monitor thread: {:?}", e); 457 exit_event.write(1).ok(); 458 e 459 })?; 460 } 461 462 std::panic::catch_unwind(AssertUnwindSafe(move || { 463 while let Ok(event) = monitor.rx.recv() { 464 let event = Arc::new(event); 465 466 if let Some(ref mut file) = monitor.file { 467 file.write_all(event.as_bytes().as_ref()).ok(); 468 file.write_all(b"\n\n").ok(); 469 } 470 471 for tx in monitor.broadcast.iter() { 472 tx.send(event.clone()).ok(); 473 } 474 } 475 })) 476 .map_err(|_| { 477 error!("`event-monitor` thread panicked"); 478 exit_event.write(1).ok(); 479 }) 480 .ok(); 481 482 Ok(()) 483 }) 484 .map_err(Error::EventMonitorThreadSpawn) 485 } 486 487 #[allow(unused_variables)] 488 #[allow(clippy::too_many_arguments)] 489 pub fn start_vmm_thread( 490 vmm_version: VmmVersionInfo, 491 http_path: &Option<String>, 492 http_fd: Option<RawFd>, 493 #[cfg(feature = "dbus_api")] dbus_options: Option<DBusApiOptions>, 494 api_event: EventFd, 495 api_sender: Sender<ApiRequest>, 496 api_receiver: Receiver<ApiRequest>, 497 #[cfg(feature = "guest_debug")] debug_path: Option<PathBuf>, 498 #[cfg(feature = "guest_debug")] debug_event: EventFd, 499 #[cfg(feature = "guest_debug")] vm_debug_event: EventFd, 500 exit_event: EventFd, 501 seccomp_action: &SeccompAction, 502 hypervisor: Arc<dyn hypervisor::Hypervisor>, 503 landlock_enable: bool, 504 ) -> Result<VmmThreadHandle> { 505 #[cfg(feature = "guest_debug")] 506 let gdb_hw_breakpoints = hypervisor.get_guest_debug_hw_bps(); 507 #[cfg(feature = "guest_debug")] 508 let (gdb_sender, gdb_receiver) = std::sync::mpsc::channel(); 509 #[cfg(feature = "guest_debug")] 510 let gdb_debug_event = debug_event.try_clone().map_err(Error::EventFdClone)?; 511 #[cfg(feature = "guest_debug")] 512 let gdb_vm_debug_event = vm_debug_event.try_clone().map_err(Error::EventFdClone)?; 513 514 let api_event_clone = api_event.try_clone().map_err(Error::EventFdClone)?; 515 let hypervisor_type = hypervisor.hypervisor_type(); 516 517 // Retrieve seccomp filter 518 let vmm_seccomp_filter = get_seccomp_filter(seccomp_action, Thread::Vmm, hypervisor_type) 519 .map_err(Error::CreateSeccompFilter)?; 520 521 let vmm_seccomp_action = seccomp_action.clone(); 522 let thread = { 523 let exit_event = exit_event.try_clone().map_err(Error::EventFdClone)?; 524 thread::Builder::new() 525 .name("vmm".to_string()) 526 .spawn(move || { 527 // Apply seccomp filter for VMM thread. 528 if !vmm_seccomp_filter.is_empty() { 529 apply_filter(&vmm_seccomp_filter).map_err(Error::ApplySeccompFilter)?; 530 } 531 532 let mut vmm = Vmm::new( 533 vmm_version, 534 api_event, 535 #[cfg(feature = "guest_debug")] 536 debug_event, 537 #[cfg(feature = "guest_debug")] 538 vm_debug_event, 539 vmm_seccomp_action, 540 hypervisor, 541 exit_event, 542 )?; 543 544 vmm.setup_signal_handler(landlock_enable)?; 545 546 vmm.control_loop( 547 Rc::new(api_receiver), 548 #[cfg(feature = "guest_debug")] 549 Rc::new(gdb_receiver), 550 ) 551 }) 552 .map_err(Error::VmmThreadSpawn)? 553 }; 554 555 // The VMM thread is started, we can start the dbus thread 556 // and start serving HTTP requests 557 #[cfg(feature = "dbus_api")] 558 let dbus_shutdown_chs = match dbus_options { 559 Some(opts) => { 560 let (_, chs) = api::start_dbus_thread( 561 opts, 562 api_event_clone.try_clone().map_err(Error::EventFdClone)?, 563 api_sender.clone(), 564 seccomp_action, 565 exit_event.try_clone().map_err(Error::EventFdClone)?, 566 hypervisor_type, 567 )?; 568 Some(chs) 569 } 570 None => None, 571 }; 572 573 let http_api_handle = if let Some(http_path) = http_path { 574 Some(api::start_http_path_thread( 575 http_path, 576 api_event_clone, 577 api_sender, 578 seccomp_action, 579 exit_event, 580 hypervisor_type, 581 landlock_enable, 582 )?) 583 } else if let Some(http_fd) = http_fd { 584 Some(api::start_http_fd_thread( 585 http_fd, 586 api_event_clone, 587 api_sender, 588 seccomp_action, 589 exit_event, 590 hypervisor_type, 591 landlock_enable, 592 )?) 593 } else { 594 None 595 }; 596 597 #[cfg(feature = "guest_debug")] 598 if let Some(debug_path) = debug_path { 599 let target = gdb::GdbStub::new( 600 gdb_sender, 601 gdb_debug_event, 602 gdb_vm_debug_event, 603 gdb_hw_breakpoints, 604 ); 605 thread::Builder::new() 606 .name("gdb".to_owned()) 607 .spawn(move || gdb::gdb_thread(target, &debug_path)) 608 .map_err(Error::GdbThreadSpawn)?; 609 } 610 611 Ok(VmmThreadHandle { 612 thread_handle: thread, 613 #[cfg(feature = "dbus_api")] 614 dbus_shutdown_chs, 615 http_api_handle, 616 }) 617 } 618 619 #[derive(Clone, Deserialize, Serialize)] 620 struct VmMigrationConfig { 621 vm_config: Arc<Mutex<VmConfig>>, 622 #[cfg(all(feature = "kvm", target_arch = "x86_64"))] 623 common_cpuid: Vec<hypervisor::arch::x86::CpuIdEntry>, 624 memory_manager_data: MemoryManagerSnapshotData, 625 } 626 627 #[derive(Debug, Clone)] 628 pub struct VmmVersionInfo { 629 pub build_version: String, 630 pub version: String, 631 } 632 633 impl VmmVersionInfo { 634 pub fn new(build_version: &str, version: &str) -> Self { 635 Self { 636 build_version: build_version.to_owned(), 637 version: version.to_owned(), 638 } 639 } 640 } 641 642 pub struct VmmThreadHandle { 643 pub thread_handle: thread::JoinHandle<Result<()>>, 644 #[cfg(feature = "dbus_api")] 645 pub dbus_shutdown_chs: Option<DBusApiShutdownChannels>, 646 pub http_api_handle: Option<HttpApiHandle>, 647 } 648 649 pub struct Vmm { 650 epoll: EpollContext, 651 exit_evt: EventFd, 652 reset_evt: EventFd, 653 api_evt: EventFd, 654 #[cfg(feature = "guest_debug")] 655 debug_evt: EventFd, 656 #[cfg(feature = "guest_debug")] 657 vm_debug_evt: EventFd, 658 version: VmmVersionInfo, 659 vm: Option<Vm>, 660 vm_config: Option<Arc<Mutex<VmConfig>>>, 661 seccomp_action: SeccompAction, 662 hypervisor: Arc<dyn hypervisor::Hypervisor>, 663 activate_evt: EventFd, 664 signals: Option<Handle>, 665 threads: Vec<thread::JoinHandle<()>>, 666 original_termios_opt: Arc<Mutex<Option<termios>>>, 667 console_resize_pipe: Option<Arc<File>>, 668 console_info: Option<ConsoleInfo>, 669 } 670 671 impl Vmm { 672 pub const HANDLED_SIGNALS: [i32; 2] = [SIGTERM, SIGINT]; 673 674 fn signal_handler( 675 mut signals: Signals, 676 original_termios_opt: Arc<Mutex<Option<termios>>>, 677 exit_evt: &EventFd, 678 ) { 679 for sig in &Self::HANDLED_SIGNALS { 680 unblock_signal(*sig).unwrap(); 681 } 682 683 for signal in signals.forever() { 684 match signal { 685 SIGTERM | SIGINT => { 686 if exit_evt.write(1).is_err() { 687 // Resetting the terminal is usually done as the VMM exits 688 if let Ok(lock) = original_termios_opt.lock() { 689 if let Some(termios) = *lock { 690 // SAFETY: FFI call 691 let _ = unsafe { 692 tcsetattr(stdout().lock().as_raw_fd(), TCSANOW, &termios) 693 }; 694 } 695 } else { 696 warn!("Failed to lock original termios"); 697 } 698 699 std::process::exit(1); 700 } 701 } 702 _ => (), 703 } 704 } 705 } 706 707 fn setup_signal_handler(&mut self, landlock_enable: bool) -> Result<()> { 708 let signals = Signals::new(Self::HANDLED_SIGNALS); 709 match signals { 710 Ok(signals) => { 711 self.signals = Some(signals.handle()); 712 let exit_evt = self.exit_evt.try_clone().map_err(Error::EventFdClone)?; 713 let original_termios_opt = Arc::clone(&self.original_termios_opt); 714 715 let signal_handler_seccomp_filter = get_seccomp_filter( 716 &self.seccomp_action, 717 Thread::SignalHandler, 718 self.hypervisor.hypervisor_type(), 719 ) 720 .map_err(Error::CreateSeccompFilter)?; 721 self.threads.push( 722 thread::Builder::new() 723 .name("vmm_signal_handler".to_string()) 724 .spawn(move || { 725 if !signal_handler_seccomp_filter.is_empty() { 726 if let Err(e) = apply_filter(&signal_handler_seccomp_filter) 727 .map_err(Error::ApplySeccompFilter) 728 { 729 error!("Error applying seccomp filter: {:?}", e); 730 exit_evt.write(1).ok(); 731 return; 732 } 733 } 734 if landlock_enable{ 735 match Landlock::new() { 736 Ok(landlock) => { 737 let _ = landlock.restrict_self().map_err(Error::ApplyLandlock).map_err(|e| { 738 error!("Error applying Landlock to signal handler thread: {:?}", e); 739 exit_evt.write(1).ok(); 740 }); 741 } 742 Err(e) => { 743 error!("Error creating Landlock object: {:?}", e); 744 exit_evt.write(1).ok(); 745 } 746 }; 747 } 748 749 std::panic::catch_unwind(AssertUnwindSafe(|| { 750 Vmm::signal_handler(signals, original_termios_opt, &exit_evt); 751 })) 752 .map_err(|_| { 753 error!("vmm signal_handler thread panicked"); 754 exit_evt.write(1).ok() 755 }) 756 .ok(); 757 }) 758 .map_err(Error::SignalHandlerSpawn)?, 759 ); 760 } 761 Err(e) => error!("Signal not found {}", e), 762 } 763 Ok(()) 764 } 765 766 #[allow(clippy::too_many_arguments)] 767 fn new( 768 vmm_version: VmmVersionInfo, 769 api_evt: EventFd, 770 #[cfg(feature = "guest_debug")] debug_evt: EventFd, 771 #[cfg(feature = "guest_debug")] vm_debug_evt: EventFd, 772 seccomp_action: SeccompAction, 773 hypervisor: Arc<dyn hypervisor::Hypervisor>, 774 exit_evt: EventFd, 775 ) -> Result<Self> { 776 let mut epoll = EpollContext::new().map_err(Error::Epoll)?; 777 let reset_evt = EventFd::new(EFD_NONBLOCK).map_err(Error::EventFdCreate)?; 778 let activate_evt = EventFd::new(EFD_NONBLOCK).map_err(Error::EventFdCreate)?; 779 780 epoll 781 .add_event(&exit_evt, EpollDispatch::Exit) 782 .map_err(Error::Epoll)?; 783 784 epoll 785 .add_event(&reset_evt, EpollDispatch::Reset) 786 .map_err(Error::Epoll)?; 787 788 epoll 789 .add_event(&activate_evt, EpollDispatch::ActivateVirtioDevices) 790 .map_err(Error::Epoll)?; 791 792 epoll 793 .add_event(&api_evt, EpollDispatch::Api) 794 .map_err(Error::Epoll)?; 795 796 #[cfg(feature = "guest_debug")] 797 epoll 798 .add_event(&debug_evt, EpollDispatch::Debug) 799 .map_err(Error::Epoll)?; 800 801 Ok(Vmm { 802 epoll, 803 exit_evt, 804 reset_evt, 805 api_evt, 806 #[cfg(feature = "guest_debug")] 807 debug_evt, 808 #[cfg(feature = "guest_debug")] 809 vm_debug_evt, 810 version: vmm_version, 811 vm: None, 812 vm_config: None, 813 seccomp_action, 814 hypervisor, 815 activate_evt, 816 signals: None, 817 threads: vec![], 818 original_termios_opt: Arc::new(Mutex::new(None)), 819 console_resize_pipe: None, 820 console_info: None, 821 }) 822 } 823 824 fn vm_receive_config<T>( 825 &mut self, 826 req: &Request, 827 socket: &mut T, 828 existing_memory_files: Option<HashMap<u32, File>>, 829 ) -> std::result::Result<Arc<Mutex<MemoryManager>>, MigratableError> 830 where 831 T: Read + Write, 832 { 833 // Read in config data along with memory manager data 834 let mut data: Vec<u8> = Vec::new(); 835 data.resize_with(req.length() as usize, Default::default); 836 socket 837 .read_exact(&mut data) 838 .map_err(MigratableError::MigrateSocket)?; 839 840 let vm_migration_config: VmMigrationConfig = 841 serde_json::from_slice(&data).map_err(|e| { 842 MigratableError::MigrateReceive(anyhow!("Error deserialising config: {}", e)) 843 })?; 844 845 #[cfg(all(feature = "kvm", target_arch = "x86_64"))] 846 self.vm_check_cpuid_compatibility( 847 &vm_migration_config.vm_config, 848 &vm_migration_config.common_cpuid, 849 )?; 850 851 let config = vm_migration_config.vm_config.clone(); 852 self.vm_config = Some(vm_migration_config.vm_config); 853 self.console_info = Some(pre_create_console_devices(self).map_err(|e| { 854 MigratableError::MigrateReceive(anyhow!("Error creating console devices: {:?}", e)) 855 })?); 856 857 if self 858 .vm_config 859 .as_ref() 860 .unwrap() 861 .lock() 862 .unwrap() 863 .landlock_enable 864 { 865 apply_landlock(self.vm_config.as_ref().unwrap().clone()).map_err(|e| { 866 MigratableError::MigrateReceive(anyhow!("Error applying landlock: {:?}", e)) 867 })?; 868 } 869 870 let vm = Vm::create_hypervisor_vm( 871 &self.hypervisor, 872 #[cfg(feature = "tdx")] 873 false, 874 #[cfg(feature = "sev_snp")] 875 false, 876 #[cfg(feature = "sev_snp")] 877 config.lock().unwrap().memory.total_size(), 878 ) 879 .map_err(|e| { 880 MigratableError::MigrateReceive(anyhow!( 881 "Error creating hypervisor VM from snapshot: {:?}", 882 e 883 )) 884 })?; 885 886 let phys_bits = 887 vm::physical_bits(&self.hypervisor, config.lock().unwrap().cpus.max_phys_bits); 888 889 let memory_manager = MemoryManager::new( 890 vm, 891 &config.lock().unwrap().memory.clone(), 892 None, 893 phys_bits, 894 #[cfg(feature = "tdx")] 895 false, 896 Some(&vm_migration_config.memory_manager_data), 897 existing_memory_files, 898 #[cfg(target_arch = "x86_64")] 899 None, 900 ) 901 .map_err(|e| { 902 MigratableError::MigrateReceive(anyhow!( 903 "Error creating MemoryManager from snapshot: {:?}", 904 e 905 )) 906 })?; 907 908 Response::ok().write_to(socket)?; 909 910 Ok(memory_manager) 911 } 912 913 fn vm_receive_state<T>( 914 &mut self, 915 req: &Request, 916 socket: &mut T, 917 mm: Arc<Mutex<MemoryManager>>, 918 ) -> std::result::Result<(), MigratableError> 919 where 920 T: Read + Write, 921 { 922 // Read in state data 923 let mut data: Vec<u8> = Vec::new(); 924 data.resize_with(req.length() as usize, Default::default); 925 socket 926 .read_exact(&mut data) 927 .map_err(MigratableError::MigrateSocket)?; 928 let snapshot: Snapshot = serde_json::from_slice(&data).map_err(|e| { 929 MigratableError::MigrateReceive(anyhow!("Error deserialising snapshot: {}", e)) 930 })?; 931 932 let exit_evt = self.exit_evt.try_clone().map_err(|e| { 933 MigratableError::MigrateReceive(anyhow!("Error cloning exit EventFd: {}", e)) 934 })?; 935 let reset_evt = self.reset_evt.try_clone().map_err(|e| { 936 MigratableError::MigrateReceive(anyhow!("Error cloning reset EventFd: {}", e)) 937 })?; 938 #[cfg(feature = "guest_debug")] 939 let debug_evt = self.vm_debug_evt.try_clone().map_err(|e| { 940 MigratableError::MigrateReceive(anyhow!("Error cloning debug EventFd: {}", e)) 941 })?; 942 let activate_evt = self.activate_evt.try_clone().map_err(|e| { 943 MigratableError::MigrateReceive(anyhow!("Error cloning activate EventFd: {}", e)) 944 })?; 945 946 #[cfg(not(target_arch = "riscv64"))] 947 let timestamp = Instant::now(); 948 let hypervisor_vm = mm.lock().unwrap().vm.clone(); 949 let mut vm = Vm::new_from_memory_manager( 950 self.vm_config.clone().unwrap(), 951 mm, 952 hypervisor_vm, 953 exit_evt, 954 reset_evt, 955 #[cfg(feature = "guest_debug")] 956 debug_evt, 957 &self.seccomp_action, 958 self.hypervisor.clone(), 959 activate_evt, 960 #[cfg(not(target_arch = "riscv64"))] 961 timestamp, 962 self.console_info.clone(), 963 self.console_resize_pipe.clone(), 964 Arc::clone(&self.original_termios_opt), 965 Some(snapshot), 966 ) 967 .map_err(|e| { 968 MigratableError::MigrateReceive(anyhow!("Error creating VM from snapshot: {:?}", e)) 969 })?; 970 971 // Create VM 972 vm.restore().map_err(|e| { 973 Response::error().write_to(socket).ok(); 974 MigratableError::MigrateReceive(anyhow!("Failed restoring the Vm: {}", e)) 975 })?; 976 self.vm = Some(vm); 977 978 Response::ok().write_to(socket)?; 979 980 Ok(()) 981 } 982 983 fn vm_receive_memory<T>( 984 &mut self, 985 req: &Request, 986 socket: &mut T, 987 memory_manager: &mut MemoryManager, 988 ) -> std::result::Result<(), MigratableError> 989 where 990 T: Read + ReadVolatile + Write, 991 { 992 // Read table 993 let table = MemoryRangeTable::read_from(socket, req.length())?; 994 995 // And then read the memory itself 996 memory_manager 997 .receive_memory_regions(&table, socket) 998 .inspect_err(|_| { 999 Response::error().write_to(socket).ok(); 1000 })?; 1001 Response::ok().write_to(socket)?; 1002 Ok(()) 1003 } 1004 1005 fn socket_url_to_path(url: &str) -> result::Result<PathBuf, MigratableError> { 1006 url.strip_prefix("unix:") 1007 .ok_or_else(|| { 1008 MigratableError::MigrateSend(anyhow!("Could not extract path from URL: {}", url)) 1009 }) 1010 .map(|s| s.into()) 1011 } 1012 1013 fn send_migration_socket( 1014 destination_url: &str, 1015 ) -> std::result::Result<SocketStream, MigratableError> { 1016 if let Some(address) = destination_url.strip_prefix("tcp:") { 1017 info!("Connecting to TCP socket at {}", address); 1018 1019 let socket = TcpStream::connect(address).map_err(|e| { 1020 MigratableError::MigrateSend(anyhow!("Error connecting to TCP socket: {}", e)) 1021 })?; 1022 1023 Ok(SocketStream::Tcp(socket)) 1024 } else { 1025 let path = Vmm::socket_url_to_path(destination_url)?; 1026 info!("Connecting to UNIX socket at {:?}", path); 1027 1028 let socket = UnixStream::connect(&path).map_err(|e| { 1029 MigratableError::MigrateSend(anyhow!("Error connecting to UNIX socket: {}", e)) 1030 })?; 1031 1032 Ok(SocketStream::Unix(socket)) 1033 } 1034 } 1035 1036 fn receive_migration_socket( 1037 receiver_url: &str, 1038 ) -> std::result::Result<SocketStream, MigratableError> { 1039 if let Some(address) = receiver_url.strip_prefix("tcp:") { 1040 let listener = TcpListener::bind(address).map_err(|e| { 1041 MigratableError::MigrateReceive(anyhow!("Error binding to TCP socket: {}", e)) 1042 })?; 1043 1044 let (socket, _addr) = listener.accept().map_err(|e| { 1045 MigratableError::MigrateReceive(anyhow!( 1046 "Error accepting connection on TCP socket: {}", 1047 e 1048 )) 1049 })?; 1050 1051 Ok(SocketStream::Tcp(socket)) 1052 } else { 1053 let path = Vmm::socket_url_to_path(receiver_url)?; 1054 let listener = UnixListener::bind(&path).map_err(|e| { 1055 MigratableError::MigrateReceive(anyhow!("Error binding to UNIX socket: {}", e)) 1056 })?; 1057 1058 let (socket, _addr) = listener.accept().map_err(|e| { 1059 MigratableError::MigrateReceive(anyhow!( 1060 "Error accepting connection on UNIX socket: {}", 1061 e 1062 )) 1063 })?; 1064 1065 // Remove the UNIX socket file after accepting the connection 1066 std::fs::remove_file(&path).map_err(|e| { 1067 MigratableError::MigrateReceive(anyhow!("Error removing UNIX socket file: {}", e)) 1068 })?; 1069 1070 Ok(SocketStream::Unix(socket)) 1071 } 1072 } 1073 1074 // Returns true if there were dirty pages to send 1075 fn vm_maybe_send_dirty_pages( 1076 vm: &mut Vm, 1077 socket: &mut SocketStream, 1078 ) -> result::Result<bool, MigratableError> { 1079 // Send (dirty) memory table 1080 let table = vm.dirty_log()?; 1081 1082 // But if there are no regions go straight to pause 1083 if table.regions().is_empty() { 1084 return Ok(false); 1085 } 1086 1087 Request::memory(table.length()).write_to(socket).unwrap(); 1088 table.write_to(socket)?; 1089 // And then the memory itself 1090 vm.send_memory_regions(&table, socket)?; 1091 Response::read_from(socket)?.ok_or_abandon( 1092 socket, 1093 MigratableError::MigrateSend(anyhow!("Error during dirty memory migration")), 1094 )?; 1095 1096 Ok(true) 1097 } 1098 1099 fn send_migration( 1100 vm: &mut Vm, 1101 #[cfg(all(feature = "kvm", target_arch = "x86_64"))] hypervisor: Arc< 1102 dyn hypervisor::Hypervisor, 1103 >, 1104 send_data_migration: VmSendMigrationData, 1105 ) -> result::Result<(), MigratableError> { 1106 // Set up the socket connection 1107 let mut socket = Self::send_migration_socket(&send_data_migration.destination_url)?; 1108 1109 // Start the migration 1110 Request::start().write_to(&mut socket)?; 1111 Response::read_from(&mut socket)?.ok_or_abandon( 1112 &mut socket, 1113 MigratableError::MigrateSend(anyhow!("Error starting migration")), 1114 )?; 1115 1116 // Send config 1117 let vm_config = vm.get_config(); 1118 #[cfg(all(feature = "kvm", target_arch = "x86_64"))] 1119 let common_cpuid = { 1120 #[cfg(feature = "tdx")] 1121 if vm_config.lock().unwrap().is_tdx_enabled() { 1122 return Err(MigratableError::MigrateSend(anyhow!( 1123 "Live Migration is not supported when TDX is enabled" 1124 ))); 1125 }; 1126 1127 let amx = vm_config.lock().unwrap().cpus.features.amx; 1128 let phys_bits = 1129 vm::physical_bits(&hypervisor, vm_config.lock().unwrap().cpus.max_phys_bits); 1130 arch::generate_common_cpuid( 1131 &hypervisor, 1132 &arch::CpuidConfig { 1133 sgx_epc_sections: None, 1134 phys_bits, 1135 kvm_hyperv: vm_config.lock().unwrap().cpus.kvm_hyperv, 1136 #[cfg(feature = "tdx")] 1137 tdx: false, 1138 amx, 1139 }, 1140 ) 1141 .map_err(|e| { 1142 MigratableError::MigrateSend(anyhow!("Error generating common cpuid': {:?}", e)) 1143 })? 1144 }; 1145 1146 if send_data_migration.local { 1147 match &mut socket { 1148 SocketStream::Unix(unix_socket) => { 1149 // Proceed with sending memory file descriptors over UNIX socket 1150 vm.send_memory_fds(unix_socket)?; 1151 } 1152 SocketStream::Tcp(_tcp_socket) => { 1153 return Err(MigratableError::MigrateSend(anyhow!( 1154 "--local option is not supported with TCP sockets", 1155 ))); 1156 } 1157 } 1158 } 1159 1160 let vm_migration_config = VmMigrationConfig { 1161 vm_config, 1162 #[cfg(all(feature = "kvm", target_arch = "x86_64"))] 1163 common_cpuid, 1164 memory_manager_data: vm.memory_manager_data(), 1165 }; 1166 let config_data = serde_json::to_vec(&vm_migration_config).unwrap(); 1167 Request::config(config_data.len() as u64).write_to(&mut socket)?; 1168 socket 1169 .write_all(&config_data) 1170 .map_err(MigratableError::MigrateSocket)?; 1171 Response::read_from(&mut socket)?.ok_or_abandon( 1172 &mut socket, 1173 MigratableError::MigrateSend(anyhow!("Error during config migration")), 1174 )?; 1175 1176 // Let every Migratable object know about the migration being started. 1177 vm.start_migration()?; 1178 1179 if send_data_migration.local { 1180 // Now pause VM 1181 vm.pause()?; 1182 } else { 1183 // Start logging dirty pages 1184 vm.start_dirty_log()?; 1185 1186 // Send memory table 1187 let table = vm.memory_range_table()?; 1188 Request::memory(table.length()) 1189 .write_to(&mut socket) 1190 .unwrap(); 1191 table.write_to(&mut socket)?; 1192 // And then the memory itself 1193 vm.send_memory_regions(&table, &mut socket)?; 1194 Response::read_from(&mut socket)?.ok_or_abandon( 1195 &mut socket, 1196 MigratableError::MigrateSend(anyhow!("Error during dirty memory migration")), 1197 )?; 1198 1199 // Try at most 5 passes of dirty memory sending 1200 const MAX_DIRTY_MIGRATIONS: usize = 5; 1201 for i in 0..MAX_DIRTY_MIGRATIONS { 1202 info!("Dirty memory migration {} of {}", i, MAX_DIRTY_MIGRATIONS); 1203 if !Self::vm_maybe_send_dirty_pages(vm, &mut socket)? { 1204 break; 1205 } 1206 } 1207 1208 // Now pause VM 1209 vm.pause()?; 1210 1211 // Send last batch of dirty pages 1212 Self::vm_maybe_send_dirty_pages(vm, &mut socket)?; 1213 } 1214 1215 // We release the locks early to enable locking them on the destination host. 1216 // The VM is already stopped. 1217 vm.release_disk_locks() 1218 .map_err(|e| MigratableError::UnlockError(anyhow!("{e}")))?; 1219 1220 // Capture snapshot and send it 1221 let vm_snapshot = vm.snapshot()?; 1222 let snapshot_data = serde_json::to_vec(&vm_snapshot).unwrap(); 1223 Request::state(snapshot_data.len() as u64).write_to(&mut socket)?; 1224 socket 1225 .write_all(&snapshot_data) 1226 .map_err(MigratableError::MigrateSocket)?; 1227 Response::read_from(&mut socket)?.ok_or_abandon( 1228 &mut socket, 1229 MigratableError::MigrateSend(anyhow!("Error during state migration")), 1230 )?; 1231 // Complete the migration 1232 // At this step, the receiving VMM will acquire disk locks again. 1233 Request::complete().write_to(&mut socket)?; 1234 Response::read_from(&mut socket)?.ok_or_abandon( 1235 &mut socket, 1236 MigratableError::MigrateSend(anyhow!("Error completing migration")), 1237 )?; 1238 1239 // Stop logging dirty pages 1240 if !send_data_migration.local { 1241 vm.stop_dirty_log()?; 1242 } 1243 1244 info!("Migration complete"); 1245 1246 // Let every Migratable object know about the migration being complete 1247 vm.complete_migration() 1248 } 1249 1250 #[cfg(all(feature = "kvm", target_arch = "x86_64"))] 1251 fn vm_check_cpuid_compatibility( 1252 &self, 1253 src_vm_config: &Arc<Mutex<VmConfig>>, 1254 src_vm_cpuid: &[hypervisor::arch::x86::CpuIdEntry], 1255 ) -> result::Result<(), MigratableError> { 1256 #[cfg(feature = "tdx")] 1257 if src_vm_config.lock().unwrap().is_tdx_enabled() { 1258 return Err(MigratableError::MigrateReceive(anyhow!( 1259 "Live Migration is not supported when TDX is enabled" 1260 ))); 1261 }; 1262 1263 // We check the `CPUID` compatibility of between the source vm and destination, which is 1264 // mostly about feature compatibility and "topology/sgx" leaves are not relevant. 1265 let dest_cpuid = &{ 1266 let vm_config = &src_vm_config.lock().unwrap(); 1267 1268 let phys_bits = vm::physical_bits(&self.hypervisor, vm_config.cpus.max_phys_bits); 1269 arch::generate_common_cpuid( 1270 &self.hypervisor.clone(), 1271 &arch::CpuidConfig { 1272 sgx_epc_sections: None, 1273 phys_bits, 1274 kvm_hyperv: vm_config.cpus.kvm_hyperv, 1275 #[cfg(feature = "tdx")] 1276 tdx: false, 1277 amx: vm_config.cpus.features.amx, 1278 }, 1279 ) 1280 .map_err(|e| { 1281 MigratableError::MigrateReceive(anyhow!("Error generating common cpuid: {:?}", e)) 1282 })? 1283 }; 1284 arch::CpuidFeatureEntry::check_cpuid_compatibility(src_vm_cpuid, dest_cpuid).map_err(|e| { 1285 MigratableError::MigrateReceive(anyhow!( 1286 "Error checking cpu feature compatibility': {:?}", 1287 e 1288 )) 1289 }) 1290 } 1291 1292 fn vm_restore( 1293 &mut self, 1294 source_url: &str, 1295 vm_config: Arc<Mutex<VmConfig>>, 1296 prefault: bool, 1297 ) -> std::result::Result<(), VmError> { 1298 let snapshot = recv_vm_state(source_url).map_err(VmError::Restore)?; 1299 #[cfg(all(feature = "kvm", target_arch = "x86_64"))] 1300 let vm_snapshot = get_vm_snapshot(&snapshot).map_err(VmError::Restore)?; 1301 1302 #[cfg(all(feature = "kvm", target_arch = "x86_64"))] 1303 self.vm_check_cpuid_compatibility(&vm_config, &vm_snapshot.common_cpuid) 1304 .map_err(VmError::Restore)?; 1305 1306 self.vm_config = Some(Arc::clone(&vm_config)); 1307 1308 // Always re-populate the 'console_info' based on the new 'vm_config' 1309 self.console_info = 1310 Some(pre_create_console_devices(self).map_err(VmError::CreateConsoleDevices)?); 1311 1312 let exit_evt = self.exit_evt.try_clone().map_err(VmError::EventFdClone)?; 1313 let reset_evt = self.reset_evt.try_clone().map_err(VmError::EventFdClone)?; 1314 #[cfg(feature = "guest_debug")] 1315 let debug_evt = self 1316 .vm_debug_evt 1317 .try_clone() 1318 .map_err(VmError::EventFdClone)?; 1319 let activate_evt = self 1320 .activate_evt 1321 .try_clone() 1322 .map_err(VmError::EventFdClone)?; 1323 1324 let vm = Vm::new( 1325 vm_config, 1326 exit_evt, 1327 reset_evt, 1328 #[cfg(feature = "guest_debug")] 1329 debug_evt, 1330 &self.seccomp_action, 1331 self.hypervisor.clone(), 1332 activate_evt, 1333 self.console_info.clone(), 1334 self.console_resize_pipe.clone(), 1335 Arc::clone(&self.original_termios_opt), 1336 Some(snapshot), 1337 Some(source_url), 1338 Some(prefault), 1339 )?; 1340 self.vm = Some(vm); 1341 1342 if self 1343 .vm_config 1344 .as_ref() 1345 .unwrap() 1346 .lock() 1347 .unwrap() 1348 .landlock_enable 1349 { 1350 apply_landlock(self.vm_config.as_ref().unwrap().clone()) 1351 .map_err(VmError::ApplyLandlock)?; 1352 } 1353 1354 // Now we can restore the rest of the VM. 1355 if let Some(ref mut vm) = self.vm { 1356 vm.restore() 1357 } else { 1358 Err(VmError::VmNotCreated) 1359 } 1360 } 1361 1362 fn control_loop( 1363 &mut self, 1364 api_receiver: Rc<Receiver<ApiRequest>>, 1365 #[cfg(feature = "guest_debug")] gdb_receiver: Rc<Receiver<gdb::GdbRequest>>, 1366 ) -> Result<()> { 1367 const EPOLL_EVENTS_LEN: usize = 100; 1368 1369 let mut events = vec![epoll::Event::new(epoll::Events::empty(), 0); EPOLL_EVENTS_LEN]; 1370 let epoll_fd = self.epoll.as_raw_fd(); 1371 1372 'outer: loop { 1373 let num_events = match epoll::wait(epoll_fd, -1, &mut events[..]) { 1374 Ok(res) => res, 1375 Err(e) => { 1376 if e.kind() == io::ErrorKind::Interrupted { 1377 // It's well defined from the epoll_wait() syscall 1378 // documentation that the epoll loop can be interrupted 1379 // before any of the requested events occurred or the 1380 // timeout expired. In both those cases, epoll_wait() 1381 // returns an error of type EINTR, but this should not 1382 // be considered as a regular error. Instead it is more 1383 // appropriate to retry, by calling into epoll_wait(). 1384 continue; 1385 } 1386 return Err(Error::Epoll(e)); 1387 } 1388 }; 1389 1390 for event in events.iter().take(num_events) { 1391 let dispatch_event: EpollDispatch = event.data.into(); 1392 match dispatch_event { 1393 EpollDispatch::Unknown => { 1394 let event = event.data; 1395 warn!("Unknown VMM loop event: {}", event); 1396 } 1397 EpollDispatch::Exit => { 1398 info!("VM exit event"); 1399 // Consume the event. 1400 self.exit_evt.read().map_err(Error::EventFdRead)?; 1401 self.vmm_shutdown().map_err(Error::VmmShutdown)?; 1402 1403 break 'outer; 1404 } 1405 EpollDispatch::Reset => { 1406 info!("VM reset event"); 1407 // Consume the event. 1408 self.reset_evt.read().map_err(Error::EventFdRead)?; 1409 self.vm_reboot().map_err(Error::VmReboot)?; 1410 } 1411 EpollDispatch::ActivateVirtioDevices => { 1412 if let Some(ref vm) = self.vm { 1413 let count = self.activate_evt.read().map_err(Error::EventFdRead)?; 1414 info!( 1415 "Trying to activate pending virtio devices: count = {}", 1416 count 1417 ); 1418 vm.activate_virtio_devices() 1419 .map_err(Error::ActivateVirtioDevices)?; 1420 } 1421 } 1422 EpollDispatch::Api => { 1423 // Consume the events. 1424 for _ in 0..self.api_evt.read().map_err(Error::EventFdRead)? { 1425 // Read from the API receiver channel 1426 let api_request = api_receiver.recv().map_err(Error::ApiRequestRecv)?; 1427 1428 if api_request(self)? { 1429 break 'outer; 1430 } 1431 } 1432 } 1433 #[cfg(feature = "guest_debug")] 1434 EpollDispatch::Debug => { 1435 // Consume the events. 1436 for _ in 0..self.debug_evt.read().map_err(Error::EventFdRead)? { 1437 // Read from the API receiver channel 1438 let gdb_request = gdb_receiver.recv().map_err(Error::GdbRequestRecv)?; 1439 1440 let response = if let Some(ref mut vm) = self.vm { 1441 vm.debug_request(&gdb_request.payload, gdb_request.cpu_id) 1442 } else { 1443 Err(VmError::VmNotRunning) 1444 } 1445 .map_err(gdb::Error::Vm); 1446 1447 gdb_request 1448 .sender 1449 .send(response) 1450 .map_err(Error::GdbResponseSend)?; 1451 } 1452 } 1453 #[cfg(not(feature = "guest_debug"))] 1454 EpollDispatch::Debug => {} 1455 } 1456 } 1457 } 1458 1459 // Trigger the termination of the signal_handler thread 1460 if let Some(signals) = self.signals.take() { 1461 signals.close(); 1462 } 1463 1464 // Wait for all the threads to finish 1465 for thread in self.threads.drain(..) { 1466 thread.join().map_err(Error::ThreadCleanup)? 1467 } 1468 1469 Ok(()) 1470 } 1471 } 1472 1473 fn apply_landlock(vm_config: Arc<Mutex<VmConfig>>) -> result::Result<(), LandlockError> { 1474 vm_config.lock().unwrap().apply_landlock()?; 1475 Ok(()) 1476 } 1477 1478 impl RequestHandler for Vmm { 1479 fn vm_create(&mut self, config: Box<VmConfig>) -> result::Result<(), VmError> { 1480 // We only store the passed VM config. 1481 // The VM will be created when being asked to boot it. 1482 if self.vm_config.is_none() { 1483 self.vm_config = Some(Arc::new(Mutex::new(*config))); 1484 self.console_info = 1485 Some(pre_create_console_devices(self).map_err(VmError::CreateConsoleDevices)?); 1486 1487 if self 1488 .vm_config 1489 .as_ref() 1490 .unwrap() 1491 .lock() 1492 .unwrap() 1493 .landlock_enable 1494 { 1495 apply_landlock(self.vm_config.as_ref().unwrap().clone()) 1496 .map_err(VmError::ApplyLandlock)?; 1497 } 1498 Ok(()) 1499 } else { 1500 Err(VmError::VmAlreadyCreated) 1501 } 1502 } 1503 1504 fn vm_boot(&mut self) -> result::Result<(), VmError> { 1505 tracer::start(); 1506 info!("Booting VM"); 1507 event!("vm", "booting"); 1508 let r = { 1509 trace_scoped!("vm_boot"); 1510 // If we don't have a config, we cannot boot a VM. 1511 if self.vm_config.is_none() { 1512 return Err(VmError::VmMissingConfig); 1513 }; 1514 1515 // console_info is set to None in vm_shutdown. re-populate here if empty 1516 if self.console_info.is_none() { 1517 self.console_info = 1518 Some(pre_create_console_devices(self).map_err(VmError::CreateConsoleDevices)?); 1519 } 1520 1521 // Create a new VM if we don't have one yet. 1522 if self.vm.is_none() { 1523 let exit_evt = self.exit_evt.try_clone().map_err(VmError::EventFdClone)?; 1524 let reset_evt = self.reset_evt.try_clone().map_err(VmError::EventFdClone)?; 1525 #[cfg(feature = "guest_debug")] 1526 let vm_debug_evt = self 1527 .vm_debug_evt 1528 .try_clone() 1529 .map_err(VmError::EventFdClone)?; 1530 let activate_evt = self 1531 .activate_evt 1532 .try_clone() 1533 .map_err(VmError::EventFdClone)?; 1534 1535 if let Some(ref vm_config) = self.vm_config { 1536 let vm = Vm::new( 1537 Arc::clone(vm_config), 1538 exit_evt, 1539 reset_evt, 1540 #[cfg(feature = "guest_debug")] 1541 vm_debug_evt, 1542 &self.seccomp_action, 1543 self.hypervisor.clone(), 1544 activate_evt, 1545 self.console_info.clone(), 1546 self.console_resize_pipe.clone(), 1547 Arc::clone(&self.original_termios_opt), 1548 None, 1549 None, 1550 None, 1551 )?; 1552 1553 self.vm = Some(vm); 1554 } 1555 } 1556 1557 // Now we can boot the VM. 1558 if let Some(ref mut vm) = self.vm { 1559 vm.boot() 1560 } else { 1561 Err(VmError::VmNotCreated) 1562 } 1563 }; 1564 tracer::end(); 1565 if r.is_ok() { 1566 event!("vm", "booted"); 1567 } 1568 r 1569 } 1570 1571 fn vm_pause(&mut self) -> result::Result<(), VmError> { 1572 if let Some(ref mut vm) = self.vm { 1573 vm.pause().map_err(VmError::Pause) 1574 } else { 1575 Err(VmError::VmNotRunning) 1576 } 1577 } 1578 1579 fn vm_resume(&mut self) -> result::Result<(), VmError> { 1580 if let Some(ref mut vm) = self.vm { 1581 vm.resume().map_err(VmError::Resume) 1582 } else { 1583 Err(VmError::VmNotRunning) 1584 } 1585 } 1586 1587 fn vm_snapshot(&mut self, destination_url: &str) -> result::Result<(), VmError> { 1588 if let Some(ref mut vm) = self.vm { 1589 // Drain console_info so that FDs are not reused 1590 let _ = self.console_info.take(); 1591 vm.snapshot() 1592 .map_err(VmError::Snapshot) 1593 .and_then(|snapshot| { 1594 vm.send(&snapshot, destination_url) 1595 .map_err(VmError::SnapshotSend) 1596 }) 1597 } else { 1598 Err(VmError::VmNotRunning) 1599 } 1600 } 1601 1602 fn vm_restore(&mut self, restore_cfg: RestoreConfig) -> result::Result<(), VmError> { 1603 if self.vm.is_some() || self.vm_config.is_some() { 1604 return Err(VmError::VmAlreadyCreated); 1605 } 1606 1607 let source_url = restore_cfg.source_url.as_path().to_str(); 1608 if source_url.is_none() { 1609 return Err(VmError::InvalidRestoreSourceUrl); 1610 } 1611 // Safe to unwrap as we checked it was Some(&str). 1612 let source_url = source_url.unwrap(); 1613 1614 let vm_config = Arc::new(Mutex::new( 1615 recv_vm_config(source_url).map_err(VmError::Restore)?, 1616 )); 1617 restore_cfg 1618 .validate(&vm_config.lock().unwrap().clone()) 1619 .map_err(VmError::ConfigValidation)?; 1620 1621 // Update VM's net configurations with new fds received for restore operation 1622 if let (Some(restored_nets), Some(vm_net_configs)) = 1623 (restore_cfg.net_fds, &mut vm_config.lock().unwrap().net) 1624 { 1625 for net in restored_nets.iter() { 1626 for net_config in vm_net_configs.iter_mut() { 1627 // update only if the net dev is backed by FDs 1628 if net_config.id == Some(net.id.clone()) && net_config.fds.is_some() { 1629 net_config.fds.clone_from(&net.fds); 1630 } 1631 } 1632 } 1633 } 1634 1635 self.vm_restore(source_url, vm_config, restore_cfg.prefault) 1636 .map_err(|vm_restore_err| { 1637 error!("VM Restore failed: {:?}", vm_restore_err); 1638 1639 // Cleanup the VM being created while vm restore 1640 if let Err(e) = self.vm_delete() { 1641 return e; 1642 } 1643 1644 vm_restore_err 1645 }) 1646 } 1647 1648 #[cfg(all(target_arch = "x86_64", feature = "guest_debug"))] 1649 fn vm_coredump(&mut self, destination_url: &str) -> result::Result<(), VmError> { 1650 if let Some(ref mut vm) = self.vm { 1651 vm.coredump(destination_url).map_err(VmError::Coredump) 1652 } else { 1653 Err(VmError::VmNotRunning) 1654 } 1655 } 1656 1657 fn vm_shutdown(&mut self) -> result::Result<(), VmError> { 1658 let r = if let Some(ref mut vm) = self.vm.take() { 1659 // Drain console_info so that the FDs are not reused 1660 let _ = self.console_info.take(); 1661 vm.shutdown() 1662 } else { 1663 Err(VmError::VmNotRunning) 1664 }; 1665 1666 if r.is_ok() { 1667 event!("vm", "shutdown"); 1668 } 1669 1670 r 1671 } 1672 1673 fn vm_reboot(&mut self) -> result::Result<(), VmError> { 1674 event!("vm", "rebooting"); 1675 1676 // First we stop the current VM 1677 let config = if let Some(mut vm) = self.vm.take() { 1678 let config = vm.get_config(); 1679 vm.shutdown()?; 1680 config 1681 } else { 1682 return Err(VmError::VmNotCreated); 1683 }; 1684 1685 // vm.shutdown() closes all the console devices, so set console_info to None 1686 // so that the closed FD #s are not reused. 1687 let _ = self.console_info.take(); 1688 1689 let exit_evt = self.exit_evt.try_clone().map_err(VmError::EventFdClone)?; 1690 let reset_evt = self.reset_evt.try_clone().map_err(VmError::EventFdClone)?; 1691 #[cfg(feature = "guest_debug")] 1692 let debug_evt = self 1693 .vm_debug_evt 1694 .try_clone() 1695 .map_err(VmError::EventFdClone)?; 1696 let activate_evt = self 1697 .activate_evt 1698 .try_clone() 1699 .map_err(VmError::EventFdClone)?; 1700 1701 // The Linux kernel fires off an i8042 reset after doing the ACPI reset so there may be 1702 // an event sitting in the shared reset_evt. Without doing this we get very early reboots 1703 // during the boot process. 1704 if self.reset_evt.read().is_ok() { 1705 warn!("Spurious second reset event received. Ignoring."); 1706 } 1707 1708 self.console_info = 1709 Some(pre_create_console_devices(self).map_err(VmError::CreateConsoleDevices)?); 1710 1711 // Then we create the new VM 1712 let mut vm = Vm::new( 1713 config, 1714 exit_evt, 1715 reset_evt, 1716 #[cfg(feature = "guest_debug")] 1717 debug_evt, 1718 &self.seccomp_action, 1719 self.hypervisor.clone(), 1720 activate_evt, 1721 self.console_info.clone(), 1722 self.console_resize_pipe.clone(), 1723 Arc::clone(&self.original_termios_opt), 1724 None, 1725 None, 1726 None, 1727 )?; 1728 1729 // And we boot it 1730 vm.boot()?; 1731 1732 self.vm = Some(vm); 1733 1734 event!("vm", "rebooted"); 1735 1736 Ok(()) 1737 } 1738 1739 fn vm_info(&self) -> result::Result<VmInfoResponse, VmError> { 1740 match &self.vm_config { 1741 Some(vm_config) => { 1742 let state = match &self.vm { 1743 Some(vm) => vm.get_state()?, 1744 None => VmState::Created, 1745 }; 1746 let config = vm_config.lock().unwrap().clone(); 1747 1748 let mut memory_actual_size = config.memory.total_size(); 1749 if let Some(vm) = &self.vm { 1750 memory_actual_size -= vm.balloon_size(); 1751 } 1752 1753 let device_tree = self 1754 .vm 1755 .as_ref() 1756 .map(|vm| vm.device_tree().lock().unwrap().clone()); 1757 1758 Ok(VmInfoResponse { 1759 config: Box::new(config), 1760 state, 1761 memory_actual_size, 1762 device_tree, 1763 }) 1764 } 1765 None => Err(VmError::VmNotCreated), 1766 } 1767 } 1768 1769 fn vmm_ping(&self) -> VmmPingResponse { 1770 let VmmVersionInfo { 1771 build_version, 1772 version, 1773 } = self.version.clone(); 1774 1775 VmmPingResponse { 1776 build_version, 1777 version, 1778 pid: std::process::id() as i64, 1779 features: feature_list(), 1780 } 1781 } 1782 1783 fn vm_delete(&mut self) -> result::Result<(), VmError> { 1784 if self.vm_config.is_none() { 1785 return Ok(()); 1786 } 1787 1788 // If a VM is booted, we first try to shut it down. 1789 if self.vm.is_some() { 1790 self.vm_shutdown()?; 1791 } 1792 1793 self.vm_config = None; 1794 1795 event!("vm", "deleted"); 1796 1797 Ok(()) 1798 } 1799 1800 fn vmm_shutdown(&mut self) -> result::Result<(), VmError> { 1801 self.vm_delete()?; 1802 event!("vmm", "shutdown"); 1803 Ok(()) 1804 } 1805 1806 fn vm_resize( 1807 &mut self, 1808 desired_vcpus: Option<u8>, 1809 desired_ram: Option<u64>, 1810 desired_balloon: Option<u64>, 1811 ) -> result::Result<(), VmError> { 1812 self.vm_config.as_ref().ok_or(VmError::VmNotCreated)?; 1813 1814 if let Some(ref mut vm) = self.vm { 1815 if let Err(e) = vm.resize(desired_vcpus, desired_ram, desired_balloon) { 1816 error!("Error when resizing VM: {:?}", e); 1817 Err(e) 1818 } else { 1819 Ok(()) 1820 } 1821 } else { 1822 let mut config = self.vm_config.as_ref().unwrap().lock().unwrap(); 1823 if let Some(desired_vcpus) = desired_vcpus { 1824 config.cpus.boot_vcpus = desired_vcpus; 1825 } 1826 if let Some(desired_ram) = desired_ram { 1827 config.memory.size = desired_ram; 1828 } 1829 if let Some(desired_balloon) = desired_balloon { 1830 if let Some(balloon_config) = &mut config.balloon { 1831 balloon_config.size = desired_balloon; 1832 } 1833 } 1834 Ok(()) 1835 } 1836 } 1837 1838 fn vm_resize_zone(&mut self, id: String, desired_ram: u64) -> result::Result<(), VmError> { 1839 self.vm_config.as_ref().ok_or(VmError::VmNotCreated)?; 1840 1841 if let Some(ref mut vm) = self.vm { 1842 if let Err(e) = vm.resize_zone(id, desired_ram) { 1843 error!("Error when resizing VM: {:?}", e); 1844 Err(e) 1845 } else { 1846 Ok(()) 1847 } 1848 } else { 1849 // Update VmConfig by setting the new desired ram. 1850 let memory_config = &mut self.vm_config.as_ref().unwrap().lock().unwrap().memory; 1851 1852 if let Some(zones) = &mut memory_config.zones { 1853 for zone in zones.iter_mut() { 1854 if zone.id == id { 1855 zone.size = desired_ram; 1856 return Ok(()); 1857 } 1858 } 1859 } 1860 1861 error!("Could not find the memory zone {} for the resize", id); 1862 Err(VmError::ResizeZone) 1863 } 1864 } 1865 1866 fn vm_add_device( 1867 &mut self, 1868 device_cfg: DeviceConfig, 1869 ) -> result::Result<Option<Vec<u8>>, VmError> { 1870 self.vm_config.as_ref().ok_or(VmError::VmNotCreated)?; 1871 1872 { 1873 // Validate the configuration change in a cloned configuration 1874 let mut config = self.vm_config.as_ref().unwrap().lock().unwrap().clone(); 1875 add_to_config(&mut config.devices, device_cfg.clone()); 1876 config.validate().map_err(VmError::ConfigValidation)?; 1877 } 1878 1879 if let Some(ref mut vm) = self.vm { 1880 let info = vm.add_device(device_cfg).map_err(|e| { 1881 error!("Error when adding new device to the VM: {:?}", e); 1882 e 1883 })?; 1884 serde_json::to_vec(&info) 1885 .map(Some) 1886 .map_err(VmError::SerializeJson) 1887 } else { 1888 // Update VmConfig by adding the new device. 1889 let mut config = self.vm_config.as_ref().unwrap().lock().unwrap(); 1890 add_to_config(&mut config.devices, device_cfg); 1891 Ok(None) 1892 } 1893 } 1894 1895 fn vm_add_user_device( 1896 &mut self, 1897 device_cfg: UserDeviceConfig, 1898 ) -> result::Result<Option<Vec<u8>>, VmError> { 1899 self.vm_config.as_ref().ok_or(VmError::VmNotCreated)?; 1900 1901 { 1902 // Validate the configuration change in a cloned configuration 1903 let mut config = self.vm_config.as_ref().unwrap().lock().unwrap().clone(); 1904 add_to_config(&mut config.user_devices, device_cfg.clone()); 1905 config.validate().map_err(VmError::ConfigValidation)?; 1906 } 1907 1908 if let Some(ref mut vm) = self.vm { 1909 let info = vm.add_user_device(device_cfg).map_err(|e| { 1910 error!("Error when adding new user device to the VM: {:?}", e); 1911 e 1912 })?; 1913 serde_json::to_vec(&info) 1914 .map(Some) 1915 .map_err(VmError::SerializeJson) 1916 } else { 1917 // Update VmConfig by adding the new device. 1918 let mut config = self.vm_config.as_ref().unwrap().lock().unwrap(); 1919 add_to_config(&mut config.user_devices, device_cfg); 1920 Ok(None) 1921 } 1922 } 1923 1924 fn vm_remove_device(&mut self, id: String) -> result::Result<(), VmError> { 1925 if let Some(ref mut vm) = self.vm { 1926 if let Err(e) = vm.remove_device(id) { 1927 error!("Error when removing device from the VM: {:?}", e); 1928 Err(e) 1929 } else { 1930 Ok(()) 1931 } 1932 } else if let Some(ref config) = self.vm_config { 1933 let mut config = config.lock().unwrap(); 1934 if config.remove_device(&id) { 1935 Ok(()) 1936 } else { 1937 Err(VmError::NoDeviceToRemove(id)) 1938 } 1939 } else { 1940 Err(VmError::VmNotCreated) 1941 } 1942 } 1943 1944 fn vm_add_disk(&mut self, disk_cfg: DiskConfig) -> result::Result<Option<Vec<u8>>, VmError> { 1945 self.vm_config.as_ref().ok_or(VmError::VmNotCreated)?; 1946 1947 { 1948 // Validate the configuration change in a cloned configuration 1949 let mut config = self.vm_config.as_ref().unwrap().lock().unwrap().clone(); 1950 add_to_config(&mut config.disks, disk_cfg.clone()); 1951 config.validate().map_err(VmError::ConfigValidation)?; 1952 } 1953 1954 if let Some(ref mut vm) = self.vm { 1955 let info = vm.add_disk(disk_cfg).map_err(|e| { 1956 error!("Error when adding new disk to the VM: {:?}", e); 1957 e 1958 })?; 1959 serde_json::to_vec(&info) 1960 .map(Some) 1961 .map_err(VmError::SerializeJson) 1962 } else { 1963 // Update VmConfig by adding the new device. 1964 let mut config = self.vm_config.as_ref().unwrap().lock().unwrap(); 1965 add_to_config(&mut config.disks, disk_cfg); 1966 Ok(None) 1967 } 1968 } 1969 1970 fn vm_add_fs(&mut self, fs_cfg: FsConfig) -> result::Result<Option<Vec<u8>>, VmError> { 1971 self.vm_config.as_ref().ok_or(VmError::VmNotCreated)?; 1972 1973 { 1974 // Validate the configuration change in a cloned configuration 1975 let mut config = self.vm_config.as_ref().unwrap().lock().unwrap().clone(); 1976 add_to_config(&mut config.fs, fs_cfg.clone()); 1977 config.validate().map_err(VmError::ConfigValidation)?; 1978 } 1979 1980 if let Some(ref mut vm) = self.vm { 1981 let info = vm.add_fs(fs_cfg).map_err(|e| { 1982 error!("Error when adding new fs to the VM: {:?}", e); 1983 e 1984 })?; 1985 serde_json::to_vec(&info) 1986 .map(Some) 1987 .map_err(VmError::SerializeJson) 1988 } else { 1989 // Update VmConfig by adding the new device. 1990 let mut config = self.vm_config.as_ref().unwrap().lock().unwrap(); 1991 add_to_config(&mut config.fs, fs_cfg); 1992 Ok(None) 1993 } 1994 } 1995 1996 fn vm_add_pmem(&mut self, pmem_cfg: PmemConfig) -> result::Result<Option<Vec<u8>>, VmError> { 1997 self.vm_config.as_ref().ok_or(VmError::VmNotCreated)?; 1998 1999 { 2000 // Validate the configuration change in a cloned configuration 2001 let mut config = self.vm_config.as_ref().unwrap().lock().unwrap().clone(); 2002 add_to_config(&mut config.pmem, pmem_cfg.clone()); 2003 config.validate().map_err(VmError::ConfigValidation)?; 2004 } 2005 2006 if let Some(ref mut vm) = self.vm { 2007 let info = vm.add_pmem(pmem_cfg).map_err(|e| { 2008 error!("Error when adding new pmem device to the VM: {:?}", e); 2009 e 2010 })?; 2011 serde_json::to_vec(&info) 2012 .map(Some) 2013 .map_err(VmError::SerializeJson) 2014 } else { 2015 // Update VmConfig by adding the new device. 2016 let mut config = self.vm_config.as_ref().unwrap().lock().unwrap(); 2017 add_to_config(&mut config.pmem, pmem_cfg); 2018 Ok(None) 2019 } 2020 } 2021 2022 fn vm_add_net(&mut self, net_cfg: NetConfig) -> result::Result<Option<Vec<u8>>, VmError> { 2023 self.vm_config.as_ref().ok_or(VmError::VmNotCreated)?; 2024 2025 { 2026 // Validate the configuration change in a cloned configuration 2027 let mut config = self.vm_config.as_ref().unwrap().lock().unwrap().clone(); 2028 add_to_config(&mut config.net, net_cfg.clone()); 2029 config.validate().map_err(VmError::ConfigValidation)?; 2030 } 2031 2032 if let Some(ref mut vm) = self.vm { 2033 let info = vm.add_net(net_cfg).map_err(|e| { 2034 error!("Error when adding new network device to the VM: {:?}", e); 2035 e 2036 })?; 2037 serde_json::to_vec(&info) 2038 .map(Some) 2039 .map_err(VmError::SerializeJson) 2040 } else { 2041 // Update VmConfig by adding the new device. 2042 let mut config = self.vm_config.as_ref().unwrap().lock().unwrap(); 2043 add_to_config(&mut config.net, net_cfg); 2044 Ok(None) 2045 } 2046 } 2047 2048 fn vm_add_vdpa(&mut self, vdpa_cfg: VdpaConfig) -> result::Result<Option<Vec<u8>>, VmError> { 2049 self.vm_config.as_ref().ok_or(VmError::VmNotCreated)?; 2050 2051 { 2052 // Validate the configuration change in a cloned configuration 2053 let mut config = self.vm_config.as_ref().unwrap().lock().unwrap().clone(); 2054 add_to_config(&mut config.vdpa, vdpa_cfg.clone()); 2055 config.validate().map_err(VmError::ConfigValidation)?; 2056 } 2057 2058 if let Some(ref mut vm) = self.vm { 2059 let info = vm.add_vdpa(vdpa_cfg).map_err(|e| { 2060 error!("Error when adding new vDPA device to the VM: {:?}", e); 2061 e 2062 })?; 2063 serde_json::to_vec(&info) 2064 .map(Some) 2065 .map_err(VmError::SerializeJson) 2066 } else { 2067 // Update VmConfig by adding the new device. 2068 let mut config = self.vm_config.as_ref().unwrap().lock().unwrap(); 2069 add_to_config(&mut config.vdpa, vdpa_cfg); 2070 Ok(None) 2071 } 2072 } 2073 2074 fn vm_add_vsock(&mut self, vsock_cfg: VsockConfig) -> result::Result<Option<Vec<u8>>, VmError> { 2075 self.vm_config.as_ref().ok_or(VmError::VmNotCreated)?; 2076 2077 { 2078 // Validate the configuration change in a cloned configuration 2079 let mut config = self.vm_config.as_ref().unwrap().lock().unwrap().clone(); 2080 2081 if config.vsock.is_some() { 2082 return Err(VmError::TooManyVsockDevices); 2083 } 2084 2085 config.vsock = Some(vsock_cfg.clone()); 2086 config.validate().map_err(VmError::ConfigValidation)?; 2087 } 2088 2089 if let Some(ref mut vm) = self.vm { 2090 let info = vm.add_vsock(vsock_cfg).map_err(|e| { 2091 error!("Error when adding new vsock device to the VM: {:?}", e); 2092 e 2093 })?; 2094 serde_json::to_vec(&info) 2095 .map(Some) 2096 .map_err(VmError::SerializeJson) 2097 } else { 2098 // Update VmConfig by adding the new device. 2099 let mut config = self.vm_config.as_ref().unwrap().lock().unwrap(); 2100 config.vsock = Some(vsock_cfg); 2101 Ok(None) 2102 } 2103 } 2104 2105 fn vm_counters(&mut self) -> result::Result<Option<Vec<u8>>, VmError> { 2106 if let Some(ref mut vm) = self.vm { 2107 let info = vm.counters().map_err(|e| { 2108 error!("Error when getting counters from the VM: {:?}", e); 2109 e 2110 })?; 2111 serde_json::to_vec(&info) 2112 .map(Some) 2113 .map_err(VmError::SerializeJson) 2114 } else { 2115 Err(VmError::VmNotRunning) 2116 } 2117 } 2118 2119 fn vm_power_button(&mut self) -> result::Result<(), VmError> { 2120 if let Some(ref mut vm) = self.vm { 2121 vm.power_button() 2122 } else { 2123 Err(VmError::VmNotRunning) 2124 } 2125 } 2126 2127 fn vm_nmi(&mut self) -> result::Result<(), VmError> { 2128 if let Some(ref mut vm) = self.vm { 2129 vm.nmi() 2130 } else { 2131 Err(VmError::VmNotRunning) 2132 } 2133 } 2134 2135 fn vm_receive_migration( 2136 &mut self, 2137 receive_data_migration: VmReceiveMigrationData, 2138 ) -> result::Result<(), MigratableError> { 2139 info!( 2140 "Receiving migration: receiver_url = {}", 2141 receive_data_migration.receiver_url 2142 ); 2143 2144 // Accept the connection and get the socket 2145 let mut socket = Vmm::receive_migration_socket(&receive_data_migration.receiver_url)?; 2146 2147 let mut started = false; 2148 let mut memory_manager: Option<Arc<Mutex<MemoryManager>>> = None; 2149 let mut existing_memory_files = None; 2150 loop { 2151 let req = Request::read_from(&mut socket)?; 2152 match req.command() { 2153 Command::Invalid => info!("Invalid Command Received"), 2154 Command::Start => { 2155 info!("Start Command Received"); 2156 started = true; 2157 2158 Response::ok().write_to(&mut socket)?; 2159 } 2160 Command::Config => { 2161 info!("Config Command Received"); 2162 2163 if !started { 2164 warn!("Migration not started yet"); 2165 Response::error().write_to(&mut socket)?; 2166 continue; 2167 } 2168 memory_manager = Some(self.vm_receive_config( 2169 &req, 2170 &mut socket, 2171 existing_memory_files.take(), 2172 )?); 2173 } 2174 Command::State => { 2175 info!("State Command Received"); 2176 2177 if !started { 2178 warn!("Migration not started yet"); 2179 Response::error().write_to(&mut socket)?; 2180 continue; 2181 } 2182 if let Some(mm) = memory_manager.take() { 2183 self.vm_receive_state(&req, &mut socket, mm)?; 2184 } else { 2185 warn!("Configuration not sent yet"); 2186 Response::error().write_to(&mut socket)?; 2187 } 2188 } 2189 Command::Memory => { 2190 info!("Memory Command Received"); 2191 2192 if !started { 2193 warn!("Migration not started yet"); 2194 Response::error().write_to(&mut socket)?; 2195 continue; 2196 } 2197 if let Some(mm) = memory_manager.as_ref() { 2198 self.vm_receive_memory(&req, &mut socket, &mut mm.lock().unwrap())?; 2199 } else { 2200 warn!("Configuration not sent yet"); 2201 Response::error().write_to(&mut socket)?; 2202 } 2203 } 2204 Command::MemoryFd => { 2205 info!("MemoryFd Command Received"); 2206 2207 if !started { 2208 warn!("Migration not started yet"); 2209 Response::error().write_to(&mut socket)?; 2210 continue; 2211 } 2212 2213 match &mut socket { 2214 SocketStream::Unix(unix_socket) => { 2215 let mut buf = [0u8; 4]; 2216 let (_, file) = unix_socket.recv_with_fd(&mut buf).map_err(|e| { 2217 MigratableError::MigrateReceive(anyhow!( 2218 "Error receiving slot from socket: {}", 2219 e 2220 )) 2221 })?; 2222 2223 if existing_memory_files.is_none() { 2224 existing_memory_files = Some(HashMap::default()) 2225 } 2226 2227 if let Some(ref mut existing_memory_files) = existing_memory_files { 2228 let slot = u32::from_le_bytes(buf); 2229 existing_memory_files.insert(slot, file.unwrap()); 2230 } 2231 2232 Response::ok().write_to(&mut socket)?; 2233 } 2234 SocketStream::Tcp(_tcp_socket) => { 2235 // For TCP sockets, we cannot transfer file descriptors 2236 warn!( 2237 "MemoryFd command received over TCP socket, which is not supported" 2238 ); 2239 Response::error().write_to(&mut socket)?; 2240 } 2241 } 2242 } 2243 Command::Complete => { 2244 info!("Complete Command Received"); 2245 if let Some(ref mut vm) = self.vm.as_mut() { 2246 vm.resume()?; 2247 Response::ok().write_to(&mut socket)?; 2248 } else { 2249 warn!("VM not created yet"); 2250 Response::error().write_to(&mut socket)?; 2251 } 2252 break; 2253 } 2254 Command::Abandon => { 2255 info!("Abandon Command Received"); 2256 self.vm = None; 2257 self.vm_config = None; 2258 Response::ok().write_to(&mut socket).ok(); 2259 break; 2260 } 2261 } 2262 } 2263 2264 Ok(()) 2265 } 2266 2267 fn vm_send_migration( 2268 &mut self, 2269 send_data_migration: VmSendMigrationData, 2270 ) -> result::Result<(), MigratableError> { 2271 info!( 2272 "Sending migration: destination_url = {}, local = {}", 2273 send_data_migration.destination_url, send_data_migration.local 2274 ); 2275 2276 if !self 2277 .vm_config 2278 .as_ref() 2279 .unwrap() 2280 .lock() 2281 .unwrap() 2282 .backed_by_shared_memory() 2283 && send_data_migration.local 2284 { 2285 return Err(MigratableError::MigrateSend(anyhow!( 2286 "Local migration requires shared memory or hugepages enabled" 2287 ))); 2288 } 2289 2290 if let Some(vm) = self.vm.as_mut() { 2291 Self::send_migration( 2292 vm, 2293 #[cfg(all(feature = "kvm", target_arch = "x86_64"))] 2294 self.hypervisor.clone(), 2295 send_data_migration.clone(), 2296 ) 2297 .map_err(|migration_err| { 2298 error!("Migration failed: {:?}", migration_err); 2299 2300 // Stop logging dirty pages only for non-local migrations 2301 if !send_data_migration.local { 2302 if let Err(e) = vm.stop_dirty_log() { 2303 return e; 2304 } 2305 } 2306 2307 if vm.get_state().unwrap() == VmState::Paused { 2308 if let Err(e) = vm.resume() { 2309 return e; 2310 } 2311 } 2312 2313 migration_err 2314 })?; 2315 2316 // Shutdown the VM after the migration succeeded 2317 self.exit_evt.write(1).map_err(|e| { 2318 MigratableError::MigrateSend(anyhow!( 2319 "Failed shutting down the VM after migration: {:?}", 2320 e 2321 )) 2322 }) 2323 } else { 2324 Err(MigratableError::MigrateSend(anyhow!("VM is not running"))) 2325 } 2326 } 2327 } 2328 2329 const CPU_MANAGER_SNAPSHOT_ID: &str = "cpu-manager"; 2330 const MEMORY_MANAGER_SNAPSHOT_ID: &str = "memory-manager"; 2331 const DEVICE_MANAGER_SNAPSHOT_ID: &str = "device-manager"; 2332 2333 #[cfg(test)] 2334 mod unit_tests { 2335 use super::*; 2336 #[cfg(target_arch = "x86_64")] 2337 use crate::vm_config::DebugConsoleConfig; 2338 use crate::vm_config::{ 2339 ConsoleConfig, ConsoleOutputMode, CpuFeatures, CpusConfig, HotplugMethod, MemoryConfig, 2340 PayloadConfig, RngConfig, 2341 }; 2342 2343 fn create_dummy_vmm() -> Vmm { 2344 Vmm::new( 2345 VmmVersionInfo::new("dummy", "dummy"), 2346 EventFd::new(EFD_NONBLOCK).unwrap(), 2347 #[cfg(feature = "guest_debug")] 2348 EventFd::new(EFD_NONBLOCK).unwrap(), 2349 #[cfg(feature = "guest_debug")] 2350 EventFd::new(EFD_NONBLOCK).unwrap(), 2351 SeccompAction::Allow, 2352 hypervisor::new().unwrap(), 2353 EventFd::new(EFD_NONBLOCK).unwrap(), 2354 ) 2355 .unwrap() 2356 } 2357 2358 fn create_dummy_vm_config() -> Box<VmConfig> { 2359 Box::new(VmConfig { 2360 cpus: CpusConfig { 2361 boot_vcpus: 1, 2362 max_vcpus: 1, 2363 topology: None, 2364 kvm_hyperv: false, 2365 max_phys_bits: 46, 2366 affinity: None, 2367 features: CpuFeatures::default(), 2368 }, 2369 memory: MemoryConfig { 2370 size: 536_870_912, 2371 mergeable: false, 2372 hotplug_method: HotplugMethod::Acpi, 2373 hotplug_size: None, 2374 hotplugged_size: None, 2375 shared: true, 2376 hugepages: false, 2377 hugepage_size: None, 2378 prefault: false, 2379 zones: None, 2380 thp: true, 2381 }, 2382 payload: Some(PayloadConfig { 2383 kernel: Some(PathBuf::from("/path/to/kernel")), 2384 firmware: None, 2385 cmdline: None, 2386 initramfs: None, 2387 #[cfg(feature = "igvm")] 2388 igvm: None, 2389 #[cfg(feature = "sev_snp")] 2390 host_data: None, 2391 }), 2392 rate_limit_groups: None, 2393 disks: None, 2394 net: None, 2395 rng: RngConfig { 2396 src: PathBuf::from("/dev/urandom"), 2397 iommu: false, 2398 }, 2399 balloon: None, 2400 fs: None, 2401 pmem: None, 2402 serial: ConsoleConfig { 2403 file: None, 2404 mode: ConsoleOutputMode::Null, 2405 iommu: false, 2406 socket: None, 2407 }, 2408 console: ConsoleConfig { 2409 file: None, 2410 mode: ConsoleOutputMode::Tty, 2411 iommu: false, 2412 socket: None, 2413 }, 2414 #[cfg(target_arch = "x86_64")] 2415 debug_console: DebugConsoleConfig::default(), 2416 devices: None, 2417 user_devices: None, 2418 vdpa: None, 2419 vsock: None, 2420 #[cfg(feature = "pvmemcontrol")] 2421 pvmemcontrol: None, 2422 pvpanic: false, 2423 iommu: false, 2424 #[cfg(target_arch = "x86_64")] 2425 sgx_epc: None, 2426 numa: None, 2427 watchdog: false, 2428 #[cfg(feature = "guest_debug")] 2429 gdb: false, 2430 pci_segments: None, 2431 platform: None, 2432 tpm: None, 2433 preserved_fds: None, 2434 landlock_enable: false, 2435 landlock_rules: None, 2436 }) 2437 } 2438 2439 #[test] 2440 fn test_vmm_vm_create() { 2441 let mut vmm = create_dummy_vmm(); 2442 let config = create_dummy_vm_config(); 2443 2444 assert!(matches!(vmm.vm_create(config.clone()), Ok(()))); 2445 assert!(matches!( 2446 vmm.vm_create(config), 2447 Err(VmError::VmAlreadyCreated) 2448 )); 2449 } 2450 2451 #[test] 2452 fn test_vmm_vm_cold_add_device() { 2453 let mut vmm = create_dummy_vmm(); 2454 let device_config = DeviceConfig::parse("path=/path/to/device").unwrap(); 2455 2456 assert!(matches!( 2457 vmm.vm_add_device(device_config.clone()), 2458 Err(VmError::VmNotCreated) 2459 )); 2460 2461 let _ = vmm.vm_create(create_dummy_vm_config()); 2462 assert!(vmm 2463 .vm_config 2464 .as_ref() 2465 .unwrap() 2466 .lock() 2467 .unwrap() 2468 .devices 2469 .is_none()); 2470 2471 assert!(vmm.vm_add_device(device_config.clone()).unwrap().is_none()); 2472 assert_eq!( 2473 vmm.vm_config 2474 .as_ref() 2475 .unwrap() 2476 .lock() 2477 .unwrap() 2478 .devices 2479 .clone() 2480 .unwrap() 2481 .len(), 2482 1 2483 ); 2484 assert_eq!( 2485 vmm.vm_config 2486 .as_ref() 2487 .unwrap() 2488 .lock() 2489 .unwrap() 2490 .devices 2491 .clone() 2492 .unwrap()[0], 2493 device_config 2494 ); 2495 } 2496 2497 #[test] 2498 fn test_vmm_vm_cold_add_user_device() { 2499 let mut vmm = create_dummy_vmm(); 2500 let user_device_config = 2501 UserDeviceConfig::parse("socket=/path/to/socket,id=8,pci_segment=2").unwrap(); 2502 2503 assert!(matches!( 2504 vmm.vm_add_user_device(user_device_config.clone()), 2505 Err(VmError::VmNotCreated) 2506 )); 2507 2508 let _ = vmm.vm_create(create_dummy_vm_config()); 2509 assert!(vmm 2510 .vm_config 2511 .as_ref() 2512 .unwrap() 2513 .lock() 2514 .unwrap() 2515 .user_devices 2516 .is_none()); 2517 2518 assert!(vmm 2519 .vm_add_user_device(user_device_config.clone()) 2520 .unwrap() 2521 .is_none()); 2522 assert_eq!( 2523 vmm.vm_config 2524 .as_ref() 2525 .unwrap() 2526 .lock() 2527 .unwrap() 2528 .user_devices 2529 .clone() 2530 .unwrap() 2531 .len(), 2532 1 2533 ); 2534 assert_eq!( 2535 vmm.vm_config 2536 .as_ref() 2537 .unwrap() 2538 .lock() 2539 .unwrap() 2540 .user_devices 2541 .clone() 2542 .unwrap()[0], 2543 user_device_config 2544 ); 2545 } 2546 2547 #[test] 2548 fn test_vmm_vm_cold_add_disk() { 2549 let mut vmm = create_dummy_vmm(); 2550 let disk_config = DiskConfig::parse("path=/path/to_file").unwrap(); 2551 2552 assert!(matches!( 2553 vmm.vm_add_disk(disk_config.clone()), 2554 Err(VmError::VmNotCreated) 2555 )); 2556 2557 let _ = vmm.vm_create(create_dummy_vm_config()); 2558 assert!(vmm 2559 .vm_config 2560 .as_ref() 2561 .unwrap() 2562 .lock() 2563 .unwrap() 2564 .disks 2565 .is_none()); 2566 2567 assert!(vmm.vm_add_disk(disk_config.clone()).unwrap().is_none()); 2568 assert_eq!( 2569 vmm.vm_config 2570 .as_ref() 2571 .unwrap() 2572 .lock() 2573 .unwrap() 2574 .disks 2575 .clone() 2576 .unwrap() 2577 .len(), 2578 1 2579 ); 2580 assert_eq!( 2581 vmm.vm_config 2582 .as_ref() 2583 .unwrap() 2584 .lock() 2585 .unwrap() 2586 .disks 2587 .clone() 2588 .unwrap()[0], 2589 disk_config 2590 ); 2591 } 2592 2593 #[test] 2594 fn test_vmm_vm_cold_add_fs() { 2595 let mut vmm = create_dummy_vmm(); 2596 let fs_config = FsConfig::parse("tag=mytag,socket=/tmp/sock").unwrap(); 2597 2598 assert!(matches!( 2599 vmm.vm_add_fs(fs_config.clone()), 2600 Err(VmError::VmNotCreated) 2601 )); 2602 2603 let _ = vmm.vm_create(create_dummy_vm_config()); 2604 assert!(vmm.vm_config.as_ref().unwrap().lock().unwrap().fs.is_none()); 2605 2606 assert!(vmm.vm_add_fs(fs_config.clone()).unwrap().is_none()); 2607 assert_eq!( 2608 vmm.vm_config 2609 .as_ref() 2610 .unwrap() 2611 .lock() 2612 .unwrap() 2613 .fs 2614 .clone() 2615 .unwrap() 2616 .len(), 2617 1 2618 ); 2619 assert_eq!( 2620 vmm.vm_config 2621 .as_ref() 2622 .unwrap() 2623 .lock() 2624 .unwrap() 2625 .fs 2626 .clone() 2627 .unwrap()[0], 2628 fs_config 2629 ); 2630 } 2631 2632 #[test] 2633 fn test_vmm_vm_cold_add_pmem() { 2634 let mut vmm = create_dummy_vmm(); 2635 let pmem_config = PmemConfig::parse("file=/tmp/pmem,size=128M").unwrap(); 2636 2637 assert!(matches!( 2638 vmm.vm_add_pmem(pmem_config.clone()), 2639 Err(VmError::VmNotCreated) 2640 )); 2641 2642 let _ = vmm.vm_create(create_dummy_vm_config()); 2643 assert!(vmm 2644 .vm_config 2645 .as_ref() 2646 .unwrap() 2647 .lock() 2648 .unwrap() 2649 .pmem 2650 .is_none()); 2651 2652 assert!(vmm.vm_add_pmem(pmem_config.clone()).unwrap().is_none()); 2653 assert_eq!( 2654 vmm.vm_config 2655 .as_ref() 2656 .unwrap() 2657 .lock() 2658 .unwrap() 2659 .pmem 2660 .clone() 2661 .unwrap() 2662 .len(), 2663 1 2664 ); 2665 assert_eq!( 2666 vmm.vm_config 2667 .as_ref() 2668 .unwrap() 2669 .lock() 2670 .unwrap() 2671 .pmem 2672 .clone() 2673 .unwrap()[0], 2674 pmem_config 2675 ); 2676 } 2677 2678 #[test] 2679 fn test_vmm_vm_cold_add_net() { 2680 let mut vmm = create_dummy_vmm(); 2681 let net_config = NetConfig::parse( 2682 "mac=de:ad:be:ef:12:34,host_mac=12:34:de:ad:be:ef,vhost_user=true,socket=/tmp/sock", 2683 ) 2684 .unwrap(); 2685 2686 assert!(matches!( 2687 vmm.vm_add_net(net_config.clone()), 2688 Err(VmError::VmNotCreated) 2689 )); 2690 2691 let _ = vmm.vm_create(create_dummy_vm_config()); 2692 assert!(vmm 2693 .vm_config 2694 .as_ref() 2695 .unwrap() 2696 .lock() 2697 .unwrap() 2698 .net 2699 .is_none()); 2700 2701 assert!(vmm.vm_add_net(net_config.clone()).unwrap().is_none()); 2702 assert_eq!( 2703 vmm.vm_config 2704 .as_ref() 2705 .unwrap() 2706 .lock() 2707 .unwrap() 2708 .net 2709 .clone() 2710 .unwrap() 2711 .len(), 2712 1 2713 ); 2714 assert_eq!( 2715 vmm.vm_config 2716 .as_ref() 2717 .unwrap() 2718 .lock() 2719 .unwrap() 2720 .net 2721 .clone() 2722 .unwrap()[0], 2723 net_config 2724 ); 2725 } 2726 2727 #[test] 2728 fn test_vmm_vm_cold_add_vdpa() { 2729 let mut vmm = create_dummy_vmm(); 2730 let vdpa_config = VdpaConfig::parse("path=/dev/vhost-vdpa,num_queues=2").unwrap(); 2731 2732 assert!(matches!( 2733 vmm.vm_add_vdpa(vdpa_config.clone()), 2734 Err(VmError::VmNotCreated) 2735 )); 2736 2737 let _ = vmm.vm_create(create_dummy_vm_config()); 2738 assert!(vmm 2739 .vm_config 2740 .as_ref() 2741 .unwrap() 2742 .lock() 2743 .unwrap() 2744 .vdpa 2745 .is_none()); 2746 2747 assert!(vmm.vm_add_vdpa(vdpa_config.clone()).unwrap().is_none()); 2748 assert_eq!( 2749 vmm.vm_config 2750 .as_ref() 2751 .unwrap() 2752 .lock() 2753 .unwrap() 2754 .vdpa 2755 .clone() 2756 .unwrap() 2757 .len(), 2758 1 2759 ); 2760 assert_eq!( 2761 vmm.vm_config 2762 .as_ref() 2763 .unwrap() 2764 .lock() 2765 .unwrap() 2766 .vdpa 2767 .clone() 2768 .unwrap()[0], 2769 vdpa_config 2770 ); 2771 } 2772 2773 #[test] 2774 fn test_vmm_vm_cold_add_vsock() { 2775 let mut vmm = create_dummy_vmm(); 2776 let vsock_config = VsockConfig::parse("socket=/tmp/sock,cid=3,iommu=on").unwrap(); 2777 2778 assert!(matches!( 2779 vmm.vm_add_vsock(vsock_config.clone()), 2780 Err(VmError::VmNotCreated) 2781 )); 2782 2783 let _ = vmm.vm_create(create_dummy_vm_config()); 2784 assert!(vmm 2785 .vm_config 2786 .as_ref() 2787 .unwrap() 2788 .lock() 2789 .unwrap() 2790 .vsock 2791 .is_none()); 2792 2793 assert!(vmm.vm_add_vsock(vsock_config.clone()).unwrap().is_none()); 2794 assert_eq!( 2795 vmm.vm_config 2796 .as_ref() 2797 .unwrap() 2798 .lock() 2799 .unwrap() 2800 .vsock 2801 .clone() 2802 .unwrap(), 2803 vsock_config 2804 ); 2805 } 2806 } 2807