1 // Copyright © 2019 Intel Corporation 2 // 3 // SPDX-License-Identifier: Apache-2.0 4 // 5 6 #[macro_use] 7 extern crate event_monitor; 8 #[macro_use] 9 extern crate log; 10 11 use std::collections::HashMap; 12 use std::fs::File; 13 use std::io::{stdout, Read, Write}; 14 use std::net::{TcpListener, TcpStream}; 15 use std::os::unix::io::{AsRawFd, FromRawFd, RawFd}; 16 use std::os::unix::net::{UnixListener, UnixStream}; 17 use std::panic::AssertUnwindSafe; 18 use std::path::PathBuf; 19 use std::rc::Rc; 20 use std::sync::mpsc::{Receiver, RecvError, SendError, Sender}; 21 use std::sync::{Arc, Mutex}; 22 #[cfg(not(target_arch = "riscv64"))] 23 use std::time::Instant; 24 use std::{io, result, thread}; 25 26 use anyhow::anyhow; 27 #[cfg(feature = "dbus_api")] 28 use api::dbus::{DBusApiOptions, DBusApiShutdownChannels}; 29 use api::http::HttpApiHandle; 30 use console_devices::{pre_create_console_devices, ConsoleInfo}; 31 use landlock::LandlockError; 32 use libc::{tcsetattr, termios, EFD_NONBLOCK, SIGINT, SIGTERM, TCSANOW}; 33 use memory_manager::MemoryManagerSnapshotData; 34 use pci::PciBdf; 35 use seccompiler::{apply_filter, SeccompAction}; 36 use serde::ser::{SerializeStruct, Serializer}; 37 use serde::{Deserialize, Serialize}; 38 use signal_hook::iterator::{Handle, Signals}; 39 use thiserror::Error; 40 use tracer::trace_scoped; 41 use vm_memory::bitmap::{AtomicBitmap, BitmapSlice}; 42 use vm_memory::{ReadVolatile, VolatileMemoryError, VolatileSlice, WriteVolatile}; 43 use vm_migration::protocol::*; 44 use vm_migration::{Migratable, MigratableError, Pausable, Snapshot, Snapshottable, Transportable}; 45 use vmm_sys_util::eventfd::EventFd; 46 use vmm_sys_util::signal::unblock_signal; 47 use vmm_sys_util::sock_ctrl_msg::ScmSocket; 48 49 use crate::api::{ 50 ApiRequest, ApiResponse, RequestHandler, VmInfoResponse, VmReceiveMigrationData, 51 VmSendMigrationData, VmmPingResponse, 52 }; 53 use crate::config::{add_to_config, RestoreConfig}; 54 #[cfg(all(target_arch = "x86_64", feature = "guest_debug"))] 55 use crate::coredump::GuestDebuggable; 56 use crate::landlock::Landlock; 57 use crate::memory_manager::MemoryManager; 58 #[cfg(all(feature = "kvm", target_arch = "x86_64"))] 59 use crate::migration::get_vm_snapshot; 60 use crate::migration::{recv_vm_config, recv_vm_state}; 61 use crate::seccomp_filters::{get_seccomp_filter, Thread}; 62 use crate::vm::{Error as VmError, Vm, VmState}; 63 use crate::vm_config::{ 64 DeviceConfig, DiskConfig, FsConfig, NetConfig, PmemConfig, UserDeviceConfig, VdpaConfig, 65 VmConfig, VsockConfig, 66 }; 67 68 #[cfg(not(target_arch = "riscv64"))] 69 mod acpi; 70 pub mod api; 71 mod clone3; 72 pub mod config; 73 pub mod console_devices; 74 #[cfg(all(target_arch = "x86_64", feature = "guest_debug"))] 75 mod coredump; 76 pub mod cpu; 77 pub mod device_manager; 78 pub mod device_tree; 79 #[cfg(feature = "guest_debug")] 80 mod gdb; 81 #[cfg(feature = "igvm")] 82 mod igvm; 83 pub mod interrupt; 84 pub mod landlock; 85 pub mod memory_manager; 86 pub mod migration; 87 mod pci_segment; 88 pub mod seccomp_filters; 89 mod serial_manager; 90 mod sigwinch_listener; 91 pub mod vm; 92 pub mod vm_config; 93 94 type GuestMemoryMmap = vm_memory::GuestMemoryMmap<AtomicBitmap>; 95 type GuestRegionMmap = vm_memory::GuestRegionMmap<AtomicBitmap>; 96 97 /// Errors associated with VMM management 98 #[derive(Debug, Error)] 99 pub enum Error { 100 /// API request receive error 101 #[error("Error receiving API request: {0}")] 102 ApiRequestRecv(#[source] RecvError), 103 104 /// API response send error 105 #[error("Error sending API request: {0}")] 106 ApiResponseSend(#[source] SendError<ApiResponse>), 107 108 /// Cannot bind to the UNIX domain socket path 109 #[error("Error binding to UNIX domain socket: {0}")] 110 Bind(#[source] io::Error), 111 112 /// Cannot clone EventFd. 113 #[error("Error cloning EventFd: {0}")] 114 EventFdClone(#[source] io::Error), 115 116 /// Cannot create EventFd. 117 #[error("Error creating EventFd: {0}")] 118 EventFdCreate(#[source] io::Error), 119 120 /// Cannot read from EventFd. 121 #[error("Error reading from EventFd: {0}")] 122 EventFdRead(#[source] io::Error), 123 124 /// Cannot create epoll context. 125 #[error("Error creating epoll context: {0}")] 126 Epoll(#[source] io::Error), 127 128 /// Cannot create HTTP thread 129 #[error("Error spawning HTTP thread: {0}")] 130 HttpThreadSpawn(#[source] io::Error), 131 132 /// Cannot create D-Bus thread 133 #[cfg(feature = "dbus_api")] 134 #[error("Error spawning D-Bus thread: {0}")] 135 DBusThreadSpawn(#[source] io::Error), 136 137 /// Cannot start D-Bus session 138 #[cfg(feature = "dbus_api")] 139 #[error("Error starting D-Bus session: {0}")] 140 CreateDBusSession(#[source] zbus::Error), 141 142 /// Cannot create `event-monitor` thread 143 #[error("Error spawning `event-monitor` thread: {0}")] 144 EventMonitorThreadSpawn(#[source] io::Error), 145 146 /// Cannot handle the VM STDIN stream 147 #[error("Error handling VM stdin: {0:?}")] 148 Stdin(VmError), 149 150 /// Cannot handle the VM pty stream 151 #[error("Error handling VM pty: {0:?}")] 152 Pty(VmError), 153 154 /// Cannot reboot the VM 155 #[error("Error rebooting VM: {0:?}")] 156 VmReboot(VmError), 157 158 /// Cannot create VMM thread 159 #[error("Error spawning VMM thread {0:?}")] 160 VmmThreadSpawn(#[source] io::Error), 161 162 /// Cannot shut the VMM down 163 #[error("Error shutting down VMM: {0:?}")] 164 VmmShutdown(VmError), 165 166 /// Cannot create seccomp filter 167 #[error("Error creating seccomp filter: {0}")] 168 CreateSeccompFilter(seccompiler::Error), 169 170 /// Cannot apply seccomp filter 171 #[error("Error applying seccomp filter: {0}")] 172 ApplySeccompFilter(seccompiler::Error), 173 174 /// Error activating virtio devices 175 #[error("Error activating virtio devices: {0:?}")] 176 ActivateVirtioDevices(VmError), 177 178 /// Error creating API server 179 #[error("Error creating API server {0:?}")] 180 CreateApiServer(micro_http::ServerError), 181 182 /// Error binding API server socket 183 #[error("Error creation API server's socket {0:?}")] 184 CreateApiServerSocket(#[source] io::Error), 185 186 #[cfg(feature = "guest_debug")] 187 #[error("Failed to start the GDB thread: {0}")] 188 GdbThreadSpawn(io::Error), 189 190 /// GDB request receive error 191 #[cfg(feature = "guest_debug")] 192 #[error("Error receiving GDB request: {0}")] 193 GdbRequestRecv(#[source] RecvError), 194 195 /// GDB response send error 196 #[cfg(feature = "guest_debug")] 197 #[error("Error sending GDB request: {0}")] 198 GdbResponseSend(#[source] SendError<gdb::GdbResponse>), 199 200 #[error("Cannot spawn a signal handler thread: {0}")] 201 SignalHandlerSpawn(#[source] io::Error), 202 203 #[error("Failed to join on threads: {0:?}")] 204 ThreadCleanup(std::boxed::Box<dyn std::any::Any + std::marker::Send>), 205 206 /// Cannot create Landlock object 207 #[error("Error creating landlock object: {0}")] 208 CreateLandlock(LandlockError), 209 210 /// Cannot apply landlock based sandboxing 211 #[error("Error applying landlock: {0}")] 212 ApplyLandlock(LandlockError), 213 } 214 pub type Result<T> = result::Result<T, Error>; 215 216 #[derive(Debug, Clone, Copy, PartialEq, Eq)] 217 #[repr(u64)] 218 pub enum EpollDispatch { 219 Exit = 0, 220 Reset = 1, 221 Api = 2, 222 ActivateVirtioDevices = 3, 223 Debug = 4, 224 Unknown, 225 } 226 227 impl From<u64> for EpollDispatch { 228 fn from(v: u64) -> Self { 229 use EpollDispatch::*; 230 match v { 231 0 => Exit, 232 1 => Reset, 233 2 => Api, 234 3 => ActivateVirtioDevices, 235 4 => Debug, 236 _ => Unknown, 237 } 238 } 239 } 240 241 enum SocketStream { 242 Unix(UnixStream), 243 Tcp(TcpStream), 244 } 245 246 impl Read for SocketStream { 247 fn read(&mut self, buf: &mut [u8]) -> std::io::Result<usize> { 248 match self { 249 SocketStream::Unix(stream) => stream.read(buf), 250 SocketStream::Tcp(stream) => stream.read(buf), 251 } 252 } 253 } 254 255 impl Write for SocketStream { 256 fn write(&mut self, buf: &[u8]) -> std::io::Result<usize> { 257 match self { 258 SocketStream::Unix(stream) => stream.write(buf), 259 SocketStream::Tcp(stream) => stream.write(buf), 260 } 261 } 262 263 fn flush(&mut self) -> std::io::Result<()> { 264 match self { 265 SocketStream::Unix(stream) => stream.flush(), 266 SocketStream::Tcp(stream) => stream.flush(), 267 } 268 } 269 } 270 271 impl AsRawFd for SocketStream { 272 fn as_raw_fd(&self) -> RawFd { 273 match self { 274 SocketStream::Unix(s) => s.as_raw_fd(), 275 SocketStream::Tcp(s) => s.as_raw_fd(), 276 } 277 } 278 } 279 280 impl ReadVolatile for SocketStream { 281 fn read_volatile<B: BitmapSlice>( 282 &mut self, 283 buf: &mut VolatileSlice<B>, 284 ) -> std::result::Result<usize, VolatileMemoryError> { 285 match self { 286 SocketStream::Unix(s) => s.read_volatile(buf), 287 SocketStream::Tcp(s) => s.read_volatile(buf), 288 } 289 } 290 291 fn read_exact_volatile<B: BitmapSlice>( 292 &mut self, 293 buf: &mut VolatileSlice<B>, 294 ) -> std::result::Result<(), VolatileMemoryError> { 295 match self { 296 SocketStream::Unix(s) => s.read_exact_volatile(buf), 297 SocketStream::Tcp(s) => s.read_exact_volatile(buf), 298 } 299 } 300 } 301 302 impl WriteVolatile for SocketStream { 303 fn write_volatile<B: BitmapSlice>( 304 &mut self, 305 buf: &VolatileSlice<B>, 306 ) -> std::result::Result<usize, VolatileMemoryError> { 307 match self { 308 SocketStream::Unix(s) => s.write_volatile(buf), 309 SocketStream::Tcp(s) => s.write_volatile(buf), 310 } 311 } 312 313 fn write_all_volatile<B: BitmapSlice>( 314 &mut self, 315 buf: &VolatileSlice<B>, 316 ) -> std::result::Result<(), VolatileMemoryError> { 317 match self { 318 SocketStream::Unix(s) => s.write_all_volatile(buf), 319 SocketStream::Tcp(s) => s.write_all_volatile(buf), 320 } 321 } 322 } 323 324 pub struct EpollContext { 325 epoll_file: File, 326 } 327 328 impl EpollContext { 329 pub fn new() -> result::Result<EpollContext, io::Error> { 330 let epoll_fd = epoll::create(true)?; 331 // Use 'File' to enforce closing on 'epoll_fd' 332 // SAFETY: the epoll_fd returned by epoll::create is valid and owned by us. 333 let epoll_file = unsafe { File::from_raw_fd(epoll_fd) }; 334 335 Ok(EpollContext { epoll_file }) 336 } 337 338 pub fn add_event<T>(&mut self, fd: &T, token: EpollDispatch) -> result::Result<(), io::Error> 339 where 340 T: AsRawFd, 341 { 342 let dispatch_index = token as u64; 343 epoll::ctl( 344 self.epoll_file.as_raw_fd(), 345 epoll::ControlOptions::EPOLL_CTL_ADD, 346 fd.as_raw_fd(), 347 epoll::Event::new(epoll::Events::EPOLLIN, dispatch_index), 348 )?; 349 350 Ok(()) 351 } 352 353 #[cfg(fuzzing)] 354 pub fn add_event_custom<T>( 355 &mut self, 356 fd: &T, 357 id: u64, 358 evts: epoll::Events, 359 ) -> result::Result<(), io::Error> 360 where 361 T: AsRawFd, 362 { 363 epoll::ctl( 364 self.epoll_file.as_raw_fd(), 365 epoll::ControlOptions::EPOLL_CTL_ADD, 366 fd.as_raw_fd(), 367 epoll::Event::new(evts, id), 368 )?; 369 370 Ok(()) 371 } 372 } 373 374 impl AsRawFd for EpollContext { 375 fn as_raw_fd(&self) -> RawFd { 376 self.epoll_file.as_raw_fd() 377 } 378 } 379 380 pub struct PciDeviceInfo { 381 pub id: String, 382 pub bdf: PciBdf, 383 } 384 385 impl Serialize for PciDeviceInfo { 386 fn serialize<S>(&self, serializer: S) -> std::result::Result<S::Ok, S::Error> 387 where 388 S: Serializer, 389 { 390 let bdf_str = self.bdf.to_string(); 391 392 // Serialize the structure. 393 let mut state = serializer.serialize_struct("PciDeviceInfo", 2)?; 394 state.serialize_field("id", &self.id)?; 395 state.serialize_field("bdf", &bdf_str)?; 396 state.end() 397 } 398 } 399 400 pub fn feature_list() -> Vec<String> { 401 vec![ 402 #[cfg(feature = "dbus_api")] 403 "dbus_api".to_string(), 404 #[cfg(feature = "dhat-heap")] 405 "dhat-heap".to_string(), 406 #[cfg(feature = "guest_debug")] 407 "guest_debug".to_string(), 408 #[cfg(feature = "igvm")] 409 "igvm".to_string(), 410 #[cfg(feature = "io_uring")] 411 "io_uring".to_string(), 412 #[cfg(feature = "kvm")] 413 "kvm".to_string(), 414 #[cfg(feature = "mshv")] 415 "mshv".to_string(), 416 #[cfg(feature = "sev_snp")] 417 "sev_snp".to_string(), 418 #[cfg(feature = "tdx")] 419 "tdx".to_string(), 420 #[cfg(feature = "tracing")] 421 "tracing".to_string(), 422 ] 423 } 424 425 pub fn start_event_monitor_thread( 426 mut monitor: event_monitor::Monitor, 427 seccomp_action: &SeccompAction, 428 landlock_enable: bool, 429 hypervisor_type: hypervisor::HypervisorType, 430 exit_event: EventFd, 431 ) -> Result<thread::JoinHandle<Result<()>>> { 432 // Retrieve seccomp filter 433 let seccomp_filter = get_seccomp_filter(seccomp_action, Thread::EventMonitor, hypervisor_type) 434 .map_err(Error::CreateSeccompFilter)?; 435 436 thread::Builder::new() 437 .name("event-monitor".to_owned()) 438 .spawn(move || { 439 // Apply seccomp filter 440 if !seccomp_filter.is_empty() { 441 apply_filter(&seccomp_filter) 442 .map_err(Error::ApplySeccompFilter) 443 .map_err(|e| { 444 error!("Error applying seccomp filter: {:?}", e); 445 exit_event.write(1).ok(); 446 e 447 })?; 448 } 449 if landlock_enable { 450 Landlock::new() 451 .map_err(Error::CreateLandlock)? 452 .restrict_self() 453 .map_err(Error::ApplyLandlock) 454 .map_err(|e| { 455 error!("Error applying landlock to event monitor thread: {:?}", e); 456 exit_event.write(1).ok(); 457 e 458 })?; 459 } 460 461 std::panic::catch_unwind(AssertUnwindSafe(move || { 462 while let Ok(event) = monitor.rx.recv() { 463 let event = Arc::new(event); 464 465 if let Some(ref mut file) = monitor.file { 466 file.write_all(event.as_bytes().as_ref()).ok(); 467 file.write_all(b"\n\n").ok(); 468 } 469 470 for tx in monitor.broadcast.iter() { 471 tx.send(event.clone()).ok(); 472 } 473 } 474 })) 475 .map_err(|_| { 476 error!("`event-monitor` thread panicked"); 477 exit_event.write(1).ok(); 478 }) 479 .ok(); 480 481 Ok(()) 482 }) 483 .map_err(Error::EventMonitorThreadSpawn) 484 } 485 486 #[allow(unused_variables)] 487 #[allow(clippy::too_many_arguments)] 488 pub fn start_vmm_thread( 489 vmm_version: VmmVersionInfo, 490 http_path: &Option<String>, 491 http_fd: Option<RawFd>, 492 #[cfg(feature = "dbus_api")] dbus_options: Option<DBusApiOptions>, 493 api_event: EventFd, 494 api_sender: Sender<ApiRequest>, 495 api_receiver: Receiver<ApiRequest>, 496 #[cfg(feature = "guest_debug")] debug_path: Option<PathBuf>, 497 #[cfg(feature = "guest_debug")] debug_event: EventFd, 498 #[cfg(feature = "guest_debug")] vm_debug_event: EventFd, 499 exit_event: EventFd, 500 seccomp_action: &SeccompAction, 501 hypervisor: Arc<dyn hypervisor::Hypervisor>, 502 landlock_enable: bool, 503 ) -> Result<VmmThreadHandle> { 504 #[cfg(feature = "guest_debug")] 505 let gdb_hw_breakpoints = hypervisor.get_guest_debug_hw_bps(); 506 #[cfg(feature = "guest_debug")] 507 let (gdb_sender, gdb_receiver) = std::sync::mpsc::channel(); 508 #[cfg(feature = "guest_debug")] 509 let gdb_debug_event = debug_event.try_clone().map_err(Error::EventFdClone)?; 510 #[cfg(feature = "guest_debug")] 511 let gdb_vm_debug_event = vm_debug_event.try_clone().map_err(Error::EventFdClone)?; 512 513 let api_event_clone = api_event.try_clone().map_err(Error::EventFdClone)?; 514 let hypervisor_type = hypervisor.hypervisor_type(); 515 516 // Retrieve seccomp filter 517 let vmm_seccomp_filter = get_seccomp_filter(seccomp_action, Thread::Vmm, hypervisor_type) 518 .map_err(Error::CreateSeccompFilter)?; 519 520 let vmm_seccomp_action = seccomp_action.clone(); 521 let thread = { 522 let exit_event = exit_event.try_clone().map_err(Error::EventFdClone)?; 523 thread::Builder::new() 524 .name("vmm".to_string()) 525 .spawn(move || { 526 // Apply seccomp filter for VMM thread. 527 if !vmm_seccomp_filter.is_empty() { 528 apply_filter(&vmm_seccomp_filter).map_err(Error::ApplySeccompFilter)?; 529 } 530 531 let mut vmm = Vmm::new( 532 vmm_version, 533 api_event, 534 #[cfg(feature = "guest_debug")] 535 debug_event, 536 #[cfg(feature = "guest_debug")] 537 vm_debug_event, 538 vmm_seccomp_action, 539 hypervisor, 540 exit_event, 541 )?; 542 543 vmm.setup_signal_handler(landlock_enable)?; 544 545 vmm.control_loop( 546 Rc::new(api_receiver), 547 #[cfg(feature = "guest_debug")] 548 Rc::new(gdb_receiver), 549 ) 550 }) 551 .map_err(Error::VmmThreadSpawn)? 552 }; 553 554 // The VMM thread is started, we can start the dbus thread 555 // and start serving HTTP requests 556 #[cfg(feature = "dbus_api")] 557 let dbus_shutdown_chs = match dbus_options { 558 Some(opts) => { 559 let (_, chs) = api::start_dbus_thread( 560 opts, 561 api_event_clone.try_clone().map_err(Error::EventFdClone)?, 562 api_sender.clone(), 563 seccomp_action, 564 exit_event.try_clone().map_err(Error::EventFdClone)?, 565 hypervisor_type, 566 )?; 567 Some(chs) 568 } 569 None => None, 570 }; 571 572 let http_api_handle = if let Some(http_path) = http_path { 573 Some(api::start_http_path_thread( 574 http_path, 575 api_event_clone, 576 api_sender, 577 seccomp_action, 578 exit_event, 579 hypervisor_type, 580 landlock_enable, 581 )?) 582 } else if let Some(http_fd) = http_fd { 583 Some(api::start_http_fd_thread( 584 http_fd, 585 api_event_clone, 586 api_sender, 587 seccomp_action, 588 exit_event, 589 hypervisor_type, 590 landlock_enable, 591 )?) 592 } else { 593 None 594 }; 595 596 #[cfg(feature = "guest_debug")] 597 if let Some(debug_path) = debug_path { 598 let target = gdb::GdbStub::new( 599 gdb_sender, 600 gdb_debug_event, 601 gdb_vm_debug_event, 602 gdb_hw_breakpoints, 603 ); 604 thread::Builder::new() 605 .name("gdb".to_owned()) 606 .spawn(move || gdb::gdb_thread(target, &debug_path)) 607 .map_err(Error::GdbThreadSpawn)?; 608 } 609 610 Ok(VmmThreadHandle { 611 thread_handle: thread, 612 #[cfg(feature = "dbus_api")] 613 dbus_shutdown_chs, 614 http_api_handle, 615 }) 616 } 617 618 #[derive(Clone, Deserialize, Serialize)] 619 struct VmMigrationConfig { 620 vm_config: Arc<Mutex<VmConfig>>, 621 #[cfg(all(feature = "kvm", target_arch = "x86_64"))] 622 common_cpuid: Vec<hypervisor::arch::x86::CpuIdEntry>, 623 memory_manager_data: MemoryManagerSnapshotData, 624 } 625 626 #[derive(Debug, Clone)] 627 pub struct VmmVersionInfo { 628 pub build_version: String, 629 pub version: String, 630 } 631 632 impl VmmVersionInfo { 633 pub fn new(build_version: &str, version: &str) -> Self { 634 Self { 635 build_version: build_version.to_owned(), 636 version: version.to_owned(), 637 } 638 } 639 } 640 641 pub struct VmmThreadHandle { 642 pub thread_handle: thread::JoinHandle<Result<()>>, 643 #[cfg(feature = "dbus_api")] 644 pub dbus_shutdown_chs: Option<DBusApiShutdownChannels>, 645 pub http_api_handle: Option<HttpApiHandle>, 646 } 647 648 pub struct Vmm { 649 epoll: EpollContext, 650 exit_evt: EventFd, 651 reset_evt: EventFd, 652 api_evt: EventFd, 653 #[cfg(feature = "guest_debug")] 654 debug_evt: EventFd, 655 #[cfg(feature = "guest_debug")] 656 vm_debug_evt: EventFd, 657 version: VmmVersionInfo, 658 vm: Option<Vm>, 659 vm_config: Option<Arc<Mutex<VmConfig>>>, 660 seccomp_action: SeccompAction, 661 hypervisor: Arc<dyn hypervisor::Hypervisor>, 662 activate_evt: EventFd, 663 signals: Option<Handle>, 664 threads: Vec<thread::JoinHandle<()>>, 665 original_termios_opt: Arc<Mutex<Option<termios>>>, 666 console_resize_pipe: Option<Arc<File>>, 667 console_info: Option<ConsoleInfo>, 668 } 669 670 impl Vmm { 671 pub const HANDLED_SIGNALS: [i32; 2] = [SIGTERM, SIGINT]; 672 673 fn signal_handler( 674 mut signals: Signals, 675 original_termios_opt: Arc<Mutex<Option<termios>>>, 676 exit_evt: &EventFd, 677 ) { 678 for sig in &Self::HANDLED_SIGNALS { 679 unblock_signal(*sig).unwrap(); 680 } 681 682 for signal in signals.forever() { 683 match signal { 684 SIGTERM | SIGINT => { 685 if exit_evt.write(1).is_err() { 686 // Resetting the terminal is usually done as the VMM exits 687 if let Ok(lock) = original_termios_opt.lock() { 688 if let Some(termios) = *lock { 689 // SAFETY: FFI call 690 let _ = unsafe { 691 tcsetattr(stdout().lock().as_raw_fd(), TCSANOW, &termios) 692 }; 693 } 694 } else { 695 warn!("Failed to lock original termios"); 696 } 697 698 std::process::exit(1); 699 } 700 } 701 _ => (), 702 } 703 } 704 } 705 706 fn setup_signal_handler(&mut self, landlock_enable: bool) -> Result<()> { 707 let signals = Signals::new(Self::HANDLED_SIGNALS); 708 match signals { 709 Ok(signals) => { 710 self.signals = Some(signals.handle()); 711 let exit_evt = self.exit_evt.try_clone().map_err(Error::EventFdClone)?; 712 let original_termios_opt = Arc::clone(&self.original_termios_opt); 713 714 let signal_handler_seccomp_filter = get_seccomp_filter( 715 &self.seccomp_action, 716 Thread::SignalHandler, 717 self.hypervisor.hypervisor_type(), 718 ) 719 .map_err(Error::CreateSeccompFilter)?; 720 self.threads.push( 721 thread::Builder::new() 722 .name("vmm_signal_handler".to_string()) 723 .spawn(move || { 724 if !signal_handler_seccomp_filter.is_empty() { 725 if let Err(e) = apply_filter(&signal_handler_seccomp_filter) 726 .map_err(Error::ApplySeccompFilter) 727 { 728 error!("Error applying seccomp filter: {:?}", e); 729 exit_evt.write(1).ok(); 730 return; 731 } 732 } 733 if landlock_enable{ 734 match Landlock::new() { 735 Ok(landlock) => { 736 let _ = landlock.restrict_self().map_err(Error::ApplyLandlock).map_err(|e| { 737 error!("Error applying Landlock to signal handler thread: {:?}", e); 738 exit_evt.write(1).ok(); 739 }); 740 } 741 Err(e) => { 742 error!("Error creating Landlock object: {:?}", e); 743 exit_evt.write(1).ok(); 744 } 745 }; 746 } 747 748 std::panic::catch_unwind(AssertUnwindSafe(|| { 749 Vmm::signal_handler(signals, original_termios_opt, &exit_evt); 750 })) 751 .map_err(|_| { 752 error!("vmm signal_handler thread panicked"); 753 exit_evt.write(1).ok() 754 }) 755 .ok(); 756 }) 757 .map_err(Error::SignalHandlerSpawn)?, 758 ); 759 } 760 Err(e) => error!("Signal not found {}", e), 761 } 762 Ok(()) 763 } 764 765 #[allow(clippy::too_many_arguments)] 766 fn new( 767 vmm_version: VmmVersionInfo, 768 api_evt: EventFd, 769 #[cfg(feature = "guest_debug")] debug_evt: EventFd, 770 #[cfg(feature = "guest_debug")] vm_debug_evt: EventFd, 771 seccomp_action: SeccompAction, 772 hypervisor: Arc<dyn hypervisor::Hypervisor>, 773 exit_evt: EventFd, 774 ) -> Result<Self> { 775 let mut epoll = EpollContext::new().map_err(Error::Epoll)?; 776 let reset_evt = EventFd::new(EFD_NONBLOCK).map_err(Error::EventFdCreate)?; 777 let activate_evt = EventFd::new(EFD_NONBLOCK).map_err(Error::EventFdCreate)?; 778 779 epoll 780 .add_event(&exit_evt, EpollDispatch::Exit) 781 .map_err(Error::Epoll)?; 782 783 epoll 784 .add_event(&reset_evt, EpollDispatch::Reset) 785 .map_err(Error::Epoll)?; 786 787 epoll 788 .add_event(&activate_evt, EpollDispatch::ActivateVirtioDevices) 789 .map_err(Error::Epoll)?; 790 791 epoll 792 .add_event(&api_evt, EpollDispatch::Api) 793 .map_err(Error::Epoll)?; 794 795 #[cfg(feature = "guest_debug")] 796 epoll 797 .add_event(&debug_evt, EpollDispatch::Debug) 798 .map_err(Error::Epoll)?; 799 800 Ok(Vmm { 801 epoll, 802 exit_evt, 803 reset_evt, 804 api_evt, 805 #[cfg(feature = "guest_debug")] 806 debug_evt, 807 #[cfg(feature = "guest_debug")] 808 vm_debug_evt, 809 version: vmm_version, 810 vm: None, 811 vm_config: None, 812 seccomp_action, 813 hypervisor, 814 activate_evt, 815 signals: None, 816 threads: vec![], 817 original_termios_opt: Arc::new(Mutex::new(None)), 818 console_resize_pipe: None, 819 console_info: None, 820 }) 821 } 822 823 fn vm_receive_config<T>( 824 &mut self, 825 req: &Request, 826 socket: &mut T, 827 existing_memory_files: Option<HashMap<u32, File>>, 828 ) -> std::result::Result<Arc<Mutex<MemoryManager>>, MigratableError> 829 where 830 T: Read + Write, 831 { 832 // Read in config data along with memory manager data 833 let mut data: Vec<u8> = Vec::new(); 834 data.resize_with(req.length() as usize, Default::default); 835 socket 836 .read_exact(&mut data) 837 .map_err(MigratableError::MigrateSocket)?; 838 839 let vm_migration_config: VmMigrationConfig = 840 serde_json::from_slice(&data).map_err(|e| { 841 MigratableError::MigrateReceive(anyhow!("Error deserialising config: {}", e)) 842 })?; 843 844 #[cfg(all(feature = "kvm", target_arch = "x86_64"))] 845 self.vm_check_cpuid_compatibility( 846 &vm_migration_config.vm_config, 847 &vm_migration_config.common_cpuid, 848 )?; 849 850 let config = vm_migration_config.vm_config.clone(); 851 self.vm_config = Some(vm_migration_config.vm_config); 852 self.console_info = Some(pre_create_console_devices(self).map_err(|e| { 853 MigratableError::MigrateReceive(anyhow!("Error creating console devices: {:?}", e)) 854 })?); 855 856 if self 857 .vm_config 858 .as_ref() 859 .unwrap() 860 .lock() 861 .unwrap() 862 .landlock_enable 863 { 864 apply_landlock(self.vm_config.as_ref().unwrap().clone()).map_err(|e| { 865 MigratableError::MigrateReceive(anyhow!("Error applying landlock: {:?}", e)) 866 })?; 867 } 868 869 let vm = Vm::create_hypervisor_vm( 870 &self.hypervisor, 871 #[cfg(feature = "tdx")] 872 false, 873 #[cfg(feature = "sev_snp")] 874 false, 875 #[cfg(feature = "sev_snp")] 876 config.lock().unwrap().memory.total_size(), 877 ) 878 .map_err(|e| { 879 MigratableError::MigrateReceive(anyhow!( 880 "Error creating hypervisor VM from snapshot: {:?}", 881 e 882 )) 883 })?; 884 885 let phys_bits = 886 vm::physical_bits(&self.hypervisor, config.lock().unwrap().cpus.max_phys_bits); 887 888 let memory_manager = MemoryManager::new( 889 vm, 890 &config.lock().unwrap().memory.clone(), 891 None, 892 phys_bits, 893 #[cfg(feature = "tdx")] 894 false, 895 Some(&vm_migration_config.memory_manager_data), 896 existing_memory_files, 897 #[cfg(target_arch = "x86_64")] 898 None, 899 ) 900 .map_err(|e| { 901 MigratableError::MigrateReceive(anyhow!( 902 "Error creating MemoryManager from snapshot: {:?}", 903 e 904 )) 905 })?; 906 907 Response::ok().write_to(socket)?; 908 909 Ok(memory_manager) 910 } 911 912 fn vm_receive_state<T>( 913 &mut self, 914 req: &Request, 915 socket: &mut T, 916 mm: Arc<Mutex<MemoryManager>>, 917 ) -> std::result::Result<(), MigratableError> 918 where 919 T: Read + Write, 920 { 921 // Read in state data 922 let mut data: Vec<u8> = Vec::new(); 923 data.resize_with(req.length() as usize, Default::default); 924 socket 925 .read_exact(&mut data) 926 .map_err(MigratableError::MigrateSocket)?; 927 let snapshot: Snapshot = serde_json::from_slice(&data).map_err(|e| { 928 MigratableError::MigrateReceive(anyhow!("Error deserialising snapshot: {}", e)) 929 })?; 930 931 let exit_evt = self.exit_evt.try_clone().map_err(|e| { 932 MigratableError::MigrateReceive(anyhow!("Error cloning exit EventFd: {}", e)) 933 })?; 934 let reset_evt = self.reset_evt.try_clone().map_err(|e| { 935 MigratableError::MigrateReceive(anyhow!("Error cloning reset EventFd: {}", e)) 936 })?; 937 #[cfg(feature = "guest_debug")] 938 let debug_evt = self.vm_debug_evt.try_clone().map_err(|e| { 939 MigratableError::MigrateReceive(anyhow!("Error cloning debug EventFd: {}", e)) 940 })?; 941 let activate_evt = self.activate_evt.try_clone().map_err(|e| { 942 MigratableError::MigrateReceive(anyhow!("Error cloning activate EventFd: {}", e)) 943 })?; 944 945 #[cfg(not(target_arch = "riscv64"))] 946 let timestamp = Instant::now(); 947 let hypervisor_vm = mm.lock().unwrap().vm.clone(); 948 let mut vm = Vm::new_from_memory_manager( 949 self.vm_config.clone().unwrap(), 950 mm, 951 hypervisor_vm, 952 exit_evt, 953 reset_evt, 954 #[cfg(feature = "guest_debug")] 955 debug_evt, 956 &self.seccomp_action, 957 self.hypervisor.clone(), 958 activate_evt, 959 #[cfg(not(target_arch = "riscv64"))] 960 timestamp, 961 self.console_info.clone(), 962 self.console_resize_pipe.clone(), 963 Arc::clone(&self.original_termios_opt), 964 Some(snapshot), 965 ) 966 .map_err(|e| { 967 MigratableError::MigrateReceive(anyhow!("Error creating VM from snapshot: {:?}", e)) 968 })?; 969 970 // Create VM 971 vm.restore().map_err(|e| { 972 Response::error().write_to(socket).ok(); 973 MigratableError::MigrateReceive(anyhow!("Failed restoring the Vm: {}", e)) 974 })?; 975 self.vm = Some(vm); 976 977 Response::ok().write_to(socket)?; 978 979 Ok(()) 980 } 981 982 fn vm_receive_memory<T>( 983 &mut self, 984 req: &Request, 985 socket: &mut T, 986 memory_manager: &mut MemoryManager, 987 ) -> std::result::Result<(), MigratableError> 988 where 989 T: Read + ReadVolatile + Write, 990 { 991 // Read table 992 let table = MemoryRangeTable::read_from(socket, req.length())?; 993 994 // And then read the memory itself 995 memory_manager 996 .receive_memory_regions(&table, socket) 997 .inspect_err(|_| { 998 Response::error().write_to(socket).ok(); 999 })?; 1000 Response::ok().write_to(socket)?; 1001 Ok(()) 1002 } 1003 1004 fn socket_url_to_path(url: &str) -> result::Result<PathBuf, MigratableError> { 1005 url.strip_prefix("unix:") 1006 .ok_or_else(|| { 1007 MigratableError::MigrateSend(anyhow!("Could not extract path from URL: {}", url)) 1008 }) 1009 .map(|s| s.into()) 1010 } 1011 1012 fn send_migration_socket( 1013 destination_url: &str, 1014 ) -> std::result::Result<SocketStream, MigratableError> { 1015 if let Some(address) = destination_url.strip_prefix("tcp:") { 1016 info!("Connecting to TCP socket at {}", address); 1017 1018 let socket = TcpStream::connect(address).map_err(|e| { 1019 MigratableError::MigrateSend(anyhow!("Error connecting to TCP socket: {}", e)) 1020 })?; 1021 1022 Ok(SocketStream::Tcp(socket)) 1023 } else { 1024 let path = Vmm::socket_url_to_path(destination_url)?; 1025 info!("Connecting to UNIX socket at {:?}", path); 1026 1027 let socket = UnixStream::connect(&path).map_err(|e| { 1028 MigratableError::MigrateSend(anyhow!("Error connecting to UNIX socket: {}", e)) 1029 })?; 1030 1031 Ok(SocketStream::Unix(socket)) 1032 } 1033 } 1034 1035 fn receive_migration_socket( 1036 receiver_url: &str, 1037 ) -> std::result::Result<SocketStream, MigratableError> { 1038 if let Some(address) = receiver_url.strip_prefix("tcp:") { 1039 let listener = TcpListener::bind(address).map_err(|e| { 1040 MigratableError::MigrateReceive(anyhow!("Error binding to TCP socket: {}", e)) 1041 })?; 1042 1043 let (socket, _addr) = listener.accept().map_err(|e| { 1044 MigratableError::MigrateReceive(anyhow!( 1045 "Error accepting connection on TCP socket: {}", 1046 e 1047 )) 1048 })?; 1049 1050 Ok(SocketStream::Tcp(socket)) 1051 } else { 1052 let path = Vmm::socket_url_to_path(receiver_url)?; 1053 let listener = UnixListener::bind(&path).map_err(|e| { 1054 MigratableError::MigrateReceive(anyhow!("Error binding to UNIX socket: {}", e)) 1055 })?; 1056 1057 let (socket, _addr) = listener.accept().map_err(|e| { 1058 MigratableError::MigrateReceive(anyhow!( 1059 "Error accepting connection on UNIX socket: {}", 1060 e 1061 )) 1062 })?; 1063 1064 // Remove the UNIX socket file after accepting the connection 1065 std::fs::remove_file(&path).map_err(|e| { 1066 MigratableError::MigrateReceive(anyhow!("Error removing UNIX socket file: {}", e)) 1067 })?; 1068 1069 Ok(SocketStream::Unix(socket)) 1070 } 1071 } 1072 1073 // Returns true if there were dirty pages to send 1074 fn vm_maybe_send_dirty_pages( 1075 vm: &mut Vm, 1076 socket: &mut SocketStream, 1077 ) -> result::Result<bool, MigratableError> { 1078 // Send (dirty) memory table 1079 let table = vm.dirty_log()?; 1080 1081 // But if there are no regions go straight to pause 1082 if table.regions().is_empty() { 1083 return Ok(false); 1084 } 1085 1086 Request::memory(table.length()).write_to(socket).unwrap(); 1087 table.write_to(socket)?; 1088 // And then the memory itself 1089 vm.send_memory_regions(&table, socket)?; 1090 Response::read_from(socket)?.ok_or_abandon( 1091 socket, 1092 MigratableError::MigrateSend(anyhow!("Error during dirty memory migration")), 1093 )?; 1094 1095 Ok(true) 1096 } 1097 1098 fn send_migration( 1099 vm: &mut Vm, 1100 #[cfg(all(feature = "kvm", target_arch = "x86_64"))] hypervisor: Arc< 1101 dyn hypervisor::Hypervisor, 1102 >, 1103 send_data_migration: VmSendMigrationData, 1104 ) -> result::Result<(), MigratableError> { 1105 // Set up the socket connection 1106 let mut socket = Self::send_migration_socket(&send_data_migration.destination_url)?; 1107 1108 // Start the migration 1109 Request::start().write_to(&mut socket)?; 1110 Response::read_from(&mut socket)?.ok_or_abandon( 1111 &mut socket, 1112 MigratableError::MigrateSend(anyhow!("Error starting migration")), 1113 )?; 1114 1115 // Send config 1116 let vm_config = vm.get_config(); 1117 #[cfg(all(feature = "kvm", target_arch = "x86_64"))] 1118 let common_cpuid = { 1119 #[cfg(feature = "tdx")] 1120 if vm_config.lock().unwrap().is_tdx_enabled() { 1121 return Err(MigratableError::MigrateSend(anyhow!( 1122 "Live Migration is not supported when TDX is enabled" 1123 ))); 1124 }; 1125 1126 let amx = vm_config.lock().unwrap().cpus.features.amx; 1127 let phys_bits = 1128 vm::physical_bits(&hypervisor, vm_config.lock().unwrap().cpus.max_phys_bits); 1129 arch::generate_common_cpuid( 1130 &hypervisor, 1131 &arch::CpuidConfig { 1132 sgx_epc_sections: None, 1133 phys_bits, 1134 kvm_hyperv: vm_config.lock().unwrap().cpus.kvm_hyperv, 1135 #[cfg(feature = "tdx")] 1136 tdx: false, 1137 amx, 1138 }, 1139 ) 1140 .map_err(|e| { 1141 MigratableError::MigrateSend(anyhow!("Error generating common cpuid': {:?}", e)) 1142 })? 1143 }; 1144 1145 if send_data_migration.local { 1146 match &mut socket { 1147 SocketStream::Unix(unix_socket) => { 1148 // Proceed with sending memory file descriptors over UNIX socket 1149 vm.send_memory_fds(unix_socket)?; 1150 } 1151 SocketStream::Tcp(_tcp_socket) => { 1152 return Err(MigratableError::MigrateSend(anyhow!( 1153 "--local option is not supported with TCP sockets", 1154 ))); 1155 } 1156 } 1157 } 1158 1159 let vm_migration_config = VmMigrationConfig { 1160 vm_config, 1161 #[cfg(all(feature = "kvm", target_arch = "x86_64"))] 1162 common_cpuid, 1163 memory_manager_data: vm.memory_manager_data(), 1164 }; 1165 let config_data = serde_json::to_vec(&vm_migration_config).unwrap(); 1166 Request::config(config_data.len() as u64).write_to(&mut socket)?; 1167 socket 1168 .write_all(&config_data) 1169 .map_err(MigratableError::MigrateSocket)?; 1170 Response::read_from(&mut socket)?.ok_or_abandon( 1171 &mut socket, 1172 MigratableError::MigrateSend(anyhow!("Error during config migration")), 1173 )?; 1174 1175 // Let every Migratable object know about the migration being started. 1176 vm.start_migration()?; 1177 1178 if send_data_migration.local { 1179 // Now pause VM 1180 vm.pause()?; 1181 } else { 1182 // Start logging dirty pages 1183 vm.start_dirty_log()?; 1184 1185 // Send memory table 1186 let table = vm.memory_range_table()?; 1187 Request::memory(table.length()) 1188 .write_to(&mut socket) 1189 .unwrap(); 1190 table.write_to(&mut socket)?; 1191 // And then the memory itself 1192 vm.send_memory_regions(&table, &mut socket)?; 1193 Response::read_from(&mut socket)?.ok_or_abandon( 1194 &mut socket, 1195 MigratableError::MigrateSend(anyhow!("Error during dirty memory migration")), 1196 )?; 1197 1198 // Try at most 5 passes of dirty memory sending 1199 const MAX_DIRTY_MIGRATIONS: usize = 5; 1200 for i in 0..MAX_DIRTY_MIGRATIONS { 1201 info!("Dirty memory migration {} of {}", i, MAX_DIRTY_MIGRATIONS); 1202 if !Self::vm_maybe_send_dirty_pages(vm, &mut socket)? { 1203 break; 1204 } 1205 } 1206 1207 // Now pause VM 1208 vm.pause()?; 1209 1210 // Send last batch of dirty pages 1211 Self::vm_maybe_send_dirty_pages(vm, &mut socket)?; 1212 } 1213 1214 // We release the locks early to enable locking them on the destination host. 1215 // The VM is already stopped. 1216 vm.release_disk_locks() 1217 .map_err(|e| MigratableError::UnlockError(anyhow!("{e}")))?; 1218 1219 // Capture snapshot and send it 1220 let vm_snapshot = vm.snapshot()?; 1221 let snapshot_data = serde_json::to_vec(&vm_snapshot).unwrap(); 1222 Request::state(snapshot_data.len() as u64).write_to(&mut socket)?; 1223 socket 1224 .write_all(&snapshot_data) 1225 .map_err(MigratableError::MigrateSocket)?; 1226 Response::read_from(&mut socket)?.ok_or_abandon( 1227 &mut socket, 1228 MigratableError::MigrateSend(anyhow!("Error during state migration")), 1229 )?; 1230 // Complete the migration 1231 // At this step, the receiving VMM will acquire disk locks again. 1232 Request::complete().write_to(&mut socket)?; 1233 Response::read_from(&mut socket)?.ok_or_abandon( 1234 &mut socket, 1235 MigratableError::MigrateSend(anyhow!("Error completing migration")), 1236 )?; 1237 1238 // Stop logging dirty pages 1239 if !send_data_migration.local { 1240 vm.stop_dirty_log()?; 1241 } 1242 1243 info!("Migration complete"); 1244 1245 // Let every Migratable object know about the migration being complete 1246 vm.complete_migration() 1247 } 1248 1249 #[cfg(all(feature = "kvm", target_arch = "x86_64"))] 1250 fn vm_check_cpuid_compatibility( 1251 &self, 1252 src_vm_config: &Arc<Mutex<VmConfig>>, 1253 src_vm_cpuid: &[hypervisor::arch::x86::CpuIdEntry], 1254 ) -> result::Result<(), MigratableError> { 1255 #[cfg(feature = "tdx")] 1256 if src_vm_config.lock().unwrap().is_tdx_enabled() { 1257 return Err(MigratableError::MigrateReceive(anyhow!( 1258 "Live Migration is not supported when TDX is enabled" 1259 ))); 1260 }; 1261 1262 // We check the `CPUID` compatibility of between the source vm and destination, which is 1263 // mostly about feature compatibility and "topology/sgx" leaves are not relevant. 1264 let dest_cpuid = &{ 1265 let vm_config = &src_vm_config.lock().unwrap(); 1266 1267 let phys_bits = vm::physical_bits(&self.hypervisor, vm_config.cpus.max_phys_bits); 1268 arch::generate_common_cpuid( 1269 &self.hypervisor.clone(), 1270 &arch::CpuidConfig { 1271 sgx_epc_sections: None, 1272 phys_bits, 1273 kvm_hyperv: vm_config.cpus.kvm_hyperv, 1274 #[cfg(feature = "tdx")] 1275 tdx: false, 1276 amx: vm_config.cpus.features.amx, 1277 }, 1278 ) 1279 .map_err(|e| { 1280 MigratableError::MigrateReceive(anyhow!("Error generating common cpuid: {:?}", e)) 1281 })? 1282 }; 1283 arch::CpuidFeatureEntry::check_cpuid_compatibility(src_vm_cpuid, dest_cpuid).map_err(|e| { 1284 MigratableError::MigrateReceive(anyhow!( 1285 "Error checking cpu feature compatibility': {:?}", 1286 e 1287 )) 1288 }) 1289 } 1290 1291 fn vm_restore( 1292 &mut self, 1293 source_url: &str, 1294 vm_config: Arc<Mutex<VmConfig>>, 1295 prefault: bool, 1296 ) -> std::result::Result<(), VmError> { 1297 let snapshot = recv_vm_state(source_url).map_err(VmError::Restore)?; 1298 #[cfg(all(feature = "kvm", target_arch = "x86_64"))] 1299 let vm_snapshot = get_vm_snapshot(&snapshot).map_err(VmError::Restore)?; 1300 1301 #[cfg(all(feature = "kvm", target_arch = "x86_64"))] 1302 self.vm_check_cpuid_compatibility(&vm_config, &vm_snapshot.common_cpuid) 1303 .map_err(VmError::Restore)?; 1304 1305 self.vm_config = Some(Arc::clone(&vm_config)); 1306 1307 // Always re-populate the 'console_info' based on the new 'vm_config' 1308 self.console_info = 1309 Some(pre_create_console_devices(self).map_err(VmError::CreateConsoleDevices)?); 1310 1311 let exit_evt = self.exit_evt.try_clone().map_err(VmError::EventFdClone)?; 1312 let reset_evt = self.reset_evt.try_clone().map_err(VmError::EventFdClone)?; 1313 #[cfg(feature = "guest_debug")] 1314 let debug_evt = self 1315 .vm_debug_evt 1316 .try_clone() 1317 .map_err(VmError::EventFdClone)?; 1318 let activate_evt = self 1319 .activate_evt 1320 .try_clone() 1321 .map_err(VmError::EventFdClone)?; 1322 1323 let vm = Vm::new( 1324 vm_config, 1325 exit_evt, 1326 reset_evt, 1327 #[cfg(feature = "guest_debug")] 1328 debug_evt, 1329 &self.seccomp_action, 1330 self.hypervisor.clone(), 1331 activate_evt, 1332 self.console_info.clone(), 1333 self.console_resize_pipe.clone(), 1334 Arc::clone(&self.original_termios_opt), 1335 Some(snapshot), 1336 Some(source_url), 1337 Some(prefault), 1338 )?; 1339 self.vm = Some(vm); 1340 1341 if self 1342 .vm_config 1343 .as_ref() 1344 .unwrap() 1345 .lock() 1346 .unwrap() 1347 .landlock_enable 1348 { 1349 apply_landlock(self.vm_config.as_ref().unwrap().clone()) 1350 .map_err(VmError::ApplyLandlock)?; 1351 } 1352 1353 // Now we can restore the rest of the VM. 1354 if let Some(ref mut vm) = self.vm { 1355 vm.restore() 1356 } else { 1357 Err(VmError::VmNotCreated) 1358 } 1359 } 1360 1361 fn control_loop( 1362 &mut self, 1363 api_receiver: Rc<Receiver<ApiRequest>>, 1364 #[cfg(feature = "guest_debug")] gdb_receiver: Rc<Receiver<gdb::GdbRequest>>, 1365 ) -> Result<()> { 1366 const EPOLL_EVENTS_LEN: usize = 100; 1367 1368 let mut events = vec![epoll::Event::new(epoll::Events::empty(), 0); EPOLL_EVENTS_LEN]; 1369 let epoll_fd = self.epoll.as_raw_fd(); 1370 1371 'outer: loop { 1372 let num_events = match epoll::wait(epoll_fd, -1, &mut events[..]) { 1373 Ok(res) => res, 1374 Err(e) => { 1375 if e.kind() == io::ErrorKind::Interrupted { 1376 // It's well defined from the epoll_wait() syscall 1377 // documentation that the epoll loop can be interrupted 1378 // before any of the requested events occurred or the 1379 // timeout expired. In both those cases, epoll_wait() 1380 // returns an error of type EINTR, but this should not 1381 // be considered as a regular error. Instead it is more 1382 // appropriate to retry, by calling into epoll_wait(). 1383 continue; 1384 } 1385 return Err(Error::Epoll(e)); 1386 } 1387 }; 1388 1389 for event in events.iter().take(num_events) { 1390 let dispatch_event: EpollDispatch = event.data.into(); 1391 match dispatch_event { 1392 EpollDispatch::Unknown => { 1393 let event = event.data; 1394 warn!("Unknown VMM loop event: {}", event); 1395 } 1396 EpollDispatch::Exit => { 1397 info!("VM exit event"); 1398 // Consume the event. 1399 self.exit_evt.read().map_err(Error::EventFdRead)?; 1400 self.vmm_shutdown().map_err(Error::VmmShutdown)?; 1401 1402 break 'outer; 1403 } 1404 EpollDispatch::Reset => { 1405 info!("VM reset event"); 1406 // Consume the event. 1407 self.reset_evt.read().map_err(Error::EventFdRead)?; 1408 self.vm_reboot().map_err(Error::VmReboot)?; 1409 } 1410 EpollDispatch::ActivateVirtioDevices => { 1411 if let Some(ref vm) = self.vm { 1412 let count = self.activate_evt.read().map_err(Error::EventFdRead)?; 1413 info!( 1414 "Trying to activate pending virtio devices: count = {}", 1415 count 1416 ); 1417 vm.activate_virtio_devices() 1418 .map_err(Error::ActivateVirtioDevices)?; 1419 } 1420 } 1421 EpollDispatch::Api => { 1422 // Consume the events. 1423 for _ in 0..self.api_evt.read().map_err(Error::EventFdRead)? { 1424 // Read from the API receiver channel 1425 let api_request = api_receiver.recv().map_err(Error::ApiRequestRecv)?; 1426 1427 if api_request(self)? { 1428 break 'outer; 1429 } 1430 } 1431 } 1432 #[cfg(feature = "guest_debug")] 1433 EpollDispatch::Debug => { 1434 // Consume the events. 1435 for _ in 0..self.debug_evt.read().map_err(Error::EventFdRead)? { 1436 // Read from the API receiver channel 1437 let gdb_request = gdb_receiver.recv().map_err(Error::GdbRequestRecv)?; 1438 1439 let response = if let Some(ref mut vm) = self.vm { 1440 vm.debug_request(&gdb_request.payload, gdb_request.cpu_id) 1441 } else { 1442 Err(VmError::VmNotRunning) 1443 } 1444 .map_err(gdb::Error::Vm); 1445 1446 gdb_request 1447 .sender 1448 .send(response) 1449 .map_err(Error::GdbResponseSend)?; 1450 } 1451 } 1452 #[cfg(not(feature = "guest_debug"))] 1453 EpollDispatch::Debug => {} 1454 } 1455 } 1456 } 1457 1458 // Trigger the termination of the signal_handler thread 1459 if let Some(signals) = self.signals.take() { 1460 signals.close(); 1461 } 1462 1463 // Wait for all the threads to finish 1464 for thread in self.threads.drain(..) { 1465 thread.join().map_err(Error::ThreadCleanup)? 1466 } 1467 1468 Ok(()) 1469 } 1470 } 1471 1472 fn apply_landlock(vm_config: Arc<Mutex<VmConfig>>) -> result::Result<(), LandlockError> { 1473 vm_config.lock().unwrap().apply_landlock()?; 1474 Ok(()) 1475 } 1476 1477 impl RequestHandler for Vmm { 1478 fn vm_create(&mut self, config: Box<VmConfig>) -> result::Result<(), VmError> { 1479 // We only store the passed VM config. 1480 // The VM will be created when being asked to boot it. 1481 if self.vm_config.is_none() { 1482 self.vm_config = Some(Arc::new(Mutex::new(*config))); 1483 self.console_info = 1484 Some(pre_create_console_devices(self).map_err(VmError::CreateConsoleDevices)?); 1485 1486 if self 1487 .vm_config 1488 .as_ref() 1489 .unwrap() 1490 .lock() 1491 .unwrap() 1492 .landlock_enable 1493 { 1494 apply_landlock(self.vm_config.as_ref().unwrap().clone()) 1495 .map_err(VmError::ApplyLandlock)?; 1496 } 1497 Ok(()) 1498 } else { 1499 Err(VmError::VmAlreadyCreated) 1500 } 1501 } 1502 1503 fn vm_boot(&mut self) -> result::Result<(), VmError> { 1504 tracer::start(); 1505 info!("Booting VM"); 1506 event!("vm", "booting"); 1507 let r = { 1508 trace_scoped!("vm_boot"); 1509 // If we don't have a config, we cannot boot a VM. 1510 if self.vm_config.is_none() { 1511 return Err(VmError::VmMissingConfig); 1512 }; 1513 1514 // console_info is set to None in vm_shutdown. re-populate here if empty 1515 if self.console_info.is_none() { 1516 self.console_info = 1517 Some(pre_create_console_devices(self).map_err(VmError::CreateConsoleDevices)?); 1518 } 1519 1520 // Create a new VM if we don't have one yet. 1521 if self.vm.is_none() { 1522 let exit_evt = self.exit_evt.try_clone().map_err(VmError::EventFdClone)?; 1523 let reset_evt = self.reset_evt.try_clone().map_err(VmError::EventFdClone)?; 1524 #[cfg(feature = "guest_debug")] 1525 let vm_debug_evt = self 1526 .vm_debug_evt 1527 .try_clone() 1528 .map_err(VmError::EventFdClone)?; 1529 let activate_evt = self 1530 .activate_evt 1531 .try_clone() 1532 .map_err(VmError::EventFdClone)?; 1533 1534 if let Some(ref vm_config) = self.vm_config { 1535 let vm = Vm::new( 1536 Arc::clone(vm_config), 1537 exit_evt, 1538 reset_evt, 1539 #[cfg(feature = "guest_debug")] 1540 vm_debug_evt, 1541 &self.seccomp_action, 1542 self.hypervisor.clone(), 1543 activate_evt, 1544 self.console_info.clone(), 1545 self.console_resize_pipe.clone(), 1546 Arc::clone(&self.original_termios_opt), 1547 None, 1548 None, 1549 None, 1550 )?; 1551 1552 self.vm = Some(vm); 1553 } 1554 } 1555 1556 // Now we can boot the VM. 1557 if let Some(ref mut vm) = self.vm { 1558 vm.boot() 1559 } else { 1560 Err(VmError::VmNotCreated) 1561 } 1562 }; 1563 tracer::end(); 1564 if r.is_ok() { 1565 event!("vm", "booted"); 1566 } 1567 r 1568 } 1569 1570 fn vm_pause(&mut self) -> result::Result<(), VmError> { 1571 if let Some(ref mut vm) = self.vm { 1572 vm.pause().map_err(VmError::Pause) 1573 } else { 1574 Err(VmError::VmNotRunning) 1575 } 1576 } 1577 1578 fn vm_resume(&mut self) -> result::Result<(), VmError> { 1579 if let Some(ref mut vm) = self.vm { 1580 vm.resume().map_err(VmError::Resume) 1581 } else { 1582 Err(VmError::VmNotRunning) 1583 } 1584 } 1585 1586 fn vm_snapshot(&mut self, destination_url: &str) -> result::Result<(), VmError> { 1587 if let Some(ref mut vm) = self.vm { 1588 // Drain console_info so that FDs are not reused 1589 let _ = self.console_info.take(); 1590 vm.snapshot() 1591 .map_err(VmError::Snapshot) 1592 .and_then(|snapshot| { 1593 vm.send(&snapshot, destination_url) 1594 .map_err(VmError::SnapshotSend) 1595 }) 1596 } else { 1597 Err(VmError::VmNotRunning) 1598 } 1599 } 1600 1601 fn vm_restore(&mut self, restore_cfg: RestoreConfig) -> result::Result<(), VmError> { 1602 if self.vm.is_some() || self.vm_config.is_some() { 1603 return Err(VmError::VmAlreadyCreated); 1604 } 1605 1606 let source_url = restore_cfg.source_url.as_path().to_str(); 1607 if source_url.is_none() { 1608 return Err(VmError::InvalidRestoreSourceUrl); 1609 } 1610 // Safe to unwrap as we checked it was Some(&str). 1611 let source_url = source_url.unwrap(); 1612 1613 let vm_config = Arc::new(Mutex::new( 1614 recv_vm_config(source_url).map_err(VmError::Restore)?, 1615 )); 1616 restore_cfg 1617 .validate(&vm_config.lock().unwrap().clone()) 1618 .map_err(VmError::ConfigValidation)?; 1619 1620 // Update VM's net configurations with new fds received for restore operation 1621 if let (Some(restored_nets), Some(vm_net_configs)) = 1622 (restore_cfg.net_fds, &mut vm_config.lock().unwrap().net) 1623 { 1624 for net in restored_nets.iter() { 1625 for net_config in vm_net_configs.iter_mut() { 1626 // update only if the net dev is backed by FDs 1627 if net_config.id == Some(net.id.clone()) && net_config.fds.is_some() { 1628 net_config.fds.clone_from(&net.fds); 1629 } 1630 } 1631 } 1632 } 1633 1634 self.vm_restore(source_url, vm_config, restore_cfg.prefault) 1635 .map_err(|vm_restore_err| { 1636 error!("VM Restore failed: {:?}", vm_restore_err); 1637 1638 // Cleanup the VM being created while vm restore 1639 if let Err(e) = self.vm_delete() { 1640 return e; 1641 } 1642 1643 vm_restore_err 1644 }) 1645 } 1646 1647 #[cfg(all(target_arch = "x86_64", feature = "guest_debug"))] 1648 fn vm_coredump(&mut self, destination_url: &str) -> result::Result<(), VmError> { 1649 if let Some(ref mut vm) = self.vm { 1650 vm.coredump(destination_url).map_err(VmError::Coredump) 1651 } else { 1652 Err(VmError::VmNotRunning) 1653 } 1654 } 1655 1656 fn vm_shutdown(&mut self) -> result::Result<(), VmError> { 1657 let r = if let Some(ref mut vm) = self.vm.take() { 1658 // Drain console_info so that the FDs are not reused 1659 let _ = self.console_info.take(); 1660 vm.shutdown() 1661 } else { 1662 Err(VmError::VmNotRunning) 1663 }; 1664 1665 if r.is_ok() { 1666 event!("vm", "shutdown"); 1667 } 1668 1669 r 1670 } 1671 1672 fn vm_reboot(&mut self) -> result::Result<(), VmError> { 1673 event!("vm", "rebooting"); 1674 1675 // First we stop the current VM 1676 let config = if let Some(mut vm) = self.vm.take() { 1677 let config = vm.get_config(); 1678 vm.shutdown()?; 1679 config 1680 } else { 1681 return Err(VmError::VmNotCreated); 1682 }; 1683 1684 // vm.shutdown() closes all the console devices, so set console_info to None 1685 // so that the closed FD #s are not reused. 1686 let _ = self.console_info.take(); 1687 1688 let exit_evt = self.exit_evt.try_clone().map_err(VmError::EventFdClone)?; 1689 let reset_evt = self.reset_evt.try_clone().map_err(VmError::EventFdClone)?; 1690 #[cfg(feature = "guest_debug")] 1691 let debug_evt = self 1692 .vm_debug_evt 1693 .try_clone() 1694 .map_err(VmError::EventFdClone)?; 1695 let activate_evt = self 1696 .activate_evt 1697 .try_clone() 1698 .map_err(VmError::EventFdClone)?; 1699 1700 // The Linux kernel fires off an i8042 reset after doing the ACPI reset so there may be 1701 // an event sitting in the shared reset_evt. Without doing this we get very early reboots 1702 // during the boot process. 1703 if self.reset_evt.read().is_ok() { 1704 warn!("Spurious second reset event received. Ignoring."); 1705 } 1706 1707 self.console_info = 1708 Some(pre_create_console_devices(self).map_err(VmError::CreateConsoleDevices)?); 1709 1710 // Then we create the new VM 1711 let mut vm = Vm::new( 1712 config, 1713 exit_evt, 1714 reset_evt, 1715 #[cfg(feature = "guest_debug")] 1716 debug_evt, 1717 &self.seccomp_action, 1718 self.hypervisor.clone(), 1719 activate_evt, 1720 self.console_info.clone(), 1721 self.console_resize_pipe.clone(), 1722 Arc::clone(&self.original_termios_opt), 1723 None, 1724 None, 1725 None, 1726 )?; 1727 1728 // And we boot it 1729 vm.boot()?; 1730 1731 self.vm = Some(vm); 1732 1733 event!("vm", "rebooted"); 1734 1735 Ok(()) 1736 } 1737 1738 fn vm_info(&self) -> result::Result<VmInfoResponse, VmError> { 1739 match &self.vm_config { 1740 Some(vm_config) => { 1741 let state = match &self.vm { 1742 Some(vm) => vm.get_state()?, 1743 None => VmState::Created, 1744 }; 1745 let config = vm_config.lock().unwrap().clone(); 1746 1747 let mut memory_actual_size = config.memory.total_size(); 1748 if let Some(vm) = &self.vm { 1749 memory_actual_size -= vm.balloon_size(); 1750 } 1751 1752 let device_tree = self 1753 .vm 1754 .as_ref() 1755 .map(|vm| vm.device_tree().lock().unwrap().clone()); 1756 1757 Ok(VmInfoResponse { 1758 config: Box::new(config), 1759 state, 1760 memory_actual_size, 1761 device_tree, 1762 }) 1763 } 1764 None => Err(VmError::VmNotCreated), 1765 } 1766 } 1767 1768 fn vmm_ping(&self) -> VmmPingResponse { 1769 let VmmVersionInfo { 1770 build_version, 1771 version, 1772 } = self.version.clone(); 1773 1774 VmmPingResponse { 1775 build_version, 1776 version, 1777 pid: std::process::id() as i64, 1778 features: feature_list(), 1779 } 1780 } 1781 1782 fn vm_delete(&mut self) -> result::Result<(), VmError> { 1783 if self.vm_config.is_none() { 1784 return Ok(()); 1785 } 1786 1787 // If a VM is booted, we first try to shut it down. 1788 if self.vm.is_some() { 1789 self.vm_shutdown()?; 1790 } 1791 1792 self.vm_config = None; 1793 1794 event!("vm", "deleted"); 1795 1796 Ok(()) 1797 } 1798 1799 fn vmm_shutdown(&mut self) -> result::Result<(), VmError> { 1800 self.vm_delete()?; 1801 event!("vmm", "shutdown"); 1802 Ok(()) 1803 } 1804 1805 fn vm_resize( 1806 &mut self, 1807 desired_vcpus: Option<u8>, 1808 desired_ram: Option<u64>, 1809 desired_balloon: Option<u64>, 1810 ) -> result::Result<(), VmError> { 1811 self.vm_config.as_ref().ok_or(VmError::VmNotCreated)?; 1812 1813 if let Some(ref mut vm) = self.vm { 1814 if let Err(e) = vm.resize(desired_vcpus, desired_ram, desired_balloon) { 1815 error!("Error when resizing VM: {:?}", e); 1816 Err(e) 1817 } else { 1818 Ok(()) 1819 } 1820 } else { 1821 let mut config = self.vm_config.as_ref().unwrap().lock().unwrap(); 1822 if let Some(desired_vcpus) = desired_vcpus { 1823 config.cpus.boot_vcpus = desired_vcpus; 1824 } 1825 if let Some(desired_ram) = desired_ram { 1826 config.memory.size = desired_ram; 1827 } 1828 if let Some(desired_balloon) = desired_balloon { 1829 if let Some(balloon_config) = &mut config.balloon { 1830 balloon_config.size = desired_balloon; 1831 } 1832 } 1833 Ok(()) 1834 } 1835 } 1836 1837 fn vm_resize_zone(&mut self, id: String, desired_ram: u64) -> result::Result<(), VmError> { 1838 self.vm_config.as_ref().ok_or(VmError::VmNotCreated)?; 1839 1840 if let Some(ref mut vm) = self.vm { 1841 if let Err(e) = vm.resize_zone(id, desired_ram) { 1842 error!("Error when resizing VM: {:?}", e); 1843 Err(e) 1844 } else { 1845 Ok(()) 1846 } 1847 } else { 1848 // Update VmConfig by setting the new desired ram. 1849 let memory_config = &mut self.vm_config.as_ref().unwrap().lock().unwrap().memory; 1850 1851 if let Some(zones) = &mut memory_config.zones { 1852 for zone in zones.iter_mut() { 1853 if zone.id == id { 1854 zone.size = desired_ram; 1855 return Ok(()); 1856 } 1857 } 1858 } 1859 1860 error!("Could not find the memory zone {} for the resize", id); 1861 Err(VmError::ResizeZone) 1862 } 1863 } 1864 1865 fn vm_add_device( 1866 &mut self, 1867 device_cfg: DeviceConfig, 1868 ) -> result::Result<Option<Vec<u8>>, VmError> { 1869 self.vm_config.as_ref().ok_or(VmError::VmNotCreated)?; 1870 1871 { 1872 // Validate the configuration change in a cloned configuration 1873 let mut config = self.vm_config.as_ref().unwrap().lock().unwrap().clone(); 1874 add_to_config(&mut config.devices, device_cfg.clone()); 1875 config.validate().map_err(VmError::ConfigValidation)?; 1876 } 1877 1878 if let Some(ref mut vm) = self.vm { 1879 let info = vm.add_device(device_cfg).map_err(|e| { 1880 error!("Error when adding new device to the VM: {:?}", e); 1881 e 1882 })?; 1883 serde_json::to_vec(&info) 1884 .map(Some) 1885 .map_err(VmError::SerializeJson) 1886 } else { 1887 // Update VmConfig by adding the new device. 1888 let mut config = self.vm_config.as_ref().unwrap().lock().unwrap(); 1889 add_to_config(&mut config.devices, device_cfg); 1890 Ok(None) 1891 } 1892 } 1893 1894 fn vm_add_user_device( 1895 &mut self, 1896 device_cfg: UserDeviceConfig, 1897 ) -> result::Result<Option<Vec<u8>>, VmError> { 1898 self.vm_config.as_ref().ok_or(VmError::VmNotCreated)?; 1899 1900 { 1901 // Validate the configuration change in a cloned configuration 1902 let mut config = self.vm_config.as_ref().unwrap().lock().unwrap().clone(); 1903 add_to_config(&mut config.user_devices, device_cfg.clone()); 1904 config.validate().map_err(VmError::ConfigValidation)?; 1905 } 1906 1907 if let Some(ref mut vm) = self.vm { 1908 let info = vm.add_user_device(device_cfg).map_err(|e| { 1909 error!("Error when adding new user device to the VM: {:?}", e); 1910 e 1911 })?; 1912 serde_json::to_vec(&info) 1913 .map(Some) 1914 .map_err(VmError::SerializeJson) 1915 } else { 1916 // Update VmConfig by adding the new device. 1917 let mut config = self.vm_config.as_ref().unwrap().lock().unwrap(); 1918 add_to_config(&mut config.user_devices, device_cfg); 1919 Ok(None) 1920 } 1921 } 1922 1923 fn vm_remove_device(&mut self, id: String) -> result::Result<(), VmError> { 1924 if let Some(ref mut vm) = self.vm { 1925 if let Err(e) = vm.remove_device(id) { 1926 error!("Error when removing device from the VM: {:?}", e); 1927 Err(e) 1928 } else { 1929 Ok(()) 1930 } 1931 } else if let Some(ref config) = self.vm_config { 1932 let mut config = config.lock().unwrap(); 1933 if config.remove_device(&id) { 1934 Ok(()) 1935 } else { 1936 Err(VmError::NoDeviceToRemove(id)) 1937 } 1938 } else { 1939 Err(VmError::VmNotCreated) 1940 } 1941 } 1942 1943 fn vm_add_disk(&mut self, disk_cfg: DiskConfig) -> result::Result<Option<Vec<u8>>, VmError> { 1944 self.vm_config.as_ref().ok_or(VmError::VmNotCreated)?; 1945 1946 { 1947 // Validate the configuration change in a cloned configuration 1948 let mut config = self.vm_config.as_ref().unwrap().lock().unwrap().clone(); 1949 add_to_config(&mut config.disks, disk_cfg.clone()); 1950 config.validate().map_err(VmError::ConfigValidation)?; 1951 } 1952 1953 if let Some(ref mut vm) = self.vm { 1954 let info = vm.add_disk(disk_cfg).map_err(|e| { 1955 error!("Error when adding new disk to the VM: {:?}", e); 1956 e 1957 })?; 1958 serde_json::to_vec(&info) 1959 .map(Some) 1960 .map_err(VmError::SerializeJson) 1961 } else { 1962 // Update VmConfig by adding the new device. 1963 let mut config = self.vm_config.as_ref().unwrap().lock().unwrap(); 1964 add_to_config(&mut config.disks, disk_cfg); 1965 Ok(None) 1966 } 1967 } 1968 1969 fn vm_add_fs(&mut self, fs_cfg: FsConfig) -> result::Result<Option<Vec<u8>>, VmError> { 1970 self.vm_config.as_ref().ok_or(VmError::VmNotCreated)?; 1971 1972 { 1973 // Validate the configuration change in a cloned configuration 1974 let mut config = self.vm_config.as_ref().unwrap().lock().unwrap().clone(); 1975 add_to_config(&mut config.fs, fs_cfg.clone()); 1976 config.validate().map_err(VmError::ConfigValidation)?; 1977 } 1978 1979 if let Some(ref mut vm) = self.vm { 1980 let info = vm.add_fs(fs_cfg).map_err(|e| { 1981 error!("Error when adding new fs to the VM: {:?}", e); 1982 e 1983 })?; 1984 serde_json::to_vec(&info) 1985 .map(Some) 1986 .map_err(VmError::SerializeJson) 1987 } else { 1988 // Update VmConfig by adding the new device. 1989 let mut config = self.vm_config.as_ref().unwrap().lock().unwrap(); 1990 add_to_config(&mut config.fs, fs_cfg); 1991 Ok(None) 1992 } 1993 } 1994 1995 fn vm_add_pmem(&mut self, pmem_cfg: PmemConfig) -> result::Result<Option<Vec<u8>>, VmError> { 1996 self.vm_config.as_ref().ok_or(VmError::VmNotCreated)?; 1997 1998 { 1999 // Validate the configuration change in a cloned configuration 2000 let mut config = self.vm_config.as_ref().unwrap().lock().unwrap().clone(); 2001 add_to_config(&mut config.pmem, pmem_cfg.clone()); 2002 config.validate().map_err(VmError::ConfigValidation)?; 2003 } 2004 2005 if let Some(ref mut vm) = self.vm { 2006 let info = vm.add_pmem(pmem_cfg).map_err(|e| { 2007 error!("Error when adding new pmem device to the VM: {:?}", e); 2008 e 2009 })?; 2010 serde_json::to_vec(&info) 2011 .map(Some) 2012 .map_err(VmError::SerializeJson) 2013 } else { 2014 // Update VmConfig by adding the new device. 2015 let mut config = self.vm_config.as_ref().unwrap().lock().unwrap(); 2016 add_to_config(&mut config.pmem, pmem_cfg); 2017 Ok(None) 2018 } 2019 } 2020 2021 fn vm_add_net(&mut self, net_cfg: NetConfig) -> result::Result<Option<Vec<u8>>, VmError> { 2022 self.vm_config.as_ref().ok_or(VmError::VmNotCreated)?; 2023 2024 { 2025 // Validate the configuration change in a cloned configuration 2026 let mut config = self.vm_config.as_ref().unwrap().lock().unwrap().clone(); 2027 add_to_config(&mut config.net, net_cfg.clone()); 2028 config.validate().map_err(VmError::ConfigValidation)?; 2029 } 2030 2031 if let Some(ref mut vm) = self.vm { 2032 let info = vm.add_net(net_cfg).map_err(|e| { 2033 error!("Error when adding new network device to the VM: {:?}", e); 2034 e 2035 })?; 2036 serde_json::to_vec(&info) 2037 .map(Some) 2038 .map_err(VmError::SerializeJson) 2039 } else { 2040 // Update VmConfig by adding the new device. 2041 let mut config = self.vm_config.as_ref().unwrap().lock().unwrap(); 2042 add_to_config(&mut config.net, net_cfg); 2043 Ok(None) 2044 } 2045 } 2046 2047 fn vm_add_vdpa(&mut self, vdpa_cfg: VdpaConfig) -> result::Result<Option<Vec<u8>>, VmError> { 2048 self.vm_config.as_ref().ok_or(VmError::VmNotCreated)?; 2049 2050 { 2051 // Validate the configuration change in a cloned configuration 2052 let mut config = self.vm_config.as_ref().unwrap().lock().unwrap().clone(); 2053 add_to_config(&mut config.vdpa, vdpa_cfg.clone()); 2054 config.validate().map_err(VmError::ConfigValidation)?; 2055 } 2056 2057 if let Some(ref mut vm) = self.vm { 2058 let info = vm.add_vdpa(vdpa_cfg).map_err(|e| { 2059 error!("Error when adding new vDPA device to the VM: {:?}", e); 2060 e 2061 })?; 2062 serde_json::to_vec(&info) 2063 .map(Some) 2064 .map_err(VmError::SerializeJson) 2065 } else { 2066 // Update VmConfig by adding the new device. 2067 let mut config = self.vm_config.as_ref().unwrap().lock().unwrap(); 2068 add_to_config(&mut config.vdpa, vdpa_cfg); 2069 Ok(None) 2070 } 2071 } 2072 2073 fn vm_add_vsock(&mut self, vsock_cfg: VsockConfig) -> result::Result<Option<Vec<u8>>, VmError> { 2074 self.vm_config.as_ref().ok_or(VmError::VmNotCreated)?; 2075 2076 { 2077 // Validate the configuration change in a cloned configuration 2078 let mut config = self.vm_config.as_ref().unwrap().lock().unwrap().clone(); 2079 2080 if config.vsock.is_some() { 2081 return Err(VmError::TooManyVsockDevices); 2082 } 2083 2084 config.vsock = Some(vsock_cfg.clone()); 2085 config.validate().map_err(VmError::ConfigValidation)?; 2086 } 2087 2088 if let Some(ref mut vm) = self.vm { 2089 let info = vm.add_vsock(vsock_cfg).map_err(|e| { 2090 error!("Error when adding new vsock device to the VM: {:?}", e); 2091 e 2092 })?; 2093 serde_json::to_vec(&info) 2094 .map(Some) 2095 .map_err(VmError::SerializeJson) 2096 } else { 2097 // Update VmConfig by adding the new device. 2098 let mut config = self.vm_config.as_ref().unwrap().lock().unwrap(); 2099 config.vsock = Some(vsock_cfg); 2100 Ok(None) 2101 } 2102 } 2103 2104 fn vm_counters(&mut self) -> result::Result<Option<Vec<u8>>, VmError> { 2105 if let Some(ref mut vm) = self.vm { 2106 let info = vm.counters().map_err(|e| { 2107 error!("Error when getting counters from the VM: {:?}", e); 2108 e 2109 })?; 2110 serde_json::to_vec(&info) 2111 .map(Some) 2112 .map_err(VmError::SerializeJson) 2113 } else { 2114 Err(VmError::VmNotRunning) 2115 } 2116 } 2117 2118 fn vm_power_button(&mut self) -> result::Result<(), VmError> { 2119 if let Some(ref mut vm) = self.vm { 2120 vm.power_button() 2121 } else { 2122 Err(VmError::VmNotRunning) 2123 } 2124 } 2125 2126 fn vm_nmi(&mut self) -> result::Result<(), VmError> { 2127 if let Some(ref mut vm) = self.vm { 2128 vm.nmi() 2129 } else { 2130 Err(VmError::VmNotRunning) 2131 } 2132 } 2133 2134 fn vm_receive_migration( 2135 &mut self, 2136 receive_data_migration: VmReceiveMigrationData, 2137 ) -> result::Result<(), MigratableError> { 2138 info!( 2139 "Receiving migration: receiver_url = {}", 2140 receive_data_migration.receiver_url 2141 ); 2142 2143 // Accept the connection and get the socket 2144 let mut socket = Vmm::receive_migration_socket(&receive_data_migration.receiver_url)?; 2145 2146 let mut started = false; 2147 let mut memory_manager: Option<Arc<Mutex<MemoryManager>>> = None; 2148 let mut existing_memory_files = None; 2149 loop { 2150 let req = Request::read_from(&mut socket)?; 2151 match req.command() { 2152 Command::Invalid => info!("Invalid Command Received"), 2153 Command::Start => { 2154 info!("Start Command Received"); 2155 started = true; 2156 2157 Response::ok().write_to(&mut socket)?; 2158 } 2159 Command::Config => { 2160 info!("Config Command Received"); 2161 2162 if !started { 2163 warn!("Migration not started yet"); 2164 Response::error().write_to(&mut socket)?; 2165 continue; 2166 } 2167 memory_manager = Some(self.vm_receive_config( 2168 &req, 2169 &mut socket, 2170 existing_memory_files.take(), 2171 )?); 2172 } 2173 Command::State => { 2174 info!("State Command Received"); 2175 2176 if !started { 2177 warn!("Migration not started yet"); 2178 Response::error().write_to(&mut socket)?; 2179 continue; 2180 } 2181 if let Some(mm) = memory_manager.take() { 2182 self.vm_receive_state(&req, &mut socket, mm)?; 2183 } else { 2184 warn!("Configuration not sent yet"); 2185 Response::error().write_to(&mut socket)?; 2186 } 2187 } 2188 Command::Memory => { 2189 info!("Memory Command Received"); 2190 2191 if !started { 2192 warn!("Migration not started yet"); 2193 Response::error().write_to(&mut socket)?; 2194 continue; 2195 } 2196 if let Some(mm) = memory_manager.as_ref() { 2197 self.vm_receive_memory(&req, &mut socket, &mut mm.lock().unwrap())?; 2198 } else { 2199 warn!("Configuration not sent yet"); 2200 Response::error().write_to(&mut socket)?; 2201 } 2202 } 2203 Command::MemoryFd => { 2204 info!("MemoryFd Command Received"); 2205 2206 if !started { 2207 warn!("Migration not started yet"); 2208 Response::error().write_to(&mut socket)?; 2209 continue; 2210 } 2211 2212 match &mut socket { 2213 SocketStream::Unix(unix_socket) => { 2214 let mut buf = [0u8; 4]; 2215 let (_, file) = unix_socket.recv_with_fd(&mut buf).map_err(|e| { 2216 MigratableError::MigrateReceive(anyhow!( 2217 "Error receiving slot from socket: {}", 2218 e 2219 )) 2220 })?; 2221 2222 if existing_memory_files.is_none() { 2223 existing_memory_files = Some(HashMap::default()) 2224 } 2225 2226 if let Some(ref mut existing_memory_files) = existing_memory_files { 2227 let slot = u32::from_le_bytes(buf); 2228 existing_memory_files.insert(slot, file.unwrap()); 2229 } 2230 2231 Response::ok().write_to(&mut socket)?; 2232 } 2233 SocketStream::Tcp(_tcp_socket) => { 2234 // For TCP sockets, we cannot transfer file descriptors 2235 warn!( 2236 "MemoryFd command received over TCP socket, which is not supported" 2237 ); 2238 Response::error().write_to(&mut socket)?; 2239 } 2240 } 2241 } 2242 Command::Complete => { 2243 info!("Complete Command Received"); 2244 if let Some(ref mut vm) = self.vm.as_mut() { 2245 vm.resume()?; 2246 Response::ok().write_to(&mut socket)?; 2247 } else { 2248 warn!("VM not created yet"); 2249 Response::error().write_to(&mut socket)?; 2250 } 2251 break; 2252 } 2253 Command::Abandon => { 2254 info!("Abandon Command Received"); 2255 self.vm = None; 2256 self.vm_config = None; 2257 Response::ok().write_to(&mut socket).ok(); 2258 break; 2259 } 2260 } 2261 } 2262 2263 Ok(()) 2264 } 2265 2266 fn vm_send_migration( 2267 &mut self, 2268 send_data_migration: VmSendMigrationData, 2269 ) -> result::Result<(), MigratableError> { 2270 info!( 2271 "Sending migration: destination_url = {}, local = {}", 2272 send_data_migration.destination_url, send_data_migration.local 2273 ); 2274 2275 if !self 2276 .vm_config 2277 .as_ref() 2278 .unwrap() 2279 .lock() 2280 .unwrap() 2281 .backed_by_shared_memory() 2282 && send_data_migration.local 2283 { 2284 return Err(MigratableError::MigrateSend(anyhow!( 2285 "Local migration requires shared memory or hugepages enabled" 2286 ))); 2287 } 2288 2289 if let Some(vm) = self.vm.as_mut() { 2290 Self::send_migration( 2291 vm, 2292 #[cfg(all(feature = "kvm", target_arch = "x86_64"))] 2293 self.hypervisor.clone(), 2294 send_data_migration.clone(), 2295 ) 2296 .map_err(|migration_err| { 2297 error!("Migration failed: {:?}", migration_err); 2298 2299 // Stop logging dirty pages only for non-local migrations 2300 if !send_data_migration.local { 2301 if let Err(e) = vm.stop_dirty_log() { 2302 return e; 2303 } 2304 } 2305 2306 if vm.get_state().unwrap() == VmState::Paused { 2307 if let Err(e) = vm.resume() { 2308 return e; 2309 } 2310 } 2311 2312 migration_err 2313 })?; 2314 2315 // Shutdown the VM after the migration succeeded 2316 self.exit_evt.write(1).map_err(|e| { 2317 MigratableError::MigrateSend(anyhow!( 2318 "Failed shutting down the VM after migration: {:?}", 2319 e 2320 )) 2321 }) 2322 } else { 2323 Err(MigratableError::MigrateSend(anyhow!("VM is not running"))) 2324 } 2325 } 2326 } 2327 2328 const CPU_MANAGER_SNAPSHOT_ID: &str = "cpu-manager"; 2329 const MEMORY_MANAGER_SNAPSHOT_ID: &str = "memory-manager"; 2330 const DEVICE_MANAGER_SNAPSHOT_ID: &str = "device-manager"; 2331 2332 #[cfg(test)] 2333 mod unit_tests { 2334 use super::*; 2335 #[cfg(target_arch = "x86_64")] 2336 use crate::vm_config::DebugConsoleConfig; 2337 use crate::vm_config::{ 2338 ConsoleConfig, ConsoleOutputMode, CpuFeatures, CpusConfig, HotplugMethod, MemoryConfig, 2339 PayloadConfig, RngConfig, 2340 }; 2341 2342 fn create_dummy_vmm() -> Vmm { 2343 Vmm::new( 2344 VmmVersionInfo::new("dummy", "dummy"), 2345 EventFd::new(EFD_NONBLOCK).unwrap(), 2346 #[cfg(feature = "guest_debug")] 2347 EventFd::new(EFD_NONBLOCK).unwrap(), 2348 #[cfg(feature = "guest_debug")] 2349 EventFd::new(EFD_NONBLOCK).unwrap(), 2350 SeccompAction::Allow, 2351 hypervisor::new().unwrap(), 2352 EventFd::new(EFD_NONBLOCK).unwrap(), 2353 ) 2354 .unwrap() 2355 } 2356 2357 fn create_dummy_vm_config() -> Box<VmConfig> { 2358 Box::new(VmConfig { 2359 cpus: CpusConfig { 2360 boot_vcpus: 1, 2361 max_vcpus: 1, 2362 topology: None, 2363 kvm_hyperv: false, 2364 max_phys_bits: 46, 2365 affinity: None, 2366 features: CpuFeatures::default(), 2367 }, 2368 memory: MemoryConfig { 2369 size: 536_870_912, 2370 mergeable: false, 2371 hotplug_method: HotplugMethod::Acpi, 2372 hotplug_size: None, 2373 hotplugged_size: None, 2374 shared: true, 2375 hugepages: false, 2376 hugepage_size: None, 2377 prefault: false, 2378 zones: None, 2379 thp: true, 2380 }, 2381 payload: Some(PayloadConfig { 2382 kernel: Some(PathBuf::from("/path/to/kernel")), 2383 firmware: None, 2384 cmdline: None, 2385 initramfs: None, 2386 #[cfg(feature = "igvm")] 2387 igvm: None, 2388 #[cfg(feature = "sev_snp")] 2389 host_data: None, 2390 }), 2391 rate_limit_groups: None, 2392 disks: None, 2393 net: None, 2394 rng: RngConfig { 2395 src: PathBuf::from("/dev/urandom"), 2396 iommu: false, 2397 }, 2398 balloon: None, 2399 fs: None, 2400 pmem: None, 2401 serial: ConsoleConfig { 2402 file: None, 2403 mode: ConsoleOutputMode::Null, 2404 iommu: false, 2405 socket: None, 2406 }, 2407 console: ConsoleConfig { 2408 file: None, 2409 mode: ConsoleOutputMode::Tty, 2410 iommu: false, 2411 socket: None, 2412 }, 2413 #[cfg(target_arch = "x86_64")] 2414 debug_console: DebugConsoleConfig::default(), 2415 devices: None, 2416 user_devices: None, 2417 vdpa: None, 2418 vsock: None, 2419 #[cfg(feature = "pvmemcontrol")] 2420 pvmemcontrol: None, 2421 pvpanic: false, 2422 iommu: false, 2423 #[cfg(target_arch = "x86_64")] 2424 sgx_epc: None, 2425 numa: None, 2426 watchdog: false, 2427 #[cfg(feature = "guest_debug")] 2428 gdb: false, 2429 pci_segments: None, 2430 platform: None, 2431 tpm: None, 2432 preserved_fds: None, 2433 landlock_enable: false, 2434 landlock_rules: None, 2435 }) 2436 } 2437 2438 #[test] 2439 fn test_vmm_vm_create() { 2440 let mut vmm = create_dummy_vmm(); 2441 let config = create_dummy_vm_config(); 2442 2443 assert!(matches!(vmm.vm_create(config.clone()), Ok(()))); 2444 assert!(matches!( 2445 vmm.vm_create(config), 2446 Err(VmError::VmAlreadyCreated) 2447 )); 2448 } 2449 2450 #[test] 2451 fn test_vmm_vm_cold_add_device() { 2452 let mut vmm = create_dummy_vmm(); 2453 let device_config = DeviceConfig::parse("path=/path/to/device").unwrap(); 2454 2455 assert!(matches!( 2456 vmm.vm_add_device(device_config.clone()), 2457 Err(VmError::VmNotCreated) 2458 )); 2459 2460 let _ = vmm.vm_create(create_dummy_vm_config()); 2461 assert!(vmm 2462 .vm_config 2463 .as_ref() 2464 .unwrap() 2465 .lock() 2466 .unwrap() 2467 .devices 2468 .is_none()); 2469 2470 assert!(vmm.vm_add_device(device_config.clone()).unwrap().is_none()); 2471 assert_eq!( 2472 vmm.vm_config 2473 .as_ref() 2474 .unwrap() 2475 .lock() 2476 .unwrap() 2477 .devices 2478 .clone() 2479 .unwrap() 2480 .len(), 2481 1 2482 ); 2483 assert_eq!( 2484 vmm.vm_config 2485 .as_ref() 2486 .unwrap() 2487 .lock() 2488 .unwrap() 2489 .devices 2490 .clone() 2491 .unwrap()[0], 2492 device_config 2493 ); 2494 } 2495 2496 #[test] 2497 fn test_vmm_vm_cold_add_user_device() { 2498 let mut vmm = create_dummy_vmm(); 2499 let user_device_config = 2500 UserDeviceConfig::parse("socket=/path/to/socket,id=8,pci_segment=2").unwrap(); 2501 2502 assert!(matches!( 2503 vmm.vm_add_user_device(user_device_config.clone()), 2504 Err(VmError::VmNotCreated) 2505 )); 2506 2507 let _ = vmm.vm_create(create_dummy_vm_config()); 2508 assert!(vmm 2509 .vm_config 2510 .as_ref() 2511 .unwrap() 2512 .lock() 2513 .unwrap() 2514 .user_devices 2515 .is_none()); 2516 2517 assert!(vmm 2518 .vm_add_user_device(user_device_config.clone()) 2519 .unwrap() 2520 .is_none()); 2521 assert_eq!( 2522 vmm.vm_config 2523 .as_ref() 2524 .unwrap() 2525 .lock() 2526 .unwrap() 2527 .user_devices 2528 .clone() 2529 .unwrap() 2530 .len(), 2531 1 2532 ); 2533 assert_eq!( 2534 vmm.vm_config 2535 .as_ref() 2536 .unwrap() 2537 .lock() 2538 .unwrap() 2539 .user_devices 2540 .clone() 2541 .unwrap()[0], 2542 user_device_config 2543 ); 2544 } 2545 2546 #[test] 2547 fn test_vmm_vm_cold_add_disk() { 2548 let mut vmm = create_dummy_vmm(); 2549 let disk_config = DiskConfig::parse("path=/path/to_file").unwrap(); 2550 2551 assert!(matches!( 2552 vmm.vm_add_disk(disk_config.clone()), 2553 Err(VmError::VmNotCreated) 2554 )); 2555 2556 let _ = vmm.vm_create(create_dummy_vm_config()); 2557 assert!(vmm 2558 .vm_config 2559 .as_ref() 2560 .unwrap() 2561 .lock() 2562 .unwrap() 2563 .disks 2564 .is_none()); 2565 2566 assert!(vmm.vm_add_disk(disk_config.clone()).unwrap().is_none()); 2567 assert_eq!( 2568 vmm.vm_config 2569 .as_ref() 2570 .unwrap() 2571 .lock() 2572 .unwrap() 2573 .disks 2574 .clone() 2575 .unwrap() 2576 .len(), 2577 1 2578 ); 2579 assert_eq!( 2580 vmm.vm_config 2581 .as_ref() 2582 .unwrap() 2583 .lock() 2584 .unwrap() 2585 .disks 2586 .clone() 2587 .unwrap()[0], 2588 disk_config 2589 ); 2590 } 2591 2592 #[test] 2593 fn test_vmm_vm_cold_add_fs() { 2594 let mut vmm = create_dummy_vmm(); 2595 let fs_config = FsConfig::parse("tag=mytag,socket=/tmp/sock").unwrap(); 2596 2597 assert!(matches!( 2598 vmm.vm_add_fs(fs_config.clone()), 2599 Err(VmError::VmNotCreated) 2600 )); 2601 2602 let _ = vmm.vm_create(create_dummy_vm_config()); 2603 assert!(vmm.vm_config.as_ref().unwrap().lock().unwrap().fs.is_none()); 2604 2605 assert!(vmm.vm_add_fs(fs_config.clone()).unwrap().is_none()); 2606 assert_eq!( 2607 vmm.vm_config 2608 .as_ref() 2609 .unwrap() 2610 .lock() 2611 .unwrap() 2612 .fs 2613 .clone() 2614 .unwrap() 2615 .len(), 2616 1 2617 ); 2618 assert_eq!( 2619 vmm.vm_config 2620 .as_ref() 2621 .unwrap() 2622 .lock() 2623 .unwrap() 2624 .fs 2625 .clone() 2626 .unwrap()[0], 2627 fs_config 2628 ); 2629 } 2630 2631 #[test] 2632 fn test_vmm_vm_cold_add_pmem() { 2633 let mut vmm = create_dummy_vmm(); 2634 let pmem_config = PmemConfig::parse("file=/tmp/pmem,size=128M").unwrap(); 2635 2636 assert!(matches!( 2637 vmm.vm_add_pmem(pmem_config.clone()), 2638 Err(VmError::VmNotCreated) 2639 )); 2640 2641 let _ = vmm.vm_create(create_dummy_vm_config()); 2642 assert!(vmm 2643 .vm_config 2644 .as_ref() 2645 .unwrap() 2646 .lock() 2647 .unwrap() 2648 .pmem 2649 .is_none()); 2650 2651 assert!(vmm.vm_add_pmem(pmem_config.clone()).unwrap().is_none()); 2652 assert_eq!( 2653 vmm.vm_config 2654 .as_ref() 2655 .unwrap() 2656 .lock() 2657 .unwrap() 2658 .pmem 2659 .clone() 2660 .unwrap() 2661 .len(), 2662 1 2663 ); 2664 assert_eq!( 2665 vmm.vm_config 2666 .as_ref() 2667 .unwrap() 2668 .lock() 2669 .unwrap() 2670 .pmem 2671 .clone() 2672 .unwrap()[0], 2673 pmem_config 2674 ); 2675 } 2676 2677 #[test] 2678 fn test_vmm_vm_cold_add_net() { 2679 let mut vmm = create_dummy_vmm(); 2680 let net_config = NetConfig::parse( 2681 "mac=de:ad:be:ef:12:34,host_mac=12:34:de:ad:be:ef,vhost_user=true,socket=/tmp/sock", 2682 ) 2683 .unwrap(); 2684 2685 assert!(matches!( 2686 vmm.vm_add_net(net_config.clone()), 2687 Err(VmError::VmNotCreated) 2688 )); 2689 2690 let _ = vmm.vm_create(create_dummy_vm_config()); 2691 assert!(vmm 2692 .vm_config 2693 .as_ref() 2694 .unwrap() 2695 .lock() 2696 .unwrap() 2697 .net 2698 .is_none()); 2699 2700 assert!(vmm.vm_add_net(net_config.clone()).unwrap().is_none()); 2701 assert_eq!( 2702 vmm.vm_config 2703 .as_ref() 2704 .unwrap() 2705 .lock() 2706 .unwrap() 2707 .net 2708 .clone() 2709 .unwrap() 2710 .len(), 2711 1 2712 ); 2713 assert_eq!( 2714 vmm.vm_config 2715 .as_ref() 2716 .unwrap() 2717 .lock() 2718 .unwrap() 2719 .net 2720 .clone() 2721 .unwrap()[0], 2722 net_config 2723 ); 2724 } 2725 2726 #[test] 2727 fn test_vmm_vm_cold_add_vdpa() { 2728 let mut vmm = create_dummy_vmm(); 2729 let vdpa_config = VdpaConfig::parse("path=/dev/vhost-vdpa,num_queues=2").unwrap(); 2730 2731 assert!(matches!( 2732 vmm.vm_add_vdpa(vdpa_config.clone()), 2733 Err(VmError::VmNotCreated) 2734 )); 2735 2736 let _ = vmm.vm_create(create_dummy_vm_config()); 2737 assert!(vmm 2738 .vm_config 2739 .as_ref() 2740 .unwrap() 2741 .lock() 2742 .unwrap() 2743 .vdpa 2744 .is_none()); 2745 2746 assert!(vmm.vm_add_vdpa(vdpa_config.clone()).unwrap().is_none()); 2747 assert_eq!( 2748 vmm.vm_config 2749 .as_ref() 2750 .unwrap() 2751 .lock() 2752 .unwrap() 2753 .vdpa 2754 .clone() 2755 .unwrap() 2756 .len(), 2757 1 2758 ); 2759 assert_eq!( 2760 vmm.vm_config 2761 .as_ref() 2762 .unwrap() 2763 .lock() 2764 .unwrap() 2765 .vdpa 2766 .clone() 2767 .unwrap()[0], 2768 vdpa_config 2769 ); 2770 } 2771 2772 #[test] 2773 fn test_vmm_vm_cold_add_vsock() { 2774 let mut vmm = create_dummy_vmm(); 2775 let vsock_config = VsockConfig::parse("socket=/tmp/sock,cid=3,iommu=on").unwrap(); 2776 2777 assert!(matches!( 2778 vmm.vm_add_vsock(vsock_config.clone()), 2779 Err(VmError::VmNotCreated) 2780 )); 2781 2782 let _ = vmm.vm_create(create_dummy_vm_config()); 2783 assert!(vmm 2784 .vm_config 2785 .as_ref() 2786 .unwrap() 2787 .lock() 2788 .unwrap() 2789 .vsock 2790 .is_none()); 2791 2792 assert!(vmm.vm_add_vsock(vsock_config.clone()).unwrap().is_none()); 2793 assert_eq!( 2794 vmm.vm_config 2795 .as_ref() 2796 .unwrap() 2797 .lock() 2798 .unwrap() 2799 .vsock 2800 .clone() 2801 .unwrap(), 2802 vsock_config 2803 ); 2804 } 2805 } 2806