1 // Copyright © 2019 Intel Corporation 2 // 3 // SPDX-License-Identifier: Apache-2.0 4 // 5 6 #[macro_use] 7 extern crate event_monitor; 8 #[macro_use] 9 extern crate log; 10 11 use std::collections::HashMap; 12 use std::fs::File; 13 use std::io::{stdout, Read, Write}; 14 use std::net::{TcpListener, TcpStream}; 15 use std::os::unix::io::{AsRawFd, FromRawFd, RawFd}; 16 use std::os::unix::net::{UnixListener, UnixStream}; 17 use std::panic::AssertUnwindSafe; 18 use std::path::PathBuf; 19 use std::rc::Rc; 20 use std::sync::mpsc::{Receiver, RecvError, SendError, Sender}; 21 use std::sync::{Arc, Mutex}; 22 #[cfg(not(target_arch = "riscv64"))] 23 use std::time::Instant; 24 use std::{io, result, thread}; 25 26 use anyhow::anyhow; 27 #[cfg(feature = "dbus_api")] 28 use api::dbus::{DBusApiOptions, DBusApiShutdownChannels}; 29 use api::http::HttpApiHandle; 30 use console_devices::{pre_create_console_devices, ConsoleInfo}; 31 use landlock::LandlockError; 32 use libc::{tcsetattr, termios, EFD_NONBLOCK, SIGINT, SIGTERM, TCSANOW}; 33 use memory_manager::MemoryManagerSnapshotData; 34 use pci::PciBdf; 35 use seccompiler::{apply_filter, SeccompAction}; 36 use serde::ser::{SerializeStruct, Serializer}; 37 use serde::{Deserialize, Serialize}; 38 use signal_hook::iterator::{Handle, Signals}; 39 use thiserror::Error; 40 use tracer::trace_scoped; 41 use vm_memory::bitmap::{AtomicBitmap, BitmapSlice}; 42 use vm_memory::{ReadVolatile, VolatileMemoryError, VolatileSlice, WriteVolatile}; 43 use vm_migration::protocol::*; 44 use vm_migration::{Migratable, MigratableError, Pausable, Snapshot, Snapshottable, Transportable}; 45 use vmm_sys_util::eventfd::EventFd; 46 use vmm_sys_util::signal::unblock_signal; 47 use vmm_sys_util::sock_ctrl_msg::ScmSocket; 48 49 use crate::api::{ 50 ApiRequest, ApiResponse, RequestHandler, VmInfoResponse, VmReceiveMigrationData, 51 VmSendMigrationData, VmmPingResponse, 52 }; 53 use crate::config::{add_to_config, RestoreConfig}; 54 #[cfg(all(target_arch = "x86_64", feature = "guest_debug"))] 55 use crate::coredump::GuestDebuggable; 56 use crate::landlock::Landlock; 57 use crate::memory_manager::MemoryManager; 58 #[cfg(all(feature = "kvm", target_arch = "x86_64"))] 59 use crate::migration::get_vm_snapshot; 60 use crate::migration::{recv_vm_config, recv_vm_state}; 61 use crate::seccomp_filters::{get_seccomp_filter, Thread}; 62 use crate::vm::{Error as VmError, Vm, VmState}; 63 use crate::vm_config::{ 64 DeviceConfig, DiskConfig, FsConfig, NetConfig, PmemConfig, UserDeviceConfig, VdpaConfig, 65 VmConfig, VsockConfig, 66 }; 67 68 #[cfg(not(target_arch = "riscv64"))] 69 mod acpi; 70 pub mod api; 71 mod clone3; 72 pub mod config; 73 pub mod console_devices; 74 #[cfg(all(target_arch = "x86_64", feature = "guest_debug"))] 75 mod coredump; 76 pub mod cpu; 77 pub mod device_manager; 78 pub mod device_tree; 79 #[cfg(feature = "guest_debug")] 80 mod gdb; 81 #[cfg(feature = "igvm")] 82 mod igvm; 83 pub mod interrupt; 84 pub mod landlock; 85 pub mod memory_manager; 86 pub mod migration; 87 mod pci_segment; 88 pub mod seccomp_filters; 89 mod serial_manager; 90 mod sigwinch_listener; 91 pub mod vm; 92 pub mod vm_config; 93 94 type GuestMemoryMmap = vm_memory::GuestMemoryMmap<AtomicBitmap>; 95 type GuestRegionMmap = vm_memory::GuestRegionMmap<AtomicBitmap>; 96 97 /// Errors associated with VMM management 98 #[derive(Debug, Error)] 99 pub enum Error { 100 /// API request receive error 101 #[error("Error receiving API request")] 102 ApiRequestRecv(#[source] RecvError), 103 104 /// API response send error 105 #[error("Error sending API request")] 106 ApiResponseSend(#[source] SendError<ApiResponse>), 107 108 /// Cannot bind to the UNIX domain socket path 109 #[error("Error binding to UNIX domain socket")] 110 Bind(#[source] io::Error), 111 112 /// Cannot clone EventFd. 113 #[error("Error cloning EventFd")] 114 EventFdClone(#[source] io::Error), 115 116 /// Cannot create EventFd. 117 #[error("Error creating EventFd")] 118 EventFdCreate(#[source] io::Error), 119 120 /// Cannot read from EventFd. 121 #[error("Error reading from EventFd")] 122 EventFdRead(#[source] io::Error), 123 124 /// Cannot create epoll context. 125 #[error("Error creating epoll context")] 126 Epoll(#[source] io::Error), 127 128 /// Cannot create HTTP thread 129 #[error("Error spawning HTTP thread")] 130 HttpThreadSpawn(#[source] io::Error), 131 132 /// Cannot create D-Bus thread 133 #[cfg(feature = "dbus_api")] 134 #[error("Error spawning D-Bus thread")] 135 DBusThreadSpawn(#[source] io::Error), 136 137 /// Cannot start D-Bus session 138 #[cfg(feature = "dbus_api")] 139 #[error("Error starting D-Bus session")] 140 CreateDBusSession(#[source] zbus::Error), 141 142 /// Cannot create `event-monitor` thread 143 #[error("Error spawning `event-monitor` thread")] 144 EventMonitorThreadSpawn(#[source] io::Error), 145 146 /// Cannot handle the VM STDIN stream 147 #[error("Error handling VM stdin")] 148 Stdin(#[source] VmError), 149 150 /// Cannot handle the VM pty stream 151 #[error("Error handling VM pty")] 152 Pty(#[source] VmError), 153 154 /// Cannot reboot the VM 155 #[error("Error rebooting VM")] 156 VmReboot(#[source] VmError), 157 158 /// Cannot create VMM thread 159 #[error("Error spawning VMM thread")] 160 VmmThreadSpawn(#[source] io::Error), 161 162 /// Cannot shut the VMM down 163 #[error("Error shutting down VMM")] 164 VmmShutdown(#[source] VmError), 165 166 /// Cannot create seccomp filter 167 #[error("Error creating seccomp filter")] 168 CreateSeccompFilter(#[source] seccompiler::Error), 169 170 /// Cannot apply seccomp filter 171 #[error("Error applying seccomp filter")] 172 ApplySeccompFilter(#[source] seccompiler::Error), 173 174 /// Error activating virtio devices 175 #[error("Error activating virtio devices")] 176 ActivateVirtioDevices(#[source] VmError), 177 178 /// Error creating API server 179 // TODO We should add #[source] here once the type implements Error. 180 // Then we also can remove the `: {}` to align with the other errors. 181 #[error("Error creating API server: {0}")] 182 CreateApiServer(micro_http::ServerError), 183 184 /// Error binding API server socket 185 #[error("Error creation API server's socket")] 186 CreateApiServerSocket(#[source] io::Error), 187 188 #[cfg(feature = "guest_debug")] 189 #[error("Failed to start the GDB thread")] 190 GdbThreadSpawn(#[source] io::Error), 191 192 /// GDB request receive error 193 #[cfg(feature = "guest_debug")] 194 #[error("Error receiving GDB request")] 195 GdbRequestRecv(#[source] RecvError), 196 197 /// GDB response send error 198 #[cfg(feature = "guest_debug")] 199 #[error("Error sending GDB request")] 200 GdbResponseSend(#[source] SendError<gdb::GdbResponse>), 201 202 #[error("Cannot spawn a signal handler thread")] 203 SignalHandlerSpawn(#[source] io::Error), 204 205 #[error("Failed to join on threads: {0:?}")] 206 ThreadCleanup(std::boxed::Box<dyn std::any::Any + std::marker::Send>), 207 208 /// Cannot create Landlock object 209 #[error("Error creating landlock object")] 210 CreateLandlock(#[source] LandlockError), 211 212 /// Cannot apply landlock based sandboxing 213 #[error("Error applying landlock")] 214 ApplyLandlock(#[source] LandlockError), 215 } 216 pub type Result<T> = result::Result<T, Error>; 217 218 #[derive(Debug, Clone, Copy, PartialEq, Eq)] 219 #[repr(u64)] 220 pub enum EpollDispatch { 221 Exit = 0, 222 Reset = 1, 223 Api = 2, 224 ActivateVirtioDevices = 3, 225 Debug = 4, 226 Unknown, 227 } 228 229 impl From<u64> for EpollDispatch { 230 fn from(v: u64) -> Self { 231 use EpollDispatch::*; 232 match v { 233 0 => Exit, 234 1 => Reset, 235 2 => Api, 236 3 => ActivateVirtioDevices, 237 4 => Debug, 238 _ => Unknown, 239 } 240 } 241 } 242 243 enum SocketStream { 244 Unix(UnixStream), 245 Tcp(TcpStream), 246 } 247 248 impl Read for SocketStream { 249 fn read(&mut self, buf: &mut [u8]) -> std::io::Result<usize> { 250 match self { 251 SocketStream::Unix(stream) => stream.read(buf), 252 SocketStream::Tcp(stream) => stream.read(buf), 253 } 254 } 255 } 256 257 impl Write for SocketStream { 258 fn write(&mut self, buf: &[u8]) -> std::io::Result<usize> { 259 match self { 260 SocketStream::Unix(stream) => stream.write(buf), 261 SocketStream::Tcp(stream) => stream.write(buf), 262 } 263 } 264 265 fn flush(&mut self) -> std::io::Result<()> { 266 match self { 267 SocketStream::Unix(stream) => stream.flush(), 268 SocketStream::Tcp(stream) => stream.flush(), 269 } 270 } 271 } 272 273 impl AsRawFd for SocketStream { 274 fn as_raw_fd(&self) -> RawFd { 275 match self { 276 SocketStream::Unix(s) => s.as_raw_fd(), 277 SocketStream::Tcp(s) => s.as_raw_fd(), 278 } 279 } 280 } 281 282 impl ReadVolatile for SocketStream { 283 fn read_volatile<B: BitmapSlice>( 284 &mut self, 285 buf: &mut VolatileSlice<B>, 286 ) -> std::result::Result<usize, VolatileMemoryError> { 287 match self { 288 SocketStream::Unix(s) => s.read_volatile(buf), 289 SocketStream::Tcp(s) => s.read_volatile(buf), 290 } 291 } 292 293 fn read_exact_volatile<B: BitmapSlice>( 294 &mut self, 295 buf: &mut VolatileSlice<B>, 296 ) -> std::result::Result<(), VolatileMemoryError> { 297 match self { 298 SocketStream::Unix(s) => s.read_exact_volatile(buf), 299 SocketStream::Tcp(s) => s.read_exact_volatile(buf), 300 } 301 } 302 } 303 304 impl WriteVolatile for SocketStream { 305 fn write_volatile<B: BitmapSlice>( 306 &mut self, 307 buf: &VolatileSlice<B>, 308 ) -> std::result::Result<usize, VolatileMemoryError> { 309 match self { 310 SocketStream::Unix(s) => s.write_volatile(buf), 311 SocketStream::Tcp(s) => s.write_volatile(buf), 312 } 313 } 314 315 fn write_all_volatile<B: BitmapSlice>( 316 &mut self, 317 buf: &VolatileSlice<B>, 318 ) -> std::result::Result<(), VolatileMemoryError> { 319 match self { 320 SocketStream::Unix(s) => s.write_all_volatile(buf), 321 SocketStream::Tcp(s) => s.write_all_volatile(buf), 322 } 323 } 324 } 325 326 pub struct EpollContext { 327 epoll_file: File, 328 } 329 330 impl EpollContext { 331 pub fn new() -> result::Result<EpollContext, io::Error> { 332 let epoll_fd = epoll::create(true)?; 333 // Use 'File' to enforce closing on 'epoll_fd' 334 // SAFETY: the epoll_fd returned by epoll::create is valid and owned by us. 335 let epoll_file = unsafe { File::from_raw_fd(epoll_fd) }; 336 337 Ok(EpollContext { epoll_file }) 338 } 339 340 pub fn add_event<T>(&mut self, fd: &T, token: EpollDispatch) -> result::Result<(), io::Error> 341 where 342 T: AsRawFd, 343 { 344 let dispatch_index = token as u64; 345 epoll::ctl( 346 self.epoll_file.as_raw_fd(), 347 epoll::ControlOptions::EPOLL_CTL_ADD, 348 fd.as_raw_fd(), 349 epoll::Event::new(epoll::Events::EPOLLIN, dispatch_index), 350 )?; 351 352 Ok(()) 353 } 354 355 #[cfg(fuzzing)] 356 pub fn add_event_custom<T>( 357 &mut self, 358 fd: &T, 359 id: u64, 360 evts: epoll::Events, 361 ) -> result::Result<(), io::Error> 362 where 363 T: AsRawFd, 364 { 365 epoll::ctl( 366 self.epoll_file.as_raw_fd(), 367 epoll::ControlOptions::EPOLL_CTL_ADD, 368 fd.as_raw_fd(), 369 epoll::Event::new(evts, id), 370 )?; 371 372 Ok(()) 373 } 374 } 375 376 impl AsRawFd for EpollContext { 377 fn as_raw_fd(&self) -> RawFd { 378 self.epoll_file.as_raw_fd() 379 } 380 } 381 382 pub struct PciDeviceInfo { 383 pub id: String, 384 pub bdf: PciBdf, 385 } 386 387 impl Serialize for PciDeviceInfo { 388 fn serialize<S>(&self, serializer: S) -> std::result::Result<S::Ok, S::Error> 389 where 390 S: Serializer, 391 { 392 let bdf_str = self.bdf.to_string(); 393 394 // Serialize the structure. 395 let mut state = serializer.serialize_struct("PciDeviceInfo", 2)?; 396 state.serialize_field("id", &self.id)?; 397 state.serialize_field("bdf", &bdf_str)?; 398 state.end() 399 } 400 } 401 402 pub fn feature_list() -> Vec<String> { 403 vec![ 404 #[cfg(feature = "dbus_api")] 405 "dbus_api".to_string(), 406 #[cfg(feature = "dhat-heap")] 407 "dhat-heap".to_string(), 408 #[cfg(feature = "guest_debug")] 409 "guest_debug".to_string(), 410 #[cfg(feature = "igvm")] 411 "igvm".to_string(), 412 #[cfg(feature = "io_uring")] 413 "io_uring".to_string(), 414 #[cfg(feature = "kvm")] 415 "kvm".to_string(), 416 #[cfg(feature = "mshv")] 417 "mshv".to_string(), 418 #[cfg(feature = "sev_snp")] 419 "sev_snp".to_string(), 420 #[cfg(feature = "tdx")] 421 "tdx".to_string(), 422 #[cfg(feature = "tracing")] 423 "tracing".to_string(), 424 ] 425 } 426 427 pub fn start_event_monitor_thread( 428 mut monitor: event_monitor::Monitor, 429 seccomp_action: &SeccompAction, 430 landlock_enable: bool, 431 hypervisor_type: hypervisor::HypervisorType, 432 exit_event: EventFd, 433 ) -> Result<thread::JoinHandle<Result<()>>> { 434 // Retrieve seccomp filter 435 let seccomp_filter = get_seccomp_filter(seccomp_action, Thread::EventMonitor, hypervisor_type) 436 .map_err(Error::CreateSeccompFilter)?; 437 438 thread::Builder::new() 439 .name("event-monitor".to_owned()) 440 .spawn(move || { 441 // Apply seccomp filter 442 if !seccomp_filter.is_empty() { 443 apply_filter(&seccomp_filter) 444 .map_err(Error::ApplySeccompFilter) 445 .map_err(|e| { 446 error!("Error applying seccomp filter: {:?}", e); 447 exit_event.write(1).ok(); 448 e 449 })?; 450 } 451 if landlock_enable { 452 Landlock::new() 453 .map_err(Error::CreateLandlock)? 454 .restrict_self() 455 .map_err(Error::ApplyLandlock) 456 .map_err(|e| { 457 error!("Error applying landlock to event monitor thread: {:?}", e); 458 exit_event.write(1).ok(); 459 e 460 })?; 461 } 462 463 std::panic::catch_unwind(AssertUnwindSafe(move || { 464 while let Ok(event) = monitor.rx.recv() { 465 let event = Arc::new(event); 466 467 if let Some(ref mut file) = monitor.file { 468 file.write_all(event.as_bytes().as_ref()).ok(); 469 file.write_all(b"\n\n").ok(); 470 } 471 472 for tx in monitor.broadcast.iter() { 473 tx.send(event.clone()).ok(); 474 } 475 } 476 })) 477 .map_err(|_| { 478 error!("`event-monitor` thread panicked"); 479 exit_event.write(1).ok(); 480 }) 481 .ok(); 482 483 Ok(()) 484 }) 485 .map_err(Error::EventMonitorThreadSpawn) 486 } 487 488 #[allow(unused_variables)] 489 #[allow(clippy::too_many_arguments)] 490 pub fn start_vmm_thread( 491 vmm_version: VmmVersionInfo, 492 http_path: &Option<String>, 493 http_fd: Option<RawFd>, 494 #[cfg(feature = "dbus_api")] dbus_options: Option<DBusApiOptions>, 495 api_event: EventFd, 496 api_sender: Sender<ApiRequest>, 497 api_receiver: Receiver<ApiRequest>, 498 #[cfg(feature = "guest_debug")] debug_path: Option<PathBuf>, 499 #[cfg(feature = "guest_debug")] debug_event: EventFd, 500 #[cfg(feature = "guest_debug")] vm_debug_event: EventFd, 501 exit_event: EventFd, 502 seccomp_action: &SeccompAction, 503 hypervisor: Arc<dyn hypervisor::Hypervisor>, 504 landlock_enable: bool, 505 ) -> Result<VmmThreadHandle> { 506 #[cfg(feature = "guest_debug")] 507 let gdb_hw_breakpoints = hypervisor.get_guest_debug_hw_bps(); 508 #[cfg(feature = "guest_debug")] 509 let (gdb_sender, gdb_receiver) = std::sync::mpsc::channel(); 510 #[cfg(feature = "guest_debug")] 511 let gdb_debug_event = debug_event.try_clone().map_err(Error::EventFdClone)?; 512 #[cfg(feature = "guest_debug")] 513 let gdb_vm_debug_event = vm_debug_event.try_clone().map_err(Error::EventFdClone)?; 514 515 let api_event_clone = api_event.try_clone().map_err(Error::EventFdClone)?; 516 let hypervisor_type = hypervisor.hypervisor_type(); 517 518 // Retrieve seccomp filter 519 let vmm_seccomp_filter = get_seccomp_filter(seccomp_action, Thread::Vmm, hypervisor_type) 520 .map_err(Error::CreateSeccompFilter)?; 521 522 let vmm_seccomp_action = seccomp_action.clone(); 523 let thread = { 524 let exit_event = exit_event.try_clone().map_err(Error::EventFdClone)?; 525 thread::Builder::new() 526 .name("vmm".to_string()) 527 .spawn(move || { 528 // Apply seccomp filter for VMM thread. 529 if !vmm_seccomp_filter.is_empty() { 530 apply_filter(&vmm_seccomp_filter).map_err(Error::ApplySeccompFilter)?; 531 } 532 533 let mut vmm = Vmm::new( 534 vmm_version, 535 api_event, 536 #[cfg(feature = "guest_debug")] 537 debug_event, 538 #[cfg(feature = "guest_debug")] 539 vm_debug_event, 540 vmm_seccomp_action, 541 hypervisor, 542 exit_event, 543 )?; 544 545 vmm.setup_signal_handler(landlock_enable)?; 546 547 vmm.control_loop( 548 Rc::new(api_receiver), 549 #[cfg(feature = "guest_debug")] 550 Rc::new(gdb_receiver), 551 ) 552 }) 553 .map_err(Error::VmmThreadSpawn)? 554 }; 555 556 // The VMM thread is started, we can start the dbus thread 557 // and start serving HTTP requests 558 #[cfg(feature = "dbus_api")] 559 let dbus_shutdown_chs = match dbus_options { 560 Some(opts) => { 561 let (_, chs) = api::start_dbus_thread( 562 opts, 563 api_event_clone.try_clone().map_err(Error::EventFdClone)?, 564 api_sender.clone(), 565 seccomp_action, 566 exit_event.try_clone().map_err(Error::EventFdClone)?, 567 hypervisor_type, 568 )?; 569 Some(chs) 570 } 571 None => None, 572 }; 573 574 let http_api_handle = if let Some(http_path) = http_path { 575 Some(api::start_http_path_thread( 576 http_path, 577 api_event_clone, 578 api_sender, 579 seccomp_action, 580 exit_event, 581 hypervisor_type, 582 landlock_enable, 583 )?) 584 } else if let Some(http_fd) = http_fd { 585 Some(api::start_http_fd_thread( 586 http_fd, 587 api_event_clone, 588 api_sender, 589 seccomp_action, 590 exit_event, 591 hypervisor_type, 592 landlock_enable, 593 )?) 594 } else { 595 None 596 }; 597 598 #[cfg(feature = "guest_debug")] 599 if let Some(debug_path) = debug_path { 600 let target = gdb::GdbStub::new( 601 gdb_sender, 602 gdb_debug_event, 603 gdb_vm_debug_event, 604 gdb_hw_breakpoints, 605 ); 606 thread::Builder::new() 607 .name("gdb".to_owned()) 608 .spawn(move || gdb::gdb_thread(target, &debug_path)) 609 .map_err(Error::GdbThreadSpawn)?; 610 } 611 612 Ok(VmmThreadHandle { 613 thread_handle: thread, 614 #[cfg(feature = "dbus_api")] 615 dbus_shutdown_chs, 616 http_api_handle, 617 }) 618 } 619 620 #[derive(Clone, Deserialize, Serialize)] 621 struct VmMigrationConfig { 622 vm_config: Arc<Mutex<VmConfig>>, 623 #[cfg(all(feature = "kvm", target_arch = "x86_64"))] 624 common_cpuid: Vec<hypervisor::arch::x86::CpuIdEntry>, 625 memory_manager_data: MemoryManagerSnapshotData, 626 } 627 628 #[derive(Debug, Clone)] 629 pub struct VmmVersionInfo { 630 pub build_version: String, 631 pub version: String, 632 } 633 634 impl VmmVersionInfo { 635 pub fn new(build_version: &str, version: &str) -> Self { 636 Self { 637 build_version: build_version.to_owned(), 638 version: version.to_owned(), 639 } 640 } 641 } 642 643 pub struct VmmThreadHandle { 644 pub thread_handle: thread::JoinHandle<Result<()>>, 645 #[cfg(feature = "dbus_api")] 646 pub dbus_shutdown_chs: Option<DBusApiShutdownChannels>, 647 pub http_api_handle: Option<HttpApiHandle>, 648 } 649 650 pub struct Vmm { 651 epoll: EpollContext, 652 exit_evt: EventFd, 653 reset_evt: EventFd, 654 api_evt: EventFd, 655 #[cfg(feature = "guest_debug")] 656 debug_evt: EventFd, 657 #[cfg(feature = "guest_debug")] 658 vm_debug_evt: EventFd, 659 version: VmmVersionInfo, 660 vm: Option<Vm>, 661 vm_config: Option<Arc<Mutex<VmConfig>>>, 662 seccomp_action: SeccompAction, 663 hypervisor: Arc<dyn hypervisor::Hypervisor>, 664 activate_evt: EventFd, 665 signals: Option<Handle>, 666 threads: Vec<thread::JoinHandle<()>>, 667 original_termios_opt: Arc<Mutex<Option<termios>>>, 668 console_resize_pipe: Option<Arc<File>>, 669 console_info: Option<ConsoleInfo>, 670 } 671 672 impl Vmm { 673 pub const HANDLED_SIGNALS: [i32; 2] = [SIGTERM, SIGINT]; 674 675 fn signal_handler( 676 mut signals: Signals, 677 original_termios_opt: Arc<Mutex<Option<termios>>>, 678 exit_evt: &EventFd, 679 ) { 680 for sig in &Self::HANDLED_SIGNALS { 681 unblock_signal(*sig).unwrap(); 682 } 683 684 for signal in signals.forever() { 685 match signal { 686 SIGTERM | SIGINT => { 687 if exit_evt.write(1).is_err() { 688 // Resetting the terminal is usually done as the VMM exits 689 if let Ok(lock) = original_termios_opt.lock() { 690 if let Some(termios) = *lock { 691 // SAFETY: FFI call 692 let _ = unsafe { 693 tcsetattr(stdout().lock().as_raw_fd(), TCSANOW, &termios) 694 }; 695 } 696 } else { 697 warn!("Failed to lock original termios"); 698 } 699 700 std::process::exit(1); 701 } 702 } 703 _ => (), 704 } 705 } 706 } 707 708 fn setup_signal_handler(&mut self, landlock_enable: bool) -> Result<()> { 709 let signals = Signals::new(Self::HANDLED_SIGNALS); 710 match signals { 711 Ok(signals) => { 712 self.signals = Some(signals.handle()); 713 let exit_evt = self.exit_evt.try_clone().map_err(Error::EventFdClone)?; 714 let original_termios_opt = Arc::clone(&self.original_termios_opt); 715 716 let signal_handler_seccomp_filter = get_seccomp_filter( 717 &self.seccomp_action, 718 Thread::SignalHandler, 719 self.hypervisor.hypervisor_type(), 720 ) 721 .map_err(Error::CreateSeccompFilter)?; 722 self.threads.push( 723 thread::Builder::new() 724 .name("vmm_signal_handler".to_string()) 725 .spawn(move || { 726 if !signal_handler_seccomp_filter.is_empty() { 727 if let Err(e) = apply_filter(&signal_handler_seccomp_filter) 728 .map_err(Error::ApplySeccompFilter) 729 { 730 error!("Error applying seccomp filter: {:?}", e); 731 exit_evt.write(1).ok(); 732 return; 733 } 734 } 735 if landlock_enable{ 736 match Landlock::new() { 737 Ok(landlock) => { 738 let _ = landlock.restrict_self().map_err(Error::ApplyLandlock).map_err(|e| { 739 error!("Error applying Landlock to signal handler thread: {:?}", e); 740 exit_evt.write(1).ok(); 741 }); 742 } 743 Err(e) => { 744 error!("Error creating Landlock object: {:?}", e); 745 exit_evt.write(1).ok(); 746 } 747 }; 748 } 749 750 std::panic::catch_unwind(AssertUnwindSafe(|| { 751 Vmm::signal_handler(signals, original_termios_opt, &exit_evt); 752 })) 753 .map_err(|_| { 754 error!("vmm signal_handler thread panicked"); 755 exit_evt.write(1).ok() 756 }) 757 .ok(); 758 }) 759 .map_err(Error::SignalHandlerSpawn)?, 760 ); 761 } 762 Err(e) => error!("Signal not found {}", e), 763 } 764 Ok(()) 765 } 766 767 #[allow(clippy::too_many_arguments)] 768 fn new( 769 vmm_version: VmmVersionInfo, 770 api_evt: EventFd, 771 #[cfg(feature = "guest_debug")] debug_evt: EventFd, 772 #[cfg(feature = "guest_debug")] vm_debug_evt: EventFd, 773 seccomp_action: SeccompAction, 774 hypervisor: Arc<dyn hypervisor::Hypervisor>, 775 exit_evt: EventFd, 776 ) -> Result<Self> { 777 let mut epoll = EpollContext::new().map_err(Error::Epoll)?; 778 let reset_evt = EventFd::new(EFD_NONBLOCK).map_err(Error::EventFdCreate)?; 779 let activate_evt = EventFd::new(EFD_NONBLOCK).map_err(Error::EventFdCreate)?; 780 781 epoll 782 .add_event(&exit_evt, EpollDispatch::Exit) 783 .map_err(Error::Epoll)?; 784 785 epoll 786 .add_event(&reset_evt, EpollDispatch::Reset) 787 .map_err(Error::Epoll)?; 788 789 epoll 790 .add_event(&activate_evt, EpollDispatch::ActivateVirtioDevices) 791 .map_err(Error::Epoll)?; 792 793 epoll 794 .add_event(&api_evt, EpollDispatch::Api) 795 .map_err(Error::Epoll)?; 796 797 #[cfg(feature = "guest_debug")] 798 epoll 799 .add_event(&debug_evt, EpollDispatch::Debug) 800 .map_err(Error::Epoll)?; 801 802 Ok(Vmm { 803 epoll, 804 exit_evt, 805 reset_evt, 806 api_evt, 807 #[cfg(feature = "guest_debug")] 808 debug_evt, 809 #[cfg(feature = "guest_debug")] 810 vm_debug_evt, 811 version: vmm_version, 812 vm: None, 813 vm_config: None, 814 seccomp_action, 815 hypervisor, 816 activate_evt, 817 signals: None, 818 threads: vec![], 819 original_termios_opt: Arc::new(Mutex::new(None)), 820 console_resize_pipe: None, 821 console_info: None, 822 }) 823 } 824 825 fn vm_receive_config<T>( 826 &mut self, 827 req: &Request, 828 socket: &mut T, 829 existing_memory_files: Option<HashMap<u32, File>>, 830 ) -> std::result::Result<Arc<Mutex<MemoryManager>>, MigratableError> 831 where 832 T: Read + Write, 833 { 834 // Read in config data along with memory manager data 835 let mut data: Vec<u8> = Vec::new(); 836 data.resize_with(req.length() as usize, Default::default); 837 socket 838 .read_exact(&mut data) 839 .map_err(MigratableError::MigrateSocket)?; 840 841 let vm_migration_config: VmMigrationConfig = 842 serde_json::from_slice(&data).map_err(|e| { 843 MigratableError::MigrateReceive(anyhow!("Error deserialising config: {}", e)) 844 })?; 845 846 #[cfg(all(feature = "kvm", target_arch = "x86_64"))] 847 self.vm_check_cpuid_compatibility( 848 &vm_migration_config.vm_config, 849 &vm_migration_config.common_cpuid, 850 )?; 851 852 let config = vm_migration_config.vm_config.clone(); 853 self.vm_config = Some(vm_migration_config.vm_config); 854 self.console_info = Some(pre_create_console_devices(self).map_err(|e| { 855 MigratableError::MigrateReceive(anyhow!("Error creating console devices: {:?}", e)) 856 })?); 857 858 if self 859 .vm_config 860 .as_ref() 861 .unwrap() 862 .lock() 863 .unwrap() 864 .landlock_enable 865 { 866 apply_landlock(self.vm_config.as_ref().unwrap().clone()).map_err(|e| { 867 MigratableError::MigrateReceive(anyhow!("Error applying landlock: {:?}", e)) 868 })?; 869 } 870 871 let vm = Vm::create_hypervisor_vm( 872 &self.hypervisor, 873 #[cfg(feature = "tdx")] 874 false, 875 #[cfg(feature = "sev_snp")] 876 false, 877 #[cfg(feature = "sev_snp")] 878 config.lock().unwrap().memory.total_size(), 879 ) 880 .map_err(|e| { 881 MigratableError::MigrateReceive(anyhow!( 882 "Error creating hypervisor VM from snapshot: {:?}", 883 e 884 )) 885 })?; 886 887 let phys_bits = 888 vm::physical_bits(&self.hypervisor, config.lock().unwrap().cpus.max_phys_bits); 889 890 let memory_manager = MemoryManager::new( 891 vm, 892 &config.lock().unwrap().memory.clone(), 893 None, 894 phys_bits, 895 #[cfg(feature = "tdx")] 896 false, 897 Some(&vm_migration_config.memory_manager_data), 898 existing_memory_files, 899 #[cfg(target_arch = "x86_64")] 900 None, 901 ) 902 .map_err(|e| { 903 MigratableError::MigrateReceive(anyhow!( 904 "Error creating MemoryManager from snapshot: {:?}", 905 e 906 )) 907 })?; 908 909 Response::ok().write_to(socket)?; 910 911 Ok(memory_manager) 912 } 913 914 fn vm_receive_state<T>( 915 &mut self, 916 req: &Request, 917 socket: &mut T, 918 mm: Arc<Mutex<MemoryManager>>, 919 ) -> std::result::Result<(), MigratableError> 920 where 921 T: Read + Write, 922 { 923 // Read in state data 924 let mut data: Vec<u8> = Vec::new(); 925 data.resize_with(req.length() as usize, Default::default); 926 socket 927 .read_exact(&mut data) 928 .map_err(MigratableError::MigrateSocket)?; 929 let snapshot: Snapshot = serde_json::from_slice(&data).map_err(|e| { 930 MigratableError::MigrateReceive(anyhow!("Error deserialising snapshot: {}", e)) 931 })?; 932 933 let exit_evt = self.exit_evt.try_clone().map_err(|e| { 934 MigratableError::MigrateReceive(anyhow!("Error cloning exit EventFd: {}", e)) 935 })?; 936 let reset_evt = self.reset_evt.try_clone().map_err(|e| { 937 MigratableError::MigrateReceive(anyhow!("Error cloning reset EventFd: {}", e)) 938 })?; 939 #[cfg(feature = "guest_debug")] 940 let debug_evt = self.vm_debug_evt.try_clone().map_err(|e| { 941 MigratableError::MigrateReceive(anyhow!("Error cloning debug EventFd: {}", e)) 942 })?; 943 let activate_evt = self.activate_evt.try_clone().map_err(|e| { 944 MigratableError::MigrateReceive(anyhow!("Error cloning activate EventFd: {}", e)) 945 })?; 946 947 #[cfg(not(target_arch = "riscv64"))] 948 let timestamp = Instant::now(); 949 let hypervisor_vm = mm.lock().unwrap().vm.clone(); 950 let mut vm = Vm::new_from_memory_manager( 951 self.vm_config.clone().unwrap(), 952 mm, 953 hypervisor_vm, 954 exit_evt, 955 reset_evt, 956 #[cfg(feature = "guest_debug")] 957 debug_evt, 958 &self.seccomp_action, 959 self.hypervisor.clone(), 960 activate_evt, 961 #[cfg(not(target_arch = "riscv64"))] 962 timestamp, 963 self.console_info.clone(), 964 self.console_resize_pipe.clone(), 965 Arc::clone(&self.original_termios_opt), 966 Some(snapshot), 967 ) 968 .map_err(|e| { 969 MigratableError::MigrateReceive(anyhow!("Error creating VM from snapshot: {:?}", e)) 970 })?; 971 972 // Create VM 973 vm.restore().map_err(|e| { 974 Response::error().write_to(socket).ok(); 975 MigratableError::MigrateReceive(anyhow!("Failed restoring the Vm: {}", e)) 976 })?; 977 self.vm = Some(vm); 978 979 Response::ok().write_to(socket)?; 980 981 Ok(()) 982 } 983 984 fn vm_receive_memory<T>( 985 &mut self, 986 req: &Request, 987 socket: &mut T, 988 memory_manager: &mut MemoryManager, 989 ) -> std::result::Result<(), MigratableError> 990 where 991 T: Read + ReadVolatile + Write, 992 { 993 // Read table 994 let table = MemoryRangeTable::read_from(socket, req.length())?; 995 996 // And then read the memory itself 997 memory_manager 998 .receive_memory_regions(&table, socket) 999 .inspect_err(|_| { 1000 Response::error().write_to(socket).ok(); 1001 })?; 1002 Response::ok().write_to(socket)?; 1003 Ok(()) 1004 } 1005 1006 fn socket_url_to_path(url: &str) -> result::Result<PathBuf, MigratableError> { 1007 url.strip_prefix("unix:") 1008 .ok_or_else(|| { 1009 MigratableError::MigrateSend(anyhow!("Could not extract path from URL: {}", url)) 1010 }) 1011 .map(|s| s.into()) 1012 } 1013 1014 fn send_migration_socket( 1015 destination_url: &str, 1016 ) -> std::result::Result<SocketStream, MigratableError> { 1017 if let Some(address) = destination_url.strip_prefix("tcp:") { 1018 info!("Connecting to TCP socket at {}", address); 1019 1020 let socket = TcpStream::connect(address).map_err(|e| { 1021 MigratableError::MigrateSend(anyhow!("Error connecting to TCP socket: {}", e)) 1022 })?; 1023 1024 Ok(SocketStream::Tcp(socket)) 1025 } else { 1026 let path = Vmm::socket_url_to_path(destination_url)?; 1027 info!("Connecting to UNIX socket at {:?}", path); 1028 1029 let socket = UnixStream::connect(&path).map_err(|e| { 1030 MigratableError::MigrateSend(anyhow!("Error connecting to UNIX socket: {}", e)) 1031 })?; 1032 1033 Ok(SocketStream::Unix(socket)) 1034 } 1035 } 1036 1037 fn receive_migration_socket( 1038 receiver_url: &str, 1039 ) -> std::result::Result<SocketStream, MigratableError> { 1040 if let Some(address) = receiver_url.strip_prefix("tcp:") { 1041 let listener = TcpListener::bind(address).map_err(|e| { 1042 MigratableError::MigrateReceive(anyhow!("Error binding to TCP socket: {}", e)) 1043 })?; 1044 1045 let (socket, _addr) = listener.accept().map_err(|e| { 1046 MigratableError::MigrateReceive(anyhow!( 1047 "Error accepting connection on TCP socket: {}", 1048 e 1049 )) 1050 })?; 1051 1052 Ok(SocketStream::Tcp(socket)) 1053 } else { 1054 let path = Vmm::socket_url_to_path(receiver_url)?; 1055 let listener = UnixListener::bind(&path).map_err(|e| { 1056 MigratableError::MigrateReceive(anyhow!("Error binding to UNIX socket: {}", e)) 1057 })?; 1058 1059 let (socket, _addr) = listener.accept().map_err(|e| { 1060 MigratableError::MigrateReceive(anyhow!( 1061 "Error accepting connection on UNIX socket: {}", 1062 e 1063 )) 1064 })?; 1065 1066 // Remove the UNIX socket file after accepting the connection 1067 std::fs::remove_file(&path).map_err(|e| { 1068 MigratableError::MigrateReceive(anyhow!("Error removing UNIX socket file: {}", e)) 1069 })?; 1070 1071 Ok(SocketStream::Unix(socket)) 1072 } 1073 } 1074 1075 // Returns true if there were dirty pages to send 1076 fn vm_maybe_send_dirty_pages( 1077 vm: &mut Vm, 1078 socket: &mut SocketStream, 1079 ) -> result::Result<bool, MigratableError> { 1080 // Send (dirty) memory table 1081 let table = vm.dirty_log()?; 1082 1083 // But if there are no regions go straight to pause 1084 if table.regions().is_empty() { 1085 return Ok(false); 1086 } 1087 1088 Request::memory(table.length()).write_to(socket).unwrap(); 1089 table.write_to(socket)?; 1090 // And then the memory itself 1091 vm.send_memory_regions(&table, socket)?; 1092 Response::read_from(socket)?.ok_or_abandon( 1093 socket, 1094 MigratableError::MigrateSend(anyhow!("Error during dirty memory migration")), 1095 )?; 1096 1097 Ok(true) 1098 } 1099 1100 fn send_migration( 1101 vm: &mut Vm, 1102 #[cfg(all(feature = "kvm", target_arch = "x86_64"))] hypervisor: Arc< 1103 dyn hypervisor::Hypervisor, 1104 >, 1105 send_data_migration: VmSendMigrationData, 1106 ) -> result::Result<(), MigratableError> { 1107 // Set up the socket connection 1108 let mut socket = Self::send_migration_socket(&send_data_migration.destination_url)?; 1109 1110 // Start the migration 1111 Request::start().write_to(&mut socket)?; 1112 Response::read_from(&mut socket)?.ok_or_abandon( 1113 &mut socket, 1114 MigratableError::MigrateSend(anyhow!("Error starting migration")), 1115 )?; 1116 1117 // Send config 1118 let vm_config = vm.get_config(); 1119 #[cfg(all(feature = "kvm", target_arch = "x86_64"))] 1120 let common_cpuid = { 1121 #[cfg(feature = "tdx")] 1122 if vm_config.lock().unwrap().is_tdx_enabled() { 1123 return Err(MigratableError::MigrateSend(anyhow!( 1124 "Live Migration is not supported when TDX is enabled" 1125 ))); 1126 }; 1127 1128 let amx = vm_config.lock().unwrap().cpus.features.amx; 1129 let phys_bits = 1130 vm::physical_bits(&hypervisor, vm_config.lock().unwrap().cpus.max_phys_bits); 1131 arch::generate_common_cpuid( 1132 &hypervisor, 1133 &arch::CpuidConfig { 1134 sgx_epc_sections: None, 1135 phys_bits, 1136 kvm_hyperv: vm_config.lock().unwrap().cpus.kvm_hyperv, 1137 #[cfg(feature = "tdx")] 1138 tdx: false, 1139 amx, 1140 }, 1141 ) 1142 .map_err(|e| { 1143 MigratableError::MigrateSend(anyhow!("Error generating common cpuid': {:?}", e)) 1144 })? 1145 }; 1146 1147 if send_data_migration.local { 1148 match &mut socket { 1149 SocketStream::Unix(unix_socket) => { 1150 // Proceed with sending memory file descriptors over UNIX socket 1151 vm.send_memory_fds(unix_socket)?; 1152 } 1153 SocketStream::Tcp(_tcp_socket) => { 1154 return Err(MigratableError::MigrateSend(anyhow!( 1155 "--local option is not supported with TCP sockets", 1156 ))); 1157 } 1158 } 1159 } 1160 1161 let vm_migration_config = VmMigrationConfig { 1162 vm_config, 1163 #[cfg(all(feature = "kvm", target_arch = "x86_64"))] 1164 common_cpuid, 1165 memory_manager_data: vm.memory_manager_data(), 1166 }; 1167 let config_data = serde_json::to_vec(&vm_migration_config).unwrap(); 1168 Request::config(config_data.len() as u64).write_to(&mut socket)?; 1169 socket 1170 .write_all(&config_data) 1171 .map_err(MigratableError::MigrateSocket)?; 1172 Response::read_from(&mut socket)?.ok_or_abandon( 1173 &mut socket, 1174 MigratableError::MigrateSend(anyhow!("Error during config migration")), 1175 )?; 1176 1177 // Let every Migratable object know about the migration being started. 1178 vm.start_migration()?; 1179 1180 if send_data_migration.local { 1181 // Now pause VM 1182 vm.pause()?; 1183 } else { 1184 // Start logging dirty pages 1185 vm.start_dirty_log()?; 1186 1187 // Send memory table 1188 let table = vm.memory_range_table()?; 1189 Request::memory(table.length()) 1190 .write_to(&mut socket) 1191 .unwrap(); 1192 table.write_to(&mut socket)?; 1193 // And then the memory itself 1194 vm.send_memory_regions(&table, &mut socket)?; 1195 Response::read_from(&mut socket)?.ok_or_abandon( 1196 &mut socket, 1197 MigratableError::MigrateSend(anyhow!("Error during dirty memory migration")), 1198 )?; 1199 1200 // Try at most 5 passes of dirty memory sending 1201 const MAX_DIRTY_MIGRATIONS: usize = 5; 1202 for i in 0..MAX_DIRTY_MIGRATIONS { 1203 info!("Dirty memory migration {} of {}", i, MAX_DIRTY_MIGRATIONS); 1204 if !Self::vm_maybe_send_dirty_pages(vm, &mut socket)? { 1205 break; 1206 } 1207 } 1208 1209 // Now pause VM 1210 vm.pause()?; 1211 1212 // Send last batch of dirty pages 1213 Self::vm_maybe_send_dirty_pages(vm, &mut socket)?; 1214 } 1215 1216 // We release the locks early to enable locking them on the destination host. 1217 // The VM is already stopped. 1218 vm.release_disk_locks() 1219 .map_err(|e| MigratableError::UnlockError(anyhow!("{e}")))?; 1220 1221 // Capture snapshot and send it 1222 let vm_snapshot = vm.snapshot()?; 1223 let snapshot_data = serde_json::to_vec(&vm_snapshot).unwrap(); 1224 Request::state(snapshot_data.len() as u64).write_to(&mut socket)?; 1225 socket 1226 .write_all(&snapshot_data) 1227 .map_err(MigratableError::MigrateSocket)?; 1228 Response::read_from(&mut socket)?.ok_or_abandon( 1229 &mut socket, 1230 MigratableError::MigrateSend(anyhow!("Error during state migration")), 1231 )?; 1232 // Complete the migration 1233 // At this step, the receiving VMM will acquire disk locks again. 1234 Request::complete().write_to(&mut socket)?; 1235 Response::read_from(&mut socket)?.ok_or_abandon( 1236 &mut socket, 1237 MigratableError::MigrateSend(anyhow!("Error completing migration")), 1238 )?; 1239 1240 // Stop logging dirty pages 1241 if !send_data_migration.local { 1242 vm.stop_dirty_log()?; 1243 } 1244 1245 info!("Migration complete"); 1246 1247 // Let every Migratable object know about the migration being complete 1248 vm.complete_migration() 1249 } 1250 1251 #[cfg(all(feature = "kvm", target_arch = "x86_64"))] 1252 fn vm_check_cpuid_compatibility( 1253 &self, 1254 src_vm_config: &Arc<Mutex<VmConfig>>, 1255 src_vm_cpuid: &[hypervisor::arch::x86::CpuIdEntry], 1256 ) -> result::Result<(), MigratableError> { 1257 #[cfg(feature = "tdx")] 1258 if src_vm_config.lock().unwrap().is_tdx_enabled() { 1259 return Err(MigratableError::MigrateReceive(anyhow!( 1260 "Live Migration is not supported when TDX is enabled" 1261 ))); 1262 }; 1263 1264 // We check the `CPUID` compatibility of between the source vm and destination, which is 1265 // mostly about feature compatibility and "topology/sgx" leaves are not relevant. 1266 let dest_cpuid = &{ 1267 let vm_config = &src_vm_config.lock().unwrap(); 1268 1269 let phys_bits = vm::physical_bits(&self.hypervisor, vm_config.cpus.max_phys_bits); 1270 arch::generate_common_cpuid( 1271 &self.hypervisor.clone(), 1272 &arch::CpuidConfig { 1273 sgx_epc_sections: None, 1274 phys_bits, 1275 kvm_hyperv: vm_config.cpus.kvm_hyperv, 1276 #[cfg(feature = "tdx")] 1277 tdx: false, 1278 amx: vm_config.cpus.features.amx, 1279 }, 1280 ) 1281 .map_err(|e| { 1282 MigratableError::MigrateReceive(anyhow!("Error generating common cpuid: {:?}", e)) 1283 })? 1284 }; 1285 arch::CpuidFeatureEntry::check_cpuid_compatibility(src_vm_cpuid, dest_cpuid).map_err(|e| { 1286 MigratableError::MigrateReceive(anyhow!( 1287 "Error checking cpu feature compatibility': {:?}", 1288 e 1289 )) 1290 }) 1291 } 1292 1293 fn vm_restore( 1294 &mut self, 1295 source_url: &str, 1296 vm_config: Arc<Mutex<VmConfig>>, 1297 prefault: bool, 1298 ) -> std::result::Result<(), VmError> { 1299 let snapshot = recv_vm_state(source_url).map_err(VmError::Restore)?; 1300 #[cfg(all(feature = "kvm", target_arch = "x86_64"))] 1301 let vm_snapshot = get_vm_snapshot(&snapshot).map_err(VmError::Restore)?; 1302 1303 #[cfg(all(feature = "kvm", target_arch = "x86_64"))] 1304 self.vm_check_cpuid_compatibility(&vm_config, &vm_snapshot.common_cpuid) 1305 .map_err(VmError::Restore)?; 1306 1307 self.vm_config = Some(Arc::clone(&vm_config)); 1308 1309 // Always re-populate the 'console_info' based on the new 'vm_config' 1310 self.console_info = 1311 Some(pre_create_console_devices(self).map_err(VmError::CreateConsoleDevices)?); 1312 1313 let exit_evt = self.exit_evt.try_clone().map_err(VmError::EventFdClone)?; 1314 let reset_evt = self.reset_evt.try_clone().map_err(VmError::EventFdClone)?; 1315 #[cfg(feature = "guest_debug")] 1316 let debug_evt = self 1317 .vm_debug_evt 1318 .try_clone() 1319 .map_err(VmError::EventFdClone)?; 1320 let activate_evt = self 1321 .activate_evt 1322 .try_clone() 1323 .map_err(VmError::EventFdClone)?; 1324 1325 let vm = Vm::new( 1326 vm_config, 1327 exit_evt, 1328 reset_evt, 1329 #[cfg(feature = "guest_debug")] 1330 debug_evt, 1331 &self.seccomp_action, 1332 self.hypervisor.clone(), 1333 activate_evt, 1334 self.console_info.clone(), 1335 self.console_resize_pipe.clone(), 1336 Arc::clone(&self.original_termios_opt), 1337 Some(snapshot), 1338 Some(source_url), 1339 Some(prefault), 1340 )?; 1341 self.vm = Some(vm); 1342 1343 if self 1344 .vm_config 1345 .as_ref() 1346 .unwrap() 1347 .lock() 1348 .unwrap() 1349 .landlock_enable 1350 { 1351 apply_landlock(self.vm_config.as_ref().unwrap().clone()) 1352 .map_err(VmError::ApplyLandlock)?; 1353 } 1354 1355 // Now we can restore the rest of the VM. 1356 if let Some(ref mut vm) = self.vm { 1357 vm.restore() 1358 } else { 1359 Err(VmError::VmNotCreated) 1360 } 1361 } 1362 1363 fn control_loop( 1364 &mut self, 1365 api_receiver: Rc<Receiver<ApiRequest>>, 1366 #[cfg(feature = "guest_debug")] gdb_receiver: Rc<Receiver<gdb::GdbRequest>>, 1367 ) -> Result<()> { 1368 const EPOLL_EVENTS_LEN: usize = 100; 1369 1370 let mut events = vec![epoll::Event::new(epoll::Events::empty(), 0); EPOLL_EVENTS_LEN]; 1371 let epoll_fd = self.epoll.as_raw_fd(); 1372 1373 'outer: loop { 1374 let num_events = match epoll::wait(epoll_fd, -1, &mut events[..]) { 1375 Ok(res) => res, 1376 Err(e) => { 1377 if e.kind() == io::ErrorKind::Interrupted { 1378 // It's well defined from the epoll_wait() syscall 1379 // documentation that the epoll loop can be interrupted 1380 // before any of the requested events occurred or the 1381 // timeout expired. In both those cases, epoll_wait() 1382 // returns an error of type EINTR, but this should not 1383 // be considered as a regular error. Instead it is more 1384 // appropriate to retry, by calling into epoll_wait(). 1385 continue; 1386 } 1387 return Err(Error::Epoll(e)); 1388 } 1389 }; 1390 1391 for event in events.iter().take(num_events) { 1392 let dispatch_event: EpollDispatch = event.data.into(); 1393 match dispatch_event { 1394 EpollDispatch::Unknown => { 1395 let event = event.data; 1396 warn!("Unknown VMM loop event: {}", event); 1397 } 1398 EpollDispatch::Exit => { 1399 info!("VM exit event"); 1400 // Consume the event. 1401 self.exit_evt.read().map_err(Error::EventFdRead)?; 1402 self.vmm_shutdown().map_err(Error::VmmShutdown)?; 1403 1404 break 'outer; 1405 } 1406 EpollDispatch::Reset => { 1407 info!("VM reset event"); 1408 // Consume the event. 1409 self.reset_evt.read().map_err(Error::EventFdRead)?; 1410 self.vm_reboot().map_err(Error::VmReboot)?; 1411 } 1412 EpollDispatch::ActivateVirtioDevices => { 1413 if let Some(ref vm) = self.vm { 1414 let count = self.activate_evt.read().map_err(Error::EventFdRead)?; 1415 info!( 1416 "Trying to activate pending virtio devices: count = {}", 1417 count 1418 ); 1419 vm.activate_virtio_devices() 1420 .map_err(Error::ActivateVirtioDevices)?; 1421 } 1422 } 1423 EpollDispatch::Api => { 1424 // Consume the events. 1425 for _ in 0..self.api_evt.read().map_err(Error::EventFdRead)? { 1426 // Read from the API receiver channel 1427 let api_request = api_receiver.recv().map_err(Error::ApiRequestRecv)?; 1428 1429 if api_request(self)? { 1430 break 'outer; 1431 } 1432 } 1433 } 1434 #[cfg(feature = "guest_debug")] 1435 EpollDispatch::Debug => { 1436 // Consume the events. 1437 for _ in 0..self.debug_evt.read().map_err(Error::EventFdRead)? { 1438 // Read from the API receiver channel 1439 let gdb_request = gdb_receiver.recv().map_err(Error::GdbRequestRecv)?; 1440 1441 let response = if let Some(ref mut vm) = self.vm { 1442 vm.debug_request(&gdb_request.payload, gdb_request.cpu_id) 1443 } else { 1444 Err(VmError::VmNotRunning) 1445 } 1446 .map_err(gdb::Error::Vm); 1447 1448 gdb_request 1449 .sender 1450 .send(response) 1451 .map_err(Error::GdbResponseSend)?; 1452 } 1453 } 1454 #[cfg(not(feature = "guest_debug"))] 1455 EpollDispatch::Debug => {} 1456 } 1457 } 1458 } 1459 1460 // Trigger the termination of the signal_handler thread 1461 if let Some(signals) = self.signals.take() { 1462 signals.close(); 1463 } 1464 1465 // Wait for all the threads to finish 1466 for thread in self.threads.drain(..) { 1467 thread.join().map_err(Error::ThreadCleanup)? 1468 } 1469 1470 Ok(()) 1471 } 1472 } 1473 1474 fn apply_landlock(vm_config: Arc<Mutex<VmConfig>>) -> result::Result<(), LandlockError> { 1475 vm_config.lock().unwrap().apply_landlock()?; 1476 Ok(()) 1477 } 1478 1479 impl RequestHandler for Vmm { 1480 fn vm_create(&mut self, config: Box<VmConfig>) -> result::Result<(), VmError> { 1481 // We only store the passed VM config. 1482 // The VM will be created when being asked to boot it. 1483 if self.vm_config.is_none() { 1484 self.vm_config = Some(Arc::new(Mutex::new(*config))); 1485 self.console_info = 1486 Some(pre_create_console_devices(self).map_err(VmError::CreateConsoleDevices)?); 1487 1488 if self 1489 .vm_config 1490 .as_ref() 1491 .unwrap() 1492 .lock() 1493 .unwrap() 1494 .landlock_enable 1495 { 1496 apply_landlock(self.vm_config.as_ref().unwrap().clone()) 1497 .map_err(VmError::ApplyLandlock)?; 1498 } 1499 Ok(()) 1500 } else { 1501 Err(VmError::VmAlreadyCreated) 1502 } 1503 } 1504 1505 fn vm_boot(&mut self) -> result::Result<(), VmError> { 1506 tracer::start(); 1507 info!("Booting VM"); 1508 event!("vm", "booting"); 1509 let r = { 1510 trace_scoped!("vm_boot"); 1511 // If we don't have a config, we cannot boot a VM. 1512 if self.vm_config.is_none() { 1513 return Err(VmError::VmMissingConfig); 1514 }; 1515 1516 // console_info is set to None in vm_shutdown. re-populate here if empty 1517 if self.console_info.is_none() { 1518 self.console_info = 1519 Some(pre_create_console_devices(self).map_err(VmError::CreateConsoleDevices)?); 1520 } 1521 1522 // Create a new VM if we don't have one yet. 1523 if self.vm.is_none() { 1524 let exit_evt = self.exit_evt.try_clone().map_err(VmError::EventFdClone)?; 1525 let reset_evt = self.reset_evt.try_clone().map_err(VmError::EventFdClone)?; 1526 #[cfg(feature = "guest_debug")] 1527 let vm_debug_evt = self 1528 .vm_debug_evt 1529 .try_clone() 1530 .map_err(VmError::EventFdClone)?; 1531 let activate_evt = self 1532 .activate_evt 1533 .try_clone() 1534 .map_err(VmError::EventFdClone)?; 1535 1536 if let Some(ref vm_config) = self.vm_config { 1537 let vm = Vm::new( 1538 Arc::clone(vm_config), 1539 exit_evt, 1540 reset_evt, 1541 #[cfg(feature = "guest_debug")] 1542 vm_debug_evt, 1543 &self.seccomp_action, 1544 self.hypervisor.clone(), 1545 activate_evt, 1546 self.console_info.clone(), 1547 self.console_resize_pipe.clone(), 1548 Arc::clone(&self.original_termios_opt), 1549 None, 1550 None, 1551 None, 1552 )?; 1553 1554 self.vm = Some(vm); 1555 } 1556 } 1557 1558 // Now we can boot the VM. 1559 if let Some(ref mut vm) = self.vm { 1560 vm.boot() 1561 } else { 1562 Err(VmError::VmNotCreated) 1563 } 1564 }; 1565 tracer::end(); 1566 if r.is_ok() { 1567 event!("vm", "booted"); 1568 } 1569 r 1570 } 1571 1572 fn vm_pause(&mut self) -> result::Result<(), VmError> { 1573 if let Some(ref mut vm) = self.vm { 1574 vm.pause().map_err(VmError::Pause) 1575 } else { 1576 Err(VmError::VmNotRunning) 1577 } 1578 } 1579 1580 fn vm_resume(&mut self) -> result::Result<(), VmError> { 1581 if let Some(ref mut vm) = self.vm { 1582 vm.resume().map_err(VmError::Resume) 1583 } else { 1584 Err(VmError::VmNotRunning) 1585 } 1586 } 1587 1588 fn vm_snapshot(&mut self, destination_url: &str) -> result::Result<(), VmError> { 1589 if let Some(ref mut vm) = self.vm { 1590 // Drain console_info so that FDs are not reused 1591 let _ = self.console_info.take(); 1592 vm.snapshot() 1593 .map_err(VmError::Snapshot) 1594 .and_then(|snapshot| { 1595 vm.send(&snapshot, destination_url) 1596 .map_err(VmError::SnapshotSend) 1597 }) 1598 } else { 1599 Err(VmError::VmNotRunning) 1600 } 1601 } 1602 1603 fn vm_restore(&mut self, restore_cfg: RestoreConfig) -> result::Result<(), VmError> { 1604 if self.vm.is_some() || self.vm_config.is_some() { 1605 return Err(VmError::VmAlreadyCreated); 1606 } 1607 1608 let source_url = restore_cfg.source_url.as_path().to_str(); 1609 if source_url.is_none() { 1610 return Err(VmError::InvalidRestoreSourceUrl); 1611 } 1612 // Safe to unwrap as we checked it was Some(&str). 1613 let source_url = source_url.unwrap(); 1614 1615 let vm_config = Arc::new(Mutex::new( 1616 recv_vm_config(source_url).map_err(VmError::Restore)?, 1617 )); 1618 restore_cfg 1619 .validate(&vm_config.lock().unwrap().clone()) 1620 .map_err(VmError::ConfigValidation)?; 1621 1622 // Update VM's net configurations with new fds received for restore operation 1623 if let (Some(restored_nets), Some(vm_net_configs)) = 1624 (restore_cfg.net_fds, &mut vm_config.lock().unwrap().net) 1625 { 1626 for net in restored_nets.iter() { 1627 for net_config in vm_net_configs.iter_mut() { 1628 // update only if the net dev is backed by FDs 1629 if net_config.id == Some(net.id.clone()) && net_config.fds.is_some() { 1630 net_config.fds.clone_from(&net.fds); 1631 } 1632 } 1633 } 1634 } 1635 1636 self.vm_restore(source_url, vm_config, restore_cfg.prefault) 1637 .map_err(|vm_restore_err| { 1638 error!("VM Restore failed: {:?}", vm_restore_err); 1639 1640 // Cleanup the VM being created while vm restore 1641 if let Err(e) = self.vm_delete() { 1642 return e; 1643 } 1644 1645 vm_restore_err 1646 }) 1647 } 1648 1649 #[cfg(all(target_arch = "x86_64", feature = "guest_debug"))] 1650 fn vm_coredump(&mut self, destination_url: &str) -> result::Result<(), VmError> { 1651 if let Some(ref mut vm) = self.vm { 1652 vm.coredump(destination_url).map_err(VmError::Coredump) 1653 } else { 1654 Err(VmError::VmNotRunning) 1655 } 1656 } 1657 1658 fn vm_shutdown(&mut self) -> result::Result<(), VmError> { 1659 let r = if let Some(ref mut vm) = self.vm.take() { 1660 // Drain console_info so that the FDs are not reused 1661 let _ = self.console_info.take(); 1662 vm.shutdown() 1663 } else { 1664 Err(VmError::VmNotRunning) 1665 }; 1666 1667 if r.is_ok() { 1668 event!("vm", "shutdown"); 1669 } 1670 1671 r 1672 } 1673 1674 fn vm_reboot(&mut self) -> result::Result<(), VmError> { 1675 event!("vm", "rebooting"); 1676 1677 // First we stop the current VM 1678 let config = if let Some(mut vm) = self.vm.take() { 1679 let config = vm.get_config(); 1680 vm.shutdown()?; 1681 config 1682 } else { 1683 return Err(VmError::VmNotCreated); 1684 }; 1685 1686 // vm.shutdown() closes all the console devices, so set console_info to None 1687 // so that the closed FD #s are not reused. 1688 let _ = self.console_info.take(); 1689 1690 let exit_evt = self.exit_evt.try_clone().map_err(VmError::EventFdClone)?; 1691 let reset_evt = self.reset_evt.try_clone().map_err(VmError::EventFdClone)?; 1692 #[cfg(feature = "guest_debug")] 1693 let debug_evt = self 1694 .vm_debug_evt 1695 .try_clone() 1696 .map_err(VmError::EventFdClone)?; 1697 let activate_evt = self 1698 .activate_evt 1699 .try_clone() 1700 .map_err(VmError::EventFdClone)?; 1701 1702 // The Linux kernel fires off an i8042 reset after doing the ACPI reset so there may be 1703 // an event sitting in the shared reset_evt. Without doing this we get very early reboots 1704 // during the boot process. 1705 if self.reset_evt.read().is_ok() { 1706 warn!("Spurious second reset event received. Ignoring."); 1707 } 1708 1709 self.console_info = 1710 Some(pre_create_console_devices(self).map_err(VmError::CreateConsoleDevices)?); 1711 1712 // Then we create the new VM 1713 let mut vm = Vm::new( 1714 config, 1715 exit_evt, 1716 reset_evt, 1717 #[cfg(feature = "guest_debug")] 1718 debug_evt, 1719 &self.seccomp_action, 1720 self.hypervisor.clone(), 1721 activate_evt, 1722 self.console_info.clone(), 1723 self.console_resize_pipe.clone(), 1724 Arc::clone(&self.original_termios_opt), 1725 None, 1726 None, 1727 None, 1728 )?; 1729 1730 // And we boot it 1731 vm.boot()?; 1732 1733 self.vm = Some(vm); 1734 1735 event!("vm", "rebooted"); 1736 1737 Ok(()) 1738 } 1739 1740 fn vm_info(&self) -> result::Result<VmInfoResponse, VmError> { 1741 match &self.vm_config { 1742 Some(vm_config) => { 1743 let state = match &self.vm { 1744 Some(vm) => vm.get_state()?, 1745 None => VmState::Created, 1746 }; 1747 let config = vm_config.lock().unwrap().clone(); 1748 1749 let mut memory_actual_size = config.memory.total_size(); 1750 if let Some(vm) = &self.vm { 1751 memory_actual_size -= vm.balloon_size(); 1752 } 1753 1754 let device_tree = self 1755 .vm 1756 .as_ref() 1757 .map(|vm| vm.device_tree().lock().unwrap().clone()); 1758 1759 Ok(VmInfoResponse { 1760 config: Box::new(config), 1761 state, 1762 memory_actual_size, 1763 device_tree, 1764 }) 1765 } 1766 None => Err(VmError::VmNotCreated), 1767 } 1768 } 1769 1770 fn vmm_ping(&self) -> VmmPingResponse { 1771 let VmmVersionInfo { 1772 build_version, 1773 version, 1774 } = self.version.clone(); 1775 1776 VmmPingResponse { 1777 build_version, 1778 version, 1779 pid: std::process::id() as i64, 1780 features: feature_list(), 1781 } 1782 } 1783 1784 fn vm_delete(&mut self) -> result::Result<(), VmError> { 1785 if self.vm_config.is_none() { 1786 return Ok(()); 1787 } 1788 1789 // If a VM is booted, we first try to shut it down. 1790 if self.vm.is_some() { 1791 self.vm_shutdown()?; 1792 } 1793 1794 self.vm_config = None; 1795 1796 event!("vm", "deleted"); 1797 1798 Ok(()) 1799 } 1800 1801 fn vmm_shutdown(&mut self) -> result::Result<(), VmError> { 1802 self.vm_delete()?; 1803 event!("vmm", "shutdown"); 1804 Ok(()) 1805 } 1806 1807 fn vm_resize( 1808 &mut self, 1809 desired_vcpus: Option<u8>, 1810 desired_ram: Option<u64>, 1811 desired_balloon: Option<u64>, 1812 ) -> result::Result<(), VmError> { 1813 self.vm_config.as_ref().ok_or(VmError::VmNotCreated)?; 1814 1815 if let Some(ref mut vm) = self.vm { 1816 if let Err(e) = vm.resize(desired_vcpus, desired_ram, desired_balloon) { 1817 error!("Error when resizing VM: {:?}", e); 1818 Err(e) 1819 } else { 1820 Ok(()) 1821 } 1822 } else { 1823 let mut config = self.vm_config.as_ref().unwrap().lock().unwrap(); 1824 if let Some(desired_vcpus) = desired_vcpus { 1825 config.cpus.boot_vcpus = desired_vcpus; 1826 } 1827 if let Some(desired_ram) = desired_ram { 1828 config.memory.size = desired_ram; 1829 } 1830 if let Some(desired_balloon) = desired_balloon { 1831 if let Some(balloon_config) = &mut config.balloon { 1832 balloon_config.size = desired_balloon; 1833 } 1834 } 1835 Ok(()) 1836 } 1837 } 1838 1839 fn vm_resize_zone(&mut self, id: String, desired_ram: u64) -> result::Result<(), VmError> { 1840 self.vm_config.as_ref().ok_or(VmError::VmNotCreated)?; 1841 1842 if let Some(ref mut vm) = self.vm { 1843 if let Err(e) = vm.resize_zone(id, desired_ram) { 1844 error!("Error when resizing VM: {:?}", e); 1845 Err(e) 1846 } else { 1847 Ok(()) 1848 } 1849 } else { 1850 // Update VmConfig by setting the new desired ram. 1851 let memory_config = &mut self.vm_config.as_ref().unwrap().lock().unwrap().memory; 1852 1853 if let Some(zones) = &mut memory_config.zones { 1854 for zone in zones.iter_mut() { 1855 if zone.id == id { 1856 zone.size = desired_ram; 1857 return Ok(()); 1858 } 1859 } 1860 } 1861 1862 error!("Could not find the memory zone {} for the resize", id); 1863 Err(VmError::ResizeZone) 1864 } 1865 } 1866 1867 fn vm_add_device( 1868 &mut self, 1869 device_cfg: DeviceConfig, 1870 ) -> result::Result<Option<Vec<u8>>, VmError> { 1871 self.vm_config.as_ref().ok_or(VmError::VmNotCreated)?; 1872 1873 { 1874 // Validate the configuration change in a cloned configuration 1875 let mut config = self.vm_config.as_ref().unwrap().lock().unwrap().clone(); 1876 add_to_config(&mut config.devices, device_cfg.clone()); 1877 config.validate().map_err(VmError::ConfigValidation)?; 1878 } 1879 1880 if let Some(ref mut vm) = self.vm { 1881 let info = vm.add_device(device_cfg).map_err(|e| { 1882 error!("Error when adding new device to the VM: {:?}", e); 1883 e 1884 })?; 1885 serde_json::to_vec(&info) 1886 .map(Some) 1887 .map_err(VmError::SerializeJson) 1888 } else { 1889 // Update VmConfig by adding the new device. 1890 let mut config = self.vm_config.as_ref().unwrap().lock().unwrap(); 1891 add_to_config(&mut config.devices, device_cfg); 1892 Ok(None) 1893 } 1894 } 1895 1896 fn vm_add_user_device( 1897 &mut self, 1898 device_cfg: UserDeviceConfig, 1899 ) -> result::Result<Option<Vec<u8>>, VmError> { 1900 self.vm_config.as_ref().ok_or(VmError::VmNotCreated)?; 1901 1902 { 1903 // Validate the configuration change in a cloned configuration 1904 let mut config = self.vm_config.as_ref().unwrap().lock().unwrap().clone(); 1905 add_to_config(&mut config.user_devices, device_cfg.clone()); 1906 config.validate().map_err(VmError::ConfigValidation)?; 1907 } 1908 1909 if let Some(ref mut vm) = self.vm { 1910 let info = vm.add_user_device(device_cfg).map_err(|e| { 1911 error!("Error when adding new user device to the VM: {:?}", e); 1912 e 1913 })?; 1914 serde_json::to_vec(&info) 1915 .map(Some) 1916 .map_err(VmError::SerializeJson) 1917 } else { 1918 // Update VmConfig by adding the new device. 1919 let mut config = self.vm_config.as_ref().unwrap().lock().unwrap(); 1920 add_to_config(&mut config.user_devices, device_cfg); 1921 Ok(None) 1922 } 1923 } 1924 1925 fn vm_remove_device(&mut self, id: String) -> result::Result<(), VmError> { 1926 if let Some(ref mut vm) = self.vm { 1927 if let Err(e) = vm.remove_device(id) { 1928 error!("Error when removing device from the VM: {:?}", e); 1929 Err(e) 1930 } else { 1931 Ok(()) 1932 } 1933 } else if let Some(ref config) = self.vm_config { 1934 let mut config = config.lock().unwrap(); 1935 if config.remove_device(&id) { 1936 Ok(()) 1937 } else { 1938 Err(VmError::NoDeviceToRemove(id)) 1939 } 1940 } else { 1941 Err(VmError::VmNotCreated) 1942 } 1943 } 1944 1945 fn vm_add_disk(&mut self, disk_cfg: DiskConfig) -> result::Result<Option<Vec<u8>>, VmError> { 1946 self.vm_config.as_ref().ok_or(VmError::VmNotCreated)?; 1947 1948 { 1949 // Validate the configuration change in a cloned configuration 1950 let mut config = self.vm_config.as_ref().unwrap().lock().unwrap().clone(); 1951 add_to_config(&mut config.disks, disk_cfg.clone()); 1952 config.validate().map_err(VmError::ConfigValidation)?; 1953 } 1954 1955 if let Some(ref mut vm) = self.vm { 1956 let info = vm.add_disk(disk_cfg).map_err(|e| { 1957 error!("Error when adding new disk to the VM: {:?}", e); 1958 e 1959 })?; 1960 serde_json::to_vec(&info) 1961 .map(Some) 1962 .map_err(VmError::SerializeJson) 1963 } else { 1964 // Update VmConfig by adding the new device. 1965 let mut config = self.vm_config.as_ref().unwrap().lock().unwrap(); 1966 add_to_config(&mut config.disks, disk_cfg); 1967 Ok(None) 1968 } 1969 } 1970 1971 fn vm_add_fs(&mut self, fs_cfg: FsConfig) -> result::Result<Option<Vec<u8>>, VmError> { 1972 self.vm_config.as_ref().ok_or(VmError::VmNotCreated)?; 1973 1974 { 1975 // Validate the configuration change in a cloned configuration 1976 let mut config = self.vm_config.as_ref().unwrap().lock().unwrap().clone(); 1977 add_to_config(&mut config.fs, fs_cfg.clone()); 1978 config.validate().map_err(VmError::ConfigValidation)?; 1979 } 1980 1981 if let Some(ref mut vm) = self.vm { 1982 let info = vm.add_fs(fs_cfg).map_err(|e| { 1983 error!("Error when adding new fs to the VM: {:?}", e); 1984 e 1985 })?; 1986 serde_json::to_vec(&info) 1987 .map(Some) 1988 .map_err(VmError::SerializeJson) 1989 } else { 1990 // Update VmConfig by adding the new device. 1991 let mut config = self.vm_config.as_ref().unwrap().lock().unwrap(); 1992 add_to_config(&mut config.fs, fs_cfg); 1993 Ok(None) 1994 } 1995 } 1996 1997 fn vm_add_pmem(&mut self, pmem_cfg: PmemConfig) -> result::Result<Option<Vec<u8>>, VmError> { 1998 self.vm_config.as_ref().ok_or(VmError::VmNotCreated)?; 1999 2000 { 2001 // Validate the configuration change in a cloned configuration 2002 let mut config = self.vm_config.as_ref().unwrap().lock().unwrap().clone(); 2003 add_to_config(&mut config.pmem, pmem_cfg.clone()); 2004 config.validate().map_err(VmError::ConfigValidation)?; 2005 } 2006 2007 if let Some(ref mut vm) = self.vm { 2008 let info = vm.add_pmem(pmem_cfg).map_err(|e| { 2009 error!("Error when adding new pmem device to the VM: {:?}", e); 2010 e 2011 })?; 2012 serde_json::to_vec(&info) 2013 .map(Some) 2014 .map_err(VmError::SerializeJson) 2015 } else { 2016 // Update VmConfig by adding the new device. 2017 let mut config = self.vm_config.as_ref().unwrap().lock().unwrap(); 2018 add_to_config(&mut config.pmem, pmem_cfg); 2019 Ok(None) 2020 } 2021 } 2022 2023 fn vm_add_net(&mut self, net_cfg: NetConfig) -> result::Result<Option<Vec<u8>>, VmError> { 2024 self.vm_config.as_ref().ok_or(VmError::VmNotCreated)?; 2025 2026 { 2027 // Validate the configuration change in a cloned configuration 2028 let mut config = self.vm_config.as_ref().unwrap().lock().unwrap().clone(); 2029 add_to_config(&mut config.net, net_cfg.clone()); 2030 config.validate().map_err(VmError::ConfigValidation)?; 2031 } 2032 2033 if let Some(ref mut vm) = self.vm { 2034 let info = vm.add_net(net_cfg).map_err(|e| { 2035 error!("Error when adding new network device to the VM: {:?}", e); 2036 e 2037 })?; 2038 serde_json::to_vec(&info) 2039 .map(Some) 2040 .map_err(VmError::SerializeJson) 2041 } else { 2042 // Update VmConfig by adding the new device. 2043 let mut config = self.vm_config.as_ref().unwrap().lock().unwrap(); 2044 add_to_config(&mut config.net, net_cfg); 2045 Ok(None) 2046 } 2047 } 2048 2049 fn vm_add_vdpa(&mut self, vdpa_cfg: VdpaConfig) -> result::Result<Option<Vec<u8>>, VmError> { 2050 self.vm_config.as_ref().ok_or(VmError::VmNotCreated)?; 2051 2052 { 2053 // Validate the configuration change in a cloned configuration 2054 let mut config = self.vm_config.as_ref().unwrap().lock().unwrap().clone(); 2055 add_to_config(&mut config.vdpa, vdpa_cfg.clone()); 2056 config.validate().map_err(VmError::ConfigValidation)?; 2057 } 2058 2059 if let Some(ref mut vm) = self.vm { 2060 let info = vm.add_vdpa(vdpa_cfg).map_err(|e| { 2061 error!("Error when adding new vDPA device to the VM: {:?}", e); 2062 e 2063 })?; 2064 serde_json::to_vec(&info) 2065 .map(Some) 2066 .map_err(VmError::SerializeJson) 2067 } else { 2068 // Update VmConfig by adding the new device. 2069 let mut config = self.vm_config.as_ref().unwrap().lock().unwrap(); 2070 add_to_config(&mut config.vdpa, vdpa_cfg); 2071 Ok(None) 2072 } 2073 } 2074 2075 fn vm_add_vsock(&mut self, vsock_cfg: VsockConfig) -> result::Result<Option<Vec<u8>>, VmError> { 2076 self.vm_config.as_ref().ok_or(VmError::VmNotCreated)?; 2077 2078 { 2079 // Validate the configuration change in a cloned configuration 2080 let mut config = self.vm_config.as_ref().unwrap().lock().unwrap().clone(); 2081 2082 if config.vsock.is_some() { 2083 return Err(VmError::TooManyVsockDevices); 2084 } 2085 2086 config.vsock = Some(vsock_cfg.clone()); 2087 config.validate().map_err(VmError::ConfigValidation)?; 2088 } 2089 2090 if let Some(ref mut vm) = self.vm { 2091 let info = vm.add_vsock(vsock_cfg).map_err(|e| { 2092 error!("Error when adding new vsock device to the VM: {:?}", e); 2093 e 2094 })?; 2095 serde_json::to_vec(&info) 2096 .map(Some) 2097 .map_err(VmError::SerializeJson) 2098 } else { 2099 // Update VmConfig by adding the new device. 2100 let mut config = self.vm_config.as_ref().unwrap().lock().unwrap(); 2101 config.vsock = Some(vsock_cfg); 2102 Ok(None) 2103 } 2104 } 2105 2106 fn vm_counters(&mut self) -> result::Result<Option<Vec<u8>>, VmError> { 2107 if let Some(ref mut vm) = self.vm { 2108 let info = vm.counters().map_err(|e| { 2109 error!("Error when getting counters from the VM: {:?}", e); 2110 e 2111 })?; 2112 serde_json::to_vec(&info) 2113 .map(Some) 2114 .map_err(VmError::SerializeJson) 2115 } else { 2116 Err(VmError::VmNotRunning) 2117 } 2118 } 2119 2120 fn vm_power_button(&mut self) -> result::Result<(), VmError> { 2121 if let Some(ref mut vm) = self.vm { 2122 vm.power_button() 2123 } else { 2124 Err(VmError::VmNotRunning) 2125 } 2126 } 2127 2128 fn vm_nmi(&mut self) -> result::Result<(), VmError> { 2129 if let Some(ref mut vm) = self.vm { 2130 vm.nmi() 2131 } else { 2132 Err(VmError::VmNotRunning) 2133 } 2134 } 2135 2136 fn vm_receive_migration( 2137 &mut self, 2138 receive_data_migration: VmReceiveMigrationData, 2139 ) -> result::Result<(), MigratableError> { 2140 info!( 2141 "Receiving migration: receiver_url = {}", 2142 receive_data_migration.receiver_url 2143 ); 2144 2145 // Accept the connection and get the socket 2146 let mut socket = Vmm::receive_migration_socket(&receive_data_migration.receiver_url)?; 2147 2148 let mut started = false; 2149 let mut memory_manager: Option<Arc<Mutex<MemoryManager>>> = None; 2150 let mut existing_memory_files = None; 2151 loop { 2152 let req = Request::read_from(&mut socket)?; 2153 match req.command() { 2154 Command::Invalid => info!("Invalid Command Received"), 2155 Command::Start => { 2156 info!("Start Command Received"); 2157 started = true; 2158 2159 Response::ok().write_to(&mut socket)?; 2160 } 2161 Command::Config => { 2162 info!("Config Command Received"); 2163 2164 if !started { 2165 warn!("Migration not started yet"); 2166 Response::error().write_to(&mut socket)?; 2167 continue; 2168 } 2169 memory_manager = Some(self.vm_receive_config( 2170 &req, 2171 &mut socket, 2172 existing_memory_files.take(), 2173 )?); 2174 } 2175 Command::State => { 2176 info!("State Command Received"); 2177 2178 if !started { 2179 warn!("Migration not started yet"); 2180 Response::error().write_to(&mut socket)?; 2181 continue; 2182 } 2183 if let Some(mm) = memory_manager.take() { 2184 self.vm_receive_state(&req, &mut socket, mm)?; 2185 } else { 2186 warn!("Configuration not sent yet"); 2187 Response::error().write_to(&mut socket)?; 2188 } 2189 } 2190 Command::Memory => { 2191 info!("Memory Command Received"); 2192 2193 if !started { 2194 warn!("Migration not started yet"); 2195 Response::error().write_to(&mut socket)?; 2196 continue; 2197 } 2198 if let Some(mm) = memory_manager.as_ref() { 2199 self.vm_receive_memory(&req, &mut socket, &mut mm.lock().unwrap())?; 2200 } else { 2201 warn!("Configuration not sent yet"); 2202 Response::error().write_to(&mut socket)?; 2203 } 2204 } 2205 Command::MemoryFd => { 2206 info!("MemoryFd Command Received"); 2207 2208 if !started { 2209 warn!("Migration not started yet"); 2210 Response::error().write_to(&mut socket)?; 2211 continue; 2212 } 2213 2214 match &mut socket { 2215 SocketStream::Unix(unix_socket) => { 2216 let mut buf = [0u8; 4]; 2217 let (_, file) = unix_socket.recv_with_fd(&mut buf).map_err(|e| { 2218 MigratableError::MigrateReceive(anyhow!( 2219 "Error receiving slot from socket: {}", 2220 e 2221 )) 2222 })?; 2223 2224 if existing_memory_files.is_none() { 2225 existing_memory_files = Some(HashMap::default()) 2226 } 2227 2228 if let Some(ref mut existing_memory_files) = existing_memory_files { 2229 let slot = u32::from_le_bytes(buf); 2230 existing_memory_files.insert(slot, file.unwrap()); 2231 } 2232 2233 Response::ok().write_to(&mut socket)?; 2234 } 2235 SocketStream::Tcp(_tcp_socket) => { 2236 // For TCP sockets, we cannot transfer file descriptors 2237 warn!( 2238 "MemoryFd command received over TCP socket, which is not supported" 2239 ); 2240 Response::error().write_to(&mut socket)?; 2241 } 2242 } 2243 } 2244 Command::Complete => { 2245 info!("Complete Command Received"); 2246 if let Some(ref mut vm) = self.vm.as_mut() { 2247 vm.resume()?; 2248 Response::ok().write_to(&mut socket)?; 2249 } else { 2250 warn!("VM not created yet"); 2251 Response::error().write_to(&mut socket)?; 2252 } 2253 break; 2254 } 2255 Command::Abandon => { 2256 info!("Abandon Command Received"); 2257 self.vm = None; 2258 self.vm_config = None; 2259 Response::ok().write_to(&mut socket).ok(); 2260 break; 2261 } 2262 } 2263 } 2264 2265 Ok(()) 2266 } 2267 2268 fn vm_send_migration( 2269 &mut self, 2270 send_data_migration: VmSendMigrationData, 2271 ) -> result::Result<(), MigratableError> { 2272 info!( 2273 "Sending migration: destination_url = {}, local = {}", 2274 send_data_migration.destination_url, send_data_migration.local 2275 ); 2276 2277 if !self 2278 .vm_config 2279 .as_ref() 2280 .unwrap() 2281 .lock() 2282 .unwrap() 2283 .backed_by_shared_memory() 2284 && send_data_migration.local 2285 { 2286 return Err(MigratableError::MigrateSend(anyhow!( 2287 "Local migration requires shared memory or hugepages enabled" 2288 ))); 2289 } 2290 2291 if let Some(vm) = self.vm.as_mut() { 2292 Self::send_migration( 2293 vm, 2294 #[cfg(all(feature = "kvm", target_arch = "x86_64"))] 2295 self.hypervisor.clone(), 2296 send_data_migration.clone(), 2297 ) 2298 .map_err(|migration_err| { 2299 error!("Migration failed: {:?}", migration_err); 2300 2301 // Stop logging dirty pages only for non-local migrations 2302 if !send_data_migration.local { 2303 if let Err(e) = vm.stop_dirty_log() { 2304 return e; 2305 } 2306 } 2307 2308 if vm.get_state().unwrap() == VmState::Paused { 2309 if let Err(e) = vm.resume() { 2310 return e; 2311 } 2312 } 2313 2314 migration_err 2315 })?; 2316 2317 // Shutdown the VM after the migration succeeded 2318 self.exit_evt.write(1).map_err(|e| { 2319 MigratableError::MigrateSend(anyhow!( 2320 "Failed shutting down the VM after migration: {:?}", 2321 e 2322 )) 2323 }) 2324 } else { 2325 Err(MigratableError::MigrateSend(anyhow!("VM is not running"))) 2326 } 2327 } 2328 } 2329 2330 const CPU_MANAGER_SNAPSHOT_ID: &str = "cpu-manager"; 2331 const MEMORY_MANAGER_SNAPSHOT_ID: &str = "memory-manager"; 2332 const DEVICE_MANAGER_SNAPSHOT_ID: &str = "device-manager"; 2333 2334 #[cfg(test)] 2335 mod unit_tests { 2336 use super::*; 2337 #[cfg(target_arch = "x86_64")] 2338 use crate::vm_config::DebugConsoleConfig; 2339 use crate::vm_config::{ 2340 ConsoleConfig, ConsoleOutputMode, CpuFeatures, CpusConfig, HotplugMethod, MemoryConfig, 2341 PayloadConfig, RngConfig, 2342 }; 2343 2344 fn create_dummy_vmm() -> Vmm { 2345 Vmm::new( 2346 VmmVersionInfo::new("dummy", "dummy"), 2347 EventFd::new(EFD_NONBLOCK).unwrap(), 2348 #[cfg(feature = "guest_debug")] 2349 EventFd::new(EFD_NONBLOCK).unwrap(), 2350 #[cfg(feature = "guest_debug")] 2351 EventFd::new(EFD_NONBLOCK).unwrap(), 2352 SeccompAction::Allow, 2353 hypervisor::new().unwrap(), 2354 EventFd::new(EFD_NONBLOCK).unwrap(), 2355 ) 2356 .unwrap() 2357 } 2358 2359 fn create_dummy_vm_config() -> Box<VmConfig> { 2360 Box::new(VmConfig { 2361 cpus: CpusConfig { 2362 boot_vcpus: 1, 2363 max_vcpus: 1, 2364 topology: None, 2365 kvm_hyperv: false, 2366 max_phys_bits: 46, 2367 affinity: None, 2368 features: CpuFeatures::default(), 2369 }, 2370 memory: MemoryConfig { 2371 size: 536_870_912, 2372 mergeable: false, 2373 hotplug_method: HotplugMethod::Acpi, 2374 hotplug_size: None, 2375 hotplugged_size: None, 2376 shared: true, 2377 hugepages: false, 2378 hugepage_size: None, 2379 prefault: false, 2380 zones: None, 2381 thp: true, 2382 }, 2383 payload: Some(PayloadConfig { 2384 kernel: Some(PathBuf::from("/path/to/kernel")), 2385 firmware: None, 2386 cmdline: None, 2387 initramfs: None, 2388 #[cfg(feature = "igvm")] 2389 igvm: None, 2390 #[cfg(feature = "sev_snp")] 2391 host_data: None, 2392 }), 2393 rate_limit_groups: None, 2394 disks: None, 2395 net: None, 2396 rng: RngConfig { 2397 src: PathBuf::from("/dev/urandom"), 2398 iommu: false, 2399 }, 2400 balloon: None, 2401 fs: None, 2402 pmem: None, 2403 serial: ConsoleConfig { 2404 file: None, 2405 mode: ConsoleOutputMode::Null, 2406 iommu: false, 2407 socket: None, 2408 }, 2409 console: ConsoleConfig { 2410 file: None, 2411 mode: ConsoleOutputMode::Tty, 2412 iommu: false, 2413 socket: None, 2414 }, 2415 #[cfg(target_arch = "x86_64")] 2416 debug_console: DebugConsoleConfig::default(), 2417 devices: None, 2418 user_devices: None, 2419 vdpa: None, 2420 vsock: None, 2421 #[cfg(feature = "pvmemcontrol")] 2422 pvmemcontrol: None, 2423 pvpanic: false, 2424 iommu: false, 2425 #[cfg(target_arch = "x86_64")] 2426 sgx_epc: None, 2427 numa: None, 2428 watchdog: false, 2429 #[cfg(feature = "guest_debug")] 2430 gdb: false, 2431 pci_segments: None, 2432 platform: None, 2433 tpm: None, 2434 preserved_fds: None, 2435 landlock_enable: false, 2436 landlock_rules: None, 2437 }) 2438 } 2439 2440 #[test] 2441 fn test_vmm_vm_create() { 2442 let mut vmm = create_dummy_vmm(); 2443 let config = create_dummy_vm_config(); 2444 2445 assert!(matches!(vmm.vm_create(config.clone()), Ok(()))); 2446 assert!(matches!( 2447 vmm.vm_create(config), 2448 Err(VmError::VmAlreadyCreated) 2449 )); 2450 } 2451 2452 #[test] 2453 fn test_vmm_vm_cold_add_device() { 2454 let mut vmm = create_dummy_vmm(); 2455 let device_config = DeviceConfig::parse("path=/path/to/device").unwrap(); 2456 2457 assert!(matches!( 2458 vmm.vm_add_device(device_config.clone()), 2459 Err(VmError::VmNotCreated) 2460 )); 2461 2462 let _ = vmm.vm_create(create_dummy_vm_config()); 2463 assert!(vmm 2464 .vm_config 2465 .as_ref() 2466 .unwrap() 2467 .lock() 2468 .unwrap() 2469 .devices 2470 .is_none()); 2471 2472 assert!(vmm.vm_add_device(device_config.clone()).unwrap().is_none()); 2473 assert_eq!( 2474 vmm.vm_config 2475 .as_ref() 2476 .unwrap() 2477 .lock() 2478 .unwrap() 2479 .devices 2480 .clone() 2481 .unwrap() 2482 .len(), 2483 1 2484 ); 2485 assert_eq!( 2486 vmm.vm_config 2487 .as_ref() 2488 .unwrap() 2489 .lock() 2490 .unwrap() 2491 .devices 2492 .clone() 2493 .unwrap()[0], 2494 device_config 2495 ); 2496 } 2497 2498 #[test] 2499 fn test_vmm_vm_cold_add_user_device() { 2500 let mut vmm = create_dummy_vmm(); 2501 let user_device_config = 2502 UserDeviceConfig::parse("socket=/path/to/socket,id=8,pci_segment=2").unwrap(); 2503 2504 assert!(matches!( 2505 vmm.vm_add_user_device(user_device_config.clone()), 2506 Err(VmError::VmNotCreated) 2507 )); 2508 2509 let _ = vmm.vm_create(create_dummy_vm_config()); 2510 assert!(vmm 2511 .vm_config 2512 .as_ref() 2513 .unwrap() 2514 .lock() 2515 .unwrap() 2516 .user_devices 2517 .is_none()); 2518 2519 assert!(vmm 2520 .vm_add_user_device(user_device_config.clone()) 2521 .unwrap() 2522 .is_none()); 2523 assert_eq!( 2524 vmm.vm_config 2525 .as_ref() 2526 .unwrap() 2527 .lock() 2528 .unwrap() 2529 .user_devices 2530 .clone() 2531 .unwrap() 2532 .len(), 2533 1 2534 ); 2535 assert_eq!( 2536 vmm.vm_config 2537 .as_ref() 2538 .unwrap() 2539 .lock() 2540 .unwrap() 2541 .user_devices 2542 .clone() 2543 .unwrap()[0], 2544 user_device_config 2545 ); 2546 } 2547 2548 #[test] 2549 fn test_vmm_vm_cold_add_disk() { 2550 let mut vmm = create_dummy_vmm(); 2551 let disk_config = DiskConfig::parse("path=/path/to_file").unwrap(); 2552 2553 assert!(matches!( 2554 vmm.vm_add_disk(disk_config.clone()), 2555 Err(VmError::VmNotCreated) 2556 )); 2557 2558 let _ = vmm.vm_create(create_dummy_vm_config()); 2559 assert!(vmm 2560 .vm_config 2561 .as_ref() 2562 .unwrap() 2563 .lock() 2564 .unwrap() 2565 .disks 2566 .is_none()); 2567 2568 assert!(vmm.vm_add_disk(disk_config.clone()).unwrap().is_none()); 2569 assert_eq!( 2570 vmm.vm_config 2571 .as_ref() 2572 .unwrap() 2573 .lock() 2574 .unwrap() 2575 .disks 2576 .clone() 2577 .unwrap() 2578 .len(), 2579 1 2580 ); 2581 assert_eq!( 2582 vmm.vm_config 2583 .as_ref() 2584 .unwrap() 2585 .lock() 2586 .unwrap() 2587 .disks 2588 .clone() 2589 .unwrap()[0], 2590 disk_config 2591 ); 2592 } 2593 2594 #[test] 2595 fn test_vmm_vm_cold_add_fs() { 2596 let mut vmm = create_dummy_vmm(); 2597 let fs_config = FsConfig::parse("tag=mytag,socket=/tmp/sock").unwrap(); 2598 2599 assert!(matches!( 2600 vmm.vm_add_fs(fs_config.clone()), 2601 Err(VmError::VmNotCreated) 2602 )); 2603 2604 let _ = vmm.vm_create(create_dummy_vm_config()); 2605 assert!(vmm.vm_config.as_ref().unwrap().lock().unwrap().fs.is_none()); 2606 2607 assert!(vmm.vm_add_fs(fs_config.clone()).unwrap().is_none()); 2608 assert_eq!( 2609 vmm.vm_config 2610 .as_ref() 2611 .unwrap() 2612 .lock() 2613 .unwrap() 2614 .fs 2615 .clone() 2616 .unwrap() 2617 .len(), 2618 1 2619 ); 2620 assert_eq!( 2621 vmm.vm_config 2622 .as_ref() 2623 .unwrap() 2624 .lock() 2625 .unwrap() 2626 .fs 2627 .clone() 2628 .unwrap()[0], 2629 fs_config 2630 ); 2631 } 2632 2633 #[test] 2634 fn test_vmm_vm_cold_add_pmem() { 2635 let mut vmm = create_dummy_vmm(); 2636 let pmem_config = PmemConfig::parse("file=/tmp/pmem,size=128M").unwrap(); 2637 2638 assert!(matches!( 2639 vmm.vm_add_pmem(pmem_config.clone()), 2640 Err(VmError::VmNotCreated) 2641 )); 2642 2643 let _ = vmm.vm_create(create_dummy_vm_config()); 2644 assert!(vmm 2645 .vm_config 2646 .as_ref() 2647 .unwrap() 2648 .lock() 2649 .unwrap() 2650 .pmem 2651 .is_none()); 2652 2653 assert!(vmm.vm_add_pmem(pmem_config.clone()).unwrap().is_none()); 2654 assert_eq!( 2655 vmm.vm_config 2656 .as_ref() 2657 .unwrap() 2658 .lock() 2659 .unwrap() 2660 .pmem 2661 .clone() 2662 .unwrap() 2663 .len(), 2664 1 2665 ); 2666 assert_eq!( 2667 vmm.vm_config 2668 .as_ref() 2669 .unwrap() 2670 .lock() 2671 .unwrap() 2672 .pmem 2673 .clone() 2674 .unwrap()[0], 2675 pmem_config 2676 ); 2677 } 2678 2679 #[test] 2680 fn test_vmm_vm_cold_add_net() { 2681 let mut vmm = create_dummy_vmm(); 2682 let net_config = NetConfig::parse( 2683 "mac=de:ad:be:ef:12:34,host_mac=12:34:de:ad:be:ef,vhost_user=true,socket=/tmp/sock", 2684 ) 2685 .unwrap(); 2686 2687 assert!(matches!( 2688 vmm.vm_add_net(net_config.clone()), 2689 Err(VmError::VmNotCreated) 2690 )); 2691 2692 let _ = vmm.vm_create(create_dummy_vm_config()); 2693 assert!(vmm 2694 .vm_config 2695 .as_ref() 2696 .unwrap() 2697 .lock() 2698 .unwrap() 2699 .net 2700 .is_none()); 2701 2702 assert!(vmm.vm_add_net(net_config.clone()).unwrap().is_none()); 2703 assert_eq!( 2704 vmm.vm_config 2705 .as_ref() 2706 .unwrap() 2707 .lock() 2708 .unwrap() 2709 .net 2710 .clone() 2711 .unwrap() 2712 .len(), 2713 1 2714 ); 2715 assert_eq!( 2716 vmm.vm_config 2717 .as_ref() 2718 .unwrap() 2719 .lock() 2720 .unwrap() 2721 .net 2722 .clone() 2723 .unwrap()[0], 2724 net_config 2725 ); 2726 } 2727 2728 #[test] 2729 fn test_vmm_vm_cold_add_vdpa() { 2730 let mut vmm = create_dummy_vmm(); 2731 let vdpa_config = VdpaConfig::parse("path=/dev/vhost-vdpa,num_queues=2").unwrap(); 2732 2733 assert!(matches!( 2734 vmm.vm_add_vdpa(vdpa_config.clone()), 2735 Err(VmError::VmNotCreated) 2736 )); 2737 2738 let _ = vmm.vm_create(create_dummy_vm_config()); 2739 assert!(vmm 2740 .vm_config 2741 .as_ref() 2742 .unwrap() 2743 .lock() 2744 .unwrap() 2745 .vdpa 2746 .is_none()); 2747 2748 assert!(vmm.vm_add_vdpa(vdpa_config.clone()).unwrap().is_none()); 2749 assert_eq!( 2750 vmm.vm_config 2751 .as_ref() 2752 .unwrap() 2753 .lock() 2754 .unwrap() 2755 .vdpa 2756 .clone() 2757 .unwrap() 2758 .len(), 2759 1 2760 ); 2761 assert_eq!( 2762 vmm.vm_config 2763 .as_ref() 2764 .unwrap() 2765 .lock() 2766 .unwrap() 2767 .vdpa 2768 .clone() 2769 .unwrap()[0], 2770 vdpa_config 2771 ); 2772 } 2773 2774 #[test] 2775 fn test_vmm_vm_cold_add_vsock() { 2776 let mut vmm = create_dummy_vmm(); 2777 let vsock_config = VsockConfig::parse("socket=/tmp/sock,cid=3,iommu=on").unwrap(); 2778 2779 assert!(matches!( 2780 vmm.vm_add_vsock(vsock_config.clone()), 2781 Err(VmError::VmNotCreated) 2782 )); 2783 2784 let _ = vmm.vm_create(create_dummy_vm_config()); 2785 assert!(vmm 2786 .vm_config 2787 .as_ref() 2788 .unwrap() 2789 .lock() 2790 .unwrap() 2791 .vsock 2792 .is_none()); 2793 2794 assert!(vmm.vm_add_vsock(vsock_config.clone()).unwrap().is_none()); 2795 assert_eq!( 2796 vmm.vm_config 2797 .as_ref() 2798 .unwrap() 2799 .lock() 2800 .unwrap() 2801 .vsock 2802 .clone() 2803 .unwrap(), 2804 vsock_config 2805 ); 2806 } 2807 } 2808