1 // Copyright © 2019 Intel Corporation 2 // 3 // SPDX-License-Identifier: Apache-2.0 4 // 5 6 #[macro_use] 7 extern crate event_monitor; 8 #[macro_use] 9 extern crate log; 10 11 use crate::api::{ 12 ApiRequest, ApiResponse, RequestHandler, VmInfoResponse, VmReceiveMigrationData, 13 VmSendMigrationData, VmmPingResponse, 14 }; 15 #[cfg(target_arch = "x86_64")] 16 use crate::config::DebugConsoleConfig; 17 use crate::config::{ 18 add_to_config, ConsoleConfig, DeviceConfig, DiskConfig, FsConfig, LandlockConfig, 19 MemoryZoneConfig, NetConfig, PayloadConfig, PmemConfig, RestoreConfig, RngConfig, TpmConfig, 20 UserDeviceConfig, VdpaConfig, VmConfig, VsockConfig, 21 }; 22 #[cfg(all(target_arch = "x86_64", feature = "guest_debug"))] 23 use crate::coredump::GuestDebuggable; 24 use crate::landlock::Landlock; 25 use crate::memory_manager::MemoryManager; 26 #[cfg(all(feature = "kvm", target_arch = "x86_64"))] 27 use crate::migration::get_vm_snapshot; 28 use crate::migration::{recv_vm_config, recv_vm_state}; 29 use crate::seccomp_filters::{get_seccomp_filter, Thread}; 30 use crate::vm::{Error as VmError, Vm, VmState}; 31 use anyhow::anyhow; 32 #[cfg(feature = "dbus_api")] 33 use api::dbus::{DBusApiOptions, DBusApiShutdownChannels}; 34 use api::http::HttpApiHandle; 35 use console_devices::{pre_create_console_devices, ConsoleInfo}; 36 use landlock::LandlockError; 37 use libc::{tcsetattr, termios, EFD_NONBLOCK, SIGINT, SIGTERM, TCSANOW}; 38 use memory_manager::MemoryManagerSnapshotData; 39 use pci::PciBdf; 40 use seccompiler::{apply_filter, SeccompAction}; 41 use serde::ser::{SerializeStruct, Serializer}; 42 use serde::{Deserialize, Serialize}; 43 use signal_hook::iterator::{Handle, Signals}; 44 use std::collections::HashMap; 45 use std::fs; 46 use std::fs::File; 47 use std::io; 48 use std::io::{stdout, Read, Write}; 49 use std::os::unix::io::{AsRawFd, FromRawFd, RawFd}; 50 use std::os::unix::net::UnixListener; 51 use std::os::unix::net::UnixStream; 52 use std::panic::AssertUnwindSafe; 53 use std::path::PathBuf; 54 use std::rc::Rc; 55 use std::sync::mpsc::{Receiver, RecvError, SendError, Sender}; 56 use std::sync::{Arc, Mutex}; 57 use std::time::Instant; 58 use std::{result, thread}; 59 use thiserror::Error; 60 use tracer::trace_scoped; 61 use vm_memory::bitmap::AtomicBitmap; 62 use vm_memory::{ReadVolatile, WriteVolatile}; 63 use vm_migration::{protocol::*, Migratable}; 64 use vm_migration::{MigratableError, Pausable, Snapshot, Snapshottable, Transportable}; 65 use vmm_sys_util::eventfd::EventFd; 66 use vmm_sys_util::signal::unblock_signal; 67 use vmm_sys_util::sock_ctrl_msg::ScmSocket; 68 69 mod acpi; 70 pub mod api; 71 mod clone3; 72 pub mod config; 73 pub mod console_devices; 74 #[cfg(all(target_arch = "x86_64", feature = "guest_debug"))] 75 mod coredump; 76 pub mod cpu; 77 pub mod device_manager; 78 pub mod device_tree; 79 #[cfg(feature = "guest_debug")] 80 mod gdb; 81 #[cfg(feature = "igvm")] 82 mod igvm; 83 pub mod interrupt; 84 pub mod landlock; 85 pub mod memory_manager; 86 pub mod migration; 87 mod pci_segment; 88 pub mod seccomp_filters; 89 mod serial_manager; 90 mod sigwinch_listener; 91 pub mod vm; 92 pub mod vm_config; 93 94 type GuestMemoryMmap = vm_memory::GuestMemoryMmap<AtomicBitmap>; 95 type GuestRegionMmap = vm_memory::GuestRegionMmap<AtomicBitmap>; 96 97 /// Errors associated with VMM management 98 #[derive(Debug, Error)] 99 pub enum Error { 100 /// API request receive error 101 #[error("Error receiving API request: {0}")] 102 ApiRequestRecv(#[source] RecvError), 103 104 /// API response send error 105 #[error("Error sending API request: {0}")] 106 ApiResponseSend(#[source] SendError<ApiResponse>), 107 108 /// Cannot bind to the UNIX domain socket path 109 #[error("Error binding to UNIX domain socket: {0}")] 110 Bind(#[source] io::Error), 111 112 /// Cannot clone EventFd. 113 #[error("Error cloning EventFd: {0}")] 114 EventFdClone(#[source] io::Error), 115 116 /// Cannot create EventFd. 117 #[error("Error creating EventFd: {0}")] 118 EventFdCreate(#[source] io::Error), 119 120 /// Cannot read from EventFd. 121 #[error("Error reading from EventFd: {0}")] 122 EventFdRead(#[source] io::Error), 123 124 /// Cannot create epoll context. 125 #[error("Error creating epoll context: {0}")] 126 Epoll(#[source] io::Error), 127 128 /// Cannot create HTTP thread 129 #[error("Error spawning HTTP thread: {0}")] 130 HttpThreadSpawn(#[source] io::Error), 131 132 /// Cannot create D-Bus thread 133 #[cfg(feature = "dbus_api")] 134 #[error("Error spawning D-Bus thread: {0}")] 135 DBusThreadSpawn(#[source] io::Error), 136 137 /// Cannot start D-Bus session 138 #[cfg(feature = "dbus_api")] 139 #[error("Error starting D-Bus session: {0}")] 140 CreateDBusSession(#[source] zbus::Error), 141 142 /// Cannot create `event-monitor` thread 143 #[error("Error spawning `event-monitor` thread: {0}")] 144 EventMonitorThreadSpawn(#[source] io::Error), 145 146 /// Cannot handle the VM STDIN stream 147 #[error("Error handling VM stdin: {0:?}")] 148 Stdin(VmError), 149 150 /// Cannot handle the VM pty stream 151 #[error("Error handling VM pty: {0:?}")] 152 Pty(VmError), 153 154 /// Cannot reboot the VM 155 #[error("Error rebooting VM: {0:?}")] 156 VmReboot(VmError), 157 158 /// Cannot create VMM thread 159 #[error("Error spawning VMM thread {0:?}")] 160 VmmThreadSpawn(#[source] io::Error), 161 162 /// Cannot shut the VMM down 163 #[error("Error shutting down VMM: {0:?}")] 164 VmmShutdown(VmError), 165 166 /// Cannot create seccomp filter 167 #[error("Error creating seccomp filter: {0}")] 168 CreateSeccompFilter(seccompiler::Error), 169 170 /// Cannot apply seccomp filter 171 #[error("Error applying seccomp filter: {0}")] 172 ApplySeccompFilter(seccompiler::Error), 173 174 /// Error activating virtio devices 175 #[error("Error activating virtio devices: {0:?}")] 176 ActivateVirtioDevices(VmError), 177 178 /// Error creating API server 179 #[error("Error creating API server {0:?}")] 180 CreateApiServer(micro_http::ServerError), 181 182 /// Error binding API server socket 183 #[error("Error creation API server's socket {0:?}")] 184 CreateApiServerSocket(#[source] io::Error), 185 186 #[cfg(feature = "guest_debug")] 187 #[error("Failed to start the GDB thread: {0}")] 188 GdbThreadSpawn(io::Error), 189 190 /// GDB request receive error 191 #[cfg(feature = "guest_debug")] 192 #[error("Error receiving GDB request: {0}")] 193 GdbRequestRecv(#[source] RecvError), 194 195 /// GDB response send error 196 #[cfg(feature = "guest_debug")] 197 #[error("Error sending GDB request: {0}")] 198 GdbResponseSend(#[source] SendError<gdb::GdbResponse>), 199 200 #[error("Cannot spawn a signal handler thread: {0}")] 201 SignalHandlerSpawn(#[source] io::Error), 202 203 #[error("Failed to join on threads: {0:?}")] 204 ThreadCleanup(std::boxed::Box<dyn std::any::Any + std::marker::Send>), 205 206 /// Cannot create Landlock object 207 #[error("Error creating landlock object: {0}")] 208 CreateLandlock(LandlockError), 209 210 /// Cannot apply landlock based sandboxing 211 #[error("Error applying landlock: {0}")] 212 ApplyLandlock(LandlockError), 213 } 214 pub type Result<T> = result::Result<T, Error>; 215 216 #[derive(Debug, Clone, Copy, PartialEq, Eq)] 217 #[repr(u64)] 218 pub enum EpollDispatch { 219 Exit = 0, 220 Reset = 1, 221 Api = 2, 222 ActivateVirtioDevices = 3, 223 Debug = 4, 224 Unknown, 225 } 226 227 impl From<u64> for EpollDispatch { 228 fn from(v: u64) -> Self { 229 use EpollDispatch::*; 230 match v { 231 0 => Exit, 232 1 => Reset, 233 2 => Api, 234 3 => ActivateVirtioDevices, 235 4 => Debug, 236 _ => Unknown, 237 } 238 } 239 } 240 241 pub struct EpollContext { 242 epoll_file: File, 243 } 244 245 impl EpollContext { 246 pub fn new() -> result::Result<EpollContext, io::Error> { 247 let epoll_fd = epoll::create(true)?; 248 // Use 'File' to enforce closing on 'epoll_fd' 249 // SAFETY: the epoll_fd returned by epoll::create is valid and owned by us. 250 let epoll_file = unsafe { File::from_raw_fd(epoll_fd) }; 251 252 Ok(EpollContext { epoll_file }) 253 } 254 255 pub fn add_event<T>(&mut self, fd: &T, token: EpollDispatch) -> result::Result<(), io::Error> 256 where 257 T: AsRawFd, 258 { 259 let dispatch_index = token as u64; 260 epoll::ctl( 261 self.epoll_file.as_raw_fd(), 262 epoll::ControlOptions::EPOLL_CTL_ADD, 263 fd.as_raw_fd(), 264 epoll::Event::new(epoll::Events::EPOLLIN, dispatch_index), 265 )?; 266 267 Ok(()) 268 } 269 270 #[cfg(fuzzing)] 271 pub fn add_event_custom<T>( 272 &mut self, 273 fd: &T, 274 id: u64, 275 evts: epoll::Events, 276 ) -> result::Result<(), io::Error> 277 where 278 T: AsRawFd, 279 { 280 epoll::ctl( 281 self.epoll_file.as_raw_fd(), 282 epoll::ControlOptions::EPOLL_CTL_ADD, 283 fd.as_raw_fd(), 284 epoll::Event::new(evts, id), 285 )?; 286 287 Ok(()) 288 } 289 } 290 291 impl AsRawFd for EpollContext { 292 fn as_raw_fd(&self) -> RawFd { 293 self.epoll_file.as_raw_fd() 294 } 295 } 296 297 pub struct PciDeviceInfo { 298 pub id: String, 299 pub bdf: PciBdf, 300 } 301 302 impl Serialize for PciDeviceInfo { 303 fn serialize<S>(&self, serializer: S) -> std::result::Result<S::Ok, S::Error> 304 where 305 S: Serializer, 306 { 307 let bdf_str = self.bdf.to_string(); 308 309 // Serialize the structure. 310 let mut state = serializer.serialize_struct("PciDeviceInfo", 2)?; 311 state.serialize_field("id", &self.id)?; 312 state.serialize_field("bdf", &bdf_str)?; 313 state.end() 314 } 315 } 316 317 pub fn feature_list() -> Vec<String> { 318 vec![ 319 #[cfg(feature = "dbus_api")] 320 "dbus_api".to_string(), 321 #[cfg(feature = "dhat-heap")] 322 "dhat-heap".to_string(), 323 #[cfg(feature = "guest_debug")] 324 "guest_debug".to_string(), 325 #[cfg(feature = "igvm")] 326 "igvm".to_string(), 327 #[cfg(feature = "io_uring")] 328 "io_uring".to_string(), 329 #[cfg(feature = "kvm")] 330 "kvm".to_string(), 331 #[cfg(feature = "mshv")] 332 "mshv".to_string(), 333 #[cfg(feature = "sev_snp")] 334 "sev_snp".to_string(), 335 #[cfg(feature = "tdx")] 336 "tdx".to_string(), 337 #[cfg(feature = "tracing")] 338 "tracing".to_string(), 339 ] 340 } 341 342 pub fn start_event_monitor_thread( 343 mut monitor: event_monitor::Monitor, 344 seccomp_action: &SeccompAction, 345 landlock_enable: bool, 346 hypervisor_type: hypervisor::HypervisorType, 347 exit_event: EventFd, 348 ) -> Result<thread::JoinHandle<Result<()>>> { 349 // Retrieve seccomp filter 350 let seccomp_filter = get_seccomp_filter(seccomp_action, Thread::EventMonitor, hypervisor_type) 351 .map_err(Error::CreateSeccompFilter)?; 352 353 thread::Builder::new() 354 .name("event-monitor".to_owned()) 355 .spawn(move || { 356 // Apply seccomp filter 357 if !seccomp_filter.is_empty() { 358 apply_filter(&seccomp_filter) 359 .map_err(Error::ApplySeccompFilter) 360 .map_err(|e| { 361 error!("Error applying seccomp filter: {:?}", e); 362 exit_event.write(1).ok(); 363 e 364 })?; 365 } 366 if landlock_enable { 367 Landlock::new() 368 .map_err(Error::CreateLandlock)? 369 .restrict_self() 370 .map_err(Error::ApplyLandlock) 371 .map_err(|e| { 372 error!("Error applying landlock to event monitor thread: {:?}", e); 373 exit_event.write(1).ok(); 374 e 375 })?; 376 } 377 378 std::panic::catch_unwind(AssertUnwindSafe(move || { 379 while let Ok(event) = monitor.rx.recv() { 380 let event = Arc::new(event); 381 382 if let Some(ref mut file) = monitor.file { 383 file.write_all(event.as_bytes().as_ref()).ok(); 384 file.write_all(b"\n\n").ok(); 385 } 386 387 for tx in monitor.broadcast.iter() { 388 tx.send(event.clone()).ok(); 389 } 390 } 391 })) 392 .map_err(|_| { 393 error!("`event-monitor` thread panicked"); 394 exit_event.write(1).ok(); 395 }) 396 .ok(); 397 398 Ok(()) 399 }) 400 .map_err(Error::EventMonitorThreadSpawn) 401 } 402 403 #[allow(unused_variables)] 404 #[allow(clippy::too_many_arguments)] 405 pub fn start_vmm_thread( 406 vmm_version: VmmVersionInfo, 407 http_path: &Option<String>, 408 http_fd: Option<RawFd>, 409 #[cfg(feature = "dbus_api")] dbus_options: Option<DBusApiOptions>, 410 api_event: EventFd, 411 api_sender: Sender<ApiRequest>, 412 api_receiver: Receiver<ApiRequest>, 413 #[cfg(feature = "guest_debug")] debug_path: Option<PathBuf>, 414 #[cfg(feature = "guest_debug")] debug_event: EventFd, 415 #[cfg(feature = "guest_debug")] vm_debug_event: EventFd, 416 exit_event: EventFd, 417 seccomp_action: &SeccompAction, 418 hypervisor: Arc<dyn hypervisor::Hypervisor>, 419 landlock_enable: bool, 420 landlock_config: Option<Vec<LandlockConfig>>, 421 ) -> Result<VmmThreadHandle> { 422 #[cfg(feature = "guest_debug")] 423 let gdb_hw_breakpoints = hypervisor.get_guest_debug_hw_bps(); 424 #[cfg(feature = "guest_debug")] 425 let (gdb_sender, gdb_receiver) = std::sync::mpsc::channel(); 426 #[cfg(feature = "guest_debug")] 427 let gdb_debug_event = debug_event.try_clone().map_err(Error::EventFdClone)?; 428 #[cfg(feature = "guest_debug")] 429 let gdb_vm_debug_event = vm_debug_event.try_clone().map_err(Error::EventFdClone)?; 430 431 let api_event_clone = api_event.try_clone().map_err(Error::EventFdClone)?; 432 let hypervisor_type = hypervisor.hypervisor_type(); 433 434 // Retrieve seccomp filter 435 let vmm_seccomp_filter = get_seccomp_filter(seccomp_action, Thread::Vmm, hypervisor_type) 436 .map_err(Error::CreateSeccompFilter)?; 437 438 let vmm_seccomp_action = seccomp_action.clone(); 439 let thread = { 440 let exit_event = exit_event.try_clone().map_err(Error::EventFdClone)?; 441 thread::Builder::new() 442 .name("vmm".to_string()) 443 .spawn(move || { 444 // Apply seccomp filter for VMM thread. 445 if !vmm_seccomp_filter.is_empty() { 446 apply_filter(&vmm_seccomp_filter).map_err(Error::ApplySeccompFilter)?; 447 } 448 449 let mut vmm = Vmm::new( 450 vmm_version, 451 api_event, 452 #[cfg(feature = "guest_debug")] 453 debug_event, 454 #[cfg(feature = "guest_debug")] 455 vm_debug_event, 456 vmm_seccomp_action, 457 hypervisor, 458 exit_event, 459 landlock_enable, 460 landlock_config, 461 )?; 462 463 vmm.setup_signal_handler(landlock_enable)?; 464 465 vmm.control_loop( 466 Rc::new(api_receiver), 467 #[cfg(feature = "guest_debug")] 468 Rc::new(gdb_receiver), 469 ) 470 }) 471 .map_err(Error::VmmThreadSpawn)? 472 }; 473 474 // The VMM thread is started, we can start the dbus thread 475 // and start serving HTTP requests 476 #[cfg(feature = "dbus_api")] 477 let dbus_shutdown_chs = match dbus_options { 478 Some(opts) => { 479 let (_, chs) = api::start_dbus_thread( 480 opts, 481 api_event_clone.try_clone().map_err(Error::EventFdClone)?, 482 api_sender.clone(), 483 seccomp_action, 484 exit_event.try_clone().map_err(Error::EventFdClone)?, 485 hypervisor_type, 486 )?; 487 Some(chs) 488 } 489 None => None, 490 }; 491 492 let http_api_handle = if let Some(http_path) = http_path { 493 Some(api::start_http_path_thread( 494 http_path, 495 api_event_clone, 496 api_sender, 497 seccomp_action, 498 exit_event, 499 hypervisor_type, 500 landlock_enable, 501 )?) 502 } else if let Some(http_fd) = http_fd { 503 Some(api::start_http_fd_thread( 504 http_fd, 505 api_event_clone, 506 api_sender, 507 seccomp_action, 508 exit_event, 509 hypervisor_type, 510 landlock_enable, 511 )?) 512 } else { 513 None 514 }; 515 516 #[cfg(feature = "guest_debug")] 517 if let Some(debug_path) = debug_path { 518 let target = gdb::GdbStub::new( 519 gdb_sender, 520 gdb_debug_event, 521 gdb_vm_debug_event, 522 gdb_hw_breakpoints, 523 ); 524 thread::Builder::new() 525 .name("gdb".to_owned()) 526 .spawn(move || gdb::gdb_thread(target, &debug_path)) 527 .map_err(Error::GdbThreadSpawn)?; 528 } 529 530 Ok(VmmThreadHandle { 531 thread_handle: thread, 532 #[cfg(feature = "dbus_api")] 533 dbus_shutdown_chs, 534 http_api_handle, 535 }) 536 } 537 538 #[derive(Clone, Deserialize, Serialize)] 539 struct VmMigrationConfig { 540 vm_config: Arc<Mutex<VmConfig>>, 541 #[cfg(all(feature = "kvm", target_arch = "x86_64"))] 542 common_cpuid: Vec<hypervisor::arch::x86::CpuIdEntry>, 543 memory_manager_data: MemoryManagerSnapshotData, 544 } 545 546 #[derive(Debug, Clone)] 547 pub struct VmmVersionInfo { 548 pub build_version: String, 549 pub version: String, 550 } 551 552 impl VmmVersionInfo { 553 pub fn new(build_version: &str, version: &str) -> Self { 554 Self { 555 build_version: build_version.to_owned(), 556 version: version.to_owned(), 557 } 558 } 559 } 560 561 pub struct VmmThreadHandle { 562 pub thread_handle: thread::JoinHandle<Result<()>>, 563 #[cfg(feature = "dbus_api")] 564 pub dbus_shutdown_chs: Option<DBusApiShutdownChannels>, 565 pub http_api_handle: Option<HttpApiHandle>, 566 } 567 568 pub struct Vmm { 569 epoll: EpollContext, 570 exit_evt: EventFd, 571 reset_evt: EventFd, 572 api_evt: EventFd, 573 #[cfg(feature = "guest_debug")] 574 debug_evt: EventFd, 575 #[cfg(feature = "guest_debug")] 576 vm_debug_evt: EventFd, 577 version: VmmVersionInfo, 578 vm: Option<Vm>, 579 vm_config: Option<Arc<Mutex<VmConfig>>>, 580 seccomp_action: SeccompAction, 581 hypervisor: Arc<dyn hypervisor::Hypervisor>, 582 activate_evt: EventFd, 583 signals: Option<Handle>, 584 threads: Vec<thread::JoinHandle<()>>, 585 original_termios_opt: Arc<Mutex<Option<termios>>>, 586 console_resize_pipe: Option<File>, 587 console_info: Option<ConsoleInfo>, 588 landlock_enable: bool, 589 landlock_config: Option<Vec<LandlockConfig>>, 590 } 591 592 impl Vmm { 593 pub const HANDLED_SIGNALS: [i32; 2] = [SIGTERM, SIGINT]; 594 595 fn signal_handler( 596 mut signals: Signals, 597 original_termios_opt: Arc<Mutex<Option<termios>>>, 598 exit_evt: &EventFd, 599 ) { 600 for sig in &Self::HANDLED_SIGNALS { 601 unblock_signal(*sig).unwrap(); 602 } 603 604 for signal in signals.forever() { 605 match signal { 606 SIGTERM | SIGINT => { 607 if exit_evt.write(1).is_err() { 608 // Resetting the terminal is usually done as the VMM exits 609 if let Ok(lock) = original_termios_opt.lock() { 610 if let Some(termios) = *lock { 611 // SAFETY: FFI call 612 let _ = unsafe { 613 tcsetattr(stdout().lock().as_raw_fd(), TCSANOW, &termios) 614 }; 615 } 616 } else { 617 warn!("Failed to lock original termios"); 618 } 619 620 std::process::exit(1); 621 } 622 } 623 _ => (), 624 } 625 } 626 } 627 628 fn setup_signal_handler(&mut self, landlock_enable: bool) -> Result<()> { 629 let signals = Signals::new(Self::HANDLED_SIGNALS); 630 match signals { 631 Ok(signals) => { 632 self.signals = Some(signals.handle()); 633 let exit_evt = self.exit_evt.try_clone().map_err(Error::EventFdClone)?; 634 let original_termios_opt = Arc::clone(&self.original_termios_opt); 635 636 let signal_handler_seccomp_filter = get_seccomp_filter( 637 &self.seccomp_action, 638 Thread::SignalHandler, 639 self.hypervisor.hypervisor_type(), 640 ) 641 .map_err(Error::CreateSeccompFilter)?; 642 self.threads.push( 643 thread::Builder::new() 644 .name("vmm_signal_handler".to_string()) 645 .spawn(move || { 646 if !signal_handler_seccomp_filter.is_empty() { 647 if let Err(e) = apply_filter(&signal_handler_seccomp_filter) 648 .map_err(Error::ApplySeccompFilter) 649 { 650 error!("Error applying seccomp filter: {:?}", e); 651 exit_evt.write(1).ok(); 652 return; 653 } 654 } 655 if landlock_enable{ 656 match Landlock::new() { 657 Ok(landlock) => { 658 let _ = landlock.restrict_self().map_err(Error::ApplyLandlock).map_err(|e| { 659 error!("Error applying Landlock to signal handler thread: {:?}", e); 660 exit_evt.write(1).ok(); 661 }); 662 } 663 Err(e) => { 664 error!("Error creating Landlock object: {:?}", e); 665 exit_evt.write(1).ok(); 666 } 667 }; 668 } 669 670 std::panic::catch_unwind(AssertUnwindSafe(|| { 671 Vmm::signal_handler(signals, original_termios_opt, &exit_evt); 672 })) 673 .map_err(|_| { 674 error!("vmm signal_handler thread panicked"); 675 exit_evt.write(1).ok() 676 }) 677 .ok(); 678 }) 679 .map_err(Error::SignalHandlerSpawn)?, 680 ); 681 } 682 Err(e) => error!("Signal not found {}", e), 683 } 684 Ok(()) 685 } 686 687 #[allow(clippy::too_many_arguments)] 688 fn new( 689 vmm_version: VmmVersionInfo, 690 api_evt: EventFd, 691 #[cfg(feature = "guest_debug")] debug_evt: EventFd, 692 #[cfg(feature = "guest_debug")] vm_debug_evt: EventFd, 693 seccomp_action: SeccompAction, 694 hypervisor: Arc<dyn hypervisor::Hypervisor>, 695 exit_evt: EventFd, 696 landlock_enable: bool, 697 landlock_config: Option<Vec<LandlockConfig>>, 698 ) -> Result<Self> { 699 let mut epoll = EpollContext::new().map_err(Error::Epoll)?; 700 let reset_evt = EventFd::new(EFD_NONBLOCK).map_err(Error::EventFdCreate)?; 701 let activate_evt = EventFd::new(EFD_NONBLOCK).map_err(Error::EventFdCreate)?; 702 703 epoll 704 .add_event(&exit_evt, EpollDispatch::Exit) 705 .map_err(Error::Epoll)?; 706 707 epoll 708 .add_event(&reset_evt, EpollDispatch::Reset) 709 .map_err(Error::Epoll)?; 710 711 epoll 712 .add_event(&activate_evt, EpollDispatch::ActivateVirtioDevices) 713 .map_err(Error::Epoll)?; 714 715 epoll 716 .add_event(&api_evt, EpollDispatch::Api) 717 .map_err(Error::Epoll)?; 718 719 #[cfg(feature = "guest_debug")] 720 epoll 721 .add_event(&debug_evt, EpollDispatch::Debug) 722 .map_err(Error::Epoll)?; 723 724 Ok(Vmm { 725 epoll, 726 exit_evt, 727 reset_evt, 728 api_evt, 729 #[cfg(feature = "guest_debug")] 730 debug_evt, 731 #[cfg(feature = "guest_debug")] 732 vm_debug_evt, 733 version: vmm_version, 734 vm: None, 735 vm_config: None, 736 seccomp_action, 737 hypervisor, 738 activate_evt, 739 signals: None, 740 threads: vec![], 741 original_termios_opt: Arc::new(Mutex::new(None)), 742 console_resize_pipe: None, 743 console_info: None, 744 landlock_enable, 745 landlock_config, 746 }) 747 } 748 749 fn vm_receive_config<T>( 750 &mut self, 751 req: &Request, 752 socket: &mut T, 753 existing_memory_files: Option<HashMap<u32, File>>, 754 ) -> std::result::Result<Arc<Mutex<MemoryManager>>, MigratableError> 755 where 756 T: Read + Write, 757 { 758 // Read in config data along with memory manager data 759 let mut data: Vec<u8> = Vec::new(); 760 data.resize_with(req.length() as usize, Default::default); 761 socket 762 .read_exact(&mut data) 763 .map_err(MigratableError::MigrateSocket)?; 764 765 let vm_migration_config: VmMigrationConfig = 766 serde_json::from_slice(&data).map_err(|e| { 767 MigratableError::MigrateReceive(anyhow!("Error deserialising config: {}", e)) 768 })?; 769 770 #[cfg(all(feature = "kvm", target_arch = "x86_64"))] 771 self.vm_check_cpuid_compatibility( 772 &vm_migration_config.vm_config, 773 &vm_migration_config.common_cpuid, 774 )?; 775 776 let config = vm_migration_config.vm_config.clone(); 777 self.vm_config = Some(vm_migration_config.vm_config); 778 self.console_info = Some(pre_create_console_devices(self).map_err(|e| { 779 MigratableError::MigrateReceive(anyhow!("Error creating console devices: {:?}", e)) 780 })?); 781 782 if self.landlock_enable { 783 apply_landlock( 784 &self.vm_config.as_ref().unwrap().clone(), 785 &self.landlock_config, 786 ) 787 .map_err(|e| { 788 MigratableError::MigrateReceive(anyhow!("Error applying landlock: {:?}", e)) 789 })?; 790 } 791 792 let vm = Vm::create_hypervisor_vm( 793 &self.hypervisor, 794 #[cfg(feature = "tdx")] 795 false, 796 #[cfg(feature = "sev_snp")] 797 false, 798 ) 799 .map_err(|e| { 800 MigratableError::MigrateReceive(anyhow!( 801 "Error creating hypervisor VM from snapshot: {:?}", 802 e 803 )) 804 })?; 805 806 let phys_bits = 807 vm::physical_bits(&self.hypervisor, config.lock().unwrap().cpus.max_phys_bits); 808 809 let memory_manager = MemoryManager::new( 810 vm, 811 &config.lock().unwrap().memory.clone(), 812 None, 813 phys_bits, 814 #[cfg(feature = "tdx")] 815 false, 816 Some(&vm_migration_config.memory_manager_data), 817 existing_memory_files, 818 #[cfg(target_arch = "x86_64")] 819 None, 820 ) 821 .map_err(|e| { 822 MigratableError::MigrateReceive(anyhow!( 823 "Error creating MemoryManager from snapshot: {:?}", 824 e 825 )) 826 })?; 827 828 Response::ok().write_to(socket)?; 829 830 Ok(memory_manager) 831 } 832 833 fn vm_receive_state<T>( 834 &mut self, 835 req: &Request, 836 socket: &mut T, 837 mm: Arc<Mutex<MemoryManager>>, 838 ) -> std::result::Result<(), MigratableError> 839 where 840 T: Read + Write, 841 { 842 // Read in state data 843 let mut data: Vec<u8> = Vec::new(); 844 data.resize_with(req.length() as usize, Default::default); 845 socket 846 .read_exact(&mut data) 847 .map_err(MigratableError::MigrateSocket)?; 848 let snapshot: Snapshot = serde_json::from_slice(&data).map_err(|e| { 849 MigratableError::MigrateReceive(anyhow!("Error deserialising snapshot: {}", e)) 850 })?; 851 852 let exit_evt = self.exit_evt.try_clone().map_err(|e| { 853 MigratableError::MigrateReceive(anyhow!("Error cloning exit EventFd: {}", e)) 854 })?; 855 let reset_evt = self.reset_evt.try_clone().map_err(|e| { 856 MigratableError::MigrateReceive(anyhow!("Error cloning reset EventFd: {}", e)) 857 })?; 858 #[cfg(feature = "guest_debug")] 859 let debug_evt = self.vm_debug_evt.try_clone().map_err(|e| { 860 MigratableError::MigrateReceive(anyhow!("Error cloning debug EventFd: {}", e)) 861 })?; 862 let activate_evt = self.activate_evt.try_clone().map_err(|e| { 863 MigratableError::MigrateReceive(anyhow!("Error cloning activate EventFd: {}", e)) 864 })?; 865 866 let timestamp = Instant::now(); 867 let hypervisor_vm = mm.lock().unwrap().vm.clone(); 868 let mut vm = Vm::new_from_memory_manager( 869 self.vm_config.clone().unwrap(), 870 mm, 871 hypervisor_vm, 872 exit_evt, 873 reset_evt, 874 #[cfg(feature = "guest_debug")] 875 debug_evt, 876 &self.seccomp_action, 877 self.hypervisor.clone(), 878 activate_evt, 879 timestamp, 880 self.console_info.clone(), 881 None, 882 Arc::clone(&self.original_termios_opt), 883 Some(snapshot), 884 ) 885 .map_err(|e| { 886 MigratableError::MigrateReceive(anyhow!("Error creating VM from snapshot: {:?}", e)) 887 })?; 888 889 // Create VM 890 vm.restore().map_err(|e| { 891 Response::error().write_to(socket).ok(); 892 MigratableError::MigrateReceive(anyhow!("Failed restoring the Vm: {}", e)) 893 })?; 894 self.vm = Some(vm); 895 896 Response::ok().write_to(socket)?; 897 898 Ok(()) 899 } 900 901 fn vm_receive_memory<T>( 902 &mut self, 903 req: &Request, 904 socket: &mut T, 905 memory_manager: &mut MemoryManager, 906 ) -> std::result::Result<(), MigratableError> 907 where 908 T: Read + ReadVolatile + Write, 909 { 910 // Read table 911 let table = MemoryRangeTable::read_from(socket, req.length())?; 912 913 // And then read the memory itself 914 memory_manager 915 .receive_memory_regions(&table, socket) 916 .map_err(|e| { 917 Response::error().write_to(socket).ok(); 918 e 919 })?; 920 Response::ok().write_to(socket)?; 921 Ok(()) 922 } 923 924 fn socket_url_to_path(url: &str) -> result::Result<PathBuf, MigratableError> { 925 url.strip_prefix("unix:") 926 .ok_or_else(|| { 927 MigratableError::MigrateSend(anyhow!("Could not extract path from URL: {}", url)) 928 }) 929 .map(|s| s.into()) 930 } 931 932 // Returns true if there were dirty pages to send 933 fn vm_maybe_send_dirty_pages<T>( 934 vm: &mut Vm, 935 socket: &mut T, 936 ) -> result::Result<bool, MigratableError> 937 where 938 T: Read + Write + WriteVolatile, 939 { 940 // Send (dirty) memory table 941 let table = vm.dirty_log()?; 942 943 // But if there are no regions go straight to pause 944 if table.regions().is_empty() { 945 return Ok(false); 946 } 947 948 Request::memory(table.length()).write_to(socket).unwrap(); 949 table.write_to(socket)?; 950 // And then the memory itself 951 vm.send_memory_regions(&table, socket)?; 952 Response::read_from(socket)?.ok_or_abandon( 953 socket, 954 MigratableError::MigrateSend(anyhow!("Error during dirty memory migration")), 955 )?; 956 957 Ok(true) 958 } 959 960 fn send_migration( 961 vm: &mut Vm, 962 #[cfg(all(feature = "kvm", target_arch = "x86_64"))] hypervisor: Arc< 963 dyn hypervisor::Hypervisor, 964 >, 965 send_data_migration: VmSendMigrationData, 966 ) -> result::Result<(), MigratableError> { 967 let path = Self::socket_url_to_path(&send_data_migration.destination_url)?; 968 let mut socket = UnixStream::connect(path).map_err(|e| { 969 MigratableError::MigrateSend(anyhow!("Error connecting to UNIX socket: {}", e)) 970 })?; 971 972 // Start the migration 973 Request::start().write_to(&mut socket)?; 974 Response::read_from(&mut socket)?.ok_or_abandon( 975 &mut socket, 976 MigratableError::MigrateSend(anyhow!("Error starting migration")), 977 )?; 978 979 // Send config 980 let vm_config = vm.get_config(); 981 #[cfg(all(feature = "kvm", target_arch = "x86_64"))] 982 let common_cpuid = { 983 #[cfg(feature = "tdx")] 984 if vm_config.lock().unwrap().is_tdx_enabled() { 985 return Err(MigratableError::MigrateSend(anyhow!( 986 "Live Migration is not supported when TDX is enabled" 987 ))); 988 }; 989 990 let amx = vm_config.lock().unwrap().cpus.features.amx; 991 let phys_bits = 992 vm::physical_bits(&hypervisor, vm_config.lock().unwrap().cpus.max_phys_bits); 993 arch::generate_common_cpuid( 994 &hypervisor, 995 &arch::CpuidConfig { 996 sgx_epc_sections: None, 997 phys_bits, 998 kvm_hyperv: vm_config.lock().unwrap().cpus.kvm_hyperv, 999 #[cfg(feature = "tdx")] 1000 tdx: false, 1001 amx, 1002 }, 1003 ) 1004 .map_err(|e| { 1005 MigratableError::MigrateSend(anyhow!("Error generating common cpuid': {:?}", e)) 1006 })? 1007 }; 1008 1009 if send_data_migration.local { 1010 vm.send_memory_fds(&mut socket)?; 1011 } 1012 1013 let vm_migration_config = VmMigrationConfig { 1014 vm_config, 1015 #[cfg(all(feature = "kvm", target_arch = "x86_64"))] 1016 common_cpuid, 1017 memory_manager_data: vm.memory_manager_data(), 1018 }; 1019 let config_data = serde_json::to_vec(&vm_migration_config).unwrap(); 1020 Request::config(config_data.len() as u64).write_to(&mut socket)?; 1021 socket 1022 .write_all(&config_data) 1023 .map_err(MigratableError::MigrateSocket)?; 1024 Response::read_from(&mut socket)?.ok_or_abandon( 1025 &mut socket, 1026 MigratableError::MigrateSend(anyhow!("Error during config migration")), 1027 )?; 1028 1029 // Let every Migratable object know about the migration being started. 1030 vm.start_migration()?; 1031 1032 if send_data_migration.local { 1033 // Now pause VM 1034 vm.pause()?; 1035 } else { 1036 // Start logging dirty pages 1037 vm.start_dirty_log()?; 1038 1039 // Send memory table 1040 let table = vm.memory_range_table()?; 1041 Request::memory(table.length()) 1042 .write_to(&mut socket) 1043 .unwrap(); 1044 table.write_to(&mut socket)?; 1045 // And then the memory itself 1046 vm.send_memory_regions(&table, &mut socket)?; 1047 Response::read_from(&mut socket)?.ok_or_abandon( 1048 &mut socket, 1049 MigratableError::MigrateSend(anyhow!("Error during dirty memory migration")), 1050 )?; 1051 1052 // Try at most 5 passes of dirty memory sending 1053 const MAX_DIRTY_MIGRATIONS: usize = 5; 1054 for i in 0..MAX_DIRTY_MIGRATIONS { 1055 info!("Dirty memory migration {} of {}", i, MAX_DIRTY_MIGRATIONS); 1056 if !Self::vm_maybe_send_dirty_pages(vm, &mut socket)? { 1057 break; 1058 } 1059 } 1060 1061 // Now pause VM 1062 vm.pause()?; 1063 1064 // Send last batch of dirty pages 1065 Self::vm_maybe_send_dirty_pages(vm, &mut socket)?; 1066 1067 // Stop logging dirty pages 1068 vm.stop_dirty_log()?; 1069 } 1070 // Capture snapshot and send it 1071 let vm_snapshot = vm.snapshot()?; 1072 let snapshot_data = serde_json::to_vec(&vm_snapshot).unwrap(); 1073 Request::state(snapshot_data.len() as u64).write_to(&mut socket)?; 1074 socket 1075 .write_all(&snapshot_data) 1076 .map_err(MigratableError::MigrateSocket)?; 1077 Response::read_from(&mut socket)?.ok_or_abandon( 1078 &mut socket, 1079 MigratableError::MigrateSend(anyhow!("Error during state migration")), 1080 )?; 1081 // Complete the migration 1082 Request::complete().write_to(&mut socket)?; 1083 Response::read_from(&mut socket)?.ok_or_abandon( 1084 &mut socket, 1085 MigratableError::MigrateSend(anyhow!("Error completing migration")), 1086 )?; 1087 1088 info!("Migration complete"); 1089 1090 // Let every Migratable object know about the migration being complete 1091 vm.complete_migration() 1092 } 1093 1094 #[cfg(all(feature = "kvm", target_arch = "x86_64"))] 1095 fn vm_check_cpuid_compatibility( 1096 &self, 1097 src_vm_config: &Arc<Mutex<VmConfig>>, 1098 src_vm_cpuid: &[hypervisor::arch::x86::CpuIdEntry], 1099 ) -> result::Result<(), MigratableError> { 1100 #[cfg(feature = "tdx")] 1101 if src_vm_config.lock().unwrap().is_tdx_enabled() { 1102 return Err(MigratableError::MigrateReceive(anyhow!( 1103 "Live Migration is not supported when TDX is enabled" 1104 ))); 1105 }; 1106 1107 // We check the `CPUID` compatibility of between the source vm and destination, which is 1108 // mostly about feature compatibility and "topology/sgx" leaves are not relevant. 1109 let dest_cpuid = &{ 1110 let vm_config = &src_vm_config.lock().unwrap(); 1111 1112 let phys_bits = vm::physical_bits(&self.hypervisor, vm_config.cpus.max_phys_bits); 1113 arch::generate_common_cpuid( 1114 &self.hypervisor.clone(), 1115 &arch::CpuidConfig { 1116 sgx_epc_sections: None, 1117 phys_bits, 1118 kvm_hyperv: vm_config.cpus.kvm_hyperv, 1119 #[cfg(feature = "tdx")] 1120 tdx: false, 1121 amx: vm_config.cpus.features.amx, 1122 }, 1123 ) 1124 .map_err(|e| { 1125 MigratableError::MigrateReceive(anyhow!("Error generating common cpuid: {:?}", e)) 1126 })? 1127 }; 1128 arch::CpuidFeatureEntry::check_cpuid_compatibility(src_vm_cpuid, dest_cpuid).map_err(|e| { 1129 MigratableError::MigrateReceive(anyhow!( 1130 "Error checking cpu feature compatibility': {:?}", 1131 e 1132 )) 1133 }) 1134 } 1135 1136 fn control_loop( 1137 &mut self, 1138 api_receiver: Rc<Receiver<ApiRequest>>, 1139 #[cfg(feature = "guest_debug")] gdb_receiver: Rc<Receiver<gdb::GdbRequest>>, 1140 ) -> Result<()> { 1141 const EPOLL_EVENTS_LEN: usize = 100; 1142 1143 let mut events = vec![epoll::Event::new(epoll::Events::empty(), 0); EPOLL_EVENTS_LEN]; 1144 let epoll_fd = self.epoll.as_raw_fd(); 1145 1146 'outer: loop { 1147 let num_events = match epoll::wait(epoll_fd, -1, &mut events[..]) { 1148 Ok(res) => res, 1149 Err(e) => { 1150 if e.kind() == io::ErrorKind::Interrupted { 1151 // It's well defined from the epoll_wait() syscall 1152 // documentation that the epoll loop can be interrupted 1153 // before any of the requested events occurred or the 1154 // timeout expired. In both those cases, epoll_wait() 1155 // returns an error of type EINTR, but this should not 1156 // be considered as a regular error. Instead it is more 1157 // appropriate to retry, by calling into epoll_wait(). 1158 continue; 1159 } 1160 return Err(Error::Epoll(e)); 1161 } 1162 }; 1163 1164 for event in events.iter().take(num_events) { 1165 let dispatch_event: EpollDispatch = event.data.into(); 1166 match dispatch_event { 1167 EpollDispatch::Unknown => { 1168 let event = event.data; 1169 warn!("Unknown VMM loop event: {}", event); 1170 } 1171 EpollDispatch::Exit => { 1172 info!("VM exit event"); 1173 // Consume the event. 1174 self.exit_evt.read().map_err(Error::EventFdRead)?; 1175 self.vmm_shutdown().map_err(Error::VmmShutdown)?; 1176 1177 break 'outer; 1178 } 1179 EpollDispatch::Reset => { 1180 info!("VM reset event"); 1181 // Consume the event. 1182 self.reset_evt.read().map_err(Error::EventFdRead)?; 1183 self.vm_reboot().map_err(Error::VmReboot)?; 1184 } 1185 EpollDispatch::ActivateVirtioDevices => { 1186 if let Some(ref vm) = self.vm { 1187 let count = self.activate_evt.read().map_err(Error::EventFdRead)?; 1188 info!( 1189 "Trying to activate pending virtio devices: count = {}", 1190 count 1191 ); 1192 vm.activate_virtio_devices() 1193 .map_err(Error::ActivateVirtioDevices)?; 1194 } 1195 } 1196 EpollDispatch::Api => { 1197 // Consume the events. 1198 for _ in 0..self.api_evt.read().map_err(Error::EventFdRead)? { 1199 // Read from the API receiver channel 1200 let api_request = api_receiver.recv().map_err(Error::ApiRequestRecv)?; 1201 1202 if api_request(self)? { 1203 break 'outer; 1204 } 1205 } 1206 } 1207 #[cfg(feature = "guest_debug")] 1208 EpollDispatch::Debug => { 1209 // Consume the events. 1210 for _ in 0..self.debug_evt.read().map_err(Error::EventFdRead)? { 1211 // Read from the API receiver channel 1212 let gdb_request = gdb_receiver.recv().map_err(Error::GdbRequestRecv)?; 1213 1214 let response = if let Some(ref mut vm) = self.vm { 1215 vm.debug_request(&gdb_request.payload, gdb_request.cpu_id) 1216 } else { 1217 Err(VmError::VmNotRunning) 1218 } 1219 .map_err(gdb::Error::Vm); 1220 1221 gdb_request 1222 .sender 1223 .send(response) 1224 .map_err(Error::GdbResponseSend)?; 1225 } 1226 } 1227 #[cfg(not(feature = "guest_debug"))] 1228 EpollDispatch::Debug => {} 1229 } 1230 } 1231 } 1232 1233 // Trigger the termination of the signal_handler thread 1234 if let Some(signals) = self.signals.take() { 1235 signals.close(); 1236 } 1237 1238 // Wait for all the threads to finish 1239 for thread in self.threads.drain(..) { 1240 thread.join().map_err(Error::ThreadCleanup)? 1241 } 1242 1243 Ok(()) 1244 } 1245 } 1246 1247 pub type LandlockResult<T> = result::Result<T, LandlockError>; 1248 /// Trait to apply Landlock on VmConfig elements 1249 pub(crate) trait ApplyLandlock { 1250 /// Apply Landlock rules to file paths 1251 fn apply_landlock(&self, landlock: &mut Landlock) -> LandlockResult<()>; 1252 } 1253 1254 impl ApplyLandlock for MemoryZoneConfig { 1255 fn apply_landlock(&self, landlock: &mut Landlock) -> LandlockResult<()> { 1256 if let Some(file) = &self.file { 1257 landlock.add_rule_with_access(file.to_path_buf(), "rw")?; 1258 } 1259 Ok(()) 1260 } 1261 } 1262 1263 impl ApplyLandlock for DiskConfig { 1264 fn apply_landlock(&self, landlock: &mut Landlock) -> LandlockResult<()> { 1265 if let Some(path) = &self.path { 1266 landlock.add_rule_with_access(path.to_path_buf(), "rw")?; 1267 } 1268 Ok(()) 1269 } 1270 } 1271 1272 impl ApplyLandlock for RngConfig { 1273 fn apply_landlock(&self, landlock: &mut Landlock) -> LandlockResult<()> { 1274 // Rng Path only need read access 1275 landlock.add_rule_with_access(self.src.to_path_buf(), "r")?; 1276 Ok(()) 1277 } 1278 } 1279 1280 impl ApplyLandlock for FsConfig { 1281 fn apply_landlock(&self, landlock: &mut Landlock) -> LandlockResult<()> { 1282 landlock.add_rule_with_access(self.socket.to_path_buf(), "rw")?; 1283 Ok(()) 1284 } 1285 } 1286 1287 impl ApplyLandlock for PmemConfig { 1288 fn apply_landlock(&self, landlock: &mut Landlock) -> LandlockResult<()> { 1289 landlock.add_rule_with_access(self.file.to_path_buf(), "rw")?; 1290 Ok(()) 1291 } 1292 } 1293 1294 impl ApplyLandlock for ConsoleConfig { 1295 fn apply_landlock(&self, landlock: &mut Landlock) -> LandlockResult<()> { 1296 if let Some(file) = &self.file { 1297 landlock.add_rule_with_access(file.to_path_buf(), "rw")?; 1298 } 1299 if let Some(socket) = &self.socket { 1300 landlock.add_rule_with_access(socket.to_path_buf(), "rw")?; 1301 } 1302 Ok(()) 1303 } 1304 } 1305 1306 #[cfg(target_arch = "x86_64")] 1307 impl ApplyLandlock for DebugConsoleConfig { 1308 fn apply_landlock(&self, landlock: &mut Landlock) -> LandlockResult<()> { 1309 if let Some(file) = &self.file { 1310 landlock.add_rule_with_access(file.to_path_buf(), "rw")?; 1311 } 1312 Ok(()) 1313 } 1314 } 1315 1316 impl ApplyLandlock for DeviceConfig { 1317 fn apply_landlock(&self, landlock: &mut Landlock) -> LandlockResult<()> { 1318 let device_path = fs::read_link(self.path.as_path()).map_err(LandlockError::OpenPath)?; 1319 let iommu_group = device_path.file_name(); 1320 let iommu_group_str = iommu_group 1321 .ok_or(LandlockError::InvalidPath)? 1322 .to_str() 1323 .ok_or(LandlockError::InvalidPath)?; 1324 1325 let vfio_group_path = "/dev/vfio/".to_owned() + iommu_group_str; 1326 landlock.add_rule_with_access(vfio_group_path.into(), "rw")?; 1327 1328 Ok(()) 1329 } 1330 } 1331 1332 impl ApplyLandlock for UserDeviceConfig { 1333 fn apply_landlock(&self, landlock: &mut Landlock) -> LandlockResult<()> { 1334 landlock.add_rule_with_access(self.socket.to_path_buf(), "rw")?; 1335 Ok(()) 1336 } 1337 } 1338 1339 impl ApplyLandlock for VdpaConfig { 1340 fn apply_landlock(&self, landlock: &mut Landlock) -> LandlockResult<()> { 1341 landlock.add_rule_with_access(self.path.to_path_buf(), "rw")?; 1342 Ok(()) 1343 } 1344 } 1345 1346 impl ApplyLandlock for VsockConfig { 1347 fn apply_landlock(&self, landlock: &mut Landlock) -> LandlockResult<()> { 1348 landlock.add_rule_with_access(self.socket.to_path_buf(), "rw")?; 1349 Ok(()) 1350 } 1351 } 1352 1353 impl ApplyLandlock for PayloadConfig { 1354 fn apply_landlock(&self, landlock: &mut Landlock) -> LandlockResult<()> { 1355 // Payload only needs read access 1356 if let Some(firmware) = &self.firmware { 1357 landlock.add_rule_with_access(firmware.to_path_buf(), "r")?; 1358 } 1359 1360 if let Some(kernel) = &self.kernel { 1361 landlock.add_rule_with_access(kernel.to_path_buf(), "r")?; 1362 } 1363 1364 if let Some(initramfs) = &self.initramfs { 1365 landlock.add_rule_with_access(initramfs.to_path_buf(), "r")?; 1366 } 1367 1368 #[cfg(feature = "igvm")] 1369 if let Some(igvm) = &self.igvm { 1370 landlock.add_rule_with_access(igvm.to_path_buf(), "r")?; 1371 } 1372 1373 Ok(()) 1374 } 1375 } 1376 1377 impl ApplyLandlock for TpmConfig { 1378 fn apply_landlock(&self, landlock: &mut Landlock) -> LandlockResult<()> { 1379 landlock.add_rule_with_access(self.socket.to_path_buf(), "rw")?; 1380 Ok(()) 1381 } 1382 } 1383 1384 impl ApplyLandlock for LandlockConfig { 1385 fn apply_landlock(&self, landlock: &mut Landlock) -> LandlockResult<()> { 1386 landlock.add_rule_with_access(self.path.to_path_buf(), self.access.clone().as_str())?; 1387 Ok(()) 1388 } 1389 } 1390 1391 fn apply_landlock( 1392 vm_config: &Arc<Mutex<VmConfig>>, 1393 landlock_config: &Option<Vec<LandlockConfig>>, 1394 ) -> LandlockResult<()> { 1395 let vm_config = vm_config.lock().unwrap(); 1396 let mut landlock = Landlock::new()?; 1397 1398 if let Some(mem_zones) = &vm_config.memory.zones { 1399 for zone in mem_zones.iter() { 1400 zone.apply_landlock(&mut landlock)?; 1401 } 1402 } 1403 1404 let disks = &vm_config.disks; 1405 if let Some(disks) = disks { 1406 for disk in disks.iter() { 1407 disk.apply_landlock(&mut landlock)?; 1408 } 1409 } 1410 1411 vm_config.rng.apply_landlock(&mut landlock)?; 1412 1413 if let Some(fs_configs) = &vm_config.fs { 1414 for fs_config in fs_configs.iter() { 1415 fs_config.apply_landlock(&mut landlock)?; 1416 } 1417 } 1418 1419 if let Some(pmem_configs) = &vm_config.pmem { 1420 for pmem_config in pmem_configs.iter() { 1421 pmem_config.apply_landlock(&mut landlock)?; 1422 } 1423 } 1424 1425 vm_config.console.apply_landlock(&mut landlock)?; 1426 vm_config.serial.apply_landlock(&mut landlock)?; 1427 1428 #[cfg(target_arch = "x86_64")] 1429 { 1430 vm_config.debug_console.apply_landlock(&mut landlock)?; 1431 } 1432 1433 if let Some(devices) = &vm_config.devices { 1434 landlock.add_rule_with_access("/dev/vfio/vfio".into(), "rw")?; 1435 1436 for device in devices.iter() { 1437 device.apply_landlock(&mut landlock)?; 1438 } 1439 } 1440 1441 if let Some(user_devices) = &vm_config.user_devices { 1442 for user_devices in user_devices.iter() { 1443 user_devices.apply_landlock(&mut landlock)?; 1444 } 1445 } 1446 1447 if let Some(vdpa_configs) = &vm_config.vdpa { 1448 for vdpa_config in vdpa_configs.iter() { 1449 vdpa_config.apply_landlock(&mut landlock)?; 1450 } 1451 } 1452 1453 if let Some(vsock_config) = &vm_config.vsock { 1454 vsock_config.apply_landlock(&mut landlock)?; 1455 } 1456 1457 if let Some(payload) = &vm_config.payload { 1458 payload.apply_landlock(&mut landlock)?; 1459 } 1460 1461 if let Some(tpm_config) = &vm_config.tpm { 1462 tpm_config.apply_landlock(&mut landlock)?; 1463 } 1464 1465 if vm_config.net.is_some() { 1466 landlock.add_rule_with_access("/dev/net/tun".into(), "rw")?; 1467 } 1468 1469 if let Some(landlock_configs) = landlock_config { 1470 for landlock_config in landlock_configs.iter() { 1471 landlock_config.apply_landlock(&mut landlock)?; 1472 } 1473 } 1474 1475 landlock.restrict_self()?; 1476 1477 Ok(()) 1478 } 1479 1480 impl RequestHandler for Vmm { 1481 fn vm_create(&mut self, config: Arc<Mutex<VmConfig>>) -> result::Result<(), VmError> { 1482 // We only store the passed VM config. 1483 // The VM will be created when being asked to boot it. 1484 if self.vm_config.is_none() { 1485 self.vm_config = Some(config); 1486 self.console_info = 1487 Some(pre_create_console_devices(self).map_err(VmError::CreateConsoleDevices)?); 1488 1489 if self.landlock_enable { 1490 apply_landlock( 1491 &self.vm_config.as_ref().unwrap().clone(), 1492 &self.landlock_config, 1493 ) 1494 .map_err(VmError::ApplyLandlock)?; 1495 } 1496 Ok(()) 1497 } else { 1498 Err(VmError::VmAlreadyCreated) 1499 } 1500 } 1501 1502 fn vm_boot(&mut self) -> result::Result<(), VmError> { 1503 tracer::start(); 1504 info!("Booting VM"); 1505 event!("vm", "booting"); 1506 let r = { 1507 trace_scoped!("vm_boot"); 1508 // If we don't have a config, we cannot boot a VM. 1509 if self.vm_config.is_none() { 1510 return Err(VmError::VmMissingConfig); 1511 }; 1512 1513 // console_info is set to None in vm_shutdown. re-populate here if empty 1514 if self.console_info.is_none() { 1515 self.console_info = 1516 Some(pre_create_console_devices(self).map_err(VmError::CreateConsoleDevices)?); 1517 } 1518 1519 // Create a new VM if we don't have one yet. 1520 if self.vm.is_none() { 1521 let exit_evt = self.exit_evt.try_clone().map_err(VmError::EventFdClone)?; 1522 let reset_evt = self.reset_evt.try_clone().map_err(VmError::EventFdClone)?; 1523 #[cfg(feature = "guest_debug")] 1524 let vm_debug_evt = self 1525 .vm_debug_evt 1526 .try_clone() 1527 .map_err(VmError::EventFdClone)?; 1528 let activate_evt = self 1529 .activate_evt 1530 .try_clone() 1531 .map_err(VmError::EventFdClone)?; 1532 1533 if let Some(ref vm_config) = self.vm_config { 1534 let vm = Vm::new( 1535 Arc::clone(vm_config), 1536 exit_evt, 1537 reset_evt, 1538 #[cfg(feature = "guest_debug")] 1539 vm_debug_evt, 1540 &self.seccomp_action, 1541 self.hypervisor.clone(), 1542 activate_evt, 1543 self.console_info.clone(), 1544 None, 1545 Arc::clone(&self.original_termios_opt), 1546 None, 1547 None, 1548 None, 1549 )?; 1550 1551 self.vm = Some(vm); 1552 } 1553 } 1554 1555 // Now we can boot the VM. 1556 if let Some(ref mut vm) = self.vm { 1557 vm.boot() 1558 } else { 1559 Err(VmError::VmNotCreated) 1560 } 1561 }; 1562 tracer::end(); 1563 if r.is_ok() { 1564 event!("vm", "booted"); 1565 } 1566 r 1567 } 1568 1569 fn vm_pause(&mut self) -> result::Result<(), VmError> { 1570 if let Some(ref mut vm) = self.vm { 1571 vm.pause().map_err(VmError::Pause) 1572 } else { 1573 Err(VmError::VmNotRunning) 1574 } 1575 } 1576 1577 fn vm_resume(&mut self) -> result::Result<(), VmError> { 1578 if let Some(ref mut vm) = self.vm { 1579 vm.resume().map_err(VmError::Resume) 1580 } else { 1581 Err(VmError::VmNotRunning) 1582 } 1583 } 1584 1585 fn vm_snapshot(&mut self, destination_url: &str) -> result::Result<(), VmError> { 1586 if let Some(ref mut vm) = self.vm { 1587 // Drain console_info so that FDs are not reused 1588 let _ = self.console_info.take(); 1589 vm.snapshot() 1590 .map_err(VmError::Snapshot) 1591 .and_then(|snapshot| { 1592 vm.send(&snapshot, destination_url) 1593 .map_err(VmError::SnapshotSend) 1594 }) 1595 } else { 1596 Err(VmError::VmNotRunning) 1597 } 1598 } 1599 1600 fn vm_restore(&mut self, restore_cfg: RestoreConfig) -> result::Result<(), VmError> { 1601 if self.vm.is_some() || self.vm_config.is_some() { 1602 return Err(VmError::VmAlreadyCreated); 1603 } 1604 1605 let source_url = restore_cfg.source_url.as_path().to_str(); 1606 if source_url.is_none() { 1607 return Err(VmError::InvalidRestoreSourceUrl); 1608 } 1609 // Safe to unwrap as we checked it was Some(&str). 1610 let source_url = source_url.unwrap(); 1611 1612 let vm_config = Arc::new(Mutex::new( 1613 recv_vm_config(source_url).map_err(VmError::Restore)?, 1614 )); 1615 restore_cfg 1616 .validate(&vm_config.lock().unwrap().clone()) 1617 .map_err(VmError::ConfigValidation)?; 1618 1619 // Update VM's net configurations with new fds received for restore operation 1620 if let (Some(restored_nets), Some(vm_net_configs)) = 1621 (restore_cfg.net_fds, &mut vm_config.lock().unwrap().net) 1622 { 1623 for net in restored_nets.iter() { 1624 for net_config in vm_net_configs.iter_mut() { 1625 // update only if the net dev is backed by FDs 1626 if net_config.id == Some(net.id.clone()) && net_config.fds.is_some() { 1627 net_config.fds.clone_from(&net.fds); 1628 } 1629 } 1630 } 1631 } 1632 1633 let snapshot = recv_vm_state(source_url).map_err(VmError::Restore)?; 1634 #[cfg(all(feature = "kvm", target_arch = "x86_64"))] 1635 let vm_snapshot = get_vm_snapshot(&snapshot).map_err(VmError::Restore)?; 1636 1637 #[cfg(all(feature = "kvm", target_arch = "x86_64"))] 1638 self.vm_check_cpuid_compatibility(&vm_config, &vm_snapshot.common_cpuid) 1639 .map_err(VmError::Restore)?; 1640 1641 self.vm_config = Some(Arc::clone(&vm_config)); 1642 1643 // console_info is set to None in vm_snapshot. re-populate here if empty 1644 if self.console_info.is_none() { 1645 self.console_info = 1646 Some(pre_create_console_devices(self).map_err(VmError::CreateConsoleDevices)?); 1647 } 1648 1649 let exit_evt = self.exit_evt.try_clone().map_err(VmError::EventFdClone)?; 1650 let reset_evt = self.reset_evt.try_clone().map_err(VmError::EventFdClone)?; 1651 #[cfg(feature = "guest_debug")] 1652 let debug_evt = self 1653 .vm_debug_evt 1654 .try_clone() 1655 .map_err(VmError::EventFdClone)?; 1656 let activate_evt = self 1657 .activate_evt 1658 .try_clone() 1659 .map_err(VmError::EventFdClone)?; 1660 1661 let vm = Vm::new( 1662 vm_config, 1663 exit_evt, 1664 reset_evt, 1665 #[cfg(feature = "guest_debug")] 1666 debug_evt, 1667 &self.seccomp_action, 1668 self.hypervisor.clone(), 1669 activate_evt, 1670 self.console_info.clone(), 1671 None, 1672 Arc::clone(&self.original_termios_opt), 1673 Some(snapshot), 1674 Some(source_url), 1675 Some(restore_cfg.prefault), 1676 )?; 1677 self.vm = Some(vm); 1678 1679 if self.landlock_enable { 1680 apply_landlock( 1681 &self.vm_config.as_ref().unwrap().clone(), 1682 &self.landlock_config, 1683 ) 1684 .map_err(VmError::ApplyLandlock)?; 1685 } 1686 1687 // Now we can restore the rest of the VM. 1688 if let Some(ref mut vm) = self.vm { 1689 vm.restore() 1690 } else { 1691 Err(VmError::VmNotCreated) 1692 } 1693 } 1694 1695 #[cfg(all(target_arch = "x86_64", feature = "guest_debug"))] 1696 fn vm_coredump(&mut self, destination_url: &str) -> result::Result<(), VmError> { 1697 if let Some(ref mut vm) = self.vm { 1698 vm.coredump(destination_url).map_err(VmError::Coredump) 1699 } else { 1700 Err(VmError::VmNotRunning) 1701 } 1702 } 1703 1704 fn vm_shutdown(&mut self) -> result::Result<(), VmError> { 1705 let r = if let Some(ref mut vm) = self.vm.take() { 1706 // Drain console_info so that the FDs are not reused 1707 let _ = self.console_info.take(); 1708 vm.shutdown() 1709 } else { 1710 Err(VmError::VmNotRunning) 1711 }; 1712 1713 if r.is_ok() { 1714 event!("vm", "shutdown"); 1715 } 1716 1717 r 1718 } 1719 1720 fn vm_reboot(&mut self) -> result::Result<(), VmError> { 1721 event!("vm", "rebooting"); 1722 1723 // First we stop the current VM 1724 let (config, console_resize_pipe) = if let Some(mut vm) = self.vm.take() { 1725 let config = vm.get_config(); 1726 let console_resize_pipe = vm 1727 .console_resize_pipe() 1728 .as_ref() 1729 .map(|pipe| pipe.try_clone().unwrap()); 1730 vm.shutdown()?; 1731 (config, console_resize_pipe) 1732 } else { 1733 return Err(VmError::VmNotCreated); 1734 }; 1735 1736 // vm.shutdown() closes all the console devices, so set console_info to None 1737 // so that the closed FD #s are not reused. 1738 let _ = self.console_info.take(); 1739 1740 let exit_evt = self.exit_evt.try_clone().map_err(VmError::EventFdClone)?; 1741 let reset_evt = self.reset_evt.try_clone().map_err(VmError::EventFdClone)?; 1742 #[cfg(feature = "guest_debug")] 1743 let debug_evt = self 1744 .vm_debug_evt 1745 .try_clone() 1746 .map_err(VmError::EventFdClone)?; 1747 let activate_evt = self 1748 .activate_evt 1749 .try_clone() 1750 .map_err(VmError::EventFdClone)?; 1751 1752 // The Linux kernel fires off an i8042 reset after doing the ACPI reset so there may be 1753 // an event sitting in the shared reset_evt. Without doing this we get very early reboots 1754 // during the boot process. 1755 if self.reset_evt.read().is_ok() { 1756 warn!("Spurious second reset event received. Ignoring."); 1757 } 1758 1759 self.console_info = 1760 Some(pre_create_console_devices(self).map_err(VmError::CreateConsoleDevices)?); 1761 1762 // Then we create the new VM 1763 let mut vm = Vm::new( 1764 config, 1765 exit_evt, 1766 reset_evt, 1767 #[cfg(feature = "guest_debug")] 1768 debug_evt, 1769 &self.seccomp_action, 1770 self.hypervisor.clone(), 1771 activate_evt, 1772 self.console_info.clone(), 1773 console_resize_pipe, 1774 Arc::clone(&self.original_termios_opt), 1775 None, 1776 None, 1777 None, 1778 )?; 1779 1780 // And we boot it 1781 vm.boot()?; 1782 1783 self.vm = Some(vm); 1784 1785 event!("vm", "rebooted"); 1786 1787 Ok(()) 1788 } 1789 1790 fn vm_info(&self) -> result::Result<VmInfoResponse, VmError> { 1791 match &self.vm_config { 1792 Some(config) => { 1793 let state = match &self.vm { 1794 Some(vm) => vm.get_state()?, 1795 None => VmState::Created, 1796 }; 1797 1798 let config = Arc::clone(config); 1799 1800 let mut memory_actual_size = config.lock().unwrap().memory.total_size(); 1801 if let Some(vm) = &self.vm { 1802 memory_actual_size -= vm.balloon_size(); 1803 } 1804 1805 let device_tree = self.vm.as_ref().map(|vm| vm.device_tree()); 1806 1807 Ok(VmInfoResponse { 1808 config, 1809 state, 1810 memory_actual_size, 1811 device_tree, 1812 }) 1813 } 1814 None => Err(VmError::VmNotCreated), 1815 } 1816 } 1817 1818 fn vmm_ping(&self) -> VmmPingResponse { 1819 let VmmVersionInfo { 1820 build_version, 1821 version, 1822 } = self.version.clone(); 1823 1824 VmmPingResponse { 1825 build_version, 1826 version, 1827 pid: std::process::id() as i64, 1828 features: feature_list(), 1829 } 1830 } 1831 1832 fn vm_delete(&mut self) -> result::Result<(), VmError> { 1833 if self.vm_config.is_none() { 1834 return Ok(()); 1835 } 1836 1837 // If a VM is booted, we first try to shut it down. 1838 if self.vm.is_some() { 1839 self.vm_shutdown()?; 1840 } 1841 1842 self.vm_config = None; 1843 1844 event!("vm", "deleted"); 1845 1846 Ok(()) 1847 } 1848 1849 fn vmm_shutdown(&mut self) -> result::Result<(), VmError> { 1850 self.vm_delete()?; 1851 event!("vmm", "shutdown"); 1852 Ok(()) 1853 } 1854 1855 fn vm_resize( 1856 &mut self, 1857 desired_vcpus: Option<u8>, 1858 desired_ram: Option<u64>, 1859 desired_balloon: Option<u64>, 1860 ) -> result::Result<(), VmError> { 1861 self.vm_config.as_ref().ok_or(VmError::VmNotCreated)?; 1862 1863 if let Some(ref mut vm) = self.vm { 1864 if let Err(e) = vm.resize(desired_vcpus, desired_ram, desired_balloon) { 1865 error!("Error when resizing VM: {:?}", e); 1866 Err(e) 1867 } else { 1868 Ok(()) 1869 } 1870 } else { 1871 let mut config = self.vm_config.as_ref().unwrap().lock().unwrap(); 1872 if let Some(desired_vcpus) = desired_vcpus { 1873 config.cpus.boot_vcpus = desired_vcpus; 1874 } 1875 if let Some(desired_ram) = desired_ram { 1876 config.memory.size = desired_ram; 1877 } 1878 if let Some(desired_balloon) = desired_balloon { 1879 if let Some(balloon_config) = &mut config.balloon { 1880 balloon_config.size = desired_balloon; 1881 } 1882 } 1883 Ok(()) 1884 } 1885 } 1886 1887 fn vm_resize_zone(&mut self, id: String, desired_ram: u64) -> result::Result<(), VmError> { 1888 self.vm_config.as_ref().ok_or(VmError::VmNotCreated)?; 1889 1890 if let Some(ref mut vm) = self.vm { 1891 if let Err(e) = vm.resize_zone(id, desired_ram) { 1892 error!("Error when resizing VM: {:?}", e); 1893 Err(e) 1894 } else { 1895 Ok(()) 1896 } 1897 } else { 1898 // Update VmConfig by setting the new desired ram. 1899 let memory_config = &mut self.vm_config.as_ref().unwrap().lock().unwrap().memory; 1900 1901 if let Some(zones) = &mut memory_config.zones { 1902 for zone in zones.iter_mut() { 1903 if zone.id == id { 1904 zone.size = desired_ram; 1905 return Ok(()); 1906 } 1907 } 1908 } 1909 1910 error!("Could not find the memory zone {} for the resize", id); 1911 Err(VmError::ResizeZone) 1912 } 1913 } 1914 1915 fn vm_add_device( 1916 &mut self, 1917 device_cfg: DeviceConfig, 1918 ) -> result::Result<Option<Vec<u8>>, VmError> { 1919 self.vm_config.as_ref().ok_or(VmError::VmNotCreated)?; 1920 1921 { 1922 // Validate the configuration change in a cloned configuration 1923 let mut config = self.vm_config.as_ref().unwrap().lock().unwrap().clone(); 1924 add_to_config(&mut config.devices, device_cfg.clone()); 1925 config.validate().map_err(VmError::ConfigValidation)?; 1926 } 1927 1928 if let Some(ref mut vm) = self.vm { 1929 let info = vm.add_device(device_cfg).map_err(|e| { 1930 error!("Error when adding new device to the VM: {:?}", e); 1931 e 1932 })?; 1933 serde_json::to_vec(&info) 1934 .map(Some) 1935 .map_err(VmError::SerializeJson) 1936 } else { 1937 // Update VmConfig by adding the new device. 1938 let mut config = self.vm_config.as_ref().unwrap().lock().unwrap(); 1939 add_to_config(&mut config.devices, device_cfg); 1940 Ok(None) 1941 } 1942 } 1943 1944 fn vm_add_user_device( 1945 &mut self, 1946 device_cfg: UserDeviceConfig, 1947 ) -> result::Result<Option<Vec<u8>>, VmError> { 1948 self.vm_config.as_ref().ok_or(VmError::VmNotCreated)?; 1949 1950 { 1951 // Validate the configuration change in a cloned configuration 1952 let mut config = self.vm_config.as_ref().unwrap().lock().unwrap().clone(); 1953 add_to_config(&mut config.user_devices, device_cfg.clone()); 1954 config.validate().map_err(VmError::ConfigValidation)?; 1955 } 1956 1957 if let Some(ref mut vm) = self.vm { 1958 let info = vm.add_user_device(device_cfg).map_err(|e| { 1959 error!("Error when adding new user device to the VM: {:?}", e); 1960 e 1961 })?; 1962 serde_json::to_vec(&info) 1963 .map(Some) 1964 .map_err(VmError::SerializeJson) 1965 } else { 1966 // Update VmConfig by adding the new device. 1967 let mut config = self.vm_config.as_ref().unwrap().lock().unwrap(); 1968 add_to_config(&mut config.user_devices, device_cfg); 1969 Ok(None) 1970 } 1971 } 1972 1973 fn vm_remove_device(&mut self, id: String) -> result::Result<(), VmError> { 1974 if let Some(ref mut vm) = self.vm { 1975 if let Err(e) = vm.remove_device(id) { 1976 error!("Error when removing device from the VM: {:?}", e); 1977 Err(e) 1978 } else { 1979 Ok(()) 1980 } 1981 } else if let Some(ref config) = self.vm_config { 1982 let mut config = config.lock().unwrap(); 1983 if config.remove_device(&id) { 1984 Ok(()) 1985 } else { 1986 Err(VmError::NoDeviceToRemove(id)) 1987 } 1988 } else { 1989 Err(VmError::VmNotCreated) 1990 } 1991 } 1992 1993 fn vm_add_disk(&mut self, disk_cfg: DiskConfig) -> result::Result<Option<Vec<u8>>, VmError> { 1994 self.vm_config.as_ref().ok_or(VmError::VmNotCreated)?; 1995 1996 { 1997 // Validate the configuration change in a cloned configuration 1998 let mut config = self.vm_config.as_ref().unwrap().lock().unwrap().clone(); 1999 add_to_config(&mut config.disks, disk_cfg.clone()); 2000 config.validate().map_err(VmError::ConfigValidation)?; 2001 } 2002 2003 if let Some(ref mut vm) = self.vm { 2004 let info = vm.add_disk(disk_cfg).map_err(|e| { 2005 error!("Error when adding new disk to the VM: {:?}", e); 2006 e 2007 })?; 2008 serde_json::to_vec(&info) 2009 .map(Some) 2010 .map_err(VmError::SerializeJson) 2011 } else { 2012 // Update VmConfig by adding the new device. 2013 let mut config = self.vm_config.as_ref().unwrap().lock().unwrap(); 2014 add_to_config(&mut config.disks, disk_cfg); 2015 Ok(None) 2016 } 2017 } 2018 2019 fn vm_add_fs(&mut self, fs_cfg: FsConfig) -> result::Result<Option<Vec<u8>>, VmError> { 2020 self.vm_config.as_ref().ok_or(VmError::VmNotCreated)?; 2021 2022 { 2023 // Validate the configuration change in a cloned configuration 2024 let mut config = self.vm_config.as_ref().unwrap().lock().unwrap().clone(); 2025 add_to_config(&mut config.fs, fs_cfg.clone()); 2026 config.validate().map_err(VmError::ConfigValidation)?; 2027 } 2028 2029 if let Some(ref mut vm) = self.vm { 2030 let info = vm.add_fs(fs_cfg).map_err(|e| { 2031 error!("Error when adding new fs to the VM: {:?}", e); 2032 e 2033 })?; 2034 serde_json::to_vec(&info) 2035 .map(Some) 2036 .map_err(VmError::SerializeJson) 2037 } else { 2038 // Update VmConfig by adding the new device. 2039 let mut config = self.vm_config.as_ref().unwrap().lock().unwrap(); 2040 add_to_config(&mut config.fs, fs_cfg); 2041 Ok(None) 2042 } 2043 } 2044 2045 fn vm_add_pmem(&mut self, pmem_cfg: PmemConfig) -> result::Result<Option<Vec<u8>>, VmError> { 2046 self.vm_config.as_ref().ok_or(VmError::VmNotCreated)?; 2047 2048 { 2049 // Validate the configuration change in a cloned configuration 2050 let mut config = self.vm_config.as_ref().unwrap().lock().unwrap().clone(); 2051 add_to_config(&mut config.pmem, pmem_cfg.clone()); 2052 config.validate().map_err(VmError::ConfigValidation)?; 2053 } 2054 2055 if let Some(ref mut vm) = self.vm { 2056 let info = vm.add_pmem(pmem_cfg).map_err(|e| { 2057 error!("Error when adding new pmem device to the VM: {:?}", e); 2058 e 2059 })?; 2060 serde_json::to_vec(&info) 2061 .map(Some) 2062 .map_err(VmError::SerializeJson) 2063 } else { 2064 // Update VmConfig by adding the new device. 2065 let mut config = self.vm_config.as_ref().unwrap().lock().unwrap(); 2066 add_to_config(&mut config.pmem, pmem_cfg); 2067 Ok(None) 2068 } 2069 } 2070 2071 fn vm_add_net(&mut self, net_cfg: NetConfig) -> result::Result<Option<Vec<u8>>, VmError> { 2072 self.vm_config.as_ref().ok_or(VmError::VmNotCreated)?; 2073 2074 { 2075 // Validate the configuration change in a cloned configuration 2076 let mut config = self.vm_config.as_ref().unwrap().lock().unwrap().clone(); 2077 add_to_config(&mut config.net, net_cfg.clone()); 2078 config.validate().map_err(VmError::ConfigValidation)?; 2079 } 2080 2081 if let Some(ref mut vm) = self.vm { 2082 let info = vm.add_net(net_cfg).map_err(|e| { 2083 error!("Error when adding new network device to the VM: {:?}", e); 2084 e 2085 })?; 2086 serde_json::to_vec(&info) 2087 .map(Some) 2088 .map_err(VmError::SerializeJson) 2089 } else { 2090 // Update VmConfig by adding the new device. 2091 let mut config = self.vm_config.as_ref().unwrap().lock().unwrap(); 2092 add_to_config(&mut config.net, net_cfg); 2093 Ok(None) 2094 } 2095 } 2096 2097 fn vm_add_vdpa(&mut self, vdpa_cfg: VdpaConfig) -> result::Result<Option<Vec<u8>>, VmError> { 2098 self.vm_config.as_ref().ok_or(VmError::VmNotCreated)?; 2099 2100 { 2101 // Validate the configuration change in a cloned configuration 2102 let mut config = self.vm_config.as_ref().unwrap().lock().unwrap().clone(); 2103 add_to_config(&mut config.vdpa, vdpa_cfg.clone()); 2104 config.validate().map_err(VmError::ConfigValidation)?; 2105 } 2106 2107 if let Some(ref mut vm) = self.vm { 2108 let info = vm.add_vdpa(vdpa_cfg).map_err(|e| { 2109 error!("Error when adding new vDPA device to the VM: {:?}", e); 2110 e 2111 })?; 2112 serde_json::to_vec(&info) 2113 .map(Some) 2114 .map_err(VmError::SerializeJson) 2115 } else { 2116 // Update VmConfig by adding the new device. 2117 let mut config = self.vm_config.as_ref().unwrap().lock().unwrap(); 2118 add_to_config(&mut config.vdpa, vdpa_cfg); 2119 Ok(None) 2120 } 2121 } 2122 2123 fn vm_add_vsock(&mut self, vsock_cfg: VsockConfig) -> result::Result<Option<Vec<u8>>, VmError> { 2124 self.vm_config.as_ref().ok_or(VmError::VmNotCreated)?; 2125 2126 { 2127 // Validate the configuration change in a cloned configuration 2128 let mut config = self.vm_config.as_ref().unwrap().lock().unwrap().clone(); 2129 2130 if config.vsock.is_some() { 2131 return Err(VmError::TooManyVsockDevices); 2132 } 2133 2134 config.vsock = Some(vsock_cfg.clone()); 2135 config.validate().map_err(VmError::ConfigValidation)?; 2136 } 2137 2138 if let Some(ref mut vm) = self.vm { 2139 let info = vm.add_vsock(vsock_cfg).map_err(|e| { 2140 error!("Error when adding new vsock device to the VM: {:?}", e); 2141 e 2142 })?; 2143 serde_json::to_vec(&info) 2144 .map(Some) 2145 .map_err(VmError::SerializeJson) 2146 } else { 2147 // Update VmConfig by adding the new device. 2148 let mut config = self.vm_config.as_ref().unwrap().lock().unwrap(); 2149 config.vsock = Some(vsock_cfg); 2150 Ok(None) 2151 } 2152 } 2153 2154 fn vm_counters(&mut self) -> result::Result<Option<Vec<u8>>, VmError> { 2155 if let Some(ref mut vm) = self.vm { 2156 let info = vm.counters().map_err(|e| { 2157 error!("Error when getting counters from the VM: {:?}", e); 2158 e 2159 })?; 2160 serde_json::to_vec(&info) 2161 .map(Some) 2162 .map_err(VmError::SerializeJson) 2163 } else { 2164 Err(VmError::VmNotRunning) 2165 } 2166 } 2167 2168 fn vm_power_button(&mut self) -> result::Result<(), VmError> { 2169 if let Some(ref mut vm) = self.vm { 2170 vm.power_button() 2171 } else { 2172 Err(VmError::VmNotRunning) 2173 } 2174 } 2175 2176 fn vm_nmi(&mut self) -> result::Result<(), VmError> { 2177 if let Some(ref mut vm) = self.vm { 2178 vm.nmi() 2179 } else { 2180 Err(VmError::VmNotRunning) 2181 } 2182 } 2183 2184 fn vm_receive_migration( 2185 &mut self, 2186 receive_data_migration: VmReceiveMigrationData, 2187 ) -> result::Result<(), MigratableError> { 2188 info!( 2189 "Receiving migration: receiver_url = {}", 2190 receive_data_migration.receiver_url 2191 ); 2192 2193 let path = Self::socket_url_to_path(&receive_data_migration.receiver_url)?; 2194 let listener = UnixListener::bind(&path).map_err(|e| { 2195 MigratableError::MigrateReceive(anyhow!("Error binding to UNIX socket: {}", e)) 2196 })?; 2197 let (mut socket, _addr) = listener.accept().map_err(|e| { 2198 MigratableError::MigrateReceive(anyhow!("Error accepting on UNIX socket: {}", e)) 2199 })?; 2200 std::fs::remove_file(&path).map_err(|e| { 2201 MigratableError::MigrateReceive(anyhow!("Error unlinking UNIX socket: {}", e)) 2202 })?; 2203 2204 let mut started = false; 2205 let mut memory_manager: Option<Arc<Mutex<MemoryManager>>> = None; 2206 let mut existing_memory_files = None; 2207 loop { 2208 let req = Request::read_from(&mut socket)?; 2209 match req.command() { 2210 Command::Invalid => info!("Invalid Command Received"), 2211 Command::Start => { 2212 info!("Start Command Received"); 2213 started = true; 2214 2215 Response::ok().write_to(&mut socket)?; 2216 } 2217 Command::Config => { 2218 info!("Config Command Received"); 2219 2220 if !started { 2221 warn!("Migration not started yet"); 2222 Response::error().write_to(&mut socket)?; 2223 continue; 2224 } 2225 memory_manager = Some(self.vm_receive_config( 2226 &req, 2227 &mut socket, 2228 existing_memory_files.take(), 2229 )?); 2230 } 2231 Command::State => { 2232 info!("State Command Received"); 2233 2234 if !started { 2235 warn!("Migration not started yet"); 2236 Response::error().write_to(&mut socket)?; 2237 continue; 2238 } 2239 if let Some(mm) = memory_manager.take() { 2240 self.vm_receive_state(&req, &mut socket, mm)?; 2241 } else { 2242 warn!("Configuration not sent yet"); 2243 Response::error().write_to(&mut socket)?; 2244 } 2245 } 2246 Command::Memory => { 2247 info!("Memory Command Received"); 2248 2249 if !started { 2250 warn!("Migration not started yet"); 2251 Response::error().write_to(&mut socket)?; 2252 continue; 2253 } 2254 if let Some(mm) = memory_manager.as_ref() { 2255 self.vm_receive_memory(&req, &mut socket, &mut mm.lock().unwrap())?; 2256 } else { 2257 warn!("Configuration not sent yet"); 2258 Response::error().write_to(&mut socket)?; 2259 } 2260 } 2261 Command::MemoryFd => { 2262 info!("MemoryFd Command Received"); 2263 2264 if !started { 2265 warn!("Migration not started yet"); 2266 Response::error().write_to(&mut socket)?; 2267 continue; 2268 } 2269 2270 let mut buf = [0u8; 4]; 2271 let (_, file) = socket.recv_with_fd(&mut buf).map_err(|e| { 2272 MigratableError::MigrateReceive(anyhow!( 2273 "Error receiving slot from socket: {}", 2274 e 2275 )) 2276 })?; 2277 2278 if existing_memory_files.is_none() { 2279 existing_memory_files = Some(HashMap::default()) 2280 } 2281 2282 if let Some(ref mut existing_memory_files) = existing_memory_files { 2283 let slot = u32::from_le_bytes(buf); 2284 existing_memory_files.insert(slot, file.unwrap()); 2285 } 2286 2287 Response::ok().write_to(&mut socket)?; 2288 } 2289 Command::Complete => { 2290 info!("Complete Command Received"); 2291 if let Some(ref mut vm) = self.vm.as_mut() { 2292 vm.resume()?; 2293 Response::ok().write_to(&mut socket)?; 2294 } else { 2295 warn!("VM not created yet"); 2296 Response::error().write_to(&mut socket)?; 2297 } 2298 break; 2299 } 2300 Command::Abandon => { 2301 info!("Abandon Command Received"); 2302 self.vm = None; 2303 self.vm_config = None; 2304 Response::ok().write_to(&mut socket).ok(); 2305 break; 2306 } 2307 } 2308 } 2309 2310 Ok(()) 2311 } 2312 2313 fn vm_send_migration( 2314 &mut self, 2315 send_data_migration: VmSendMigrationData, 2316 ) -> result::Result<(), MigratableError> { 2317 info!( 2318 "Sending migration: destination_url = {}, local = {}", 2319 send_data_migration.destination_url, send_data_migration.local 2320 ); 2321 2322 if !self 2323 .vm_config 2324 .as_ref() 2325 .unwrap() 2326 .lock() 2327 .unwrap() 2328 .backed_by_shared_memory() 2329 && send_data_migration.local 2330 { 2331 return Err(MigratableError::MigrateSend(anyhow!( 2332 "Local migration requires shared memory or hugepages enabled" 2333 ))); 2334 } 2335 2336 if let Some(vm) = self.vm.as_mut() { 2337 Self::send_migration( 2338 vm, 2339 #[cfg(all(feature = "kvm", target_arch = "x86_64"))] 2340 self.hypervisor.clone(), 2341 send_data_migration, 2342 ) 2343 .map_err(|migration_err| { 2344 error!("Migration failed: {:?}", migration_err); 2345 2346 // Stop logging dirty pages 2347 if let Err(e) = vm.stop_dirty_log() { 2348 return e; 2349 } 2350 2351 if vm.get_state().unwrap() == VmState::Paused { 2352 if let Err(e) = vm.resume() { 2353 return e; 2354 } 2355 } 2356 2357 migration_err 2358 })?; 2359 2360 // Shutdown the VM after the migration succeeded 2361 self.exit_evt.write(1).map_err(|e| { 2362 MigratableError::MigrateSend(anyhow!( 2363 "Failed shutting down the VM after migration: {:?}", 2364 e 2365 )) 2366 }) 2367 } else { 2368 Err(MigratableError::MigrateSend(anyhow!("VM is not running"))) 2369 } 2370 } 2371 } 2372 2373 const CPU_MANAGER_SNAPSHOT_ID: &str = "cpu-manager"; 2374 const MEMORY_MANAGER_SNAPSHOT_ID: &str = "memory-manager"; 2375 const DEVICE_MANAGER_SNAPSHOT_ID: &str = "device-manager"; 2376 2377 #[cfg(test)] 2378 mod unit_tests { 2379 use super::*; 2380 #[cfg(target_arch = "x86_64")] 2381 use crate::config::DebugConsoleConfig; 2382 use config::{ 2383 ConsoleConfig, ConsoleOutputMode, CpusConfig, HotplugMethod, MemoryConfig, PayloadConfig, 2384 RngConfig, 2385 }; 2386 2387 fn create_dummy_vmm() -> Vmm { 2388 Vmm::new( 2389 VmmVersionInfo::new("dummy", "dummy"), 2390 EventFd::new(EFD_NONBLOCK).unwrap(), 2391 #[cfg(feature = "guest_debug")] 2392 EventFd::new(EFD_NONBLOCK).unwrap(), 2393 #[cfg(feature = "guest_debug")] 2394 EventFd::new(EFD_NONBLOCK).unwrap(), 2395 SeccompAction::Allow, 2396 hypervisor::new().unwrap(), 2397 EventFd::new(EFD_NONBLOCK).unwrap(), 2398 false, 2399 None, 2400 ) 2401 .unwrap() 2402 } 2403 2404 fn create_dummy_vm_config() -> Arc<Mutex<VmConfig>> { 2405 Arc::new(Mutex::new(VmConfig { 2406 cpus: CpusConfig { 2407 boot_vcpus: 1, 2408 max_vcpus: 1, 2409 topology: None, 2410 kvm_hyperv: false, 2411 max_phys_bits: 46, 2412 affinity: None, 2413 features: config::CpuFeatures::default(), 2414 }, 2415 memory: MemoryConfig { 2416 size: 536_870_912, 2417 mergeable: false, 2418 hotplug_method: HotplugMethod::Acpi, 2419 hotplug_size: None, 2420 hotplugged_size: None, 2421 shared: true, 2422 hugepages: false, 2423 hugepage_size: None, 2424 prefault: false, 2425 zones: None, 2426 thp: true, 2427 }, 2428 payload: Some(PayloadConfig { 2429 kernel: Some(PathBuf::from("/path/to/kernel")), 2430 firmware: None, 2431 cmdline: None, 2432 initramfs: None, 2433 #[cfg(feature = "igvm")] 2434 igvm: None, 2435 #[cfg(feature = "sev_snp")] 2436 host_data: None, 2437 }), 2438 rate_limit_groups: None, 2439 disks: None, 2440 net: None, 2441 rng: RngConfig { 2442 src: PathBuf::from("/dev/urandom"), 2443 iommu: false, 2444 }, 2445 balloon: None, 2446 fs: None, 2447 pmem: None, 2448 serial: ConsoleConfig { 2449 file: None, 2450 mode: ConsoleOutputMode::Null, 2451 iommu: false, 2452 socket: None, 2453 }, 2454 console: ConsoleConfig { 2455 file: None, 2456 mode: ConsoleOutputMode::Tty, 2457 iommu: false, 2458 socket: None, 2459 }, 2460 #[cfg(target_arch = "x86_64")] 2461 debug_console: DebugConsoleConfig::default(), 2462 devices: None, 2463 user_devices: None, 2464 vdpa: None, 2465 vsock: None, 2466 pvpanic: false, 2467 iommu: false, 2468 #[cfg(target_arch = "x86_64")] 2469 sgx_epc: None, 2470 numa: None, 2471 watchdog: false, 2472 #[cfg(feature = "guest_debug")] 2473 gdb: false, 2474 pci_segments: None, 2475 platform: None, 2476 tpm: None, 2477 preserved_fds: None, 2478 })) 2479 } 2480 2481 #[test] 2482 fn test_vmm_vm_create() { 2483 let mut vmm = create_dummy_vmm(); 2484 let config = create_dummy_vm_config(); 2485 2486 assert!(matches!(vmm.vm_create(config.clone()), Ok(()))); 2487 assert!(matches!( 2488 vmm.vm_create(config), 2489 Err(VmError::VmAlreadyCreated) 2490 )); 2491 } 2492 2493 #[test] 2494 fn test_vmm_vm_cold_add_device() { 2495 let mut vmm = create_dummy_vmm(); 2496 let device_config = DeviceConfig::parse("path=/path/to/device").unwrap(); 2497 2498 assert!(matches!( 2499 vmm.vm_add_device(device_config.clone()), 2500 Err(VmError::VmNotCreated) 2501 )); 2502 2503 let _ = vmm.vm_create(create_dummy_vm_config()); 2504 assert!(vmm 2505 .vm_config 2506 .as_ref() 2507 .unwrap() 2508 .lock() 2509 .unwrap() 2510 .devices 2511 .is_none()); 2512 2513 let result = vmm.vm_add_device(device_config.clone()); 2514 assert!(result.is_ok()); 2515 assert!(result.unwrap().is_none()); 2516 assert_eq!( 2517 vmm.vm_config 2518 .as_ref() 2519 .unwrap() 2520 .lock() 2521 .unwrap() 2522 .devices 2523 .clone() 2524 .unwrap() 2525 .len(), 2526 1 2527 ); 2528 assert_eq!( 2529 vmm.vm_config 2530 .as_ref() 2531 .unwrap() 2532 .lock() 2533 .unwrap() 2534 .devices 2535 .clone() 2536 .unwrap()[0], 2537 device_config 2538 ); 2539 } 2540 2541 #[test] 2542 fn test_vmm_vm_cold_add_user_device() { 2543 let mut vmm = create_dummy_vmm(); 2544 let user_device_config = 2545 UserDeviceConfig::parse("socket=/path/to/socket,id=8,pci_segment=2").unwrap(); 2546 2547 assert!(matches!( 2548 vmm.vm_add_user_device(user_device_config.clone()), 2549 Err(VmError::VmNotCreated) 2550 )); 2551 2552 let _ = vmm.vm_create(create_dummy_vm_config()); 2553 assert!(vmm 2554 .vm_config 2555 .as_ref() 2556 .unwrap() 2557 .lock() 2558 .unwrap() 2559 .user_devices 2560 .is_none()); 2561 2562 let result = vmm.vm_add_user_device(user_device_config.clone()); 2563 assert!(result.is_ok()); 2564 assert!(result.unwrap().is_none()); 2565 assert_eq!( 2566 vmm.vm_config 2567 .as_ref() 2568 .unwrap() 2569 .lock() 2570 .unwrap() 2571 .user_devices 2572 .clone() 2573 .unwrap() 2574 .len(), 2575 1 2576 ); 2577 assert_eq!( 2578 vmm.vm_config 2579 .as_ref() 2580 .unwrap() 2581 .lock() 2582 .unwrap() 2583 .user_devices 2584 .clone() 2585 .unwrap()[0], 2586 user_device_config 2587 ); 2588 } 2589 2590 #[test] 2591 fn test_vmm_vm_cold_add_disk() { 2592 let mut vmm = create_dummy_vmm(); 2593 let disk_config = DiskConfig::parse("path=/path/to_file").unwrap(); 2594 2595 assert!(matches!( 2596 vmm.vm_add_disk(disk_config.clone()), 2597 Err(VmError::VmNotCreated) 2598 )); 2599 2600 let _ = vmm.vm_create(create_dummy_vm_config()); 2601 assert!(vmm 2602 .vm_config 2603 .as_ref() 2604 .unwrap() 2605 .lock() 2606 .unwrap() 2607 .disks 2608 .is_none()); 2609 2610 let result = vmm.vm_add_disk(disk_config.clone()); 2611 assert!(result.is_ok()); 2612 assert!(result.unwrap().is_none()); 2613 assert_eq!( 2614 vmm.vm_config 2615 .as_ref() 2616 .unwrap() 2617 .lock() 2618 .unwrap() 2619 .disks 2620 .clone() 2621 .unwrap() 2622 .len(), 2623 1 2624 ); 2625 assert_eq!( 2626 vmm.vm_config 2627 .as_ref() 2628 .unwrap() 2629 .lock() 2630 .unwrap() 2631 .disks 2632 .clone() 2633 .unwrap()[0], 2634 disk_config 2635 ); 2636 } 2637 2638 #[test] 2639 fn test_vmm_vm_cold_add_fs() { 2640 let mut vmm = create_dummy_vmm(); 2641 let fs_config = FsConfig::parse("tag=mytag,socket=/tmp/sock").unwrap(); 2642 2643 assert!(matches!( 2644 vmm.vm_add_fs(fs_config.clone()), 2645 Err(VmError::VmNotCreated) 2646 )); 2647 2648 let _ = vmm.vm_create(create_dummy_vm_config()); 2649 assert!(vmm.vm_config.as_ref().unwrap().lock().unwrap().fs.is_none()); 2650 2651 let result = vmm.vm_add_fs(fs_config.clone()); 2652 assert!(result.is_ok()); 2653 assert!(result.unwrap().is_none()); 2654 assert_eq!( 2655 vmm.vm_config 2656 .as_ref() 2657 .unwrap() 2658 .lock() 2659 .unwrap() 2660 .fs 2661 .clone() 2662 .unwrap() 2663 .len(), 2664 1 2665 ); 2666 assert_eq!( 2667 vmm.vm_config 2668 .as_ref() 2669 .unwrap() 2670 .lock() 2671 .unwrap() 2672 .fs 2673 .clone() 2674 .unwrap()[0], 2675 fs_config 2676 ); 2677 } 2678 2679 #[test] 2680 fn test_vmm_vm_cold_add_pmem() { 2681 let mut vmm = create_dummy_vmm(); 2682 let pmem_config = PmemConfig::parse("file=/tmp/pmem,size=128M").unwrap(); 2683 2684 assert!(matches!( 2685 vmm.vm_add_pmem(pmem_config.clone()), 2686 Err(VmError::VmNotCreated) 2687 )); 2688 2689 let _ = vmm.vm_create(create_dummy_vm_config()); 2690 assert!(vmm 2691 .vm_config 2692 .as_ref() 2693 .unwrap() 2694 .lock() 2695 .unwrap() 2696 .pmem 2697 .is_none()); 2698 2699 let result = vmm.vm_add_pmem(pmem_config.clone()); 2700 assert!(result.is_ok()); 2701 assert!(result.unwrap().is_none()); 2702 assert_eq!( 2703 vmm.vm_config 2704 .as_ref() 2705 .unwrap() 2706 .lock() 2707 .unwrap() 2708 .pmem 2709 .clone() 2710 .unwrap() 2711 .len(), 2712 1 2713 ); 2714 assert_eq!( 2715 vmm.vm_config 2716 .as_ref() 2717 .unwrap() 2718 .lock() 2719 .unwrap() 2720 .pmem 2721 .clone() 2722 .unwrap()[0], 2723 pmem_config 2724 ); 2725 } 2726 2727 #[test] 2728 fn test_vmm_vm_cold_add_net() { 2729 let mut vmm = create_dummy_vmm(); 2730 let net_config = NetConfig::parse( 2731 "mac=de:ad:be:ef:12:34,host_mac=12:34:de:ad:be:ef,vhost_user=true,socket=/tmp/sock", 2732 ) 2733 .unwrap(); 2734 2735 assert!(matches!( 2736 vmm.vm_add_net(net_config.clone()), 2737 Err(VmError::VmNotCreated) 2738 )); 2739 2740 let _ = vmm.vm_create(create_dummy_vm_config()); 2741 assert!(vmm 2742 .vm_config 2743 .as_ref() 2744 .unwrap() 2745 .lock() 2746 .unwrap() 2747 .net 2748 .is_none()); 2749 2750 let result = vmm.vm_add_net(net_config.clone()); 2751 assert!(result.is_ok()); 2752 assert!(result.unwrap().is_none()); 2753 assert_eq!( 2754 vmm.vm_config 2755 .as_ref() 2756 .unwrap() 2757 .lock() 2758 .unwrap() 2759 .net 2760 .clone() 2761 .unwrap() 2762 .len(), 2763 1 2764 ); 2765 assert_eq!( 2766 vmm.vm_config 2767 .as_ref() 2768 .unwrap() 2769 .lock() 2770 .unwrap() 2771 .net 2772 .clone() 2773 .unwrap()[0], 2774 net_config 2775 ); 2776 } 2777 2778 #[test] 2779 fn test_vmm_vm_cold_add_vdpa() { 2780 let mut vmm = create_dummy_vmm(); 2781 let vdpa_config = VdpaConfig::parse("path=/dev/vhost-vdpa,num_queues=2").unwrap(); 2782 2783 assert!(matches!( 2784 vmm.vm_add_vdpa(vdpa_config.clone()), 2785 Err(VmError::VmNotCreated) 2786 )); 2787 2788 let _ = vmm.vm_create(create_dummy_vm_config()); 2789 assert!(vmm 2790 .vm_config 2791 .as_ref() 2792 .unwrap() 2793 .lock() 2794 .unwrap() 2795 .vdpa 2796 .is_none()); 2797 2798 let result = vmm.vm_add_vdpa(vdpa_config.clone()); 2799 assert!(result.is_ok()); 2800 assert!(result.unwrap().is_none()); 2801 assert_eq!( 2802 vmm.vm_config 2803 .as_ref() 2804 .unwrap() 2805 .lock() 2806 .unwrap() 2807 .vdpa 2808 .clone() 2809 .unwrap() 2810 .len(), 2811 1 2812 ); 2813 assert_eq!( 2814 vmm.vm_config 2815 .as_ref() 2816 .unwrap() 2817 .lock() 2818 .unwrap() 2819 .vdpa 2820 .clone() 2821 .unwrap()[0], 2822 vdpa_config 2823 ); 2824 } 2825 2826 #[test] 2827 fn test_vmm_vm_cold_add_vsock() { 2828 let mut vmm = create_dummy_vmm(); 2829 let vsock_config = VsockConfig::parse("socket=/tmp/sock,cid=3,iommu=on").unwrap(); 2830 2831 assert!(matches!( 2832 vmm.vm_add_vsock(vsock_config.clone()), 2833 Err(VmError::VmNotCreated) 2834 )); 2835 2836 let _ = vmm.vm_create(create_dummy_vm_config()); 2837 assert!(vmm 2838 .vm_config 2839 .as_ref() 2840 .unwrap() 2841 .lock() 2842 .unwrap() 2843 .vsock 2844 .is_none()); 2845 2846 let result = vmm.vm_add_vsock(vsock_config.clone()); 2847 assert!(result.is_ok()); 2848 assert!(result.unwrap().is_none()); 2849 assert_eq!( 2850 vmm.vm_config 2851 .as_ref() 2852 .unwrap() 2853 .lock() 2854 .unwrap() 2855 .vsock 2856 .clone() 2857 .unwrap(), 2858 vsock_config 2859 ); 2860 } 2861 } 2862