// Copyright © 2019 Intel Corporation
//
// SPDX-License-Identifier: Apache-2.0
//

#![allow(clippy::significant_drop_in_scrutinee)]

#[macro_use]
extern crate event_monitor;
#[macro_use]
extern crate log;

use crate::api::{
    ApiError, ApiRequest, ApiResponse, ApiResponsePayload, VmInfo, VmReceiveMigrationData,
    VmSendMigrationData, VmmPingResponse,
};
use crate::config::{
    add_to_config, DeviceConfig, DiskConfig, FsConfig, NetConfig, PmemConfig, RestoreConfig,
    UserDeviceConfig, VdpaConfig, VmConfig, VsockConfig,
};
#[cfg(feature = "guest_debug")]
use crate::coredump::GuestDebuggable;
#[cfg(all(feature = "kvm", target_arch = "x86_64"))]
use crate::migration::get_vm_snapshot;
use crate::migration::{recv_vm_config, recv_vm_state};
use crate::seccomp_filters::{get_seccomp_filter, Thread};
use crate::vm::{Error as VmError, Vm, VmState};
use anyhow::anyhow;
use libc::{EFD_NONBLOCK, SIGINT, SIGTERM};
use memory_manager::MemoryManagerSnapshotData;
use pci::PciBdf;
use seccompiler::{apply_filter, SeccompAction};
use serde::ser::{SerializeStruct, Serializer};
use serde::{Deserialize, Serialize};
use signal_hook::iterator::{Handle, Signals};
use std::collections::HashMap;
use std::fs::File;
use std::io;
use std::io::{Read, Write};
use std::os::unix::io::{AsRawFd, FromRawFd, RawFd};
use std::os::unix::net::UnixListener;
use std::os::unix::net::UnixStream;
use std::panic::AssertUnwindSafe;
use std::path::PathBuf;
use std::sync::mpsc::{Receiver, RecvError, SendError, Sender};
use std::sync::{Arc, Mutex};
use std::{result, thread};
use thiserror::Error;
use vm_memory::bitmap::AtomicBitmap;
use vm_migration::{protocol::*, Migratable};
use vm_migration::{MigratableError, Pausable, Snapshot, Snapshottable, Transportable};
use vmm_sys_util::eventfd::EventFd;
use vmm_sys_util::signal::unblock_signal;
use vmm_sys_util::sock_ctrl_msg::ScmSocket;
use vmm_sys_util::terminal::Terminal;

mod acpi;
pub mod api;
mod clone3;
pub mod config;
#[cfg(feature = "guest_debug")]
mod coredump;
pub mod cpu;
pub mod device_manager;
pub mod device_tree;
#[cfg(feature = "gdb")]
mod gdb;
pub mod interrupt;
pub mod memory_manager;
pub mod migration;
mod pci_segment;
pub mod seccomp_filters;
mod serial_buffer;
mod serial_manager;
mod sigwinch_listener;
pub mod vm;

type GuestMemoryMmap = vm_memory::GuestMemoryMmap<AtomicBitmap>;
type GuestRegionMmap = vm_memory::GuestRegionMmap<AtomicBitmap>;

/// Errors associated with VMM management
#[derive(Debug, Error)]
#[allow(clippy::large_enum_variant)]
pub enum Error {
    /// API request receive error
    #[error("Error receiving API request: {0}")]
    ApiRequestRecv(#[source] RecvError),

    /// API response send error
    #[error("Error sending API response: {0}")]
    ApiResponseSend(#[source] SendError<ApiResponse>),

    /// Cannot bind to the UNIX domain socket path
    #[error("Error binding to UNIX domain socket: {0}")]
    Bind(#[source] io::Error),

    /// Cannot clone EventFd.
    #[error("Error cloning EventFd: {0}")]
    EventFdClone(#[source] io::Error),

    /// Cannot create EventFd.
    #[error("Error creating EventFd: {0}")]
    EventFdCreate(#[source] io::Error),

    /// Cannot read from EventFd.
    #[error("Error reading from EventFd: {0}")]
    EventFdRead(#[source] io::Error),

    /// Cannot create epoll context.
    #[error("Error creating epoll context: {0}")]
    Epoll(#[source] io::Error),

    /// Cannot create HTTP thread
    #[error("Error spawning HTTP thread: {0}")]
    HttpThreadSpawn(#[source] io::Error),

    /// Cannot handle the VM STDIN stream
    #[error("Error handling VM stdin: {0:?}")]
    Stdin(VmError),

    /// Cannot handle the VM pty stream
    #[error("Error handling VM pty: {0:?}")]
    Pty(VmError),

    /// Cannot reboot the VM
    #[error("Error rebooting VM: {0:?}")]
    VmReboot(VmError),

    /// Cannot create VMM thread
    #[error("Error spawning VMM thread: {0:?}")]
    VmmThreadSpawn(#[source] io::Error),

    /// Cannot shut the VMM down
    #[error("Error shutting down VMM: {0:?}")]
    VmmShutdown(VmError),

    /// Cannot create seccomp filter
    #[error("Error creating seccomp filter: {0}")]
    CreateSeccompFilter(seccompiler::Error),

    /// Cannot apply seccomp filter
    #[error("Error applying seccomp filter: {0}")]
    ApplySeccompFilter(seccompiler::Error),

    /// Error activating virtio devices
    #[error("Error activating virtio devices: {0:?}")]
    ActivateVirtioDevices(VmError),

    /// Error creating API server
    #[error("Error creating API server: {0:?}")]
    CreateApiServer(micro_http::ServerError),

    /// Error binding API server socket
    #[error("Error creating API server socket: {0:?}")]
    CreateApiServerSocket(#[source] io::Error),

    #[cfg(feature = "gdb")]
    #[error("Failed to start the GDB thread: {0}")]
    GdbThreadSpawn(io::Error),

    /// GDB request receive error
    #[cfg(feature = "gdb")]
    #[error("Error receiving GDB request: {0}")]
    GdbRequestRecv(#[source] RecvError),

    /// GDB response send error
    #[cfg(feature = "gdb")]
    #[error("Error sending GDB response: {0}")]
    GdbResponseSend(#[source] SendError<gdb::GdbResponse>),

    #[error("Cannot spawn a signal handler thread: {0}")]
    SignalHandlerSpawn(#[source] io::Error),

    #[error("Failed to join on threads: {0:?}")]
    ThreadCleanup(std::boxed::Box<dyn std::any::Any + std::marker::Send>),
}
pub type Result<T> = result::Result<T, Error>;

#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[repr(u64)]
pub enum EpollDispatch {
    Exit = 0,
    Reset = 1,
    Api = 2,
    ActivateVirtioDevices = 3,
    Debug = 4,
    Unknown,
}

impl From<u64> for EpollDispatch {
    fn from(v: u64) -> Self {
        use EpollDispatch::*;
        match v {
            0 => Exit,
            1 => Reset,
            2 => Api,
            3 => ActivateVirtioDevices,
            4 => Debug,
            _ => Unknown,
        }
    }
}
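// The `u64` payload carried by each epoll event is simply the
// `EpollDispatch` token cast to an integer, and the `From<u64>` impl above
// turns it back into a typed value in the control loop. A minimal sketch of
// the round trip (illustrative only):
//
//     let raw = EpollDispatch::Api as u64;  // what add_event() stores
//     assert_eq!(EpollDispatch::from(raw), EpollDispatch::Api);
//     assert_eq!(EpollDispatch::from(99), EpollDispatch::Unknown);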
pub struct EpollContext {
    epoll_file: File,
}

impl EpollContext {
    pub fn new() -> result::Result<EpollContext, io::Error> {
        let epoll_fd = epoll::create(true)?;
        // Use 'File' to enforce closing on 'epoll_fd'
        // SAFETY: the epoll_fd returned by epoll::create is valid and owned by us.
        let epoll_file = unsafe { File::from_raw_fd(epoll_fd) };

        Ok(EpollContext { epoll_file })
    }

    fn add_event<T>(&mut self, fd: &T, token: EpollDispatch) -> result::Result<(), io::Error>
    where
        T: AsRawFd,
    {
        let dispatch_index = token as u64;
        epoll::ctl(
            self.epoll_file.as_raw_fd(),
            epoll::ControlOptions::EPOLL_CTL_ADD,
            fd.as_raw_fd(),
            epoll::Event::new(epoll::Events::EPOLLIN, dispatch_index),
        )?;

        Ok(())
    }
}

impl AsRawFd for EpollContext {
    fn as_raw_fd(&self) -> RawFd {
        self.epoll_file.as_raw_fd()
    }
}

pub struct PciDeviceInfo {
    pub id: String,
    pub bdf: PciBdf,
}

impl Serialize for PciDeviceInfo {
    fn serialize<S>(&self, serializer: S) -> std::result::Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        let bdf_str = self.bdf.to_string();

        // Serialize the structure.
        let mut state = serializer.serialize_struct("PciDeviceInfo", 2)?;
        state.serialize_field("id", &self.id)?;
        state.serialize_field("bdf", &bdf_str)?;
        state.end()
    }
}
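// With the manual `Serialize` impl above, a hotplug response renders the BDF
// as a plain string rather than a nested structure, e.g. (illustrative values,
// assuming `PciBdf`'s usual "segment:bus:device.function" display format):
//
//     {"id":"_disk0","bdf":"0000:00:06.0"}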
#[allow(unused_variables)]
#[allow(clippy::too_many_arguments)]
pub fn start_vmm_thread(
    vmm_version: String,
    http_path: &Option<String>,
    http_fd: Option<RawFd>,
    api_event: EventFd,
    api_sender: Sender<ApiRequest>,
    api_receiver: Receiver<ApiRequest>,
    #[cfg(feature = "gdb")] debug_path: Option<PathBuf>,
    #[cfg(feature = "gdb")] debug_event: EventFd,
    #[cfg(feature = "gdb")] vm_debug_event: EventFd,
    seccomp_action: &SeccompAction,
    hypervisor: Arc<dyn hypervisor::Hypervisor>,
) -> Result<thread::JoinHandle<Result<()>>> {
    #[cfg(feature = "gdb")]
    let (gdb_sender, gdb_receiver) = std::sync::mpsc::channel();
    #[cfg(feature = "gdb")]
    let gdb_debug_event = debug_event.try_clone().map_err(Error::EventFdClone)?;
    #[cfg(feature = "gdb")]
    let gdb_vm_debug_event = vm_debug_event.try_clone().map_err(Error::EventFdClone)?;

    let http_api_event = api_event.try_clone().map_err(Error::EventFdClone)?;
    let hypervisor_type = hypervisor.hypervisor_type();

    // Retrieve seccomp filter
    let vmm_seccomp_filter = get_seccomp_filter(seccomp_action, Thread::Vmm, hypervisor_type)
        .map_err(Error::CreateSeccompFilter)?;

    let vmm_seccomp_action = seccomp_action.clone();
    let exit_evt = EventFd::new(EFD_NONBLOCK).map_err(Error::EventFdCreate)?;
    let thread = {
        let exit_evt = exit_evt.try_clone().map_err(Error::EventFdClone)?;
        thread::Builder::new()
            .name("vmm".to_string())
            .spawn(move || {
                // Apply seccomp filter for VMM thread.
                if !vmm_seccomp_filter.is_empty() {
                    apply_filter(&vmm_seccomp_filter).map_err(Error::ApplySeccompFilter)?;
                }

                let mut vmm = Vmm::new(
                    vmm_version.to_string(),
                    api_event,
                    #[cfg(feature = "gdb")]
                    debug_event,
                    #[cfg(feature = "gdb")]
                    vm_debug_event,
                    vmm_seccomp_action,
                    hypervisor,
                    exit_evt,
                )?;

                vmm.setup_signal_handler()?;

                vmm.control_loop(
                    Arc::new(api_receiver),
                    #[cfg(feature = "gdb")]
                    Arc::new(gdb_receiver),
                )
            })
            .map_err(Error::VmmThreadSpawn)?
    };

    // The VMM thread is started, we can start serving HTTP requests
    if let Some(http_path) = http_path {
        api::start_http_path_thread(
            http_path,
            http_api_event,
            api_sender,
            seccomp_action,
            exit_evt,
            hypervisor_type,
        )?;
    } else if let Some(http_fd) = http_fd {
        api::start_http_fd_thread(
            http_fd,
            http_api_event,
            api_sender,
            seccomp_action,
            exit_evt,
            hypervisor_type,
        )?;
    }

    #[cfg(feature = "gdb")]
    if let Some(debug_path) = debug_path {
        let target = gdb::GdbStub::new(gdb_sender, gdb_debug_event, gdb_vm_debug_event);
        thread::Builder::new()
            .name("gdb".to_owned())
            .spawn(move || gdb::gdb_thread(target, &debug_path))
            .map_err(Error::GdbThreadSpawn)?;
    }

    Ok(thread)
}

#[derive(Clone, Deserialize, Serialize)]
struct VmMigrationConfig {
    vm_config: Arc<Mutex<VmConfig>>,
    #[cfg(all(feature = "kvm", target_arch = "x86_64"))]
    common_cpuid: Vec<hypervisor::arch::x86::CpuIdEntry>,
    memory_manager_data: MemoryManagerSnapshotData,
}

pub struct Vmm {
    epoll: EpollContext,
    exit_evt: EventFd,
    reset_evt: EventFd,
    api_evt: EventFd,
    #[cfg(feature = "gdb")]
    debug_evt: EventFd,
    #[cfg(feature = "gdb")]
    vm_debug_evt: EventFd,
    version: String,
    vm: Option<Vm>,
    vm_config: Option<Arc<Mutex<VmConfig>>>,
    seccomp_action: SeccompAction,
    hypervisor: Arc<dyn hypervisor::Hypervisor>,
    activate_evt: EventFd,
    signals: Option<Handle>,
    threads: Vec<thread::JoinHandle<()>>,
}

impl Vmm {
    pub const HANDLED_SIGNALS: [i32; 2] = [SIGTERM, SIGINT];

    fn signal_handler(mut signals: Signals, on_tty: bool, exit_evt: &EventFd) {
        for sig in &Self::HANDLED_SIGNALS {
            unblock_signal(*sig).unwrap();
        }

        for signal in signals.forever() {
            match signal {
                SIGTERM | SIGINT => {
                    if exit_evt.write(1).is_err() {
                        // Resetting the terminal is usually done as the VMM exits
                        if on_tty {
                            io::stdin()
                                .lock()
                                .set_canon_mode()
                                .expect("failed to restore terminal mode");
                        }
                        std::process::exit(1);
                    }
                }
                _ => (),
            }
        }
    }

    fn setup_signal_handler(&mut self) -> Result<()> {
        let signals = Signals::new(&Self::HANDLED_SIGNALS);
        match signals {
            Ok(signals) => {
                self.signals = Some(signals.handle());
                let exit_evt = self.exit_evt.try_clone().map_err(Error::EventFdClone)?;
                // SAFETY: trivially safe libc call with a valid fd constant.
                let on_tty = unsafe { libc::isatty(libc::STDIN_FILENO as i32) } != 0;

                let signal_handler_seccomp_filter = get_seccomp_filter(
                    &self.seccomp_action,
                    Thread::SignalHandler,
                    self.hypervisor.hypervisor_type(),
                )
                .map_err(Error::CreateSeccompFilter)?;
                self.threads.push(
                    thread::Builder::new()
                        .name("vmm_signal_handler".to_string())
                        .spawn(move || {
                            if !signal_handler_seccomp_filter.is_empty() {
                                if let Err(e) = apply_filter(&signal_handler_seccomp_filter)
                                    .map_err(Error::ApplySeccompFilter)
                                {
                                    error!("Error applying seccomp filter: {:?}", e);
                                    exit_evt.write(1).ok();
                                    return;
                                }
                            }
                            std::panic::catch_unwind(AssertUnwindSafe(|| {
                                Vmm::signal_handler(signals, on_tty, &exit_evt);
                            }))
                            .map_err(|_| {
                                error!("signal_handler thread panicked");
                                exit_evt.write(1).ok()
                            })
                            .ok();
                        })
                        .map_err(Error::SignalHandlerSpawn)?,
                );
            }
            Err(e) => error!("Signal not found: {}", e),
        }
        Ok(())
    }
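    // Design note: the signal handler runs on a dedicated thread so that it
    // can carry its own seccomp filter, and any panic inside it is caught by
    // `catch_unwind` and converted into an `exit_evt` write. Either way the
    // control loop observes a single `EpollDispatch::Exit` event instead of
    // a silently dead thread.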
    fn new(
        vmm_version: String,
        api_evt: EventFd,
        #[cfg(feature = "gdb")] debug_evt: EventFd,
        #[cfg(feature = "gdb")] vm_debug_evt: EventFd,
        seccomp_action: SeccompAction,
        hypervisor: Arc<dyn hypervisor::Hypervisor>,
        exit_evt: EventFd,
    ) -> Result<Self> {
        let mut epoll = EpollContext::new().map_err(Error::Epoll)?;
        let reset_evt = EventFd::new(EFD_NONBLOCK).map_err(Error::EventFdCreate)?;
        let activate_evt = EventFd::new(EFD_NONBLOCK).map_err(Error::EventFdCreate)?;

        epoll
            .add_event(&exit_evt, EpollDispatch::Exit)
            .map_err(Error::Epoll)?;

        epoll
            .add_event(&reset_evt, EpollDispatch::Reset)
            .map_err(Error::Epoll)?;

        epoll
            .add_event(&activate_evt, EpollDispatch::ActivateVirtioDevices)
            .map_err(Error::Epoll)?;

        epoll
            .add_event(&api_evt, EpollDispatch::Api)
            .map_err(Error::Epoll)?;

        #[cfg(feature = "gdb")]
        epoll
            .add_event(&debug_evt, EpollDispatch::Debug)
            .map_err(Error::Epoll)?;

        Ok(Vmm {
            epoll,
            exit_evt,
            reset_evt,
            api_evt,
            #[cfg(feature = "gdb")]
            debug_evt,
            #[cfg(feature = "gdb")]
            vm_debug_evt,
            version: vmm_version,
            vm: None,
            vm_config: None,
            seccomp_action,
            hypervisor,
            activate_evt,
            signals: None,
            threads: vec![],
        })
    }

    fn vm_create(&mut self, config: Arc<Mutex<VmConfig>>) -> result::Result<(), VmError> {
        // We only store the passed VM config.
        // The VM will be created when being asked to boot it.
        if self.vm_config.is_none() {
            self.vm_config = Some(config);
            Ok(())
        } else {
            Err(VmError::VmAlreadyCreated)
        }
    }
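    // The create/boot split means a typical API-driven lifecycle looks like
    // this (illustrative sequence of the private handlers below):
    //
    //     vmm.vm_create(config)?;  // only stores the config
    //     vmm.vm_boot()?;          // instantiates the Vm, then boots it
    //     vmm.vm_pause()?;
    //     vmm.vm_resume()?;
    //     vmm.vm_shutdown()?;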
    fn vm_boot(&mut self) -> result::Result<(), VmError> {
        // If we don't have a config, we cannot boot a VM.
        if self.vm_config.is_none() {
            return Err(VmError::VmMissingConfig);
        };

        // Create a new VM if we don't have one yet.
        if self.vm.is_none() {
            let exit_evt = self.exit_evt.try_clone().map_err(VmError::EventFdClone)?;
            let reset_evt = self.reset_evt.try_clone().map_err(VmError::EventFdClone)?;
            #[cfg(feature = "gdb")]
            let vm_debug_evt = self
                .vm_debug_evt
                .try_clone()
                .map_err(VmError::EventFdClone)?;
            let activate_evt = self
                .activate_evt
                .try_clone()
                .map_err(VmError::EventFdClone)?;

            if let Some(ref vm_config) = self.vm_config {
                let vm = Vm::new(
                    Arc::clone(vm_config),
                    exit_evt,
                    reset_evt,
                    #[cfg(feature = "gdb")]
                    vm_debug_evt,
                    &self.seccomp_action,
                    self.hypervisor.clone(),
                    activate_evt,
                    None,
                    None,
                    None,
                )?;

                self.vm = Some(vm);
            }
        }

        // Now we can boot the VM.
        if let Some(ref mut vm) = self.vm {
            vm.boot()
        } else {
            Err(VmError::VmNotCreated)
        }
    }

    fn vm_pause(&mut self) -> result::Result<(), VmError> {
        if let Some(ref mut vm) = self.vm {
            vm.pause().map_err(VmError::Pause)
        } else {
            Err(VmError::VmNotRunning)
        }
    }

    fn vm_resume(&mut self) -> result::Result<(), VmError> {
        if let Some(ref mut vm) = self.vm {
            vm.resume().map_err(VmError::Resume)
        } else {
            Err(VmError::VmNotRunning)
        }
    }

    fn vm_snapshot(&mut self, destination_url: &str) -> result::Result<(), VmError> {
        if let Some(ref mut vm) = self.vm {
            vm.snapshot()
                .map_err(VmError::Snapshot)
                .and_then(|snapshot| {
                    vm.send(&snapshot, destination_url)
                        .map_err(VmError::SnapshotSend)
                })
        } else {
            Err(VmError::VmNotRunning)
        }
    }
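    // Snapshot and restore are symmetric: `vm_snapshot` captures the state
    // with `vm.snapshot()` and writes it to `destination_url` via
    // `vm.send()`, while `vm_restore` below reads the config and state back
    // with `recv_vm_config`/`recv_vm_state` from `source_url` before
    // rebuilding the Vm with `Vm::new_from_snapshot`.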
    fn vm_restore(&mut self, restore_cfg: RestoreConfig) -> result::Result<(), VmError> {
        if self.vm.is_some() || self.vm_config.is_some() {
            return Err(VmError::VmAlreadyCreated);
        }

        let source_url = restore_cfg.source_url.as_path().to_str();
        if source_url.is_none() {
            return Err(VmError::InvalidRestoreSourceUrl);
        }
        // Safe to unwrap as we checked it was Some(&str).
        let source_url = source_url.unwrap();

        let vm_config = Arc::new(Mutex::new(
            recv_vm_config(source_url).map_err(VmError::Restore)?,
        ));
        let snapshot = recv_vm_state(source_url).map_err(VmError::Restore)?;
        #[cfg(all(feature = "kvm", target_arch = "x86_64"))]
        let vm_snapshot = get_vm_snapshot(&snapshot).map_err(VmError::Restore)?;

        #[cfg(all(feature = "kvm", target_arch = "x86_64"))]
        self.vm_check_cpuid_compatibility(&vm_config, &vm_snapshot.common_cpuid)
            .map_err(VmError::Restore)?;

        self.vm_config = Some(Arc::clone(&vm_config));

        let exit_evt = self.exit_evt.try_clone().map_err(VmError::EventFdClone)?;
        let reset_evt = self.reset_evt.try_clone().map_err(VmError::EventFdClone)?;
        #[cfg(feature = "gdb")]
        let debug_evt = self
            .vm_debug_evt
            .try_clone()
            .map_err(VmError::EventFdClone)?;
        let activate_evt = self
            .activate_evt
            .try_clone()
            .map_err(VmError::EventFdClone)?;

        let vm = Vm::new_from_snapshot(
            &snapshot,
            vm_config,
            exit_evt,
            reset_evt,
            #[cfg(feature = "gdb")]
            debug_evt,
            Some(source_url),
            restore_cfg.prefault,
            &self.seccomp_action,
            self.hypervisor.clone(),
            activate_evt,
        )?;
        self.vm = Some(vm);

        // Now we can restore the rest of the VM.
        if let Some(ref mut vm) = self.vm {
            vm.restore(snapshot).map_err(VmError::Restore)
        } else {
            Err(VmError::VmNotCreated)
        }
    }

    #[cfg(feature = "guest_debug")]
    fn vm_coredump(&mut self, destination_url: &str) -> result::Result<(), VmError> {
        if let Some(ref mut vm) = self.vm {
            vm.coredump(destination_url).map_err(VmError::Coredump)
        } else {
            Err(VmError::VmNotRunning)
        }
    }

    fn vm_shutdown(&mut self) -> result::Result<(), VmError> {
        if let Some(ref mut vm) = self.vm.take() {
            vm.shutdown()
        } else {
            Err(VmError::VmNotRunning)
        }
    }

    fn vm_reboot(&mut self) -> result::Result<(), VmError> {
        // First we stop the current VM
        let (config, serial_pty, console_pty, console_resize_pipe) =
            if let Some(mut vm) = self.vm.take() {
                let config = vm.get_config();
                let serial_pty = vm.serial_pty();
                let console_pty = vm.console_pty();
                let console_resize_pipe = vm
                    .console_resize_pipe()
                    .as_ref()
                    .map(|pipe| pipe.try_clone().unwrap());
                vm.shutdown()?;
                (config, serial_pty, console_pty, console_resize_pipe)
            } else {
                return Err(VmError::VmNotCreated);
            };

        let exit_evt = self.exit_evt.try_clone().map_err(VmError::EventFdClone)?;
        let reset_evt = self.reset_evt.try_clone().map_err(VmError::EventFdClone)?;
        #[cfg(feature = "gdb")]
        let debug_evt = self
            .vm_debug_evt
            .try_clone()
            .map_err(VmError::EventFdClone)?;
        let activate_evt = self
            .activate_evt
            .try_clone()
            .map_err(VmError::EventFdClone)?;

        // The Linux kernel fires off an i8042 reset after doing the ACPI reset so there may be
        // an event sitting in the shared reset_evt. Without doing this we get very early reboots
        // during the boot process.
        if self.reset_evt.read().is_ok() {
            warn!("Spurious second reset event received. Ignoring.");
        }

        // Then we create the new VM
        let mut vm = Vm::new(
            config,
            exit_evt,
            reset_evt,
            #[cfg(feature = "gdb")]
            debug_evt,
            &self.seccomp_action,
            self.hypervisor.clone(),
            activate_evt,
            serial_pty,
            console_pty,
            console_resize_pipe,
        )?;

        // And we boot it
        vm.boot()?;

        self.vm = Some(vm);

        Ok(())
    }
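    // Note that `vm_reboot` deliberately hands the old Vm's serial/console
    // pty file descriptors (and resize pipe) to the replacement Vm, so a
    // client attached to the console stays connected across the reboot.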
    fn vm_info(&self) -> result::Result<VmInfo, VmError> {
        match &self.vm_config {
            Some(config) => {
                let state = match &self.vm {
                    Some(vm) => vm.get_state()?,
                    None => VmState::Created,
                };

                let config = Arc::clone(config);

                let mut memory_actual_size = config.lock().unwrap().memory.total_size();
                if let Some(vm) = &self.vm {
                    memory_actual_size -= vm.balloon_size();
                }

                let device_tree = self.vm.as_ref().map(|vm| vm.device_tree());

                Ok(VmInfo {
                    config,
                    state,
                    memory_actual_size,
                    device_tree,
                })
            }
            None => Err(VmError::VmNotCreated),
        }
    }

    fn vmm_ping(&self) -> VmmPingResponse {
        VmmPingResponse {
            version: self.version.clone(),
        }
    }

    fn vm_delete(&mut self) -> result::Result<(), VmError> {
        if self.vm_config.is_none() {
            return Ok(());
        }

        // If a VM is booted, we first try to shut it down.
        if self.vm.is_some() {
            self.vm_shutdown()?;
        }

        self.vm_config = None;

        event!("vm", "deleted");

        Ok(())
    }

    fn vmm_shutdown(&mut self) -> result::Result<(), VmError> {
        self.vm_delete()?;
        event!("vmm", "shutdown");
        Ok(())
    }

    fn vm_resize(
        &mut self,
        desired_vcpus: Option<u8>,
        desired_ram: Option<u64>,
        desired_balloon: Option<u64>,
    ) -> result::Result<(), VmError> {
        self.vm_config.as_ref().ok_or(VmError::VmNotCreated)?;

        if let Some(ref mut vm) = self.vm {
            if let Err(e) = vm.resize(desired_vcpus, desired_ram, desired_balloon) {
                error!("Error when resizing VM: {:?}", e);
                Err(e)
            } else {
                Ok(())
            }
        } else {
            let mut config = self.vm_config.as_ref().unwrap().lock().unwrap();
            if let Some(desired_vcpus) = desired_vcpus {
                config.cpus.boot_vcpus = desired_vcpus;
            }
            if let Some(desired_ram) = desired_ram {
                config.memory.size = desired_ram;
            }
            if let Some(desired_balloon) = desired_balloon {
                if let Some(balloon_config) = &mut config.balloon {
                    balloon_config.size = desired_balloon;
                }
            }
            Ok(())
        }
    }

    fn vm_resize_zone(&mut self, id: String, desired_ram: u64) -> result::Result<(), VmError> {
        self.vm_config.as_ref().ok_or(VmError::VmNotCreated)?;

        if let Some(ref mut vm) = self.vm {
            if let Err(e) = vm.resize_zone(id, desired_ram) {
                error!("Error when resizing memory zone: {:?}", e);
                Err(e)
            } else {
                Ok(())
            }
        } else {
            // Update VmConfig by setting the new desired ram.
            let memory_config = &mut self.vm_config.as_ref().unwrap().lock().unwrap().memory;

            if let Some(zones) = &mut memory_config.zones {
                for zone in zones.iter_mut() {
                    if zone.id == id {
                        zone.size = desired_ram;
                        return Ok(());
                    }
                }
            }

            error!("Could not find the memory zone {} for the resize", id);
            Err(VmError::ResizeZone)
        }
    }
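    // Every `vm_add_*` handler below follows the same two-phase pattern:
    // validate the change against a throw-away clone of the stored config
    // first, then either hot-add the device to the running Vm (returning the
    // new device info as JSON bytes) or, if the VM is not booted yet, just
    // persist the change into `vm_config` for the next boot. The shape of
    // the validation step (illustrative):
    //
    //     let mut config = self.vm_config.as_ref().unwrap().lock().unwrap().clone();
    //     add_to_config(&mut config.devices, device_cfg.clone());
    //     config.validate().map_err(VmError::ConfigValidation)?; // reject early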
    fn vm_add_device(
        &mut self,
        device_cfg: DeviceConfig,
    ) -> result::Result<Option<Vec<u8>>, VmError> {
        self.vm_config.as_ref().ok_or(VmError::VmNotCreated)?;

        {
            // Validate the configuration change in a cloned configuration
            let mut config = self.vm_config.as_ref().unwrap().lock().unwrap().clone();
            add_to_config(&mut config.devices, device_cfg.clone());
            config.validate().map_err(VmError::ConfigValidation)?;
        }

        if let Some(ref mut vm) = self.vm {
            let info = vm.add_device(device_cfg).map_err(|e| {
                error!("Error when adding new device to the VM: {:?}", e);
                e
            })?;
            serde_json::to_vec(&info)
                .map(Some)
                .map_err(VmError::SerializeJson)
        } else {
            // Update VmConfig by adding the new device.
            let mut config = self.vm_config.as_ref().unwrap().lock().unwrap();
            add_to_config(&mut config.devices, device_cfg);
            Ok(None)
        }
    }

    fn vm_add_user_device(
        &mut self,
        device_cfg: UserDeviceConfig,
    ) -> result::Result<Option<Vec<u8>>, VmError> {
        self.vm_config.as_ref().ok_or(VmError::VmNotCreated)?;

        {
            // Validate the configuration change in a cloned configuration
            let mut config = self.vm_config.as_ref().unwrap().lock().unwrap().clone();
            add_to_config(&mut config.user_devices, device_cfg.clone());
            config.validate().map_err(VmError::ConfigValidation)?;
        }

        if let Some(ref mut vm) = self.vm {
            let info = vm.add_user_device(device_cfg).map_err(|e| {
                error!("Error when adding new user device to the VM: {:?}", e);
                e
            })?;
            serde_json::to_vec(&info)
                .map(Some)
                .map_err(VmError::SerializeJson)
        } else {
            // Update VmConfig by adding the new device.
            let mut config = self.vm_config.as_ref().unwrap().lock().unwrap();
            add_to_config(&mut config.user_devices, device_cfg);
            Ok(None)
        }
    }

    fn vm_remove_device(&mut self, id: String) -> result::Result<(), VmError> {
        if let Some(ref mut vm) = self.vm {
            if let Err(e) = vm.remove_device(id) {
                error!("Error when removing device from the VM: {:?}", e);
                Err(e)
            } else {
                Ok(())
            }
        } else {
            Err(VmError::VmNotRunning)
        }
    }

    fn vm_add_disk(&mut self, disk_cfg: DiskConfig) -> result::Result<Option<Vec<u8>>, VmError> {
        self.vm_config.as_ref().ok_or(VmError::VmNotCreated)?;

        {
            // Validate the configuration change in a cloned configuration
            let mut config = self.vm_config.as_ref().unwrap().lock().unwrap().clone();
            add_to_config(&mut config.disks, disk_cfg.clone());
            config.validate().map_err(VmError::ConfigValidation)?;
        }

        if let Some(ref mut vm) = self.vm {
            let info = vm.add_disk(disk_cfg).map_err(|e| {
                error!("Error when adding new disk to the VM: {:?}", e);
                e
            })?;
            serde_json::to_vec(&info)
                .map(Some)
                .map_err(VmError::SerializeJson)
        } else {
            // Update VmConfig by adding the new device.
            let mut config = self.vm_config.as_ref().unwrap().lock().unwrap();
            add_to_config(&mut config.disks, disk_cfg);
            Ok(None)
        }
    }
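    // Unlike the add operations, `vm_remove_device` above has no
    // "remove from stored config" fallback: removing a device from a
    // created-but-not-booted VM returns `VmError::VmNotRunning`.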
    fn vm_add_fs(&mut self, fs_cfg: FsConfig) -> result::Result<Option<Vec<u8>>, VmError> {
        self.vm_config.as_ref().ok_or(VmError::VmNotCreated)?;

        {
            // Validate the configuration change in a cloned configuration
            let mut config = self.vm_config.as_ref().unwrap().lock().unwrap().clone();
            add_to_config(&mut config.fs, fs_cfg.clone());
            config.validate().map_err(VmError::ConfigValidation)?;
        }

        if let Some(ref mut vm) = self.vm {
            let info = vm.add_fs(fs_cfg).map_err(|e| {
                error!("Error when adding new fs to the VM: {:?}", e);
                e
            })?;
            serde_json::to_vec(&info)
                .map(Some)
                .map_err(VmError::SerializeJson)
        } else {
            // Update VmConfig by adding the new device.
            let mut config = self.vm_config.as_ref().unwrap().lock().unwrap();
            add_to_config(&mut config.fs, fs_cfg);
            Ok(None)
        }
    }

    fn vm_add_pmem(&mut self, pmem_cfg: PmemConfig) -> result::Result<Option<Vec<u8>>, VmError> {
        self.vm_config.as_ref().ok_or(VmError::VmNotCreated)?;

        {
            // Validate the configuration change in a cloned configuration
            let mut config = self.vm_config.as_ref().unwrap().lock().unwrap().clone();
            add_to_config(&mut config.pmem, pmem_cfg.clone());
            config.validate().map_err(VmError::ConfigValidation)?;
        }

        if let Some(ref mut vm) = self.vm {
            let info = vm.add_pmem(pmem_cfg).map_err(|e| {
                error!("Error when adding new pmem device to the VM: {:?}", e);
                e
            })?;
            serde_json::to_vec(&info)
                .map(Some)
                .map_err(VmError::SerializeJson)
        } else {
            // Update VmConfig by adding the new device.
            let mut config = self.vm_config.as_ref().unwrap().lock().unwrap();
            add_to_config(&mut config.pmem, pmem_cfg);
            Ok(None)
        }
    }

    fn vm_add_net(&mut self, net_cfg: NetConfig) -> result::Result<Option<Vec<u8>>, VmError> {
        self.vm_config.as_ref().ok_or(VmError::VmNotCreated)?;

        {
            // Validate the configuration change in a cloned configuration
            let mut config = self.vm_config.as_ref().unwrap().lock().unwrap().clone();
            add_to_config(&mut config.net, net_cfg.clone());
            config.validate().map_err(VmError::ConfigValidation)?;
        }

        if let Some(ref mut vm) = self.vm {
            let info = vm.add_net(net_cfg).map_err(|e| {
                error!("Error when adding new network device to the VM: {:?}", e);
                e
            })?;
            serde_json::to_vec(&info)
                .map(Some)
                .map_err(VmError::SerializeJson)
        } else {
            // Update VmConfig by adding the new device.
            let mut config = self.vm_config.as_ref().unwrap().lock().unwrap();
            add_to_config(&mut config.net, net_cfg);
            Ok(None)
        }
    }
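    // The `Option<Vec<u8>>` these handlers return is the JSON-encoded
    // `PciDeviceInfo` of the freshly hotplugged device, or `None` when the
    // change was only recorded in the config; the API layer forwards the
    // bytes as the `VmAction` response payload.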
    fn vm_add_vdpa(&mut self, vdpa_cfg: VdpaConfig) -> result::Result<Option<Vec<u8>>, VmError> {
        self.vm_config.as_ref().ok_or(VmError::VmNotCreated)?;

        {
            // Validate the configuration change in a cloned configuration
            let mut config = self.vm_config.as_ref().unwrap().lock().unwrap().clone();
            add_to_config(&mut config.vdpa, vdpa_cfg.clone());
            config.validate().map_err(VmError::ConfigValidation)?;
        }

        if let Some(ref mut vm) = self.vm {
            let info = vm.add_vdpa(vdpa_cfg).map_err(|e| {
                error!("Error when adding new vDPA device to the VM: {:?}", e);
                e
            })?;
            serde_json::to_vec(&info)
                .map(Some)
                .map_err(VmError::SerializeJson)
        } else {
            // Update VmConfig by adding the new device.
            let mut config = self.vm_config.as_ref().unwrap().lock().unwrap();
            add_to_config(&mut config.vdpa, vdpa_cfg);
            Ok(None)
        }
    }

    fn vm_add_vsock(&mut self, vsock_cfg: VsockConfig) -> result::Result<Option<Vec<u8>>, VmError> {
        self.vm_config.as_ref().ok_or(VmError::VmNotCreated)?;

        {
            // Validate the configuration change in a cloned configuration
            let mut config = self.vm_config.as_ref().unwrap().lock().unwrap().clone();

            if config.vsock.is_some() {
                return Err(VmError::TooManyVsockDevices);
            }

            config.vsock = Some(vsock_cfg.clone());
            config.validate().map_err(VmError::ConfigValidation)?;
        }

        if let Some(ref mut vm) = self.vm {
            let info = vm.add_vsock(vsock_cfg).map_err(|e| {
                error!("Error when adding new vsock device to the VM: {:?}", e);
                e
            })?;
            serde_json::to_vec(&info)
                .map(Some)
                .map_err(VmError::SerializeJson)
        } else {
            // Update VmConfig by adding the new device.
            let mut config = self.vm_config.as_ref().unwrap().lock().unwrap();
            config.vsock = Some(vsock_cfg);
            Ok(None)
        }
    }

    fn vm_counters(&mut self) -> result::Result<Option<Vec<u8>>, VmError> {
        if let Some(ref mut vm) = self.vm {
            let info = vm.counters().map_err(|e| {
                error!("Error when getting counters from the VM: {:?}", e);
                e
            })?;
            serde_json::to_vec(&info)
                .map(Some)
                .map_err(VmError::SerializeJson)
        } else {
            Err(VmError::VmNotRunning)
        }
    }

    fn vm_power_button(&mut self) -> result::Result<(), VmError> {
        if let Some(ref mut vm) = self.vm {
            vm.power_button()
        } else {
            Err(VmError::VmNotRunning)
        }
    }
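    // `vm_add_vsock` above differs from the other hotplug paths in one
    // respect: the config holds at most one vsock device (`config.vsock` is
    // an `Option`), so adding a second one fails with `TooManyVsockDevices`
    // instead of appending to a list.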
    fn vm_receive_config<T>(
        &mut self,
        req: &Request,
        socket: &mut T,
        existing_memory_files: Option<HashMap<u32, File>>,
    ) -> std::result::Result<Vm, MigratableError>
    where
        T: Read + Write,
    {
        // Read in config data along with memory manager data
        let mut data: Vec<u8> = Vec::new();
        data.resize_with(req.length() as usize, Default::default);
        socket
            .read_exact(&mut data)
            .map_err(MigratableError::MigrateSocket)?;

        let vm_migration_config: VmMigrationConfig =
            serde_json::from_slice(&data).map_err(|e| {
                MigratableError::MigrateReceive(anyhow!("Error deserialising config: {}", e))
            })?;

        #[cfg(all(feature = "kvm", target_arch = "x86_64"))]
        self.vm_check_cpuid_compatibility(
            &vm_migration_config.vm_config,
            &vm_migration_config.common_cpuid,
        )?;

        let exit_evt = self.exit_evt.try_clone().map_err(|e| {
            MigratableError::MigrateReceive(anyhow!("Error cloning exit EventFd: {}", e))
        })?;
        let reset_evt = self.reset_evt.try_clone().map_err(|e| {
            MigratableError::MigrateReceive(anyhow!("Error cloning reset EventFd: {}", e))
        })?;
        #[cfg(feature = "gdb")]
        let debug_evt = self.vm_debug_evt.try_clone().map_err(|e| {
            MigratableError::MigrateReceive(anyhow!("Error cloning debug EventFd: {}", e))
        })?;
        let activate_evt = self.activate_evt.try_clone().map_err(|e| {
            MigratableError::MigrateReceive(anyhow!("Error cloning activate EventFd: {}", e))
        })?;

        self.vm_config = Some(vm_migration_config.vm_config);
        let vm = Vm::new_from_migration(
            self.vm_config.clone().unwrap(),
            exit_evt,
            reset_evt,
            #[cfg(feature = "gdb")]
            debug_evt,
            &self.seccomp_action,
            self.hypervisor.clone(),
            activate_evt,
            &vm_migration_config.memory_manager_data,
            existing_memory_files,
        )
        .map_err(|e| {
            MigratableError::MigrateReceive(anyhow!("Error creating VM from snapshot: {:?}", e))
        })?;

        Response::ok().write_to(socket)?;

        Ok(vm)
    }

    fn vm_receive_state<T>(
        &mut self,
        req: &Request,
        socket: &mut T,
        mut vm: Vm,
    ) -> std::result::Result<(), MigratableError>
    where
        T: Read + Write,
    {
        // Read in state data
        let mut data: Vec<u8> = Vec::new();
        data.resize_with(req.length() as usize, Default::default);
        socket
            .read_exact(&mut data)
            .map_err(MigratableError::MigrateSocket)?;
        let snapshot: Snapshot = serde_json::from_slice(&data).map_err(|e| {
            MigratableError::MigrateReceive(anyhow!("Error deserialising snapshot: {}", e))
        })?;

        // Restore the VM from the snapshot
        vm.restore(snapshot).map_err(|e| {
            Response::error().write_to(socket).ok();
            e
        })?;
        self.vm = Some(vm);

        Response::ok().write_to(socket)?;

        Ok(())
    }

    fn vm_receive_memory<T>(
        &mut self,
        req: &Request,
        socket: &mut T,
        vm: &mut Vm,
    ) -> std::result::Result<(), MigratableError>
    where
        T: Read + Write,
    {
        // Read table
        let table = MemoryRangeTable::read_from(socket, req.length())?;

        // And then read the memory itself
        vm.receive_memory_regions(&table, socket).map_err(|e| {
            Response::error().write_to(socket).ok();
            e
        })?;
        Response::ok().write_to(socket)?;
        Ok(())
    }

    fn socket_url_to_path(url: &str) -> result::Result<PathBuf, MigratableError> {
        url.strip_prefix("unix:")
            .ok_or_else(|| {
                MigratableError::MigrateSend(anyhow!("Could not extract path from URL: {}", url))
            })
            .map(|s| s.into())
    }
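    // `socket_url_to_path` only understands `unix:` URLs, e.g.
    // (illustrative):
    //
    //     assert_eq!(
    //         Vmm::socket_url_to_path("unix:/tmp/migration.sock").unwrap(),
    //         PathBuf::from("/tmp/migration.sock")
    //     );
    //     assert!(Vmm::socket_url_to_path("tcp:0.0.0.0:4444").is_err());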
    fn vm_receive_migration(
        &mut self,
        receive_data_migration: VmReceiveMigrationData,
    ) -> result::Result<(), MigratableError> {
        info!(
            "Receiving migration: receiver_url = {}",
            receive_data_migration.receiver_url
        );

        let path = Self::socket_url_to_path(&receive_data_migration.receiver_url)?;
        let listener = UnixListener::bind(&path).map_err(|e| {
            MigratableError::MigrateReceive(anyhow!("Error binding to UNIX socket: {}", e))
        })?;
        let (mut socket, _addr) = listener.accept().map_err(|e| {
            MigratableError::MigrateReceive(anyhow!("Error accepting on UNIX socket: {}", e))
        })?;
        std::fs::remove_file(&path).map_err(|e| {
            MigratableError::MigrateReceive(anyhow!("Error unlinking UNIX socket: {}", e))
        })?;

        let mut started = false;
        let mut vm: Option<Vm> = None;
        let mut existing_memory_files = None;
        loop {
            let req = Request::read_from(&mut socket)?;
            match req.command() {
                Command::Invalid => info!("Invalid Command Received"),
                Command::Start => {
                    info!("Start Command Received");
                    started = true;

                    Response::ok().write_to(&mut socket)?;
                }
                Command::Config => {
                    info!("Config Command Received");

                    if !started {
                        warn!("Migration not started yet");
                        Response::error().write_to(&mut socket)?;
                        continue;
                    }
                    vm = Some(self.vm_receive_config(
                        &req,
                        &mut socket,
                        existing_memory_files.take(),
                    )?);
                }
                Command::State => {
                    info!("State Command Received");

                    if !started {
                        warn!("Migration not started yet");
                        Response::error().write_to(&mut socket)?;
                        continue;
                    }
                    if let Some(vm) = vm.take() {
                        self.vm_receive_state(&req, &mut socket, vm)?;
                    } else {
                        warn!("Configuration not sent yet");
                        Response::error().write_to(&mut socket)?;
                    }
                }
                Command::Memory => {
                    info!("Memory Command Received");

                    if !started {
                        warn!("Migration not started yet");
                        Response::error().write_to(&mut socket)?;
                        continue;
                    }
                    if let Some(ref mut vm) = vm.as_mut() {
                        self.vm_receive_memory(&req, &mut socket, vm)?;
                    } else {
                        warn!("Configuration not sent yet");
                        Response::error().write_to(&mut socket)?;
                    }
                }
                Command::MemoryFd => {
                    info!("MemoryFd Command Received");

                    if !started {
                        warn!("Migration not started yet");
                        Response::error().write_to(&mut socket)?;
                        continue;
                    }

                    let mut buf = [0u8; 4];
                    let (_, file) = socket.recv_with_fd(&mut buf).map_err(|e| {
                        MigratableError::MigrateReceive(anyhow!(
                            "Error receiving slot from socket: {}",
                            e
                        ))
                    })?;

                    if existing_memory_files.is_none() {
                        existing_memory_files = Some(HashMap::default())
                    }

                    if let Some(ref mut existing_memory_files) = existing_memory_files {
                        let slot = u32::from_le_bytes(buf);
                        existing_memory_files.insert(slot, file.unwrap());
                    }

                    Response::ok().write_to(&mut socket)?;
                }
                Command::Complete => {
                    info!("Complete Command Received");
                    if let Some(ref mut vm) = self.vm.as_mut() {
                        vm.resume()?;
                        Response::ok().write_to(&mut socket)?;
                    } else {
                        warn!("VM not created yet");
                        Response::error().write_to(&mut socket)?;
                    }
                    break;
                }
                Command::Abandon => {
                    info!("Abandon Command Received");
                    self.vm = None;
                    self.vm_config = None;
                    Response::ok().write_to(&mut socket).ok();
                    break;
                }
            }
        }

        Ok(())
    }
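    // The loop above is the destination side of the migration protocol. The
    // source (`send_migration` below) drives the same command sequence over
    // the socket, roughly:
    //
    //     Start -> [MemoryFd ...] -> Config -> [Memory ...] -> State -> Complete
    //
    // where `MemoryFd` is only used for local migrations, `Memory` carries
    // the (dirty) RAM passes, and `Abandon` may arrive at any point to
    // cancel; each step is acknowledged with an ok/error `Response` before
    // the next one is sent.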
    // Returns true if there were dirty pages to send
    fn vm_maybe_send_dirty_pages<T>(
        vm: &mut Vm,
        socket: &mut T,
    ) -> result::Result<bool, MigratableError>
    where
        T: Read + Write,
    {
        // Send (dirty) memory table
        let table = vm.dirty_log()?;

        // But if there are no regions go straight to pause
        if table.regions().is_empty() {
            return Ok(false);
        }

        Request::memory(table.length()).write_to(socket).unwrap();
        table.write_to(socket)?;
        // And then the memory itself
        vm.send_memory_regions(&table, socket)?;
        let res = Response::read_from(socket)?;
        if res.status() != Status::Ok {
            warn!("Error during dirty memory migration");
            Request::abandon().write_to(socket)?;
            Response::read_from(socket).ok();
            return Err(MigratableError::MigrateSend(anyhow!(
                "Error during dirty memory migration"
            )));
        }

        Ok(true)
    }
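    // `send_migration` below calls this in a bounded pre-copy loop: up to
    // `MAX_DIRTY_MIGRATIONS` passes of dirty pages are sent while the guest
    // keeps running, then the VM is paused and one last pass drains whatever
    // is still dirty. This keeps the pause window short without risking an
    // unbounded loop on a write-heavy guest.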
    fn send_migration(
        vm: &mut Vm,
        #[cfg(all(feature = "kvm", target_arch = "x86_64"))] hypervisor: Arc<
            dyn hypervisor::Hypervisor,
        >,
        send_data_migration: VmSendMigrationData,
    ) -> result::Result<(), MigratableError> {
        let path = Self::socket_url_to_path(&send_data_migration.destination_url)?;
        let mut socket = UnixStream::connect(&path).map_err(|e| {
            MigratableError::MigrateSend(anyhow!("Error connecting to UNIX socket: {}", e))
        })?;

        // Start the migration
        Request::start().write_to(&mut socket)?;
        let res = Response::read_from(&mut socket)?;
        if res.status() != Status::Ok {
            warn!("Error starting migration");
            Request::abandon().write_to(&mut socket)?;
            Response::read_from(&mut socket).ok();
            return Err(MigratableError::MigrateSend(anyhow!(
                "Error starting migration"
            )));
        }

        // Send config
        let vm_config = vm.get_config();
        #[cfg(all(feature = "kvm", target_arch = "x86_64"))]
        let common_cpuid = {
            #[cfg(feature = "tdx")]
            let tdx_enabled = vm_config.lock().unwrap().tdx.is_some();
            let phys_bits = vm::physical_bits(vm_config.lock().unwrap().cpus.max_phys_bits);
            arch::generate_common_cpuid(
                hypervisor,
                None,
                None,
                phys_bits,
                vm_config.lock().unwrap().cpus.kvm_hyperv,
                #[cfg(feature = "tdx")]
                tdx_enabled,
            )
            .map_err(|e| {
                MigratableError::MigrateReceive(anyhow!("Error generating common cpuid: {:?}", e))
            })?
        };

        if send_data_migration.local {
            vm.send_memory_fds(&mut socket)?;
        }

        let vm_migration_config = VmMigrationConfig {
            vm_config,
            #[cfg(all(feature = "kvm", target_arch = "x86_64"))]
            common_cpuid,
            memory_manager_data: vm.memory_manager_data(),
        };
        let config_data = serde_json::to_vec(&vm_migration_config).unwrap();
        Request::config(config_data.len() as u64).write_to(&mut socket)?;
        socket
            .write_all(&config_data)
            .map_err(MigratableError::MigrateSocket)?;
        let res = Response::read_from(&mut socket)?;
        if res.status() != Status::Ok {
            warn!("Error during config migration");
            Request::abandon().write_to(&mut socket)?;
            Response::read_from(&mut socket).ok();
            return Err(MigratableError::MigrateSend(anyhow!(
                "Error during config migration"
            )));
        }

        // Let every Migratable object know about the migration being started.
        vm.start_migration()?;

        if send_data_migration.local {
            // Now pause VM
            vm.pause()?;
        } else {
            // Start logging dirty pages
            vm.start_dirty_log()?;

            // Send memory table
            let table = vm.memory_range_table()?;
            Request::memory(table.length())
                .write_to(&mut socket)
                .unwrap();
            table.write_to(&mut socket)?;
            // And then the memory itself
            vm.send_memory_regions(&table, &mut socket)?;
            let res = Response::read_from(&mut socket)?;
            if res.status() != Status::Ok {
                warn!("Error during memory migration");
                Request::abandon().write_to(&mut socket)?;
                Response::read_from(&mut socket).ok();
                return Err(MigratableError::MigrateSend(anyhow!(
                    "Error during memory migration"
                )));
            }

            // Try at most 5 passes of dirty memory sending
            const MAX_DIRTY_MIGRATIONS: usize = 5;
            for i in 0..MAX_DIRTY_MIGRATIONS {
                info!("Dirty memory migration {} of {}", i, MAX_DIRTY_MIGRATIONS);
                if !Self::vm_maybe_send_dirty_pages(vm, &mut socket)? {
                    break;
                }
            }

            // Now pause VM
            vm.pause()?;

            // Send last batch of dirty pages
            Self::vm_maybe_send_dirty_pages(vm, &mut socket)?;

            // Stop logging dirty pages
            vm.stop_dirty_log()?;
        }
        // Capture snapshot and send it
        let vm_snapshot = vm.snapshot()?;
        let snapshot_data = serde_json::to_vec(&vm_snapshot).unwrap();
        Request::state(snapshot_data.len() as u64).write_to(&mut socket)?;
        socket
            .write_all(&snapshot_data)
            .map_err(MigratableError::MigrateSocket)?;
        let res = Response::read_from(&mut socket)?;
        if res.status() != Status::Ok {
            warn!("Error during state migration");
            Request::abandon().write_to(&mut socket)?;
            Response::read_from(&mut socket).ok();
            return Err(MigratableError::MigrateSend(anyhow!(
                "Error during state migration"
            )));
        }

        // Complete the migration
        Request::complete().write_to(&mut socket)?;
        let res = Response::read_from(&mut socket)?;
        if res.status() != Status::Ok {
            warn!("Error completing migration");
            Request::abandon().write_to(&mut socket)?;
            Response::read_from(&mut socket).ok();
            return Err(MigratableError::MigrateSend(anyhow!(
                "Error completing migration"
            )));
        }
        info!("Migration complete");

        // Let every Migratable object know about the migration being complete
        vm.complete_migration()
    }
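    // Local migration (`send_data_migration.local`) skips the dirty-page
    // loop entirely: guest RAM is handed over as file descriptors via
    // `send_memory_fds`, which is why `vm_send_migration` below refuses a
    // local migration unless the source VM was created with shared memory.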
    fn vm_send_migration(
        &mut self,
        send_data_migration: VmSendMigrationData,
    ) -> result::Result<(), MigratableError> {
        info!(
            "Sending migration: destination_url = {}, local = {}",
            send_data_migration.destination_url, send_data_migration.local
        );

        if !self
            .vm_config
            .as_ref()
            .unwrap()
            .lock()
            .unwrap()
            .memory
            .shared
            && send_data_migration.local
        {
            return Err(MigratableError::MigrateSend(anyhow!(
                "Local migration requires shared memory to be enabled"
            )));
        }

        if let Some(vm) = self.vm.as_mut() {
            Self::send_migration(
                vm,
                #[cfg(all(feature = "kvm", target_arch = "x86_64"))]
                self.hypervisor.clone(),
                send_data_migration,
            )
            .map_err(|migration_err| {
                error!("Migration failed: {:?}", migration_err);

                // Stop logging dirty pages
                if let Err(e) = vm.stop_dirty_log() {
                    return e;
                }

                if vm.get_state().unwrap() == VmState::Paused {
                    if let Err(e) = vm.resume() {
                        return e;
                    }
                }

                migration_err
            })?;

            // Shutdown the VM after the migration succeeded
            self.exit_evt.write(1).map_err(|e| {
                MigratableError::MigrateSend(anyhow!(
                    "Failed shutting down the VM after migration: {:?}",
                    e
                ))
            })
        } else {
            Err(MigratableError::MigrateSend(anyhow!("VM is not running")))
        }
    }

    #[cfg(all(feature = "kvm", target_arch = "x86_64"))]
    fn vm_check_cpuid_compatibility(
        &self,
        src_vm_config: &Arc<Mutex<VmConfig>>,
        src_vm_cpuid: &[hypervisor::arch::x86::CpuIdEntry],
    ) -> result::Result<(), MigratableError> {
        // We check `CPUID` compatibility between the source VM and the
        // destination, which is mostly about feature compatibility;
        // "topology/sgx" leaves are not relevant.
        let dest_cpuid = &{
            let vm_config = &src_vm_config.lock().unwrap();

            #[cfg(feature = "tdx")]
            let tdx_enabled = vm_config.tdx.is_some();
            let phys_bits = vm::physical_bits(vm_config.cpus.max_phys_bits);
            arch::generate_common_cpuid(
                self.hypervisor.clone(),
                None,
                None,
                phys_bits,
                vm_config.cpus.kvm_hyperv,
                #[cfg(feature = "tdx")]
                tdx_enabled,
            )
            .map_err(|e| {
                MigratableError::MigrateReceive(anyhow!("Error generating common cpuid: {:?}", e))
            })?
        };
        arch::CpuidFeatureEntry::check_cpuid_compatibility(src_vm_cpuid, dest_cpuid).map_err(|e| {
            MigratableError::MigrateReceive(anyhow!(
                "Error checking cpu feature compatibility: {:?}",
                e
            ))
        })
    }
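    // The control loop below is the heart of the VMM thread: one epoll wait
    // multiplexes VM lifecycle events with API (and GDB) requests. A rough
    // sketch of the dispatch (illustrative):
    //
    //     'outer: loop {
    //         for event in epoll::wait(...) {
    //             match EpollDispatch::from(event.data) {
    //                 Exit => /* consume exit_evt, shut down, break 'outer */,
    //                 Reset => /* consume reset_evt, reboot the VM */,
    //                 Api => /* drain api_evt, serve queued ApiRequests */,
    //                 ...
    //             }
    //         }
    //     }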
    fn control_loop(
        &mut self,
        api_receiver: Arc<Receiver<ApiRequest>>,
        #[cfg(feature = "gdb")] gdb_receiver: Arc<Receiver<gdb::GdbRequest>>,
    ) -> Result<()> {
        const EPOLL_EVENTS_LEN: usize = 100;

        let mut events = vec![epoll::Event::new(epoll::Events::empty(), 0); EPOLL_EVENTS_LEN];
        let epoll_fd = self.epoll.as_raw_fd();

        'outer: loop {
            let num_events = match epoll::wait(epoll_fd, -1, &mut events[..]) {
                Ok(res) => res,
                Err(e) => {
                    if e.kind() == io::ErrorKind::Interrupted {
                        // It's well defined from the epoll_wait() syscall
                        // documentation that the epoll loop can be interrupted
                        // before any of the requested events occurred or the
                        // timeout expired. In both those cases, epoll_wait()
                        // returns an error of type EINTR, but this should not
                        // be considered as a regular error. Instead it is more
                        // appropriate to retry, by calling into epoll_wait().
                        continue;
                    }
                    return Err(Error::Epoll(e));
                }
            };

            for event in events.iter().take(num_events) {
                let dispatch_event: EpollDispatch = event.data.into();
                match dispatch_event {
                    EpollDispatch::Unknown => {
                        let event = event.data;
                        warn!("Unknown VMM loop event: {}", event);
                    }
                    EpollDispatch::Exit => {
                        info!("VM exit event");
                        // Consume the event.
                        self.exit_evt.read().map_err(Error::EventFdRead)?;
                        self.vmm_shutdown().map_err(Error::VmmShutdown)?;

                        break 'outer;
                    }
                    EpollDispatch::Reset => {
                        info!("VM reset event");
                        // Consume the event.
                        self.reset_evt.read().map_err(Error::EventFdRead)?;
                        self.vm_reboot().map_err(Error::VmReboot)?;
                    }
                    EpollDispatch::ActivateVirtioDevices => {
                        if let Some(ref vm) = self.vm {
                            let count = self.activate_evt.read().map_err(Error::EventFdRead)?;
                            info!(
                                "Trying to activate pending virtio devices: count = {}",
                                count
                            );
                            vm.activate_virtio_devices()
                                .map_err(Error::ActivateVirtioDevices)?;
                        }
                    }
                    EpollDispatch::Api => {
                        // Consume the events.
                        for _ in 0..self.api_evt.read().map_err(Error::EventFdRead)? {
                            // Read from the API receiver channel
                            let api_request = api_receiver.recv().map_err(Error::ApiRequestRecv)?;

                            info!("API request event: {:?}", api_request);
                            match api_request {
                                ApiRequest::VmCreate(config, sender) => {
                                    let response = self
                                        .vm_create(config)
                                        .map_err(ApiError::VmCreate)
                                        .map(|_| ApiResponsePayload::Empty);

                                    sender.send(response).map_err(Error::ApiResponseSend)?;
                                }
                                ApiRequest::VmDelete(sender) => {
                                    let response = self
                                        .vm_delete()
                                        .map_err(ApiError::VmDelete)
                                        .map(|_| ApiResponsePayload::Empty);

                                    sender.send(response).map_err(Error::ApiResponseSend)?;
                                }
                                ApiRequest::VmBoot(sender) => {
                                    let response = self
                                        .vm_boot()
                                        .map_err(ApiError::VmBoot)
                                        .map(|_| ApiResponsePayload::Empty);

                                    sender.send(response).map_err(Error::ApiResponseSend)?;
                                }
                                ApiRequest::VmShutdown(sender) => {
                                    let response = self
                                        .vm_shutdown()
                                        .map_err(ApiError::VmShutdown)
                                        .map(|_| ApiResponsePayload::Empty);

                                    sender.send(response).map_err(Error::ApiResponseSend)?;
                                }
                                ApiRequest::VmReboot(sender) => {
                                    let response = self
                                        .vm_reboot()
                                        .map_err(ApiError::VmReboot)
                                        .map(|_| ApiResponsePayload::Empty);

                                    sender.send(response).map_err(Error::ApiResponseSend)?;
                                }
                                ApiRequest::VmInfo(sender) => {
                                    let response = self
                                        .vm_info()
                                        .map_err(ApiError::VmInfo)
                                        .map(ApiResponsePayload::VmInfo);

                                    sender.send(response).map_err(Error::ApiResponseSend)?;
                                }
                                ApiRequest::VmmPing(sender) => {
                                    let response = ApiResponsePayload::VmmPing(self.vmm_ping());

                                    sender.send(Ok(response)).map_err(Error::ApiResponseSend)?;
                                }
                                ApiRequest::VmPause(sender) => {
                                    let response = self
                                        .vm_pause()
                                        .map_err(ApiError::VmPause)
                                        .map(|_| ApiResponsePayload::Empty);

                                    sender.send(response).map_err(Error::ApiResponseSend)?;
                                }
                                ApiRequest::VmResume(sender) => {
                                    let response = self
                                        .vm_resume()
                                        .map_err(ApiError::VmResume)
                                        .map(|_| ApiResponsePayload::Empty);

                                    sender.send(response).map_err(Error::ApiResponseSend)?;
                                }
                                ApiRequest::VmSnapshot(snapshot_data, sender) => {
                                    let response = self
                                        .vm_snapshot(&snapshot_data.destination_url)
                                        .map_err(ApiError::VmSnapshot)
                                        .map(|_| ApiResponsePayload::Empty);

                                    sender.send(response).map_err(Error::ApiResponseSend)?;
                                }
                                ApiRequest::VmRestore(restore_data, sender) => {
                                    let response = self
                                        .vm_restore(restore_data.as_ref().clone())
                                        .map_err(ApiError::VmRestore)
                                        .map(|_| ApiResponsePayload::Empty);

                                    sender.send(response).map_err(Error::ApiResponseSend)?;
                                }
                                #[cfg(feature = "guest_debug")]
                                ApiRequest::VmCoredump(coredump_data, sender) => {
                                    let response = self
                                        .vm_coredump(&coredump_data.destination_url)
                                        .map_err(ApiError::VmCoredump)
                                        .map(|_| ApiResponsePayload::Empty);

                                    sender.send(response).map_err(Error::ApiResponseSend)?;
                                }
                                ApiRequest::VmmShutdown(sender) => {
                                    let response = self
                                        .vmm_shutdown()
                                        .map_err(ApiError::VmmShutdown)
                                        .map(|_| ApiResponsePayload::Empty);

                                    sender.send(response).map_err(Error::ApiResponseSend)?;

                                    break 'outer;
                                }
                                ApiRequest::VmResize(resize_data, sender) => {
                                    let response = self
                                        .vm_resize(
                                            resize_data.desired_vcpus,
                                            resize_data.desired_ram,
                                            resize_data.desired_balloon,
                                        )
                                        .map_err(ApiError::VmResize)
                                        .map(|_| ApiResponsePayload::Empty);
                                    sender.send(response).map_err(Error::ApiResponseSend)?;
                                }
                                ApiRequest::VmResizeZone(resize_zone_data, sender) => {
                                    let response = self
                                        .vm_resize_zone(
                                            resize_zone_data.id.clone(),
                                            resize_zone_data.desired_ram,
                                        )
                                        .map_err(ApiError::VmResizeZone)
                                        .map(|_| ApiResponsePayload::Empty);
                                    sender.send(response).map_err(Error::ApiResponseSend)?;
                                }
                                ApiRequest::VmAddDevice(add_device_data, sender) => {
                                    let response = self
                                        .vm_add_device(add_device_data.as_ref().clone())
                                        .map_err(ApiError::VmAddDevice)
                                        .map(ApiResponsePayload::VmAction);
                                    sender.send(response).map_err(Error::ApiResponseSend)?;
                                }
                                ApiRequest::VmAddUserDevice(add_device_data, sender) => {
                                    let response = self
                                        .vm_add_user_device(add_device_data.as_ref().clone())
                                        .map_err(ApiError::VmAddUserDevice)
                                        .map(ApiResponsePayload::VmAction);
                                    sender.send(response).map_err(Error::ApiResponseSend)?;
                                }
                                ApiRequest::VmRemoveDevice(remove_device_data, sender) => {
                                    let response = self
                                        .vm_remove_device(remove_device_data.id.clone())
                                        .map_err(ApiError::VmRemoveDevice)
                                        .map(|_| ApiResponsePayload::Empty);
                                    sender.send(response).map_err(Error::ApiResponseSend)?;
                                }
                                ApiRequest::VmAddDisk(add_disk_data, sender) => {
                                    let response = self
                                        .vm_add_disk(add_disk_data.as_ref().clone())
                                        .map_err(ApiError::VmAddDisk)
                                        .map(ApiResponsePayload::VmAction);
                                    sender.send(response).map_err(Error::ApiResponseSend)?;
                                }
                                ApiRequest::VmAddFs(add_fs_data, sender) => {
                                    let response = self
                                        .vm_add_fs(add_fs_data.as_ref().clone())
                                        .map_err(ApiError::VmAddFs)
                                        .map(ApiResponsePayload::VmAction);
                                    sender.send(response).map_err(Error::ApiResponseSend)?;
                                }
                                ApiRequest::VmAddPmem(add_pmem_data, sender) => {
                                    let response = self
                                        .vm_add_pmem(add_pmem_data.as_ref().clone())
                                        .map_err(ApiError::VmAddPmem)
                                        .map(ApiResponsePayload::VmAction);
                                    sender.send(response).map_err(Error::ApiResponseSend)?;
                                }
                                ApiRequest::VmAddNet(add_net_data, sender) => {
                                    let response = self
                                        .vm_add_net(add_net_data.as_ref().clone())
                                        .map_err(ApiError::VmAddNet)
                                        .map(ApiResponsePayload::VmAction);
                                    sender.send(response).map_err(Error::ApiResponseSend)?;
                                }
                                ApiRequest::VmAddVdpa(add_vdpa_data, sender) => {
                                    let response = self
                                        .vm_add_vdpa(add_vdpa_data.as_ref().clone())
                                        .map_err(ApiError::VmAddVdpa)
                                        .map(ApiResponsePayload::VmAction);
                                    sender.send(response).map_err(Error::ApiResponseSend)?;
                                }
                                ApiRequest::VmAddVsock(add_vsock_data, sender) => {
                                    let response = self
                                        .vm_add_vsock(add_vsock_data.as_ref().clone())
                                        .map_err(ApiError::VmAddVsock)
                                        .map(ApiResponsePayload::VmAction);
                                    sender.send(response).map_err(Error::ApiResponseSend)?;
                                }
                                ApiRequest::VmCounters(sender) => {
                                    let response = self
                                        .vm_counters()
                                        .map_err(ApiError::VmInfo)
                                        .map(ApiResponsePayload::VmAction);
                                    sender.send(response).map_err(Error::ApiResponseSend)?;
                                }
                                ApiRequest::VmReceiveMigration(receive_migration_data, sender) => {
                                    let response = self
                                        .vm_receive_migration(
                                            receive_migration_data.as_ref().clone(),
                                        )
                                        .map_err(ApiError::VmReceiveMigration)
                                        .map(|_| ApiResponsePayload::Empty);

                                    sender.send(response).map_err(Error::ApiResponseSend)?;
                                }
                                ApiRequest::VmAddDevice(add_device_data, sender) => {
                                    let response = self
                                        .vm_add_device(add_device_data.as_ref().clone())
                                        .map_err(ApiError::VmAddDevice)
                                        .map(ApiResponsePayload::VmAction);
                                    sender.send(response).map_err(Error::ApiResponseSend)?;
                                }
                                ApiRequest::VmAddUserDevice(add_device_data, sender) => {
                                    let response = self
                                        .vm_add_user_device(add_device_data.as_ref().clone())
                                        .map_err(ApiError::VmAddUserDevice)
                                        .map(ApiResponsePayload::VmAction);
                                    sender.send(response).map_err(Error::ApiResponseSend)?;
                                }
                                ApiRequest::VmRemoveDevice(remove_device_data, sender) => {
                                    let response = self
                                        .vm_remove_device(remove_device_data.id.clone())
                                        .map_err(ApiError::VmRemoveDevice)
                                        .map(|_| ApiResponsePayload::Empty);
                                    sender.send(response).map_err(Error::ApiResponseSend)?;
                                }
                                ApiRequest::VmAddDisk(add_disk_data, sender) => {
                                    let response = self
                                        .vm_add_disk(add_disk_data.as_ref().clone())
                                        .map_err(ApiError::VmAddDisk)
                                        .map(ApiResponsePayload::VmAction);
                                    sender.send(response).map_err(Error::ApiResponseSend)?;
                                }
                                ApiRequest::VmAddFs(add_fs_data, sender) => {
                                    let response = self
                                        .vm_add_fs(add_fs_data.as_ref().clone())
                                        .map_err(ApiError::VmAddFs)
                                        .map(ApiResponsePayload::VmAction);
                                    sender.send(response).map_err(Error::ApiResponseSend)?;
                                }
                                ApiRequest::VmAddPmem(add_pmem_data, sender) => {
                                    let response = self
                                        .vm_add_pmem(add_pmem_data.as_ref().clone())
                                        .map_err(ApiError::VmAddPmem)
                                        .map(ApiResponsePayload::VmAction);
                                    sender.send(response).map_err(Error::ApiResponseSend)?;
                                }
                                ApiRequest::VmAddNet(add_net_data, sender) => {
                                    let response = self
                                        .vm_add_net(add_net_data.as_ref().clone())
                                        .map_err(ApiError::VmAddNet)
                                        .map(ApiResponsePayload::VmAction);
                                    sender.send(response).map_err(Error::ApiResponseSend)?;
                                }
                                ApiRequest::VmAddVdpa(add_vdpa_data, sender) => {
                                    let response = self
                                        .vm_add_vdpa(add_vdpa_data.as_ref().clone())
                                        .map_err(ApiError::VmAddVdpa)
                                        .map(ApiResponsePayload::VmAction);
                                    sender.send(response).map_err(Error::ApiResponseSend)?;
                                }
                                ApiRequest::VmAddVsock(add_vsock_data, sender) => {
                                    let response = self
                                        .vm_add_vsock(add_vsock_data.as_ref().clone())
                                        .map_err(ApiError::VmAddVsock)
                                        .map(ApiResponsePayload::VmAction);
                                    sender.send(response).map_err(Error::ApiResponseSend)?;
                                }
                                ApiRequest::VmCounters(sender) => {
                                    let response = self
                                        .vm_counters()
                                        .map_err(ApiError::VmInfo)
                                        .map(ApiResponsePayload::VmAction);
                                    sender.send(response).map_err(Error::ApiResponseSend)?;
                                }
                                ApiRequest::VmReceiveMigration(receive_migration_data, sender) => {
                                    let response = self
                                        .vm_receive_migration(
                                            receive_migration_data.as_ref().clone(),
                                        )
                                        .map_err(ApiError::VmReceiveMigration)
                                        .map(|_| ApiResponsePayload::Empty);
                                    sender.send(response).map_err(Error::ApiResponseSend)?;
                                }
                                ApiRequest::VmSendMigration(send_migration_data, sender) => {
                                    let response = self
                                        .vm_send_migration(send_migration_data.as_ref().clone())
                                        .map_err(ApiError::VmSendMigration)
                                        .map(|_| ApiResponsePayload::Empty);
                                    sender.send(response).map_err(Error::ApiResponseSend)?;
                                }
                                ApiRequest::VmPowerButton(sender) => {
                                    let response = self
                                        .vm_power_button()
                                        .map_err(ApiError::VmPowerButton)
                                        .map(|_| ApiResponsePayload::Empty);

                                    sender.send(response).map_err(Error::ApiResponseSend)?;
                                }
                            }
                        }
                    }
                    #[cfg(feature = "gdb")]
                    EpollDispatch::Debug => {
                        // Consume the events.
                        for _ in 0..self.debug_evt.read().map_err(Error::EventFdRead)? {
                            // Read from the GDB receiver channel
                            let gdb_request = gdb_receiver.recv().map_err(Error::GdbRequestRecv)?;

                            let response = if let Some(ref mut vm) = self.vm {
                                vm.debug_request(&gdb_request.payload, gdb_request.cpu_id)
                            } else {
                                Err(VmError::VmNotRunning)
                            }
                            .map_err(gdb::Error::Vm);

                            gdb_request
                                .sender
                                .send(response)
                                .map_err(Error::GdbResponseSend)?;
                        }
                    }
                    #[cfg(not(feature = "gdb"))]
                    EpollDispatch::Debug => {}
                }
            }
        }

        // Trigger the termination of the signal_handler thread
        if let Some(signals) = self.signals.take() {
            signals.close();
        }

        // Wait for all the threads to finish
        for thread in self.threads.drain(..) {
            thread.join().map_err(Error::ThreadCleanup)?
        }

        Ok(())
    }
}

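// Identifiers under which the CPU, memory and device manager sections are
// stored within a VM snapshot.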
const CPU_MANAGER_SNAPSHOT_ID: &str = "cpu-manager";
const MEMORY_MANAGER_SNAPSHOT_ID: &str = "memory-manager";
const DEVICE_MANAGER_SNAPSHOT_ID: &str = "device-manager";

#[cfg(test)]
mod unit_tests {
    use super::*;
    use config::{
        CmdlineConfig, ConsoleConfig, ConsoleOutputMode, CpusConfig, HotplugMethod, KernelConfig,
        MemoryConfig, RngConfig, VmConfig,
    };

    fn create_dummy_vmm() -> Vmm {
        Vmm::new(
            "dummy".to_string(),
            EventFd::new(EFD_NONBLOCK).unwrap(),
            #[cfg(feature = "gdb")]
            EventFd::new(EFD_NONBLOCK).unwrap(),
            #[cfg(feature = "gdb")]
            EventFd::new(EFD_NONBLOCK).unwrap(),
            SeccompAction::Allow,
            hypervisor::new().unwrap(),
            EventFd::new(EFD_NONBLOCK).unwrap(),
        )
        .unwrap()
    }

    fn create_dummy_vm_config() -> Arc<Mutex<VmConfig>> {
        Arc::new(Mutex::new(VmConfig {
            cpus: CpusConfig {
                boot_vcpus: 1,
                max_vcpus: 1,
                topology: None,
                kvm_hyperv: false,
                max_phys_bits: 46,
                affinity: None,
                features: config::CpuFeatures::default(),
            },
            memory: MemoryConfig {
                size: 536_870_912,
                mergeable: false,
                hotplug_method: HotplugMethod::Acpi,
                hotplug_size: None,
                hotplugged_size: None,
                shared: true,
                hugepages: false,
                hugepage_size: None,
                prefault: false,
                zones: None,
            },
            kernel: Some(KernelConfig {
                path: PathBuf::from("/path/to/kernel"),
            }),
            initramfs: None,
            cmdline: CmdlineConfig {
                args: String::from(""),
            },
            disks: None,
            net: None,
            rng: RngConfig {
                src: PathBuf::from("/dev/urandom"),
                iommu: false,
            },
            balloon: None,
            fs: None,
            pmem: None,
            serial: ConsoleConfig {
                file: None,
                mode: ConsoleOutputMode::Null,
                iommu: false,
            },
            console: ConsoleConfig {
                file: None,
                mode: ConsoleOutputMode::Tty,
                iommu: false,
            },
            devices: None,
            user_devices: None,
            vdpa: None,
            vsock: None,
            iommu: false,
            #[cfg(target_arch = "x86_64")]
            sgx_epc: None,
            numa: None,
            watchdog: false,
            #[cfg(feature = "tdx")]
            tdx: None,
            #[cfg(feature = "gdb")]
            gdb: false,
            platform: None,
        }))
    }

    #[test]
    fn test_vmm_vm_create() {
        let mut vmm = create_dummy_vmm();
        let config = create_dummy_vm_config();

        assert!(matches!(vmm.vm_create(config.clone()), Ok(())));
        assert!(matches!(
            vmm.vm_create(config),
            Err(VmError::VmAlreadyCreated)
        ));
    }

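    // The cold-add tests below share one pattern: adding a device before
    // vm_create() fails with VmError::VmNotCreated, while a successful add on a
    // created-but-not-booted VM returns Ok(None) and stores the config entry.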
    #[test]
    fn test_vmm_vm_cold_add_device() {
        let mut vmm = create_dummy_vmm();
        let device_config = DeviceConfig::parse("path=/path/to/device").unwrap();

        assert!(matches!(
            vmm.vm_add_device(device_config.clone()),
            Err(VmError::VmNotCreated)
        ));

        let _ = vmm.vm_create(create_dummy_vm_config());
        assert!(vmm
            .vm_config
            .as_ref()
            .unwrap()
            .lock()
            .unwrap()
            .devices
            .is_none());

        let result = vmm.vm_add_device(device_config.clone());
        assert!(result.is_ok());
        assert!(result.unwrap().is_none());
        assert_eq!(
            vmm.vm_config
                .as_ref()
                .unwrap()
                .lock()
                .unwrap()
                .devices
                .clone()
                .unwrap()
                .len(),
            1
        );
        assert_eq!(
            vmm.vm_config
                .as_ref()
                .unwrap()
                .lock()
                .unwrap()
                .devices
                .clone()
                .unwrap()[0],
            device_config
        );
    }

    #[test]
    fn test_vmm_vm_cold_add_user_device() {
        let mut vmm = create_dummy_vmm();
        let user_device_config =
            UserDeviceConfig::parse("socket=/path/to/socket,id=8,pci_segment=2").unwrap();

        assert!(matches!(
            vmm.vm_add_user_device(user_device_config.clone()),
            Err(VmError::VmNotCreated)
        ));

        let _ = vmm.vm_create(create_dummy_vm_config());
        assert!(vmm
            .vm_config
            .as_ref()
            .unwrap()
            .lock()
            .unwrap()
            .user_devices
            .is_none());

        let result = vmm.vm_add_user_device(user_device_config.clone());
        assert!(result.is_ok());
        assert!(result.unwrap().is_none());
        assert_eq!(
            vmm.vm_config
                .as_ref()
                .unwrap()
                .lock()
                .unwrap()
                .user_devices
                .clone()
                .unwrap()
                .len(),
            1
        );
        assert_eq!(
            vmm.vm_config
                .as_ref()
                .unwrap()
                .lock()
                .unwrap()
                .user_devices
                .clone()
                .unwrap()[0],
            user_device_config
        );
    }

    #[test]
    fn test_vmm_vm_cold_add_disk() {
        let mut vmm = create_dummy_vmm();
        let disk_config = DiskConfig::parse("path=/path/to_file").unwrap();

        assert!(matches!(
            vmm.vm_add_disk(disk_config.clone()),
            Err(VmError::VmNotCreated)
        ));

        let _ = vmm.vm_create(create_dummy_vm_config());
        assert!(vmm
            .vm_config
            .as_ref()
            .unwrap()
            .lock()
            .unwrap()
            .disks
            .is_none());

        let result = vmm.vm_add_disk(disk_config.clone());
        assert!(result.is_ok());
        assert!(result.unwrap().is_none());
        assert_eq!(
            vmm.vm_config
                .as_ref()
                .unwrap()
                .lock()
                .unwrap()
                .disks
                .clone()
                .unwrap()
                .len(),
            1
        );
        assert_eq!(
            vmm.vm_config
                .as_ref()
                .unwrap()
                .lock()
                .unwrap()
                .disks
                .clone()
                .unwrap()[0],
            disk_config
        );
    }

    #[test]
    fn test_vmm_vm_cold_add_fs() {
        let mut vmm = create_dummy_vmm();
        let fs_config = FsConfig::parse("tag=mytag,socket=/tmp/sock").unwrap();

        assert!(matches!(
            vmm.vm_add_fs(fs_config.clone()),
            Err(VmError::VmNotCreated)
        ));

        let _ = vmm.vm_create(create_dummy_vm_config());
        assert!(vmm.vm_config.as_ref().unwrap().lock().unwrap().fs.is_none());

        let result = vmm.vm_add_fs(fs_config.clone());
        assert!(result.is_ok());
        assert!(result.unwrap().is_none());
        assert_eq!(
            vmm.vm_config
                .as_ref()
                .unwrap()
                .lock()
                .unwrap()
                .fs
                .clone()
                .unwrap()
                .len(),
            1
        );
        assert_eq!(
            vmm.vm_config
                .as_ref()
                .unwrap()
                .lock()
                .unwrap()
                .fs
                .clone()
                .unwrap()[0],
            fs_config
        );
    }

    #[test]
    fn test_vmm_vm_cold_add_pmem() {
        let mut vmm = create_dummy_vmm();
        let pmem_config = PmemConfig::parse("file=/tmp/pmem,size=128M").unwrap();

        assert!(matches!(
            vmm.vm_add_pmem(pmem_config.clone()),
            Err(VmError::VmNotCreated)
        ));

        let _ = vmm.vm_create(create_dummy_vm_config());
        assert!(vmm
            .vm_config
            .as_ref()
            .unwrap()
            .lock()
            .unwrap()
            .pmem
            .is_none());

        let result = vmm.vm_add_pmem(pmem_config.clone());
        assert!(result.is_ok());
        assert!(result.unwrap().is_none());
        assert_eq!(
            vmm.vm_config
                .as_ref()
                .unwrap()
                .lock()
                .unwrap()
                .pmem
                .clone()
                .unwrap()
                .len(),
            1
        );
        assert_eq!(
            vmm.vm_config
                .as_ref()
                .unwrap()
                .lock()
                .unwrap()
                .pmem
                .clone()
                .unwrap()[0],
            pmem_config
        );
    }

    #[test]
    fn test_vmm_vm_cold_add_net() {
        let mut vmm = create_dummy_vmm();
        let net_config = NetConfig::parse(
            "mac=de:ad:be:ef:12:34,host_mac=12:34:de:ad:be:ef,vhost_user=true,socket=/tmp/sock",
        )
        .unwrap();

        assert!(matches!(
            vmm.vm_add_net(net_config.clone()),
            Err(VmError::VmNotCreated)
        ));

        let _ = vmm.vm_create(create_dummy_vm_config());
        assert!(vmm
            .vm_config
            .as_ref()
            .unwrap()
            .lock()
            .unwrap()
            .net
            .is_none());

        let result = vmm.vm_add_net(net_config.clone());
        assert!(result.is_ok());
        assert!(result.unwrap().is_none());
        assert_eq!(
            vmm.vm_config
                .as_ref()
                .unwrap()
                .lock()
                .unwrap()
                .net
                .clone()
                .unwrap()
                .len(),
            1
        );
        assert_eq!(
            vmm.vm_config
                .as_ref()
                .unwrap()
                .lock()
                .unwrap()
                .net
                .clone()
                .unwrap()[0],
            net_config
        );
    }

    #[test]
    fn test_vmm_vm_cold_add_vdpa() {
        let mut vmm = create_dummy_vmm();
        let vdpa_config = VdpaConfig::parse("path=/dev/vhost-vdpa,num_queues=2").unwrap();

        assert!(matches!(
            vmm.vm_add_vdpa(vdpa_config.clone()),
            Err(VmError::VmNotCreated)
        ));

        let _ = vmm.vm_create(create_dummy_vm_config());
        assert!(vmm
            .vm_config
            .as_ref()
            .unwrap()
            .lock()
            .unwrap()
            .vdpa
            .is_none());

        let result = vmm.vm_add_vdpa(vdpa_config.clone());
        assert!(result.is_ok());
        assert!(result.unwrap().is_none());
        assert_eq!(
            vmm.vm_config
                .as_ref()
                .unwrap()
                .lock()
                .unwrap()
                .vdpa
                .clone()
                .unwrap()
                .len(),
            1
        );
        assert_eq!(
            vmm.vm_config
                .as_ref()
                .unwrap()
                .lock()
                .unwrap()
                .vdpa
                .clone()
                .unwrap()[0],
            vdpa_config
        );
    }

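    // `vsock` is a single optional entry rather than a list, so the final
    // assertion compares the stored config directly instead of indexing.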
    #[test]
    fn test_vmm_vm_cold_add_vsock() {
        let mut vmm = create_dummy_vmm();
        let vsock_config = VsockConfig::parse("socket=/tmp/sock,cid=1,iommu=on").unwrap();

        assert!(matches!(
            vmm.vm_add_vsock(vsock_config.clone()),
            Err(VmError::VmNotCreated)
        ));

        let _ = vmm.vm_create(create_dummy_vm_config());
        assert!(vmm
            .vm_config
            .as_ref()
            .unwrap()
            .lock()
            .unwrap()
            .vsock
            .is_none());

        let result = vmm.vm_add_vsock(vsock_config.clone());
        assert!(result.is_ok());
        assert!(result.unwrap().is_none());
        assert_eq!(
            vmm.vm_config
                .as_ref()
                .unwrap()
                .lock()
                .unwrap()
                .vsock
                .clone()
                .unwrap(),
            vsock_config
        );
    }
}