// Copyright © 2019 Intel Corporation
//
// SPDX-License-Identifier: Apache-2.0
//

#[macro_use]
extern crate event_monitor;
#[macro_use]
extern crate lazy_static;
#[macro_use]
extern crate log;
#[macro_use]
extern crate serde_derive;
#[cfg(test)]
#[macro_use]
extern crate credibility;

use crate::api::{
    ApiError, ApiRequest, ApiResponse, ApiResponsePayload, VmInfo, VmReceiveMigrationData,
    VmSendMigrationData, VmmPingResponse,
};
use crate::config::{
    DeviceConfig, DiskConfig, FsConfig, NetConfig, PmemConfig, RestoreConfig, UserDeviceConfig,
    VmConfig, VsockConfig,
};
use crate::migration::{get_vm_snapshot, recv_vm_snapshot};
use crate::seccomp_filters::{get_seccomp_filter, Thread};
use crate::vm::{Error as VmError, Vm, VmState};
use anyhow::anyhow;
use libc::EFD_NONBLOCK;
use seccompiler::{apply_filter, SeccompAction};
use serde::ser::{Serialize, SerializeStruct, Serializer};
use std::fs::File;
use std::io;
use std::io::{Read, Write};
use std::os::unix::io::{AsRawFd, FromRawFd, RawFd};
use std::os::unix::net::UnixListener;
use std::os::unix::net::UnixStream;
use std::path::PathBuf;
use std::sync::mpsc::{Receiver, RecvError, SendError, Sender};
use std::sync::{Arc, Mutex};
use std::{result, thread};
use thiserror::Error;
use vm_memory::bitmap::AtomicBitmap;
use vm_migration::{protocol::*, Migratable};
use vm_migration::{MigratableError, Pausable, Snapshot, Snapshottable, Transportable};
use vmm_sys_util::eventfd::EventFd;

pub mod api;
mod clone3;
pub mod config;
pub mod cpu;
pub mod device_manager;
pub mod device_tree;
pub mod interrupt;
pub mod memory_manager;
pub mod migration;
pub mod seccomp_filters;
mod sigwinch_listener;
pub mod vm;

#[cfg(feature = "acpi")]
mod acpi;
mod serial_buffer;

type GuestMemoryMmap = vm_memory::GuestMemoryMmap<AtomicBitmap>;
type GuestRegionMmap = vm_memory::GuestRegionMmap<AtomicBitmap>;

/// Errors associated with VMM management
#[derive(Debug, Error)]
#[allow(clippy::large_enum_variant)]
pub enum Error {
    /// API request receive error
    #[error("Error receiving API request: {0}")]
    ApiRequestRecv(#[source] RecvError),

    /// API response send error
    #[error("Error sending API response: {0}")]
    ApiResponseSend(#[source] SendError<ApiResponse>),

    /// Cannot bind to the UNIX domain socket path
    #[error("Error binding to UNIX domain socket: {0}")]
    Bind(#[source] io::Error),

    /// Cannot clone EventFd.
    #[error("Error cloning EventFd: {0}")]
    EventFdClone(#[source] io::Error),

    /// Cannot create EventFd.
    #[error("Error creating EventFd: {0}")]
    EventFdCreate(#[source] io::Error),

    /// Cannot read from EventFd.
    #[error("Error reading from EventFd: {0}")]
    EventFdRead(#[source] io::Error),

    /// Cannot create epoll context.
    #[error("Error creating epoll context: {0}")]
    Epoll(#[source] io::Error),

    /// Cannot create HTTP thread
    #[error("Error spawning HTTP thread: {0}")]
    HttpThreadSpawn(#[source] io::Error),

    /// Cannot handle the VM STDIN stream
    #[error("Error handling VM stdin: {0:?}")]
    Stdin(VmError),

    /// Cannot handle the VM pty stream
    #[error("Error handling VM pty: {0:?}")]
    Pty(VmError),

    /// Cannot reboot the VM
    #[error("Error rebooting VM: {0:?}")]
    VmReboot(VmError),

    /// Cannot create VMM thread
    #[error("Error spawning VMM thread: {0:?}")]
    VmmThreadSpawn(#[source] io::Error),

    /// Cannot shut the VMM down
    #[error("Error shutting down VMM: {0:?}")]
    VmmShutdown(VmError),

    /// Cannot create seccomp filter
    #[error("Error creating seccomp filter: {0}")]
    CreateSeccompFilter(seccompiler::Error),

    /// Cannot apply seccomp filter
    #[error("Error applying seccomp filter: {0}")]
    ApplySeccompFilter(seccompiler::Error),

    /// Error activating virtio devices
    #[error("Error activating virtio devices: {0:?}")]
    ActivateVirtioDevices(VmError),

    /// Error creating API server
    #[error("Error creating API server: {0:?}")]
    CreateApiServer(micro_http::ServerError),

    /// Error binding API server socket
    #[error("Error creating API server socket: {0:?}")]
    CreateApiServerSocket(#[source] io::Error),
}
pub type Result<T> = result::Result<T, Error>;

#[derive(Debug, Clone, Copy, PartialEq)]
#[repr(u64)]
pub enum EpollDispatch {
    Exit = 0,
    Reset = 1,
    Stdin = 2,
    Api = 3,
    ActivateVirtioDevices = 4,
    SerialPty = 5,
    Unknown,
}

impl From<u64> for EpollDispatch {
    fn from(v: u64) -> Self {
        use EpollDispatch::*;
        match v {
            0 => Exit,
            1 => Reset,
            2 => Stdin,
            3 => Api,
            4 => ActivateVirtioDevices,
            5 => SerialPty,
            _ => Unknown,
        }
    }
}

pub struct EpollContext {
    epoll_file: File,
}

impl EpollContext {
    pub fn new() -> result::Result<EpollContext, io::Error> {
        let epoll_fd = epoll::create(true)?;
        // Use 'File' to enforce closing on 'epoll_fd'
        let epoll_file = unsafe { File::from_raw_fd(epoll_fd) };

        Ok(EpollContext { epoll_file })
    }

    pub fn add_stdin(&mut self) -> result::Result<(), io::Error> {
        let dispatch_index = EpollDispatch::Stdin as u64;
        epoll::ctl(
            self.epoll_file.as_raw_fd(),
            epoll::ControlOptions::EPOLL_CTL_ADD,
            libc::STDIN_FILENO,
            epoll::Event::new(epoll::Events::EPOLLIN, dispatch_index),
        )?;

        Ok(())
    }

    fn add_event<T>(&mut self, fd: &T, token: EpollDispatch) -> result::Result<(), io::Error>
    where
        T: AsRawFd,
    {
        let dispatch_index = token as u64;
        epoll::ctl(
            self.epoll_file.as_raw_fd(),
            epoll::ControlOptions::EPOLL_CTL_ADD,
            fd.as_raw_fd(),
            epoll::Event::new(epoll::Events::EPOLLIN, dispatch_index),
        )?;

        Ok(())
    }
}

impl AsRawFd for EpollContext {
    fn as_raw_fd(&self) -> RawFd {
        self.epoll_file.as_raw_fd()
    }
}

pub struct PciDeviceInfo {
    pub id: String,
    pub bdf: u32,
}

impl Serialize for PciDeviceInfo {
    fn serialize<S>(&self, serializer: S) -> std::result::Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        // Transform the PCI b/d/f into a standardized string.
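        // The 32-bit BDF packs segment[31:16], bus[15:8], device[7:3] and
        // function[2:0]; for example, 0x0020 is rendered as "0000:00:04.0".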
        let segment = (self.bdf >> 16) & 0xffff;
        let bus = (self.bdf >> 8) & 0xff;
        let device = (self.bdf >> 3) & 0x1f;
        let function = self.bdf & 0x7;
        let bdf_str = format!(
            "{:04x}:{:02x}:{:02x}.{:01x}",
            segment, bus, device, function
        );

        // Serialize the structure.
        let mut state = serializer.serialize_struct("PciDeviceInfo", 2)?;
        state.serialize_field("id", &self.id)?;
        state.serialize_field("bdf", &bdf_str)?;
        state.end()
    }
}

#[allow(clippy::too_many_arguments)]
pub fn start_vmm_thread(
    vmm_version: String,
    http_path: &Option<String>,
    http_fd: Option<RawFd>,
    api_event: EventFd,
    api_sender: Sender<ApiRequest>,
    api_receiver: Receiver<ApiRequest>,
    seccomp_action: &SeccompAction,
    hypervisor: Arc<dyn hypervisor::Hypervisor>,
) -> Result<thread::JoinHandle<Result<()>>> {
    let http_api_event = api_event.try_clone().map_err(Error::EventFdClone)?;

    // Retrieve seccomp filter
    let vmm_seccomp_filter =
        get_seccomp_filter(seccomp_action, Thread::Vmm).map_err(Error::CreateSeccompFilter)?;

    let vmm_seccomp_action = seccomp_action.clone();
    let exit_evt = EventFd::new(EFD_NONBLOCK).map_err(Error::EventFdCreate)?;
    let thread = {
        let exit_evt = exit_evt.try_clone().map_err(Error::EventFdClone)?;
        thread::Builder::new()
            .name("vmm".to_string())
            .spawn(move || {
                // Apply seccomp filter for VMM thread.
                if !vmm_seccomp_filter.is_empty() {
                    apply_filter(&vmm_seccomp_filter).map_err(Error::ApplySeccompFilter)?;
                }

                let mut vmm = Vmm::new(
                    vmm_version.to_string(),
                    api_event,
                    vmm_seccomp_action,
                    hypervisor,
                    exit_evt,
                )?;

                vmm.control_loop(Arc::new(api_receiver))
            })
            .map_err(Error::VmmThreadSpawn)?
    };

    // The VMM thread is started, we can start serving HTTP requests
    if let Some(http_path) = http_path {
        api::start_http_path_thread(
            http_path,
            http_api_event,
            api_sender,
            seccomp_action,
            exit_evt,
        )?;
    } else if let Some(http_fd) = http_fd {
        api::start_http_fd_thread(
            http_fd,
            http_api_event,
            api_sender,
            seccomp_action,
            exit_evt,
        )?;
    }
    Ok(thread)
}

#[derive(Clone, Deserialize, Serialize)]
struct VmMigrationConfig {
    vm_config: Arc<Mutex<VmConfig>>,
    #[cfg(all(feature = "kvm", target_arch = "x86_64"))]
    common_cpuid: hypervisor::CpuId,
}

pub struct Vmm {
    epoll: EpollContext,
    exit_evt: EventFd,
    reset_evt: EventFd,
    api_evt: EventFd,
    version: String,
    vm: Option<Vm>,
    vm_config: Option<Arc<Mutex<VmConfig>>>,
    seccomp_action: SeccompAction,
    hypervisor: Arc<dyn hypervisor::Hypervisor>,
    activate_evt: EventFd,
}

impl Vmm {
    fn new(
        vmm_version: String,
        api_evt: EventFd,
        seccomp_action: SeccompAction,
        hypervisor: Arc<dyn hypervisor::Hypervisor>,
        exit_evt: EventFd,
    ) -> Result<Self> {
        let mut epoll = EpollContext::new().map_err(Error::Epoll)?;
        let reset_evt = EventFd::new(EFD_NONBLOCK).map_err(Error::EventFdCreate)?;
        let activate_evt = EventFd::new(EFD_NONBLOCK).map_err(Error::EventFdCreate)?;

        epoll
            .add_event(&exit_evt, EpollDispatch::Exit)
            .map_err(Error::Epoll)?;

        epoll
            .add_event(&reset_evt, EpollDispatch::Reset)
            .map_err(Error::Epoll)?;

        epoll
            .add_event(&activate_evt, EpollDispatch::ActivateVirtioDevices)
            .map_err(Error::Epoll)?;

        epoll
            .add_event(&api_evt, EpollDispatch::Api)
            .map_err(Error::Epoll)?;

        Ok(Vmm {
            epoll,
            exit_evt,
            reset_evt,
            api_evt,
            version: vmm_version,
            vm: None,
            vm_config: None,
            seccomp_action,
            hypervisor,
            activate_evt,
        })
    }

    fn vm_create(&mut self, config: Arc<Mutex<VmConfig>>) -> result::Result<(), VmError> {
        // We only store the passed VM config.
        // The VM will be created when being asked to boot it.
        if self.vm_config.is_none() {
            self.vm_config = Some(config);
            Ok(())
        } else {
            Err(VmError::VmAlreadyCreated)
        }
    }

    fn vm_boot(&mut self) -> result::Result<(), VmError> {
        // If we don't have a config, we can not boot a VM.
        if self.vm_config.is_none() {
            return Err(VmError::VmMissingConfig);
        };

        // Create a new VM if we don't have one yet.
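        // This is also where the console plumbing is hooked up: the serial
        // PTY (if any) and, when the serial console is in TTY mode, stdin are
        // registered with the VMM epoll loop.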
        if self.vm.is_none() {
            let exit_evt = self.exit_evt.try_clone().map_err(VmError::EventFdClone)?;
            let reset_evt = self.reset_evt.try_clone().map_err(VmError::EventFdClone)?;
            let activate_evt = self
                .activate_evt
                .try_clone()
                .map_err(VmError::EventFdClone)?;

            if let Some(ref vm_config) = self.vm_config {
                let vm = Vm::new(
                    Arc::clone(vm_config),
                    exit_evt,
                    reset_evt,
                    &self.seccomp_action,
                    self.hypervisor.clone(),
                    activate_evt,
                    None,
                    None,
                    None,
                )?;
                if let Some(serial_pty) = vm.serial_pty() {
                    self.epoll
                        .add_event(&serial_pty.main, EpollDispatch::SerialPty)
                        .map_err(VmError::EventfdError)?;
                };
                if matches!(
                    vm_config.lock().unwrap().serial.mode,
                    config::ConsoleOutputMode::Tty
                ) && unsafe { libc::isatty(libc::STDIN_FILENO as i32) } != 0
                {
                    self.epoll.add_stdin().map_err(VmError::EventfdError)?;
                }

                self.vm = Some(vm);
            }
        }

        // Now we can boot the VM.
        if let Some(ref mut vm) = self.vm {
            vm.boot()
        } else {
            Err(VmError::VmNotCreated)
        }
    }

    fn vm_pause(&mut self) -> result::Result<(), VmError> {
        if let Some(ref mut vm) = self.vm {
            vm.pause().map_err(VmError::Pause)
        } else {
            Err(VmError::VmNotRunning)
        }
    }

    fn vm_resume(&mut self) -> result::Result<(), VmError> {
        if let Some(ref mut vm) = self.vm {
            vm.resume().map_err(VmError::Resume)
        } else {
            Err(VmError::VmNotRunning)
        }
    }

    fn vm_snapshot(&mut self, destination_url: &str) -> result::Result<(), VmError> {
        if let Some(ref mut vm) = self.vm {
            vm.snapshot()
                .map_err(VmError::Snapshot)
                .and_then(|snapshot| {
                    vm.send(&snapshot, destination_url)
                        .map_err(VmError::SnapshotSend)
                })
        } else {
            Err(VmError::VmNotRunning)
        }
    }

    fn vm_restore(&mut self, restore_cfg: RestoreConfig) -> result::Result<(), VmError> {
        if self.vm.is_some() || self.vm_config.is_some() {
            return Err(VmError::VmAlreadyCreated);
        }

        let source_url = restore_cfg.source_url.as_path().to_str();
        if source_url.is_none() {
            return Err(VmError::RestoreSourceUrlPathToStr);
        }
        // Safe to unwrap as we checked it was Some(&str).
        let source_url = source_url.unwrap();

        let snapshot = recv_vm_snapshot(source_url).map_err(VmError::Restore)?;
        let vm_snapshot = get_vm_snapshot(&snapshot).map_err(VmError::Restore)?;

        #[cfg(all(feature = "kvm", target_arch = "x86_64"))]
        self.vm_check_cpuid_compatibility(&vm_snapshot.config, &vm_snapshot.common_cpuid)
            .map_err(VmError::Restore)?;

        self.vm_config = Some(Arc::clone(&vm_snapshot.config));

        let exit_evt = self.exit_evt.try_clone().map_err(VmError::EventFdClone)?;
        let reset_evt = self.reset_evt.try_clone().map_err(VmError::EventFdClone)?;
        let activate_evt = self
            .activate_evt
            .try_clone()
            .map_err(VmError::EventFdClone)?;

        let vm = Vm::new_from_snapshot(
            &snapshot,
            exit_evt,
            reset_evt,
            Some(source_url),
            restore_cfg.prefault,
            &self.seccomp_action,
            self.hypervisor.clone(),
            activate_evt,
        )?;
        self.vm = Some(vm);

        // Now we can restore the rest of the VM.
        if let Some(ref mut vm) = self.vm {
            vm.restore(snapshot).map_err(VmError::Restore)
        } else {
            Err(VmError::VmNotCreated)
        }
    }

    fn vm_shutdown(&mut self) -> result::Result<(), VmError> {
        if let Some(ref mut vm) = self.vm.take() {
            vm.shutdown()
        } else {
            Err(VmError::VmNotRunning)
        }
    }

    fn vm_reboot(&mut self) -> result::Result<(), VmError> {
        // Without ACPI, a reset is equivalent to a shutdown.
        // On AArch64, until ACPI is supported, we skip this check and simply
        // continue with the reset.
        #[cfg(all(target_arch = "x86_64", not(feature = "acpi")))]
        {
            if self.vm.is_some() {
                self.exit_evt.write(1).unwrap();
                return Ok(());
            }
        }

        // First we stop the current VM and create a new one.
        if let Some(ref mut vm) = self.vm {
            let config = vm.get_config();
            let serial_pty = vm.serial_pty();
            let console_pty = vm.console_pty();
            let console_resize_pipe = vm
                .console_resize_pipe()
                .as_ref()
                .map(|pipe| pipe.try_clone().unwrap());
            self.vm_shutdown()?;

            let exit_evt = self.exit_evt.try_clone().map_err(VmError::EventFdClone)?;
            let reset_evt = self.reset_evt.try_clone().map_err(VmError::EventFdClone)?;
            let activate_evt = self
                .activate_evt
                .try_clone()
                .map_err(VmError::EventFdClone)?;

            // The Linux kernel fires off an i8042 reset after doing the ACPI reset so there may be
            // an event sitting in the shared reset_evt. Without doing this we get very early reboots
            // during the boot process.
            if self.reset_evt.read().is_ok() {
                warn!("Spurious second reset event received. Ignoring.");
            }
            self.vm = Some(Vm::new(
                config,
                exit_evt,
                reset_evt,
                &self.seccomp_action,
                self.hypervisor.clone(),
                activate_evt,
                serial_pty,
                console_pty,
                console_resize_pipe,
            )?);
        }

        // Then we start the new VM.
        if let Some(ref mut vm) = self.vm {
            vm.boot()
        } else {
            Err(VmError::VmNotCreated)
        }
    }

    fn vm_info(&self) -> result::Result<VmInfo, VmError> {
        match &self.vm_config {
            Some(config) => {
                let state = match &self.vm {
                    Some(vm) => vm.get_state()?,
                    None => VmState::Created,
                };

                let config = Arc::clone(config);

                let mut memory_actual_size = config.lock().unwrap().memory.total_size();
                if let Some(vm) = &self.vm {
                    memory_actual_size -= vm.balloon_size();
                }

                let device_tree = self.vm.as_ref().map(|vm| vm.device_tree());

                Ok(VmInfo {
                    config,
                    state,
                    memory_actual_size,
                    device_tree,
                })
            }
            None => Err(VmError::VmNotCreated),
        }
    }

    fn vmm_ping(&self) -> VmmPingResponse {
        VmmPingResponse {
            version: self.version.clone(),
        }
    }

    fn vm_delete(&mut self) -> result::Result<(), VmError> {
        if self.vm_config.is_none() {
            return Ok(());
        }

        // If a VM is booted, we first try to shut it down.
        if self.vm.is_some() {
            self.vm_shutdown()?;
        }

        self.vm_config = None;

        event!("vm", "deleted");

        Ok(())
    }

    fn vmm_shutdown(&mut self) -> result::Result<(), VmError> {
        self.vm_delete()?;
        event!("vmm", "shutdown");
        Ok(())
    }

    fn vm_resize(
        &mut self,
        desired_vcpus: Option<u8>,
        desired_ram: Option<u64>,
        desired_balloon: Option<u64>,
    ) -> result::Result<(), VmError> {
        if let Some(ref mut vm) = self.vm {
            if let Err(e) = vm.resize(desired_vcpus, desired_ram, desired_balloon) {
                error!("Error when resizing VM: {:?}", e);
                Err(e)
            } else {
                Ok(())
            }
        } else {
            Err(VmError::VmNotRunning)
        }
    }

    fn vm_resize_zone(&mut self, id: String, desired_ram: u64) -> result::Result<(), VmError> {
        if let Some(ref mut vm) = self.vm {
            if let Err(e) = vm.resize_zone(id, desired_ram) {
                error!("Error when resizing memory zone: {:?}", e);
                Err(e)
            } else {
                Ok(())
            }
        } else {
            Err(VmError::VmNotRunning)
        }
    }

    fn vm_add_device(&mut self, device_cfg: DeviceConfig) -> result::Result<Vec<u8>, VmError> {
        if let Some(ref mut vm) = self.vm {
            let info = vm.add_device(device_cfg).map_err(|e| {
                error!("Error when adding new device to the VM: {:?}", e);
                e
            })?;
            serde_json::to_vec(&info).map_err(VmError::SerializeJson)
        } else {
            Err(VmError::VmNotRunning)
        }
    }

    fn vm_add_user_device(
        &mut self,
        device_cfg: UserDeviceConfig,
    ) -> result::Result<Vec<u8>, VmError> {
        if let Some(ref mut vm) = self.vm {
            let info = vm.add_user_device(device_cfg).map_err(|e| {
                error!("Error when adding new user device to the VM: {:?}", e);
                e
            })?;
            serde_json::to_vec(&info).map_err(VmError::SerializeJson)
        } else {
            Err(VmError::VmNotRunning)
        }
    }

    fn vm_remove_device(&mut self, id: String) -> result::Result<(), VmError> {
        if let Some(ref mut vm) = self.vm {
            if let Err(e) = vm.remove_device(id) {
                error!("Error when removing device from the VM: {:?}", e);
                Err(e)
            } else {
                Ok(())
            }
        } else {
            Err(VmError::VmNotRunning)
        }
    }

    fn vm_add_disk(&mut self, disk_cfg: DiskConfig) -> result::Result<Vec<u8>, VmError> {
        if let Some(ref mut vm) = self.vm {
            let info = vm.add_disk(disk_cfg).map_err(|e| {
                error!("Error when adding new disk to the VM: {:?}", e);
                e
            })?;
            serde_json::to_vec(&info).map_err(VmError::SerializeJson)
        } else {
            Err(VmError::VmNotRunning)
        }
    }

    fn vm_add_fs(&mut self, fs_cfg: FsConfig) -> result::Result<Vec<u8>, VmError> {
        if let Some(ref mut vm) = self.vm {
            let info = vm.add_fs(fs_cfg).map_err(|e| {
                error!("Error when adding new fs to the VM: {:?}", e);
                e
            })?;
            serde_json::to_vec(&info).map_err(VmError::SerializeJson)
        } else {
            Err(VmError::VmNotRunning)
        }
    }

    fn vm_add_pmem(&mut self, pmem_cfg: PmemConfig) -> result::Result<Vec<u8>, VmError> {
        if let Some(ref mut vm) = self.vm {
            let info = vm.add_pmem(pmem_cfg).map_err(|e| {
                error!("Error when adding new pmem device to the VM: {:?}", e);
                e
            })?;
            serde_json::to_vec(&info).map_err(VmError::SerializeJson)
        } else {
            Err(VmError::VmNotRunning)
        }
    }

    fn vm_add_net(&mut self, net_cfg: NetConfig) -> result::Result<Vec<u8>, VmError> {
        if let Some(ref mut vm) = self.vm {
            let info = vm.add_net(net_cfg).map_err(|e| {
                error!("Error when adding new network device to the VM: {:?}", e);
                e
            })?;
            serde_json::to_vec(&info).map_err(VmError::SerializeJson)
        } else {
            Err(VmError::VmNotRunning)
        }
    }

    fn vm_add_vsock(&mut self, vsock_cfg: VsockConfig) -> result::Result<Vec<u8>, VmError> {
        if let Some(ref mut vm) = self.vm {
            let info = vm.add_vsock(vsock_cfg).map_err(|e| {
                error!("Error when adding new vsock device to the VM: {:?}", e);
                e
            })?;
            serde_json::to_vec(&info).map_err(VmError::SerializeJson)
        } else {
            Err(VmError::VmNotRunning)
        }
    }

    fn vm_counters(&mut self) -> result::Result<Vec<u8>, VmError> {
        if let Some(ref mut vm) = self.vm {
            let info = vm.counters().map_err(|e| {
                error!("Error when getting counters from the VM: {:?}", e);
                e
            })?;
            serde_json::to_vec(&info).map_err(VmError::SerializeJson)
        } else {
            Err(VmError::VmNotRunning)
        }
    }

    fn vm_power_button(&mut self) -> result::Result<(), VmError> {
        if let Some(ref mut vm) = self.vm {
            vm.power_button()
        } else {
            Err(VmError::VmNotRunning)
        }
    }

    fn vm_receive_config<T>(
        &mut self,
        req: &Request,
        socket: &mut T,
    ) -> std::result::Result<Vm, MigratableError>
    where
        T: Read + Write,
    {
        // Read in config data
        let mut data = Vec::with_capacity(req.length() as usize);
        unsafe {
            data.set_len(req.length() as usize);
        }
        socket
            .read_exact(&mut data)
            .map_err(MigratableError::MigrateSocket)?;

        let vm_migration_config: VmMigrationConfig =
            serde_json::from_slice(&data).map_err(|e| {
                MigratableError::MigrateReceive(anyhow!("Error deserialising config: {}", e))
            })?;

        #[cfg(all(feature = "kvm", target_arch = "x86_64"))]
        self.vm_check_cpuid_compatibility(
            &vm_migration_config.vm_config,
            &vm_migration_config.common_cpuid,
        )?;

        let exit_evt = self.exit_evt.try_clone().map_err(|e| {
            MigratableError::MigrateReceive(anyhow!("Error cloning exit EventFd: {}", e))
        })?;
        let reset_evt = self.reset_evt.try_clone().map_err(|e| {
            MigratableError::MigrateReceive(anyhow!("Error cloning reset EventFd: {}", e))
        })?;
        let activate_evt = self.activate_evt.try_clone().map_err(|e| {
            MigratableError::MigrateReceive(anyhow!("Error cloning activate EventFd: {}", e))
        })?;

        self.vm_config = Some(vm_migration_config.vm_config);
        let vm = Vm::new_from_migration(
            self.vm_config.clone().unwrap(),
            exit_evt,
            reset_evt,
            &self.seccomp_action,
            self.hypervisor.clone(),
            activate_evt,
        )
        .map_err(|e| {
            MigratableError::MigrateReceive(anyhow!("Error creating VM from snapshot: {:?}", e))
        })?;

        Response::ok().write_to(socket)?;

        Ok(vm)
    }

    fn vm_receive_state<T>(
        &mut self,
        req: &Request,
        socket: &mut T,
        mut vm: Vm,
    ) -> std::result::Result<(), MigratableError>
    where
        T: Read + Write,
    {
        // Read in state data
        let mut data = Vec::with_capacity(req.length() as usize);
        unsafe {
            data.set_len(req.length() as usize);
        }
        socket
            .read_exact(&mut data)
            .map_err(MigratableError::MigrateSocket)?;
        let snapshot: Snapshot = serde_json::from_slice(&data).map_err(|e| {
            MigratableError::MigrateReceive(anyhow!("Error deserialising snapshot: {}", e))
        })?;

        #[cfg(all(feature = "kvm", target_arch = "x86_64"))]
        vm.load_clock_from_snapshot(&snapshot)
            .map_err(|e| MigratableError::MigrateReceive(anyhow!("Error resuming clock: {:?}", e)))?;
        // Create VM
        vm.restore(snapshot).map_err(|e| {
            Response::error().write_to(socket).ok();
            e
        })?;
        self.vm = Some(vm);

        Response::ok().write_to(socket)?;

        Ok(())
    }

    fn vm_receive_memory<T>(
        &mut self,
        req: &Request,
        socket: &mut T,
        vm: &mut Vm,
    ) -> std::result::Result<(), MigratableError>
    where
        T: Read + Write,
    {
        // Read table
        let table = MemoryRangeTable::read_from(socket, req.length())?;

        // And then read the memory itself
        vm.receive_memory_regions(&table, socket).map_err(|e| {
            Response::error().write_to(socket).ok();
            e
        })?;
        Response::ok().write_to(socket)?;
        Ok(())
    }

    fn socket_url_to_path(url: &str) -> result::Result<PathBuf, MigratableError> {
        url.strip_prefix("unix:")
            .ok_or_else(|| {
                MigratableError::MigrateSend(anyhow!("Could not extract path from URL: {}", url))
            })
            .map(|s| s.into())
    }

    fn vm_receive_migration(
        &mut self,
        receive_data_migration: VmReceiveMigrationData,
    ) -> result::Result<(), MigratableError> {
        info!(
            "Receiving migration: receiver_url = {}",
            receive_data_migration.receiver_url
        );

        let path = Self::socket_url_to_path(&receive_data_migration.receiver_url)?;
        let listener = UnixListener::bind(&path).map_err(|e| {
            MigratableError::MigrateReceive(anyhow!("Error binding to UNIX socket: {}", e))
        })?;
        let (mut socket, _addr) = listener.accept().map_err(|e| {
            MigratableError::MigrateReceive(anyhow!("Error accepting on UNIX socket: {}", e))
        })?;
        std::fs::remove_file(&path).map_err(|e| {
            MigratableError::MigrateReceive(anyhow!("Error unlinking UNIX socket: {}", e))
        })?;

        let mut started = false;
        let mut vm: Option<Vm> = None;

        loop {
            let req = Request::read_from(&mut socket)?;
            match req.command() {
                Command::Invalid => info!("Invalid Command Received"),
                Command::Start => {
                    info!("Start Command Received");
                    started = true;

                    Response::ok().write_to(&mut socket)?;
                }
                Command::Config => {
                    info!("Config Command Received");

                    if !started {
                        warn!("Migration not started yet");
                        Response::error().write_to(&mut socket)?;
                        continue;
                    }
                    vm = Some(self.vm_receive_config(&req, &mut socket)?);
                }
                Command::State => {
                    info!("State Command Received");

                    if !started {
                        warn!("Migration not started yet");
                        Response::error().write_to(&mut socket)?;
                        continue;
                    }
                    if let Some(vm) = vm.take() {
                        self.vm_receive_state(&req, &mut socket, vm)?;
                    } else {
                        warn!("Configuration not sent yet");
                        Response::error().write_to(&mut socket)?;
                    }
                }
                Command::Memory => {
                    info!("Memory Command Received");

                    if !started {
                        warn!("Migration not started yet");
                        Response::error().write_to(&mut socket)?;
                        continue;
                    }
                    if let Some(ref mut vm) = vm.as_mut() {
                        self.vm_receive_memory(&req, &mut socket, vm)?;
                    } else {
                        warn!("Configuration not sent yet");
                        Response::error().write_to(&mut socket)?;
                    }
                }
                Command::Complete => {
                    info!("Complete Command Received");
                    if let Some(ref mut vm) = self.vm.as_mut() {
                        vm.resume()?;
                        Response::ok().write_to(&mut socket)?;
                    } else {
                        warn!("VM not created yet");
                        Response::error().write_to(&mut socket)?;
                    }
                    break;
                }
                Command::Abandon => {
                    info!("Abandon Command Received");
                    self.vm = None;
                    self.vm_config = None;
                    Response::ok().write_to(&mut socket).ok();
                    break;
                }
            }
        }

        Ok(())
    }

    // Returns true if there were dirty pages to send
    fn vm_maybe_send_dirty_pages<T>(
        vm: &mut Vm,
        socket: &mut T,
    ) -> result::Result<bool, MigratableError>
    where
        T: Read + Write,
    {
        // Send (dirty) memory table
        let table = vm.dirty_log()?;

        // But if there are no regions, go straight to pause
        if table.regions().is_empty() {
            return Ok(false);
        }

        Request::memory(table.length()).write_to(socket).unwrap();
        table.write_to(socket)?;
        // And then the memory itself
        vm.send_memory_regions(&table, socket)?;
        let res = Response::read_from(socket)?;
        if res.status() != Status::Ok {
            warn!("Error during dirty memory migration");
            Request::abandon().write_to(socket)?;
            Response::read_from(socket).ok();
            return Err(MigratableError::MigrateSend(anyhow!(
                "Error during dirty memory migration"
            )));
        }

        Ok(true)
    }

    fn send_migration(
        vm: &mut Vm,
        #[cfg(all(feature = "kvm", target_arch = "x86_64"))] hypervisor: Arc<
            dyn hypervisor::Hypervisor,
        >,
        send_data_migration: VmSendMigrationData,
    ) -> result::Result<(), MigratableError> {
        let path = Self::socket_url_to_path(&send_data_migration.destination_url)?;
        let mut socket = UnixStream::connect(&path).map_err(|e| {
            MigratableError::MigrateSend(anyhow!("Error connecting to UNIX socket: {}", e))
        })?;

        // Start the migration
        Request::start().write_to(&mut socket)?;
        let res = Response::read_from(&mut socket)?;
        if res.status() != Status::Ok {
            warn!("Error starting migration");
            Request::abandon().write_to(&mut socket)?;
            Response::read_from(&mut socket).ok();
            return Err(MigratableError::MigrateSend(anyhow!(
                "Error starting migration"
            )));
        }

        // Send config
        let vm_config = vm.get_config();
        #[cfg(all(feature = "kvm", target_arch = "x86_64"))]
        let common_cpuid = {
            #[cfg(feature = "tdx")]
            let tdx_enabled = vm_config.lock().unwrap().tdx.is_some();
            let phys_bits = vm::physical_bits(
                vm_config.lock().unwrap().cpus.max_phys_bits,
                #[cfg(feature = "tdx")]
                tdx_enabled,
            );
            arch::generate_common_cpuid(
                hypervisor,
                None,
                None,
                phys_bits,
                vm_config.lock().unwrap().cpus.kvm_hyperv,
                #[cfg(feature = "tdx")]
                tdx_enabled,
            )
            .map_err(|e| {
                MigratableError::MigrateReceive(anyhow!("Error generating common cpuid: {:?}", e))
            })?
        };

        let vm_migration_config = VmMigrationConfig {
            vm_config,
            #[cfg(all(feature = "kvm", target_arch = "x86_64"))]
            common_cpuid,
        };

        let config_data = serde_json::to_vec(&vm_migration_config).unwrap();
        Request::config(config_data.len() as u64).write_to(&mut socket)?;
        socket
            .write_all(&config_data)
            .map_err(MigratableError::MigrateSocket)?;
        let res = Response::read_from(&mut socket)?;
        if res.status() != Status::Ok {
            warn!("Error during config migration");
            Request::abandon().write_to(&mut socket)?;
            Response::read_from(&mut socket).ok();
            return Err(MigratableError::MigrateSend(anyhow!(
                "Error during config migration"
            )));
        }

        // Start logging dirty pages
        vm.start_dirty_log()?;

        // Send memory table
        let table = vm.memory_range_table()?;
        Request::memory(table.length())
            .write_to(&mut socket)
            .unwrap();
        table.write_to(&mut socket)?;
        // And then the memory itself
        vm.send_memory_regions(&table, &mut socket)?;
        let res = Response::read_from(&mut socket)?;
        if res.status() != Status::Ok {
            warn!("Error during memory migration");
            Request::abandon().write_to(&mut socket)?;
            Response::read_from(&mut socket).ok();
            return Err(MigratableError::MigrateSend(anyhow!(
                "Error during memory migration"
            )));
        }

        // Try at most 5 passes of dirty memory sending
        const MAX_DIRTY_MIGRATIONS: usize = 5;
        for i in 0..MAX_DIRTY_MIGRATIONS {
            info!("Dirty memory migration {} of {}", i, MAX_DIRTY_MIGRATIONS);
            if !Self::vm_maybe_send_dirty_pages(vm, &mut socket)? {
                break;
            }
        }

        // Now pause VM
        vm.pause()?;

        // Send last batch of dirty pages
        Self::vm_maybe_send_dirty_pages(vm, &mut socket)?;

        // Capture snapshot and send it
        let vm_snapshot = vm.snapshot()?;
        let snapshot_data = serde_json::to_vec(&vm_snapshot).unwrap();
        Request::state(snapshot_data.len() as u64).write_to(&mut socket)?;
        socket
            .write_all(&snapshot_data)
            .map_err(MigratableError::MigrateSocket)?;
        let res = Response::read_from(&mut socket)?;
        if res.status() != Status::Ok {
            warn!("Error during state migration");
            Request::abandon().write_to(&mut socket)?;
            Response::read_from(&mut socket).ok();
            return Err(MigratableError::MigrateSend(anyhow!(
                "Error during state migration"
            )));
        }

        // Complete the migration
        Request::complete().write_to(&mut socket)?;
        let res = Response::read_from(&mut socket)?;
        if res.status() != Status::Ok {
            warn!("Error completing migration");
            Request::abandon().write_to(&mut socket)?;
            Response::read_from(&mut socket).ok();
            return Err(MigratableError::MigrateSend(anyhow!(
                "Error completing migration"
            )));
        }
        info!("Migration complete");

        // Let every Migratable object know about the migration being complete
        vm.complete_migration()?;

        // Stop logging dirty pages
        vm.stop_dirty_log()?;

        Ok(())
    }

    fn vm_send_migration(
        &mut self,
        send_data_migration: VmSendMigrationData,
    ) -> result::Result<(), MigratableError> {
        info!(
            "Sending migration: destination_url = {}",
            send_data_migration.destination_url
        );
        if let Some(vm) = self.vm.as_mut() {
            Self::send_migration(
                vm,
                #[cfg(all(feature = "kvm", target_arch = "x86_64"))]
                self.hypervisor.clone(),
                send_data_migration,
            )
            .map_err(|migration_err| {
                error!("Migration failed: {:?}", migration_err);

                // Stop logging dirty pages
                if let Err(e) = vm.stop_dirty_log() {
                    return e;
                }

                if vm.get_state().unwrap() == VmState::Paused {
                    if let Err(e) = vm.resume() {
                        return e;
                    }
                }

                migration_err
            })?;

            // Shutdown the VM after the migration succeeded
            self.exit_evt.write(1).map_err(|e| {
                MigratableError::MigrateSend(anyhow!(
                    "Failed shutting down the VM after migration: {:?}",
                    e
                ))
            })
        } else {
            Err(MigratableError::MigrateSend(anyhow!("VM is not running")))
        }
    }

    #[cfg(all(feature = "kvm", target_arch = "x86_64"))]
    fn vm_check_cpuid_compatibility(
        &self,
        src_vm_config: &Arc<Mutex<VmConfig>>,
        src_vm_cpuid: &hypervisor::CpuId,
    ) -> result::Result<(), MigratableError> {
        // We check the `CPUID` compatibility between the source VM and the destination, which is
        // mostly about feature compatibility; the "topology/sgx" leaves are not relevant.
        let dest_cpuid = &{
            let vm_config = &src_vm_config.lock().unwrap();

            #[cfg(feature = "tdx")]
            let tdx_enabled = vm_config.tdx.is_some();
            let phys_bits = vm::physical_bits(
                vm_config.cpus.max_phys_bits,
                #[cfg(feature = "tdx")]
                tdx_enabled,
            );
            arch::generate_common_cpuid(
                self.hypervisor.clone(),
                None,
                None,
                phys_bits,
                vm_config.cpus.kvm_hyperv,
                #[cfg(feature = "tdx")]
                tdx_enabled,
            )
            .map_err(|e| {
                MigratableError::MigrateReceive(anyhow!("Error generating common cpuid: {:?}", e))
            })?
        };
        arch::CpuidFeatureEntry::check_cpuid_compatibility(src_vm_cpuid, dest_cpuid).map_err(|e| {
            MigratableError::MigrateReceive(anyhow!(
                "Error checking cpu feature compatibility: {:?}",
                e
            ))
        })
    }

    fn control_loop(&mut self, api_receiver: Arc<Receiver<ApiRequest>>) -> Result<()> {
        const EPOLL_EVENTS_LEN: usize = 100;

        let mut events = vec![epoll::Event::new(epoll::Events::empty(), 0); EPOLL_EVENTS_LEN];
        let epoll_fd = self.epoll.as_raw_fd();

        'outer: loop {
            let num_events = match epoll::wait(epoll_fd, -1, &mut events[..]) {
                Ok(res) => res,
                Err(e) => {
                    if e.kind() == io::ErrorKind::Interrupted {
                        // It's well defined from the epoll_wait() syscall
                        // documentation that the epoll loop can be interrupted
                        // before any of the requested events occurred or the
                        // timeout expired. In both those cases, epoll_wait()
                        // returns an error of type EINTR, but this should not
                        // be considered as a regular error. Instead it is more
                        // appropriate to retry, by calling into epoll_wait().
                        continue;
                    }
                    return Err(Error::Epoll(e));
                }
            };

            for event in events.iter().take(num_events) {
                let dispatch_event: EpollDispatch = event.data.into();
                match dispatch_event {
                    EpollDispatch::Unknown => {
                        let event = event.data;
                        warn!("Unknown VMM loop event: {}", event);
                    }
                    EpollDispatch::Exit => {
                        info!("VM exit event");
                        // Consume the event.
                        self.exit_evt.read().map_err(Error::EventFdRead)?;
                        self.vmm_shutdown().map_err(Error::VmmShutdown)?;

                        break 'outer;
                    }
                    EpollDispatch::Reset => {
                        info!("VM reset event");
                        // Consume the event.
                        self.reset_evt.read().map_err(Error::EventFdRead)?;
                        self.vm_reboot().map_err(Error::VmReboot)?;
                    }
                    EpollDispatch::Stdin => {
                        if let Some(ref vm) = self.vm {
                            vm.handle_stdin().map_err(Error::Stdin)?;
                        }
                    }
                    EpollDispatch::ActivateVirtioDevices => {
                        if let Some(ref vm) = self.vm {
                            let count = self.activate_evt.read().map_err(Error::EventFdRead)?;
                            info!(
                                "Trying to activate pending virtio devices: count = {}",
                                count
                            );
                            vm.activate_virtio_devices()
                                .map_err(Error::ActivateVirtioDevices)?;
                        }
                    }
                    event @ EpollDispatch::SerialPty => {
                        if let Some(ref vm) = self.vm {
                            vm.handle_pty(event).map_err(Error::Pty)?;
                        }
                    }
                    EpollDispatch::Api => {
                        // Consume the event.
                        self.api_evt.read().map_err(Error::EventFdRead)?;

                        // Read from the API receiver channel
                        let api_request = api_receiver.recv().map_err(Error::ApiRequestRecv)?;

                        info!("API request event: {:?}", api_request);
                        match api_request {
                            ApiRequest::VmCreate(config, sender) => {
                                let response = self
                                    .vm_create(config)
                                    .map_err(ApiError::VmCreate)
                                    .map(|_| ApiResponsePayload::Empty);

                                sender.send(response).map_err(Error::ApiResponseSend)?;
                            }
                            ApiRequest::VmDelete(sender) => {
                                let response = self
                                    .vm_delete()
                                    .map_err(ApiError::VmDelete)
                                    .map(|_| ApiResponsePayload::Empty);

                                sender.send(response).map_err(Error::ApiResponseSend)?;
                            }
                            ApiRequest::VmBoot(sender) => {
                                let response = self
                                    .vm_boot()
                                    .map_err(ApiError::VmBoot)
                                    .map(|_| ApiResponsePayload::Empty);

                                sender.send(response).map_err(Error::ApiResponseSend)?;
                            }
                            ApiRequest::VmShutdown(sender) => {
                                let response = self
                                    .vm_shutdown()
                                    .map_err(ApiError::VmShutdown)
                                    .map(|_| ApiResponsePayload::Empty);

                                sender.send(response).map_err(Error::ApiResponseSend)?;
                            }
                            ApiRequest::VmReboot(sender) => {
                                let response = self
                                    .vm_reboot()
                                    .map_err(ApiError::VmReboot)
                                    .map(|_| ApiResponsePayload::Empty);

                                sender.send(response).map_err(Error::ApiResponseSend)?;
                            }
                            ApiRequest::VmInfo(sender) => {
                                let response = self
                                    .vm_info()
                                    .map_err(ApiError::VmInfo)
                                    .map(ApiResponsePayload::VmInfo);

                                sender.send(response).map_err(Error::ApiResponseSend)?;
                            }
                            ApiRequest::VmmPing(sender) => {
                                let response = ApiResponsePayload::VmmPing(self.vmm_ping());

                                sender.send(Ok(response)).map_err(Error::ApiResponseSend)?;
                            }
                            ApiRequest::VmPause(sender) => {
                                let response = self
                                    .vm_pause()
                                    .map_err(ApiError::VmPause)
                                    .map(|_| ApiResponsePayload::Empty);

                                sender.send(response).map_err(Error::ApiResponseSend)?;
                            }
                            ApiRequest::VmResume(sender) => {
                                let response = self
                                    .vm_resume()
                                    .map_err(ApiError::VmResume)
                                    .map(|_| ApiResponsePayload::Empty);

                                sender.send(response).map_err(Error::ApiResponseSend)?;
                            }
                            ApiRequest::VmSnapshot(snapshot_data, sender) => {
                                let response = self
                                    .vm_snapshot(&snapshot_data.destination_url)
                                    .map_err(ApiError::VmSnapshot)
                                    .map(|_| ApiResponsePayload::Empty);

                                sender.send(response).map_err(Error::ApiResponseSend)?;
                            }
                            ApiRequest::VmRestore(restore_data, sender) => {
                                let response = self
                                    .vm_restore(restore_data.as_ref().clone())
                                    .map_err(ApiError::VmRestore)
                                    .map(|_| ApiResponsePayload::Empty);
                                sender.send(response).map_err(Error::ApiResponseSend)?;
                            }
                            ApiRequest::VmmShutdown(sender) => {
                                let response = self
                                    .vmm_shutdown()
                                    .map_err(ApiError::VmmShutdown)
                                    .map(|_| ApiResponsePayload::Empty);

                                sender.send(response).map_err(Error::ApiResponseSend)?;

                                break 'outer;
                            }
                            ApiRequest::VmResize(resize_data, sender) => {
                                let response = self
                                    .vm_resize(
                                        resize_data.desired_vcpus,
                                        resize_data.desired_ram,
                                        resize_data.desired_balloon,
                                    )
                                    .map_err(ApiError::VmResize)
                                    .map(|_| ApiResponsePayload::Empty);
                                sender.send(response).map_err(Error::ApiResponseSend)?;
                            }
                            ApiRequest::VmResizeZone(resize_zone_data, sender) => {
                                let response = self
                                    .vm_resize_zone(
                                        resize_zone_data.id.clone(),
                                        resize_zone_data.desired_ram,
                                    )
                                    .map_err(ApiError::VmResizeZone)
                                    .map(|_| ApiResponsePayload::Empty);
                                sender.send(response).map_err(Error::ApiResponseSend)?;
                            }
                            ApiRequest::VmAddDevice(add_device_data, sender) => {
                                let response = self
                                    .vm_add_device(add_device_data.as_ref().clone())
                                    .map_err(ApiError::VmAddDevice)
                                    .map(ApiResponsePayload::VmAction);
                                sender.send(response).map_err(Error::ApiResponseSend)?;
                            }
                            ApiRequest::VmAddUserDevice(add_device_data, sender) => {
                                let response = self
                                    .vm_add_user_device(add_device_data.as_ref().clone())
                                    .map_err(ApiError::VmAddUserDevice)
                                    .map(ApiResponsePayload::VmAction);
                                sender.send(response).map_err(Error::ApiResponseSend)?;
                            }
                            ApiRequest::VmRemoveDevice(remove_device_data, sender) => {
                                let response = self
                                    .vm_remove_device(remove_device_data.id.clone())
                                    .map_err(ApiError::VmRemoveDevice)
                                    .map(|_| ApiResponsePayload::Empty);
                                sender.send(response).map_err(Error::ApiResponseSend)?;
                            }
                            ApiRequest::VmAddDisk(add_disk_data, sender) => {
                                let response = self
                                    .vm_add_disk(add_disk_data.as_ref().clone())
                                    .map_err(ApiError::VmAddDisk)
                                    .map(ApiResponsePayload::VmAction);
                                sender.send(response).map_err(Error::ApiResponseSend)?;
                            }
                            ApiRequest::VmAddFs(add_fs_data, sender) => {
                                let response = self
                                    .vm_add_fs(add_fs_data.as_ref().clone())
                                    .map_err(ApiError::VmAddFs)
                                    .map(ApiResponsePayload::VmAction);
                                sender.send(response).map_err(Error::ApiResponseSend)?;
                            }
                            ApiRequest::VmAddPmem(add_pmem_data, sender) => {
                                let response = self
                                    .vm_add_pmem(add_pmem_data.as_ref().clone())
                                    .map_err(ApiError::VmAddPmem)
                                    .map(ApiResponsePayload::VmAction);
                                sender.send(response).map_err(Error::ApiResponseSend)?;
                            }
                            ApiRequest::VmAddNet(add_net_data, sender) => {
                                let response = self
                                    .vm_add_net(add_net_data.as_ref().clone())
                                    .map_err(ApiError::VmAddNet)
                                    .map(ApiResponsePayload::VmAction);
                                sender.send(response).map_err(Error::ApiResponseSend)?;
                            }
                            ApiRequest::VmAddVsock(add_vsock_data, sender) => {
                                let response = self
                                    .vm_add_vsock(add_vsock_data.as_ref().clone())
                                    .map_err(ApiError::VmAddVsock)
                                    .map(ApiResponsePayload::VmAction);
                                sender.send(response).map_err(Error::ApiResponseSend)?;
                            }
                            ApiRequest::VmCounters(sender) => {
                                let response = self
                                    .vm_counters()
                                    .map_err(ApiError::VmInfo)
                                    .map(ApiResponsePayload::VmAction);

                                sender.send(response).map_err(Error::ApiResponseSend)?;
                            }
                            ApiRequest::VmReceiveMigration(receive_migration_data, sender) => {
                                let response = self
                                    .vm_receive_migration(receive_migration_data.as_ref().clone())
                                    .map_err(ApiError::VmReceiveMigration)
                                    .map(|_| ApiResponsePayload::Empty);
                                sender.send(response).map_err(Error::ApiResponseSend)?;
                            }
                            ApiRequest::VmSendMigration(send_migration_data, sender) => {
                                let response = self
                                    .vm_send_migration(send_migration_data.as_ref().clone())
                                    .map_err(ApiError::VmSendMigration)
                                    .map(|_| ApiResponsePayload::Empty);
                                sender.send(response).map_err(Error::ApiResponseSend)?;
                            }
                            ApiRequest::VmPowerButton(sender) => {
                                let response = self
                                    .vm_power_button()
                                    .map_err(ApiError::VmPowerButton)
                                    .map(|_| ApiResponsePayload::Empty);

                                sender.send(response).map_err(Error::ApiResponseSend)?;
                            }
                        }
                    }
                }
            }
        }

        Ok(())
    }
}

const CPU_MANAGER_SNAPSHOT_ID: &str = "cpu-manager";
const MEMORY_MANAGER_SNAPSHOT_ID: &str = "memory-manager";
const DEVICE_MANAGER_SNAPSHOT_ID: &str = "device-manager";
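
// The unit tests below are an illustrative sketch only: they exercise the two
// self-contained pieces defined in this file (the EpollDispatch conversion and
// the PciDeviceInfo serialization format) and assume nothing beyond serde_json,
// which this crate already uses. The module and test names are hypothetical.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_epoll_dispatch_from_u64() {
        // Known discriminants map back to their variants ...
        assert_eq!(EpollDispatch::from(0), EpollDispatch::Exit);
        assert_eq!(EpollDispatch::from(3), EpollDispatch::Api);
        assert_eq!(EpollDispatch::from(5), EpollDispatch::SerialPty);
        // ... and any other value falls back to Unknown.
        assert_eq!(EpollDispatch::from(42), EpollDispatch::Unknown);
    }

    #[test]
    fn test_pci_device_info_serialization() {
        // bdf 0x0020 decodes as segment 0, bus 0, device 4, function 0.
        let info = PciDeviceInfo {
            id: "_disk0".to_string(),
            bdf: 0x0020,
        };
        let serialized = serde_json::to_string(&info).unwrap();
        assert_eq!(serialized, r#"{"id":"_disk0","bdf":"0000:00:04.0"}"#);
    }
}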