1 // Copyright © 2019 Intel Corporation 2 // 3 // SPDX-License-Identifier: Apache-2.0 4 // 5 6 #[macro_use] 7 extern crate event_monitor; 8 #[macro_use] 9 extern crate lazy_static; 10 #[macro_use] 11 extern crate log; 12 #[macro_use] 13 extern crate serde_derive; 14 #[cfg(test)] 15 #[macro_use] 16 extern crate credibility; 17 18 use crate::api::{ 19 ApiError, ApiRequest, ApiResponse, ApiResponsePayload, VmInfo, VmReceiveMigrationData, 20 VmSendMigrationData, VmmPingResponse, 21 }; 22 use crate::config::{ 23 DeviceConfig, DiskConfig, FsConfig, NetConfig, PmemConfig, RestoreConfig, VmConfig, VsockConfig, 24 }; 25 use crate::migration::{get_vm_snapshot, recv_vm_snapshot}; 26 use crate::seccomp_filters::{get_seccomp_filter, Thread}; 27 use crate::vm::{Error as VmError, Vm, VmState}; 28 use anyhow::anyhow; 29 use libc::EFD_NONBLOCK; 30 use seccomp::{SeccompAction, SeccompFilter}; 31 use serde::ser::{Serialize, SerializeStruct, Serializer}; 32 use std::fs::File; 33 use std::io; 34 use std::io::{Read, Write}; 35 use std::os::unix::io::{AsRawFd, FromRawFd, RawFd}; 36 use std::os::unix::net::UnixListener; 37 use std::os::unix::net::UnixStream; 38 use std::path::PathBuf; 39 use std::sync::mpsc::{Receiver, RecvError, SendError, Sender}; 40 use std::sync::{Arc, Mutex}; 41 use std::{result, thread}; 42 use thiserror::Error; 43 use vm_memory::bitmap::AtomicBitmap; 44 use vm_migration::{protocol::*, Migratable}; 45 use vm_migration::{MigratableError, Pausable, Snapshot, Snapshottable, Transportable}; 46 use vmm_sys_util::eventfd::EventFd; 47 48 pub mod api; 49 pub mod config; 50 pub mod cpu; 51 pub mod device_manager; 52 pub mod device_tree; 53 pub mod interrupt; 54 pub mod memory_manager; 55 pub mod migration; 56 pub mod seccomp_filters; 57 pub mod vm; 58 59 #[cfg(feature = "acpi")] 60 mod acpi; 61 62 type GuestMemoryMmap = vm_memory::GuestMemoryMmap<AtomicBitmap>; 63 type GuestRegionMmap = vm_memory::GuestRegionMmap<AtomicBitmap>; 64 65 /// Errors associated with VMM management 66 #[derive(Debug, Error)] 67 #[allow(clippy::large_enum_variant)] 68 pub enum Error { 69 /// API request receive error 70 #[error("Error receiving API request: {0}")] 71 ApiRequestRecv(#[source] RecvError), 72 73 /// API response send error 74 #[error("Error sending API request: {0}")] 75 ApiResponseSend(#[source] SendError<ApiResponse>), 76 77 /// Cannot bind to the UNIX domain socket path 78 #[error("Error binding to UNIX domain socket: {0}")] 79 Bind(#[source] io::Error), 80 81 /// Cannot clone EventFd. 82 #[error("Error cloning EventFd: {0}")] 83 EventFdClone(#[source] io::Error), 84 85 /// Cannot create EventFd. 86 #[error("Error creating EventFd: {0}")] 87 EventFdCreate(#[source] io::Error), 88 89 /// Cannot read from EventFd. 90 #[error("Error reading from EventFd: {0}")] 91 EventFdRead(#[source] io::Error), 92 93 /// Cannot create epoll context. 94 #[error("Error creating epoll context: {0}")] 95 Epoll(#[source] io::Error), 96 97 /// Cannot create HTTP thread 98 #[error("Error spawning HTTP thread: {0}")] 99 HttpThreadSpawn(#[source] io::Error), 100 101 /// Cannot handle the VM STDIN stream 102 #[error("Error handling VM stdin: {0:?}")] 103 Stdin(VmError), 104 105 /// Cannot handle the VM pty stream 106 #[error("Error handling VM pty: {0:?}")] 107 Pty(VmError), 108 109 /// Cannot reboot the VM 110 #[error("Error rebooting VM: {0:?}")] 111 VmReboot(VmError), 112 113 /// Cannot create VMM thread 114 #[error("Error spawning VMM thread {0:?}")] 115 VmmThreadSpawn(#[source] io::Error), 116 117 /// Cannot shut the VMM down 118 #[error("Error shutting down VMM: {0:?}")] 119 VmmShutdown(VmError), 120 121 /// Cannot create seccomp filter 122 #[error("Error creating seccomp filter: {0}")] 123 CreateSeccompFilter(seccomp::SeccompError), 124 125 /// Cannot apply seccomp filter 126 #[error("Error applying seccomp filter: {0}")] 127 ApplySeccompFilter(seccomp::Error), 128 129 /// Error activating virtio devices 130 #[error("Error activating virtio devices: {0:?}")] 131 ActivateVirtioDevices(VmError), 132 133 /// Error creating API server 134 #[error("Error creating API server {0:?}")] 135 CreateApiServer(micro_http::ServerError), 136 137 /// Error binding API server socket 138 #[error("Error creation API server's socket {0:?}")] 139 CreateApiServerSocket(#[source] io::Error), 140 } 141 pub type Result<T> = result::Result<T, Error>; 142 143 #[derive(Debug, Clone, Copy, PartialEq)] 144 pub enum EpollDispatch { 145 Exit, 146 Reset, 147 Stdin, 148 Api, 149 ActivateVirtioDevices, 150 Pty, 151 } 152 153 pub struct EpollContext { 154 epoll_file: File, 155 dispatch_table: Vec<Option<EpollDispatch>>, 156 } 157 158 impl EpollContext { 159 pub fn new() -> result::Result<EpollContext, io::Error> { 160 let epoll_fd = epoll::create(true)?; 161 // Use 'File' to enforce closing on 'epoll_fd' 162 let epoll_file = unsafe { File::from_raw_fd(epoll_fd) }; 163 164 // Initial capacity needs to be large enough to hold: 165 // * 1 exit event 166 // * 1 reset event 167 // * 1 stdin event 168 // * 1 API event 169 let mut dispatch_table = Vec::with_capacity(5); 170 dispatch_table.push(None); 171 172 Ok(EpollContext { 173 epoll_file, 174 dispatch_table, 175 }) 176 } 177 178 pub fn add_stdin(&mut self) -> result::Result<(), io::Error> { 179 let dispatch_index = self.dispatch_table.len() as u64; 180 epoll::ctl( 181 self.epoll_file.as_raw_fd(), 182 epoll::ControlOptions::EPOLL_CTL_ADD, 183 libc::STDIN_FILENO, 184 epoll::Event::new(epoll::Events::EPOLLIN, dispatch_index), 185 )?; 186 187 self.dispatch_table.push(Some(EpollDispatch::Stdin)); 188 189 Ok(()) 190 } 191 192 fn add_event<T>(&mut self, fd: &T, token: EpollDispatch) -> result::Result<(), io::Error> 193 where 194 T: AsRawFd, 195 { 196 let dispatch_index = self.dispatch_table.len() as u64; 197 epoll::ctl( 198 self.epoll_file.as_raw_fd(), 199 epoll::ControlOptions::EPOLL_CTL_ADD, 200 fd.as_raw_fd(), 201 epoll::Event::new(epoll::Events::EPOLLIN, dispatch_index), 202 )?; 203 self.dispatch_table.push(Some(token)); 204 205 Ok(()) 206 } 207 } 208 209 impl AsRawFd for EpollContext { 210 fn as_raw_fd(&self) -> RawFd { 211 self.epoll_file.as_raw_fd() 212 } 213 } 214 215 pub struct PciDeviceInfo { 216 pub id: String, 217 pub bdf: u32, 218 } 219 220 impl Serialize for PciDeviceInfo { 221 fn serialize<S>(&self, serializer: S) -> std::result::Result<S::Ok, S::Error> 222 where 223 S: Serializer, 224 { 225 // Transform the PCI b/d/f into a standardized string. 226 let segment = (self.bdf >> 16) & 0xffff; 227 let bus = (self.bdf >> 8) & 0xff; 228 let device = (self.bdf >> 3) & 0x1f; 229 let function = self.bdf & 0x7; 230 let bdf_str = format!( 231 "{:04x}:{:02x}:{:02x}.{:01x}", 232 segment, bus, device, function 233 ); 234 235 // Serialize the structure. 236 let mut state = serializer.serialize_struct("PciDeviceInfo", 2)?; 237 state.serialize_field("id", &self.id)?; 238 state.serialize_field("bdf", &bdf_str)?; 239 state.end() 240 } 241 } 242 243 #[allow(clippy::too_many_arguments)] 244 pub fn start_vmm_thread( 245 vmm_version: String, 246 http_path: &Option<String>, 247 http_fd: Option<RawFd>, 248 api_event: EventFd, 249 api_sender: Sender<ApiRequest>, 250 api_receiver: Receiver<ApiRequest>, 251 seccomp_action: &SeccompAction, 252 hypervisor: Arc<dyn hypervisor::Hypervisor>, 253 ) -> Result<thread::JoinHandle<Result<()>>> { 254 let http_api_event = api_event.try_clone().map_err(Error::EventFdClone)?; 255 256 // Retrieve seccomp filter 257 let vmm_seccomp_filter = 258 get_seccomp_filter(seccomp_action, Thread::Vmm).map_err(Error::CreateSeccompFilter)?; 259 260 let vmm_seccomp_action = seccomp_action.clone(); 261 let thread = thread::Builder::new() 262 .name("vmm".to_string()) 263 .spawn(move || { 264 // Apply seccomp filter for VMM thread. 265 SeccompFilter::apply(vmm_seccomp_filter).map_err(Error::ApplySeccompFilter)?; 266 267 let mut vmm = Vmm::new( 268 vmm_version.to_string(), 269 api_event, 270 vmm_seccomp_action, 271 hypervisor, 272 )?; 273 274 vmm.control_loop(Arc::new(api_receiver)) 275 }) 276 .map_err(Error::VmmThreadSpawn)?; 277 278 // The VMM thread is started, we can start serving HTTP requests 279 if let Some(http_path) = http_path { 280 api::start_http_path_thread(http_path, http_api_event, api_sender, seccomp_action)?; 281 } else if let Some(http_fd) = http_fd { 282 api::start_http_fd_thread(http_fd, http_api_event, api_sender, seccomp_action)?; 283 } 284 Ok(thread) 285 } 286 287 #[derive(Clone, Deserialize, Serialize)] 288 struct VmMigrationConfig { 289 vm_config: Arc<Mutex<VmConfig>>, 290 #[cfg(all(feature = "kvm", target_arch = "x86_64"))] 291 common_cpuid: hypervisor::CpuId, 292 } 293 294 pub struct Vmm { 295 epoll: EpollContext, 296 exit_evt: EventFd, 297 reset_evt: EventFd, 298 api_evt: EventFd, 299 version: String, 300 vm: Option<Vm>, 301 vm_config: Option<Arc<Mutex<VmConfig>>>, 302 seccomp_action: SeccompAction, 303 hypervisor: Arc<dyn hypervisor::Hypervisor>, 304 activate_evt: EventFd, 305 } 306 307 impl Vmm { 308 fn new( 309 vmm_version: String, 310 api_evt: EventFd, 311 seccomp_action: SeccompAction, 312 hypervisor: Arc<dyn hypervisor::Hypervisor>, 313 ) -> Result<Self> { 314 let mut epoll = EpollContext::new().map_err(Error::Epoll)?; 315 let exit_evt = EventFd::new(EFD_NONBLOCK).map_err(Error::EventFdCreate)?; 316 let reset_evt = EventFd::new(EFD_NONBLOCK).map_err(Error::EventFdCreate)?; 317 let activate_evt = EventFd::new(EFD_NONBLOCK).map_err(Error::EventFdCreate)?; 318 319 if unsafe { libc::isatty(libc::STDIN_FILENO as i32) } != 0 { 320 epoll.add_stdin().map_err(Error::Epoll)?; 321 } 322 323 epoll 324 .add_event(&exit_evt, EpollDispatch::Exit) 325 .map_err(Error::Epoll)?; 326 327 epoll 328 .add_event(&reset_evt, EpollDispatch::Reset) 329 .map_err(Error::Epoll)?; 330 331 epoll 332 .add_event(&activate_evt, EpollDispatch::ActivateVirtioDevices) 333 .map_err(Error::Epoll)?; 334 335 epoll 336 .add_event(&api_evt, EpollDispatch::Api) 337 .map_err(Error::Epoll)?; 338 339 Ok(Vmm { 340 epoll, 341 exit_evt, 342 reset_evt, 343 api_evt, 344 version: vmm_version, 345 vm: None, 346 vm_config: None, 347 seccomp_action, 348 hypervisor, 349 activate_evt, 350 }) 351 } 352 353 fn vm_create(&mut self, config: Arc<Mutex<VmConfig>>) -> result::Result<(), VmError> { 354 // We only store the passed VM config. 355 // The VM will be created when being asked to boot it. 356 if self.vm_config.is_none() { 357 self.vm_config = Some(config); 358 Ok(()) 359 } else { 360 Err(VmError::VmAlreadyCreated) 361 } 362 } 363 364 fn vm_boot(&mut self) -> result::Result<(), VmError> { 365 // If we don't have a config, we can not boot a VM. 366 if self.vm_config.is_none() { 367 return Err(VmError::VmMissingConfig); 368 }; 369 370 // Create a new VM if we don't have one yet. 371 if self.vm.is_none() { 372 let exit_evt = self.exit_evt.try_clone().map_err(VmError::EventFdClone)?; 373 let reset_evt = self.reset_evt.try_clone().map_err(VmError::EventFdClone)?; 374 let activate_evt = self 375 .activate_evt 376 .try_clone() 377 .map_err(VmError::EventFdClone)?; 378 379 if let Some(ref vm_config) = self.vm_config { 380 let vm = Vm::new( 381 Arc::clone(vm_config), 382 exit_evt, 383 reset_evt, 384 &self.seccomp_action, 385 self.hypervisor.clone(), 386 activate_evt, 387 None, 388 None, 389 )?; 390 if let Some(serial_pty) = vm.serial_pty() { 391 self.epoll 392 .add_event(&serial_pty.main, EpollDispatch::Pty) 393 .map_err(VmError::EventfdError)?; 394 }; 395 if let Some(console_pty) = vm.console_pty() { 396 self.epoll 397 .add_event(&console_pty.main, EpollDispatch::Pty) 398 .map_err(VmError::EventfdError)?; 399 }; 400 self.vm = Some(vm); 401 } 402 } 403 404 // Now we can boot the VM. 405 if let Some(ref mut vm) = self.vm { 406 vm.boot() 407 } else { 408 Err(VmError::VmNotCreated) 409 } 410 } 411 412 fn vm_pause(&mut self) -> result::Result<(), VmError> { 413 if let Some(ref mut vm) = self.vm { 414 vm.pause().map_err(VmError::Pause) 415 } else { 416 Err(VmError::VmNotRunning) 417 } 418 } 419 420 fn vm_resume(&mut self) -> result::Result<(), VmError> { 421 if let Some(ref mut vm) = self.vm { 422 vm.resume().map_err(VmError::Resume) 423 } else { 424 Err(VmError::VmNotRunning) 425 } 426 } 427 428 fn vm_snapshot(&mut self, destination_url: &str) -> result::Result<(), VmError> { 429 if let Some(ref mut vm) = self.vm { 430 vm.snapshot() 431 .map_err(VmError::Snapshot) 432 .and_then(|snapshot| { 433 vm.send(&snapshot, destination_url) 434 .map_err(VmError::SnapshotSend) 435 }) 436 } else { 437 Err(VmError::VmNotRunning) 438 } 439 } 440 441 fn vm_restore(&mut self, restore_cfg: RestoreConfig) -> result::Result<(), VmError> { 442 if self.vm.is_some() || self.vm_config.is_some() { 443 return Err(VmError::VmAlreadyCreated); 444 } 445 446 let source_url = restore_cfg.source_url.as_path().to_str(); 447 if source_url.is_none() { 448 return Err(VmError::RestoreSourceUrlPathToStr); 449 } 450 // Safe to unwrap as we checked it was Some(&str). 451 let source_url = source_url.unwrap(); 452 453 let snapshot = recv_vm_snapshot(source_url).map_err(VmError::Restore)?; 454 let vm_snapshot = get_vm_snapshot(&snapshot).map_err(VmError::Restore)?; 455 456 #[cfg(all(feature = "kvm", target_arch = "x86_64"))] 457 self.vm_check_cpuid_compatibility(&vm_snapshot.config, &vm_snapshot.common_cpuid) 458 .map_err(VmError::Restore)?; 459 460 self.vm_config = Some(Arc::clone(&vm_snapshot.config)); 461 462 let exit_evt = self.exit_evt.try_clone().map_err(VmError::EventFdClone)?; 463 let reset_evt = self.reset_evt.try_clone().map_err(VmError::EventFdClone)?; 464 let activate_evt = self 465 .activate_evt 466 .try_clone() 467 .map_err(VmError::EventFdClone)?; 468 469 let vm = Vm::new_from_snapshot( 470 &snapshot, 471 exit_evt, 472 reset_evt, 473 Some(source_url), 474 restore_cfg.prefault, 475 &self.seccomp_action, 476 self.hypervisor.clone(), 477 activate_evt, 478 )?; 479 self.vm = Some(vm); 480 481 // Now we can restore the rest of the VM. 482 if let Some(ref mut vm) = self.vm { 483 vm.restore(snapshot).map_err(VmError::Restore) 484 } else { 485 Err(VmError::VmNotCreated) 486 } 487 } 488 489 fn vm_shutdown(&mut self) -> result::Result<(), VmError> { 490 if let Some(ref mut vm) = self.vm.take() { 491 vm.shutdown() 492 } else { 493 Err(VmError::VmNotRunning) 494 } 495 } 496 497 fn vm_reboot(&mut self) -> result::Result<(), VmError> { 498 // Without ACPI, a reset is equivalent to a shutdown 499 // On AArch64, before ACPI is supported, we simply jump over this check and continue to reset. 500 #[cfg(all(target_arch = "x86_64", not(feature = "acpi")))] 501 { 502 if self.vm.is_some() { 503 self.exit_evt.write(1).unwrap(); 504 return Ok(()); 505 } 506 } 507 508 // First we stop the current VM and create a new one. 509 if let Some(ref mut vm) = self.vm { 510 let config = vm.get_config(); 511 let serial_pty = vm.serial_pty(); 512 let console_pty = vm.console_pty(); 513 self.vm_shutdown()?; 514 515 let exit_evt = self.exit_evt.try_clone().map_err(VmError::EventFdClone)?; 516 let reset_evt = self.reset_evt.try_clone().map_err(VmError::EventFdClone)?; 517 let activate_evt = self 518 .activate_evt 519 .try_clone() 520 .map_err(VmError::EventFdClone)?; 521 522 // The Linux kernel fires off an i8042 reset after doing the ACPI reset so there may be 523 // an event sitting in the shared reset_evt. Without doing this we get very early reboots 524 // during the boot process. 525 if self.reset_evt.read().is_ok() { 526 warn!("Spurious second reset event received. Ignoring."); 527 } 528 self.vm = Some(Vm::new( 529 config, 530 exit_evt, 531 reset_evt, 532 &self.seccomp_action, 533 self.hypervisor.clone(), 534 activate_evt, 535 serial_pty, 536 console_pty, 537 )?); 538 } 539 540 // Then we start the new VM. 541 if let Some(ref mut vm) = self.vm { 542 vm.boot() 543 } else { 544 Err(VmError::VmNotCreated) 545 } 546 } 547 548 fn vm_info(&self) -> result::Result<VmInfo, VmError> { 549 match &self.vm_config { 550 Some(config) => { 551 let state = match &self.vm { 552 Some(vm) => vm.get_state()?, 553 None => VmState::Created, 554 }; 555 556 let config = Arc::clone(config); 557 558 let mut memory_actual_size = config.lock().unwrap().memory.total_size(); 559 if let Some(vm) = &self.vm { 560 memory_actual_size -= vm.balloon_size(); 561 } 562 563 let device_tree = self.vm.as_ref().map(|vm| vm.device_tree()); 564 565 Ok(VmInfo { 566 config, 567 state, 568 memory_actual_size, 569 device_tree, 570 }) 571 } 572 None => Err(VmError::VmNotCreated), 573 } 574 } 575 576 fn vmm_ping(&self) -> VmmPingResponse { 577 VmmPingResponse { 578 version: self.version.clone(), 579 } 580 } 581 582 fn vm_delete(&mut self) -> result::Result<(), VmError> { 583 if self.vm_config.is_none() { 584 return Ok(()); 585 } 586 587 // If a VM is booted, we first try to shut it down. 588 if self.vm.is_some() { 589 self.vm_shutdown()?; 590 } 591 592 self.vm_config = None; 593 594 event!("vm", "deleted"); 595 596 Ok(()) 597 } 598 599 fn vmm_shutdown(&mut self) -> result::Result<(), VmError> { 600 self.vm_delete()?; 601 event!("vmm", "shutdown"); 602 Ok(()) 603 } 604 605 fn vm_resize( 606 &mut self, 607 desired_vcpus: Option<u8>, 608 desired_ram: Option<u64>, 609 desired_balloon: Option<u64>, 610 ) -> result::Result<(), VmError> { 611 if let Some(ref mut vm) = self.vm { 612 if let Err(e) = vm.resize(desired_vcpus, desired_ram, desired_balloon) { 613 error!("Error when resizing VM: {:?}", e); 614 Err(e) 615 } else { 616 Ok(()) 617 } 618 } else { 619 Err(VmError::VmNotRunning) 620 } 621 } 622 623 fn vm_resize_zone(&mut self, id: String, desired_ram: u64) -> result::Result<(), VmError> { 624 if let Some(ref mut vm) = self.vm { 625 if let Err(e) = vm.resize_zone(id, desired_ram) { 626 error!("Error when resizing VM: {:?}", e); 627 Err(e) 628 } else { 629 Ok(()) 630 } 631 } else { 632 Err(VmError::VmNotRunning) 633 } 634 } 635 636 fn vm_add_device(&mut self, device_cfg: DeviceConfig) -> result::Result<Vec<u8>, VmError> { 637 if let Some(ref mut vm) = self.vm { 638 let info = vm.add_device(device_cfg).map_err(|e| { 639 error!("Error when adding new device to the VM: {:?}", e); 640 e 641 })?; 642 serde_json::to_vec(&info).map_err(VmError::SerializeJson) 643 } else { 644 Err(VmError::VmNotRunning) 645 } 646 } 647 648 fn vm_remove_device(&mut self, id: String) -> result::Result<(), VmError> { 649 if let Some(ref mut vm) = self.vm { 650 if let Err(e) = vm.remove_device(id) { 651 error!("Error when removing new device to the VM: {:?}", e); 652 Err(e) 653 } else { 654 Ok(()) 655 } 656 } else { 657 Err(VmError::VmNotRunning) 658 } 659 } 660 661 fn vm_add_disk(&mut self, disk_cfg: DiskConfig) -> result::Result<Vec<u8>, VmError> { 662 if let Some(ref mut vm) = self.vm { 663 let info = vm.add_disk(disk_cfg).map_err(|e| { 664 error!("Error when adding new disk to the VM: {:?}", e); 665 e 666 })?; 667 serde_json::to_vec(&info).map_err(VmError::SerializeJson) 668 } else { 669 Err(VmError::VmNotRunning) 670 } 671 } 672 673 fn vm_add_fs(&mut self, fs_cfg: FsConfig) -> result::Result<Vec<u8>, VmError> { 674 if let Some(ref mut vm) = self.vm { 675 let info = vm.add_fs(fs_cfg).map_err(|e| { 676 error!("Error when adding new fs to the VM: {:?}", e); 677 e 678 })?; 679 serde_json::to_vec(&info).map_err(VmError::SerializeJson) 680 } else { 681 Err(VmError::VmNotRunning) 682 } 683 } 684 685 fn vm_add_pmem(&mut self, pmem_cfg: PmemConfig) -> result::Result<Vec<u8>, VmError> { 686 if let Some(ref mut vm) = self.vm { 687 let info = vm.add_pmem(pmem_cfg).map_err(|e| { 688 error!("Error when adding new pmem device to the VM: {:?}", e); 689 e 690 })?; 691 serde_json::to_vec(&info).map_err(VmError::SerializeJson) 692 } else { 693 Err(VmError::VmNotRunning) 694 } 695 } 696 697 fn vm_add_net(&mut self, net_cfg: NetConfig) -> result::Result<Vec<u8>, VmError> { 698 if let Some(ref mut vm) = self.vm { 699 let info = vm.add_net(net_cfg).map_err(|e| { 700 error!("Error when adding new network device to the VM: {:?}", e); 701 e 702 })?; 703 serde_json::to_vec(&info).map_err(VmError::SerializeJson) 704 } else { 705 Err(VmError::VmNotRunning) 706 } 707 } 708 709 fn vm_add_vsock(&mut self, vsock_cfg: VsockConfig) -> result::Result<Vec<u8>, VmError> { 710 if let Some(ref mut vm) = self.vm { 711 let info = vm.add_vsock(vsock_cfg).map_err(|e| { 712 error!("Error when adding new vsock device to the VM: {:?}", e); 713 e 714 })?; 715 serde_json::to_vec(&info).map_err(VmError::SerializeJson) 716 } else { 717 Err(VmError::VmNotRunning) 718 } 719 } 720 721 fn vm_counters(&mut self) -> result::Result<Vec<u8>, VmError> { 722 if let Some(ref mut vm) = self.vm { 723 let info = vm.counters().map_err(|e| { 724 error!("Error when getting counters from the VM: {:?}", e); 725 e 726 })?; 727 serde_json::to_vec(&info).map_err(VmError::SerializeJson) 728 } else { 729 Err(VmError::VmNotRunning) 730 } 731 } 732 733 fn vm_power_button(&mut self) -> result::Result<(), VmError> { 734 if let Some(ref mut vm) = self.vm { 735 vm.power_button() 736 } else { 737 Err(VmError::VmNotRunning) 738 } 739 } 740 741 fn vm_receive_config<T>( 742 &mut self, 743 req: &Request, 744 socket: &mut T, 745 ) -> std::result::Result<Vm, MigratableError> 746 where 747 T: Read + Write, 748 { 749 // Read in config data 750 let mut data = Vec::with_capacity(req.length() as usize); 751 unsafe { 752 data.set_len(req.length() as usize); 753 } 754 socket 755 .read_exact(&mut data) 756 .map_err(MigratableError::MigrateSocket)?; 757 758 let vm_migration_config: VmMigrationConfig = 759 serde_json::from_slice(&data).map_err(|e| { 760 MigratableError::MigrateReceive(anyhow!("Error deserialising config: {}", e)) 761 })?; 762 763 #[cfg(all(feature = "kvm", target_arch = "x86_64"))] 764 self.vm_check_cpuid_compatibility( 765 &vm_migration_config.vm_config, 766 &vm_migration_config.common_cpuid, 767 )?; 768 769 let exit_evt = self.exit_evt.try_clone().map_err(|e| { 770 MigratableError::MigrateReceive(anyhow!("Error cloning exit EventFd: {}", e)) 771 })?; 772 let reset_evt = self.reset_evt.try_clone().map_err(|e| { 773 MigratableError::MigrateReceive(anyhow!("Error cloning reset EventFd: {}", e)) 774 })?; 775 let activate_evt = self.activate_evt.try_clone().map_err(|e| { 776 MigratableError::MigrateReceive(anyhow!("Error cloning activate EventFd: {}", e)) 777 })?; 778 779 self.vm_config = Some(vm_migration_config.vm_config); 780 let vm = Vm::new_from_migration( 781 self.vm_config.clone().unwrap(), 782 exit_evt, 783 reset_evt, 784 &self.seccomp_action, 785 self.hypervisor.clone(), 786 activate_evt, 787 ) 788 .map_err(|e| { 789 MigratableError::MigrateReceive(anyhow!("Error creating VM from snapshot: {:?}", e)) 790 })?; 791 792 Response::ok().write_to(socket)?; 793 794 Ok(vm) 795 } 796 797 fn vm_receive_state<T>( 798 &mut self, 799 req: &Request, 800 socket: &mut T, 801 mut vm: Vm, 802 ) -> std::result::Result<(), MigratableError> 803 where 804 T: Read + Write, 805 { 806 // Read in state data 807 let mut data = Vec::with_capacity(req.length() as usize); 808 unsafe { 809 data.set_len(req.length() as usize); 810 } 811 socket 812 .read_exact(&mut data) 813 .map_err(MigratableError::MigrateSocket)?; 814 let snapshot: Snapshot = serde_json::from_slice(&data).map_err(|e| { 815 MigratableError::MigrateReceive(anyhow!("Error deserialising snapshot: {}", e)) 816 })?; 817 818 #[cfg(all(feature = "kvm", target_arch = "x86_64"))] 819 vm.load_clock_from_snapshot(&snapshot) 820 .map_err(|e| MigratableError::MigrateReceive(anyhow!("Error resume clock: {:?}", e)))?; 821 822 // Create VM 823 vm.restore(snapshot).map_err(|e| { 824 Response::error().write_to(socket).ok(); 825 e 826 })?; 827 self.vm = Some(vm); 828 829 Response::ok().write_to(socket)?; 830 831 Ok(()) 832 } 833 834 fn vm_receive_memory<T>( 835 &mut self, 836 req: &Request, 837 socket: &mut T, 838 vm: &mut Vm, 839 ) -> std::result::Result<(), MigratableError> 840 where 841 T: Read + Write, 842 { 843 // Read table 844 let table = MemoryRangeTable::read_from(socket, req.length())?; 845 846 // And then read the memory itself 847 vm.receive_memory_regions(&table, socket).map_err(|e| { 848 Response::error().write_to(socket).ok(); 849 e 850 })?; 851 Response::ok().write_to(socket)?; 852 Ok(()) 853 } 854 855 fn socket_url_to_path(url: &str) -> result::Result<PathBuf, MigratableError> { 856 url.strip_prefix("unix:") 857 .ok_or_else(|| { 858 MigratableError::MigrateSend(anyhow!("Could not extract path from URL: {}", url)) 859 }) 860 .map(|s| s.into()) 861 } 862 863 fn vm_receive_migration( 864 &mut self, 865 receive_data_migration: VmReceiveMigrationData, 866 ) -> result::Result<(), MigratableError> { 867 info!( 868 "Receiving migration: receiver_url = {}", 869 receive_data_migration.receiver_url 870 ); 871 872 let path = Self::socket_url_to_path(&receive_data_migration.receiver_url)?; 873 let listener = UnixListener::bind(&path).map_err(|e| { 874 MigratableError::MigrateReceive(anyhow!("Error binding to UNIX socket: {}", e)) 875 })?; 876 let (mut socket, _addr) = listener.accept().map_err(|e| { 877 MigratableError::MigrateReceive(anyhow!("Error accepting on UNIX socket: {}", e)) 878 })?; 879 std::fs::remove_file(&path).map_err(|e| { 880 MigratableError::MigrateReceive(anyhow!("Error unlinking UNIX socket: {}", e)) 881 })?; 882 883 let mut started = false; 884 let mut vm: Option<Vm> = None; 885 886 loop { 887 let req = Request::read_from(&mut socket)?; 888 match req.command() { 889 Command::Invalid => info!("Invalid Command Received"), 890 Command::Start => { 891 info!("Start Command Received"); 892 started = true; 893 894 Response::ok().write_to(&mut socket)?; 895 } 896 Command::Config => { 897 info!("Config Command Received"); 898 899 if !started { 900 warn!("Migration not started yet"); 901 Response::error().write_to(&mut socket)?; 902 continue; 903 } 904 vm = Some(self.vm_receive_config(&req, &mut socket)?); 905 } 906 Command::State => { 907 info!("State Command Received"); 908 909 if !started { 910 warn!("Migration not started yet"); 911 Response::error().write_to(&mut socket)?; 912 continue; 913 } 914 if let Some(vm) = vm.take() { 915 self.vm_receive_state(&req, &mut socket, vm)?; 916 } else { 917 warn!("Configuration not sent yet"); 918 Response::error().write_to(&mut socket)?; 919 } 920 } 921 Command::Memory => { 922 info!("Memory Command Received"); 923 924 if !started { 925 warn!("Migration not started yet"); 926 Response::error().write_to(&mut socket)?; 927 continue; 928 } 929 if let Some(ref mut vm) = vm.as_mut() { 930 self.vm_receive_memory(&req, &mut socket, vm)?; 931 } else { 932 warn!("Configuration not sent yet"); 933 Response::error().write_to(&mut socket)?; 934 } 935 } 936 Command::Complete => { 937 info!("Complete Command Received"); 938 if let Some(ref mut vm) = self.vm.as_mut() { 939 vm.resume()?; 940 Response::ok().write_to(&mut socket)?; 941 } else { 942 warn!("VM not created yet"); 943 Response::error().write_to(&mut socket)?; 944 } 945 break; 946 } 947 Command::Abandon => { 948 info!("Abandon Command Received"); 949 self.vm = None; 950 self.vm_config = None; 951 Response::ok().write_to(&mut socket).ok(); 952 break; 953 } 954 } 955 } 956 957 Ok(()) 958 } 959 960 // Returns true if there were dirty pages to send 961 fn vm_maybe_send_dirty_pages<T>( 962 vm: &mut Vm, 963 socket: &mut T, 964 ) -> result::Result<bool, MigratableError> 965 where 966 T: Read + Write, 967 { 968 // Send (dirty) memory table 969 let table = vm.dirty_log()?; 970 971 // But if there are no regions go straight to pause 972 if table.regions().is_empty() { 973 return Ok(false); 974 } 975 976 Request::memory(table.length()).write_to(socket).unwrap(); 977 table.write_to(socket)?; 978 // And then the memory itself 979 vm.send_memory_regions(&table, socket)?; 980 let res = Response::read_from(socket)?; 981 if res.status() != Status::Ok { 982 warn!("Error during dirty memory migration"); 983 Request::abandon().write_to(socket)?; 984 Response::read_from(socket).ok(); 985 return Err(MigratableError::MigrateSend(anyhow!( 986 "Error during dirty memory migration" 987 ))); 988 } 989 990 Ok(true) 991 } 992 993 fn vm_send_migration( 994 &mut self, 995 send_data_migration: VmSendMigrationData, 996 ) -> result::Result<(), MigratableError> { 997 info!( 998 "Sending migration: destination_url = {}", 999 send_data_migration.destination_url 1000 ); 1001 if let Some(ref mut vm) = self.vm { 1002 match { 1003 let path = Self::socket_url_to_path(&send_data_migration.destination_url)?; 1004 let mut socket = UnixStream::connect(&path).map_err(|e| { 1005 MigratableError::MigrateSend(anyhow!("Error connecting to UNIX socket: {}", e)) 1006 })?; 1007 1008 // Start the migration 1009 Request::start().write_to(&mut socket)?; 1010 let res = Response::read_from(&mut socket)?; 1011 if res.status() != Status::Ok { 1012 warn!("Error starting migration"); 1013 Request::abandon().write_to(&mut socket)?; 1014 Response::read_from(&mut socket).ok(); 1015 return Err(MigratableError::MigrateSend(anyhow!( 1016 "Error starting migration" 1017 ))); 1018 } 1019 1020 // Send config 1021 let vm_config = vm.get_config(); 1022 #[cfg(all(feature = "kvm", target_arch = "x86_64"))] 1023 let common_cpuid = { 1024 #[cfg(feature = "tdx")] 1025 let tdx_enabled = vm_config.lock().unwrap().tdx.is_some(); 1026 let phys_bits = vm::physical_bits( 1027 vm_config.lock().unwrap().cpus.max_phys_bits, 1028 #[cfg(feature = "tdx")] 1029 tdx_enabled, 1030 ); 1031 arch::generate_common_cpuid( 1032 self.hypervisor.clone(), 1033 None, 1034 None, 1035 phys_bits, 1036 vm_config.lock().unwrap().cpus.kvm_hyperv, 1037 #[cfg(feature = "tdx")] 1038 tdx_enabled, 1039 ) 1040 .map_err(|e| { 1041 MigratableError::MigrateReceive(anyhow!( 1042 "Error generating common cpuid': {:?}", 1043 e 1044 )) 1045 })? 1046 }; 1047 1048 let vm_migration_config = VmMigrationConfig { 1049 vm_config, 1050 #[cfg(all(feature = "kvm", target_arch = "x86_64"))] 1051 common_cpuid, 1052 }; 1053 1054 let config_data = serde_json::to_vec(&vm_migration_config).unwrap(); 1055 Request::config(config_data.len() as u64).write_to(&mut socket)?; 1056 socket 1057 .write_all(&config_data) 1058 .map_err(MigratableError::MigrateSocket)?; 1059 let res = Response::read_from(&mut socket)?; 1060 if res.status() != Status::Ok { 1061 warn!("Error during config migration"); 1062 Request::abandon().write_to(&mut socket)?; 1063 Response::read_from(&mut socket).ok(); 1064 return Err(MigratableError::MigrateSend(anyhow!( 1065 "Error during config migration" 1066 ))); 1067 } 1068 1069 // Start logging dirty pages 1070 vm.start_dirty_log()?; 1071 1072 // Send memory table 1073 let table = vm.memory_range_table()?; 1074 Request::memory(table.length()) 1075 .write_to(&mut socket) 1076 .unwrap(); 1077 table.write_to(&mut socket)?; 1078 // And then the memory itself 1079 vm.send_memory_regions(&table, &mut socket)?; 1080 let res = Response::read_from(&mut socket)?; 1081 if res.status() != Status::Ok { 1082 warn!("Error during memory migration"); 1083 Request::abandon().write_to(&mut socket)?; 1084 Response::read_from(&mut socket).ok(); 1085 return Err(MigratableError::MigrateSend(anyhow!( 1086 "Error during memory migration" 1087 ))); 1088 } 1089 1090 // Try at most 5 passes of dirty memory sending 1091 const MAX_DIRTY_MIGRATIONS: usize = 5; 1092 for i in 0..MAX_DIRTY_MIGRATIONS { 1093 info!("Dirty memory migration {} of {}", i, MAX_DIRTY_MIGRATIONS); 1094 if !Self::vm_maybe_send_dirty_pages(vm, &mut socket)? { 1095 break; 1096 } 1097 } 1098 1099 // Now pause VM 1100 vm.pause()?; 1101 1102 // Send last batch of dirty pages 1103 Self::vm_maybe_send_dirty_pages(vm, &mut socket)?; 1104 1105 // Capture snapshot and send it 1106 let vm_snapshot = vm.snapshot()?; 1107 let snapshot_data = serde_json::to_vec(&vm_snapshot).unwrap(); 1108 Request::state(snapshot_data.len() as u64).write_to(&mut socket)?; 1109 socket 1110 .write_all(&snapshot_data) 1111 .map_err(MigratableError::MigrateSocket)?; 1112 let res = Response::read_from(&mut socket)?; 1113 if res.status() != Status::Ok { 1114 warn!("Error during state migration"); 1115 Request::abandon().write_to(&mut socket)?; 1116 Response::read_from(&mut socket).ok(); 1117 return Err(MigratableError::MigrateSend(anyhow!( 1118 "Error during state migration" 1119 ))); 1120 } 1121 1122 // Complete the migration 1123 Request::complete().write_to(&mut socket)?; 1124 let res = Response::read_from(&mut socket)?; 1125 if res.status() != Status::Ok { 1126 warn!("Error completing migration"); 1127 Request::abandon().write_to(&mut socket)?; 1128 Response::read_from(&mut socket).ok(); 1129 return Err(MigratableError::MigrateSend(anyhow!( 1130 "Error completing migration" 1131 ))); 1132 } 1133 info!("Migration complete"); 1134 Ok(()) 1135 } { 1136 // Stop logging dirty pages and keep the source VM paused unpon successful migration 1137 Ok(()) => { 1138 // Stop logging dirty pages 1139 vm.stop_dirty_log()?; 1140 1141 Ok(()) 1142 } 1143 // Ensure the source VM continue to run upon unsuccessful migration 1144 Err(e) => { 1145 error!("Migration failed: {:?}", e); 1146 1147 // Stop logging dirty pages 1148 vm.stop_dirty_log()?; 1149 1150 if vm.get_state().unwrap() == VmState::Paused { 1151 vm.resume()?; 1152 } 1153 1154 e 1155 } 1156 } 1157 } else { 1158 Err(MigratableError::MigrateSend(anyhow!("VM is not running"))) 1159 } 1160 } 1161 1162 #[cfg(all(feature = "kvm", target_arch = "x86_64"))] 1163 fn vm_check_cpuid_compatibility( 1164 &self, 1165 src_vm_config: &Arc<Mutex<VmConfig>>, 1166 src_vm_cpuid: &hypervisor::CpuId, 1167 ) -> result::Result<(), MigratableError> { 1168 // We check the `CPUID` compatibility of between the source vm and destination, which is 1169 // mostly about feature compatibility and "topology/sgx" leaves are not relevant. 1170 let dest_cpuid = &{ 1171 let vm_config = &src_vm_config.lock().unwrap(); 1172 1173 #[cfg(feature = "tdx")] 1174 let tdx_enabled = vm_config.tdx.is_some(); 1175 let phys_bits = vm::physical_bits( 1176 vm_config.cpus.max_phys_bits, 1177 #[cfg(feature = "tdx")] 1178 tdx_enabled, 1179 ); 1180 arch::generate_common_cpuid( 1181 self.hypervisor.clone(), 1182 None, 1183 None, 1184 phys_bits, 1185 vm_config.cpus.kvm_hyperv, 1186 #[cfg(feature = "tdx")] 1187 tdx_enabled, 1188 ) 1189 .map_err(|e| { 1190 MigratableError::MigrateReceive(anyhow!("Error generating common cpuid: {:?}", e)) 1191 })? 1192 }; 1193 arch::CpuidFeatureEntry::check_cpuid_compatibility(src_vm_cpuid, dest_cpuid).map_err(|e| { 1194 MigratableError::MigrateReceive(anyhow!( 1195 "Error checking cpu feature compatibility': {:?}", 1196 e 1197 )) 1198 }) 1199 } 1200 1201 fn control_loop(&mut self, api_receiver: Arc<Receiver<ApiRequest>>) -> Result<()> { 1202 const EPOLL_EVENTS_LEN: usize = 100; 1203 1204 let mut events = vec![epoll::Event::new(epoll::Events::empty(), 0); EPOLL_EVENTS_LEN]; 1205 let epoll_fd = self.epoll.as_raw_fd(); 1206 1207 'outer: loop { 1208 let num_events = match epoll::wait(epoll_fd, -1, &mut events[..]) { 1209 Ok(res) => res, 1210 Err(e) => { 1211 if e.kind() == io::ErrorKind::Interrupted { 1212 // It's well defined from the epoll_wait() syscall 1213 // documentation that the epoll loop can be interrupted 1214 // before any of the requested events occurred or the 1215 // timeout expired. In both those cases, epoll_wait() 1216 // returns an error of type EINTR, but this should not 1217 // be considered as a regular error. Instead it is more 1218 // appropriate to retry, by calling into epoll_wait(). 1219 continue; 1220 } 1221 return Err(Error::Epoll(e)); 1222 } 1223 }; 1224 1225 for event in events.iter().take(num_events) { 1226 let dispatch_idx = event.data as usize; 1227 1228 if let Some(dispatch_type) = self.epoll.dispatch_table[dispatch_idx] { 1229 match dispatch_type { 1230 EpollDispatch::Exit => { 1231 info!("VM exit event"); 1232 // Consume the event. 1233 self.exit_evt.read().map_err(Error::EventFdRead)?; 1234 self.vmm_shutdown().map_err(Error::VmmShutdown)?; 1235 1236 break 'outer; 1237 } 1238 EpollDispatch::Reset => { 1239 info!("VM reset event"); 1240 // Consume the event. 1241 self.reset_evt.read().map_err(Error::EventFdRead)?; 1242 self.vm_reboot().map_err(Error::VmReboot)?; 1243 } 1244 EpollDispatch::Stdin => { 1245 if let Some(ref vm) = self.vm { 1246 vm.handle_stdin().map_err(Error::Stdin)?; 1247 } 1248 } 1249 EpollDispatch::ActivateVirtioDevices => { 1250 if let Some(ref vm) = self.vm { 1251 let count = self.activate_evt.read().map_err(Error::EventFdRead)?; 1252 info!( 1253 "Trying to activate pending virtio devices: count = {}", 1254 count 1255 ); 1256 vm.activate_virtio_devices() 1257 .map_err(Error::ActivateVirtioDevices)?; 1258 } 1259 } 1260 EpollDispatch::Pty => { 1261 if let Some(ref vm) = self.vm { 1262 vm.handle_pty().map_err(Error::Pty)?; 1263 } 1264 } 1265 EpollDispatch::Api => { 1266 // Consume the event. 1267 self.api_evt.read().map_err(Error::EventFdRead)?; 1268 1269 // Read from the API receiver channel 1270 let api_request = api_receiver.recv().map_err(Error::ApiRequestRecv)?; 1271 1272 info!("API request event: {:?}", api_request); 1273 match api_request { 1274 ApiRequest::VmCreate(config, sender) => { 1275 let response = self 1276 .vm_create(config) 1277 .map_err(ApiError::VmCreate) 1278 .map(|_| ApiResponsePayload::Empty); 1279 1280 sender.send(response).map_err(Error::ApiResponseSend)?; 1281 } 1282 ApiRequest::VmDelete(sender) => { 1283 let response = self 1284 .vm_delete() 1285 .map_err(ApiError::VmDelete) 1286 .map(|_| ApiResponsePayload::Empty); 1287 1288 sender.send(response).map_err(Error::ApiResponseSend)?; 1289 } 1290 ApiRequest::VmBoot(sender) => { 1291 let response = self 1292 .vm_boot() 1293 .map_err(ApiError::VmBoot) 1294 .map(|_| ApiResponsePayload::Empty); 1295 1296 sender.send(response).map_err(Error::ApiResponseSend)?; 1297 } 1298 ApiRequest::VmShutdown(sender) => { 1299 let response = self 1300 .vm_shutdown() 1301 .map_err(ApiError::VmShutdown) 1302 .map(|_| ApiResponsePayload::Empty); 1303 1304 sender.send(response).map_err(Error::ApiResponseSend)?; 1305 } 1306 ApiRequest::VmReboot(sender) => { 1307 let response = self 1308 .vm_reboot() 1309 .map_err(ApiError::VmReboot) 1310 .map(|_| ApiResponsePayload::Empty); 1311 1312 sender.send(response).map_err(Error::ApiResponseSend)?; 1313 } 1314 ApiRequest::VmInfo(sender) => { 1315 let response = self 1316 .vm_info() 1317 .map_err(ApiError::VmInfo) 1318 .map(ApiResponsePayload::VmInfo); 1319 1320 sender.send(response).map_err(Error::ApiResponseSend)?; 1321 } 1322 ApiRequest::VmmPing(sender) => { 1323 let response = ApiResponsePayload::VmmPing(self.vmm_ping()); 1324 1325 sender.send(Ok(response)).map_err(Error::ApiResponseSend)?; 1326 } 1327 ApiRequest::VmPause(sender) => { 1328 let response = self 1329 .vm_pause() 1330 .map_err(ApiError::VmPause) 1331 .map(|_| ApiResponsePayload::Empty); 1332 1333 sender.send(response).map_err(Error::ApiResponseSend)?; 1334 } 1335 ApiRequest::VmResume(sender) => { 1336 let response = self 1337 .vm_resume() 1338 .map_err(ApiError::VmResume) 1339 .map(|_| ApiResponsePayload::Empty); 1340 1341 sender.send(response).map_err(Error::ApiResponseSend)?; 1342 } 1343 ApiRequest::VmSnapshot(snapshot_data, sender) => { 1344 let response = self 1345 .vm_snapshot(&snapshot_data.destination_url) 1346 .map_err(ApiError::VmSnapshot) 1347 .map(|_| ApiResponsePayload::Empty); 1348 1349 sender.send(response).map_err(Error::ApiResponseSend)?; 1350 } 1351 ApiRequest::VmRestore(restore_data, sender) => { 1352 let response = self 1353 .vm_restore(restore_data.as_ref().clone()) 1354 .map_err(ApiError::VmRestore) 1355 .map(|_| ApiResponsePayload::Empty); 1356 1357 sender.send(response).map_err(Error::ApiResponseSend)?; 1358 } 1359 ApiRequest::VmmShutdown(sender) => { 1360 let response = self 1361 .vmm_shutdown() 1362 .map_err(ApiError::VmmShutdown) 1363 .map(|_| ApiResponsePayload::Empty); 1364 1365 sender.send(response).map_err(Error::ApiResponseSend)?; 1366 1367 break 'outer; 1368 } 1369 ApiRequest::VmResize(resize_data, sender) => { 1370 let response = self 1371 .vm_resize( 1372 resize_data.desired_vcpus, 1373 resize_data.desired_ram, 1374 resize_data.desired_balloon, 1375 ) 1376 .map_err(ApiError::VmResize) 1377 .map(|_| ApiResponsePayload::Empty); 1378 sender.send(response).map_err(Error::ApiResponseSend)?; 1379 } 1380 ApiRequest::VmResizeZone(resize_zone_data, sender) => { 1381 let response = self 1382 .vm_resize_zone( 1383 resize_zone_data.id.clone(), 1384 resize_zone_data.desired_ram, 1385 ) 1386 .map_err(ApiError::VmResizeZone) 1387 .map(|_| ApiResponsePayload::Empty); 1388 sender.send(response).map_err(Error::ApiResponseSend)?; 1389 } 1390 ApiRequest::VmAddDevice(add_device_data, sender) => { 1391 let response = self 1392 .vm_add_device(add_device_data.as_ref().clone()) 1393 .map_err(ApiError::VmAddDevice) 1394 .map(ApiResponsePayload::VmAction); 1395 sender.send(response).map_err(Error::ApiResponseSend)?; 1396 } 1397 ApiRequest::VmRemoveDevice(remove_device_data, sender) => { 1398 let response = self 1399 .vm_remove_device(remove_device_data.id.clone()) 1400 .map_err(ApiError::VmRemoveDevice) 1401 .map(|_| ApiResponsePayload::Empty); 1402 sender.send(response).map_err(Error::ApiResponseSend)?; 1403 } 1404 ApiRequest::VmAddDisk(add_disk_data, sender) => { 1405 let response = self 1406 .vm_add_disk(add_disk_data.as_ref().clone()) 1407 .map_err(ApiError::VmAddDisk) 1408 .map(ApiResponsePayload::VmAction); 1409 sender.send(response).map_err(Error::ApiResponseSend)?; 1410 } 1411 ApiRequest::VmAddFs(add_fs_data, sender) => { 1412 let response = self 1413 .vm_add_fs(add_fs_data.as_ref().clone()) 1414 .map_err(ApiError::VmAddFs) 1415 .map(ApiResponsePayload::VmAction); 1416 sender.send(response).map_err(Error::ApiResponseSend)?; 1417 } 1418 ApiRequest::VmAddPmem(add_pmem_data, sender) => { 1419 let response = self 1420 .vm_add_pmem(add_pmem_data.as_ref().clone()) 1421 .map_err(ApiError::VmAddPmem) 1422 .map(ApiResponsePayload::VmAction); 1423 sender.send(response).map_err(Error::ApiResponseSend)?; 1424 } 1425 ApiRequest::VmAddNet(add_net_data, sender) => { 1426 let response = self 1427 .vm_add_net(add_net_data.as_ref().clone()) 1428 .map_err(ApiError::VmAddNet) 1429 .map(ApiResponsePayload::VmAction); 1430 sender.send(response).map_err(Error::ApiResponseSend)?; 1431 } 1432 ApiRequest::VmAddVsock(add_vsock_data, sender) => { 1433 let response = self 1434 .vm_add_vsock(add_vsock_data.as_ref().clone()) 1435 .map_err(ApiError::VmAddVsock) 1436 .map(ApiResponsePayload::VmAction); 1437 sender.send(response).map_err(Error::ApiResponseSend)?; 1438 } 1439 ApiRequest::VmCounters(sender) => { 1440 let response = self 1441 .vm_counters() 1442 .map_err(ApiError::VmInfo) 1443 .map(ApiResponsePayload::VmAction); 1444 1445 sender.send(response).map_err(Error::ApiResponseSend)?; 1446 } 1447 ApiRequest::VmReceiveMigration(receive_migration_data, sender) => { 1448 let response = self 1449 .vm_receive_migration( 1450 receive_migration_data.as_ref().clone(), 1451 ) 1452 .map_err(ApiError::VmReceiveMigration) 1453 .map(|_| ApiResponsePayload::Empty); 1454 sender.send(response).map_err(Error::ApiResponseSend)?; 1455 } 1456 ApiRequest::VmSendMigration(send_migration_data, sender) => { 1457 let response = self 1458 .vm_send_migration(send_migration_data.as_ref().clone()) 1459 .map_err(ApiError::VmSendMigration) 1460 .map(|_| ApiResponsePayload::Empty); 1461 sender.send(response).map_err(Error::ApiResponseSend)?; 1462 } 1463 ApiRequest::VmPowerButton(sender) => { 1464 let response = self 1465 .vm_power_button() 1466 .map_err(ApiError::VmPowerButton) 1467 .map(|_| ApiResponsePayload::Empty); 1468 1469 sender.send(response).map_err(Error::ApiResponseSend)?; 1470 } 1471 } 1472 } 1473 } 1474 } 1475 } 1476 } 1477 1478 Ok(()) 1479 } 1480 } 1481 1482 const CPU_MANAGER_SNAPSHOT_ID: &str = "cpu-manager"; 1483 const MEMORY_MANAGER_SNAPSHOT_ID: &str = "memory-manager"; 1484 const DEVICE_MANAGER_SNAPSHOT_ID: &str = "device-manager"; 1485