// Copyright 2019 Intel Corporation. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

use super::vu_common_ctrl::VhostUserHandle;
use super::{Error, Result, DEFAULT_VIRTIO_FEATURES};
use crate::seccomp_filters::{get_seccomp_filter, Thread};
use crate::vhost_user::{Inflight, VhostUserEpollHandler};
use crate::{
    ActivateError, ActivateResult, Queue, UserspaceMapping, VirtioCommon, VirtioDevice,
    VirtioDeviceType, VirtioInterrupt, VirtioSharedMemoryList,
};
use crate::{GuestMemoryMmap, GuestRegionMmap, MmapRegion};
use anyhow::anyhow;
use libc::{self, c_void, off64_t, pread64, pwrite64};
use seccomp::{SeccompAction, SeccompFilter};
use std::io;
use std::ops::Deref;
use std::os::unix::io::AsRawFd;
use std::result;
use std::sync::{Arc, Barrier, Mutex};
use std::thread;
use versionize::{VersionMap, Versionize, VersionizeResult};
use versionize_derive::Versionize;
use vhost::vhost_user::message::{
    VhostUserFSSlaveMsg, VhostUserFSSlaveMsgFlags, VhostUserProtocolFeatures,
    VhostUserVirtioFeatures, VHOST_USER_FS_SLAVE_ENTRIES,
};
use vhost::vhost_user::{
    HandlerResult, MasterReqHandler, VhostUserMaster, VhostUserMasterReqHandler,
};
use vm_memory::{
    Address, ByteValued, GuestAddress, GuestAddressSpace, GuestMemory, GuestMemoryAtomic,
};
use vm_migration::{
    protocol::MemoryRangeTable, Migratable, MigratableError, Pausable, Snapshot, Snapshottable,
    Transportable, VersionMapped,
};
use vmm_sys_util::eventfd::EventFd;

const NUM_QUEUE_OFFSET: usize = 1;
const DEFAULT_QUEUE_NUMBER: usize = 2;

#[derive(Versionize)]
pub struct State {
    pub avail_features: u64,
    pub acked_features: u64,
    pub config: VirtioFsConfig,
}

impl VersionMapped for State {}

struct SlaveReqHandler {
    cache_offset: GuestAddress,
    cache_size: u64,
    mmap_cache_addr: u64,
    mem: GuestMemoryAtomic<GuestMemoryMmap>,
}

impl SlaveReqHandler {
    // Make sure request is within cache range
    fn is_req_valid(&self, offset: u64, len: u64) -> bool {
        let end = match offset.checked_add(len) {
            Some(n) => n,
            None => return false,
        };

        !(offset >= self.cache_size || end > self.cache_size)
    }
}

impl VhostUserMasterReqHandler for SlaveReqHandler {
    fn handle_config_change(&self) -> HandlerResult<u64> {
        debug!("handle_config_change");
        Ok(0)
    }

    fn fs_slave_map(&self, fs: &VhostUserFSSlaveMsg, fd: &dyn AsRawFd) -> HandlerResult<u64> {
        debug!("fs_slave_map");

        for i in 0..VHOST_USER_FS_SLAVE_ENTRIES {
            let offset = fs.cache_offset[i];
            let len = fs.len[i];

            // Ignore if the length is 0.
            if len == 0 {
                continue;
            }

            if !self.is_req_valid(offset, len) {
                return Err(io::Error::from_raw_os_error(libc::EINVAL));
            }

            let addr = self.mmap_cache_addr + offset;
            let flags = fs.flags[i];
            let ret = unsafe {
                libc::mmap(
                    addr as *mut libc::c_void,
                    len as usize,
                    flags.bits() as i32,
                    libc::MAP_SHARED | libc::MAP_FIXED,
                    fd.as_raw_fd(),
                    fs.fd_offset[i] as libc::off_t,
                )
            };
            if ret == libc::MAP_FAILED {
                return Err(io::Error::last_os_error());
            }

            let ret = unsafe { libc::close(fd.as_raw_fd()) };
            if ret == -1 {
                return Err(io::Error::last_os_error());
            }
        }

        Ok(0)
    }

    fn fs_slave_unmap(&self, fs: &VhostUserFSSlaveMsg) -> HandlerResult<u64> {
        debug!("fs_slave_unmap");

        for i in 0..VHOST_USER_FS_SLAVE_ENTRIES {
            let offset = fs.cache_offset[i];
            let mut len = fs.len[i];

            // Ignore if the length is 0.
            if len == 0 {
                continue;
            }

            // Handle the special case where the slave asks for the entire
            // mapping to be unmapped.
            if len == 0xffff_ffff_ffff_ffff {
                len = self.cache_size;
            }

            if !self.is_req_valid(offset, len) {
                return Err(io::Error::from_raw_os_error(libc::EINVAL));
            }

            let addr = self.mmap_cache_addr + offset;
            let ret = unsafe {
                libc::mmap(
                    addr as *mut libc::c_void,
                    len as usize,
                    libc::PROT_NONE,
                    libc::MAP_ANONYMOUS | libc::MAP_PRIVATE | libc::MAP_FIXED,
                    -1,
                    0,
                )
            };
            if ret == libc::MAP_FAILED {
                return Err(io::Error::last_os_error());
            }
        }

        Ok(0)
    }

    fn fs_slave_sync(&self, fs: &VhostUserFSSlaveMsg) -> HandlerResult<u64> {
        debug!("fs_slave_sync");

        for i in 0..VHOST_USER_FS_SLAVE_ENTRIES {
            let offset = fs.cache_offset[i];
            let len = fs.len[i];

            // Ignore if the length is 0.
            if len == 0 {
                continue;
            }

            if !self.is_req_valid(offset, len) {
                return Err(io::Error::from_raw_os_error(libc::EINVAL));
            }

            let addr = self.mmap_cache_addr + offset;
            let ret =
                unsafe { libc::msync(addr as *mut libc::c_void, len as usize, libc::MS_SYNC) };
            if ret == -1 {
                return Err(io::Error::last_os_error());
            }
        }

        Ok(0)
    }

    fn fs_slave_io(&self, fs: &VhostUserFSSlaveMsg, fd: &dyn AsRawFd) -> HandlerResult<u64> {
        debug!("fs_slave_io");

        let mut done: u64 = 0;
        for i in 0..VHOST_USER_FS_SLAVE_ENTRIES {
            // Ignore if the length is 0.
            if fs.len[i] == 0 {
                continue;
            }

            let mut foffset = fs.fd_offset[i];
            let mut len = fs.len[i] as usize;
            let gpa = fs.cache_offset[i];
            let cache_end = self.cache_offset.raw_value() + self.cache_size;
            let efault = libc::EFAULT;

            let mut ptr = if gpa >= self.cache_offset.raw_value() && gpa < cache_end {
                let offset = gpa
                    .checked_sub(self.cache_offset.raw_value())
                    .ok_or_else(|| io::Error::from_raw_os_error(efault))?;
                let end = gpa
                    .checked_add(fs.len[i])
                    .ok_or_else(|| io::Error::from_raw_os_error(efault))?;

                if end >= cache_end {
                    return Err(io::Error::from_raw_os_error(efault));
                }

                self.mmap_cache_addr + offset
            } else {
                self.mem
                    .memory()
                    .get_host_address(GuestAddress(gpa))
                    .map_err(|e| {
                        error!(
                            "Failed to find RAM region associated with guest physical address 0x{:x}: {:?}",
                            gpa, e
                        );
                        io::Error::from_raw_os_error(efault)
                    })? as u64
            };

            while len > 0 {
                let ret = if (fs.flags[i] & VhostUserFSSlaveMsgFlags::MAP_W)
                    == VhostUserFSSlaveMsgFlags::MAP_W
                {
                    debug!("write: foffset={}, len={}", foffset, len);
                    unsafe {
                        pwrite64(
                            fd.as_raw_fd(),
                            ptr as *const c_void,
                            len as usize,
                            foffset as off64_t,
                        )
                    }
                } else {
                    debug!("read: foffset={}, len={}", foffset, len);
                    unsafe {
                        pread64(
                            fd.as_raw_fd(),
                            ptr as *mut c_void,
                            len as usize,
                            foffset as off64_t,
                        )
                    }
                };

                if ret < 0 {
                    return Err(io::Error::last_os_error());
                }

                if ret == 0 {
                    // EOF
                    return Err(io::Error::new(
                        io::ErrorKind::UnexpectedEof,
                        "failed to access whole buffer",
                    ));
                }
                len -= ret as usize;
                foffset += ret as u64;
                ptr += ret as u64;
                done += ret as u64;
            }
        }

        let ret = unsafe { libc::close(fd.as_raw_fd()) };
        if ret == -1 {
            return Err(io::Error::last_os_error());
        }

        Ok(done)
    }
}

#[derive(Copy, Clone, Versionize)]
#[repr(C, packed)]
pub struct VirtioFsConfig {
    pub tag: [u8; 36],
    pub num_request_queues: u32,
}

impl Default for VirtioFsConfig {
    fn default() -> Self {
        VirtioFsConfig {
            tag: [0; 36],
            num_request_queues: 0,
        }
    }
}

unsafe impl ByteValued for VirtioFsConfig {}

pub struct Fs {
    common: VirtioCommon,
    id: String,
    vu: Arc<Mutex<VhostUserHandle>>,
    config: VirtioFsConfig,
    // Hold ownership of the memory that is allocated for the device,
    // which will be automatically dropped when the device is dropped.
    cache: Option<(VirtioSharedMemoryList, MmapRegion)>,
    slave_req_support: bool,
    seccomp_action: SeccompAction,
    guest_memory: Option<GuestMemoryAtomic<GuestMemoryMmap>>,
    acked_protocol_features: u64,
    socket_path: String,
    epoll_thread: Option<thread::JoinHandle<()>>,
    vu_num_queues: usize,
}

impl Fs {
    /// Create a new virtio-fs device.
    pub fn new(
        id: String,
        path: &str,
        tag: &str,
        req_num_queues: usize,
        queue_size: u16,
        cache: Option<(VirtioSharedMemoryList, MmapRegion)>,
        seccomp_action: SeccompAction,
    ) -> Result<Fs> {
        let mut slave_req_support = false;

        // Calculate the actual number of queues needed.
        let num_queues = NUM_QUEUE_OFFSET + req_num_queues;

        // Connect to the vhost-user socket.
        let mut vu = VhostUserHandle::connect_vhost_user(false, path, num_queues as u64, false)?;

        // Fill in the device and vring features the VMM supports.
        let avail_features = DEFAULT_VIRTIO_FEATURES;

        let mut avail_protocol_features = VhostUserProtocolFeatures::MQ
            | VhostUserProtocolFeatures::CONFIGURE_MEM_SLOTS
            | VhostUserProtocolFeatures::REPLY_ACK
            | VhostUserProtocolFeatures::INFLIGHT_SHMFD;
        let slave_protocol_features =
            VhostUserProtocolFeatures::SLAVE_REQ | VhostUserProtocolFeatures::SLAVE_SEND_FD;
        if cache.is_some() {
            avail_protocol_features |= slave_protocol_features;
        }

        let (acked_features, acked_protocol_features) =
            vu.negotiate_features_vhost_user(avail_features, avail_protocol_features)?;

        let backend_num_queues =
            if acked_protocol_features & VhostUserProtocolFeatures::MQ.bits() != 0 {
                vu.socket_handle()
                    .get_queue_num()
                    .map_err(Error::VhostUserGetQueueMaxNum)? as usize
            } else {
                DEFAULT_QUEUE_NUMBER
            };

        if num_queues > backend_num_queues {
            error!(
                "vhost-user-fs requested too many queues ({}) but the backend only supports {}",
                num_queues, backend_num_queues
            );
            return Err(Error::BadQueueNum);
        }

        if acked_protocol_features & slave_protocol_features.bits()
            == slave_protocol_features.bits()
        {
            slave_req_support = true;
        }

        // Create virtio-fs device configuration.
        let mut config = VirtioFsConfig::default();
        let tag_bytes_vec = tag.to_string().into_bytes();
        config.tag[..tag_bytes_vec.len()].copy_from_slice(tag_bytes_vec.as_slice());
        config.num_request_queues = req_num_queues as u32;

        Ok(Fs {
            common: VirtioCommon {
                device_type: VirtioDeviceType::Fs as u32,
                avail_features: acked_features,
                acked_features: 0,
                queue_sizes: vec![queue_size; num_queues],
                paused_sync: Some(Arc::new(Barrier::new(2))),
                min_queues: DEFAULT_QUEUE_NUMBER as u16,
                ..Default::default()
            },
            id,
            vu: Arc::new(Mutex::new(vu)),
            config,
            cache,
            slave_req_support,
            seccomp_action,
            guest_memory: None,
            acked_protocol_features,
            socket_path: path.to_string(),
            epoll_thread: None,
            vu_num_queues: num_queues,
        })
    }

    fn state(&self) -> State {
        State {
            avail_features: self.common.avail_features,
            acked_features: self.common.acked_features,
            config: self.config,
        }
    }

    fn set_state(&mut self, state: &State) {
        self.common.avail_features = state.avail_features;
        self.common.acked_features = state.acked_features;
        self.config = state.config;
    }
}

impl Drop for Fs {
    fn drop(&mut self) {
        if let Some(kill_evt) = self.common.kill_evt.take() {
            // Ignore the result because there is nothing we can do about it.
            let _ = kill_evt.write(1);
        }
    }
}

impl VirtioDevice for Fs {
    fn device_type(&self) -> u32 {
        self.common.device_type
    }

    fn queue_max_sizes(&self) -> &[u16] {
        &self.common.queue_sizes
    }

    fn features(&self) -> u64 {
        self.common.avail_features
    }

    fn ack_features(&mut self, value: u64) {
        self.common.ack_features(value)
    }

    fn read_config(&self, offset: u64, data: &mut [u8]) {
        self.read_config_from_slice(self.config.as_slice(), offset, data);
    }

    fn activate(
        &mut self,
        mem: GuestMemoryAtomic<GuestMemoryMmap>,
        interrupt_cb: Arc<dyn VirtioInterrupt>,
        queues: Vec<Queue>,
        queue_evts: Vec<EventFd>,
    ) -> ActivateResult {
        self.common.activate(&queues, &queue_evts, &interrupt_cb)?;
        self.guest_memory = Some(mem.clone());

        // Initialize slave communication.
        let slave_req_handler = if self.slave_req_support {
            if let Some(cache) = self.cache.as_ref() {
                let vu_master_req_handler = Arc::new(SlaveReqHandler {
                    cache_offset: cache.0.addr,
                    cache_size: cache.0.len,
                    mmap_cache_addr: cache.0.host_addr,
                    mem: mem.clone(),
                });

                let mut req_handler =
                    MasterReqHandler::new(vu_master_req_handler).map_err(|e| {
                        ActivateError::VhostUserFsSetup(Error::MasterReqHandlerCreation(e))
                    })?;
                req_handler.set_reply_ack_flag(true);
                Some(req_handler)
            } else {
                None
            }
        } else {
            None
        };

        // The backend acknowledged features must contain the protocol feature
        // bit in case it was initially set but lost through the features
        // negotiation with the guest.
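        // For instance, if the guest did not acknowledge
        // VHOST_USER_F_PROTOCOL_FEATURES (bit 30) during virtio feature
        // negotiation, OR-ing the originally advertised bit back in below
        // keeps it visible to the vhost-user backend.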
        let backend_acked_features = self.common.acked_features
            | (self.common.avail_features & VhostUserVirtioFeatures::PROTOCOL_FEATURES.bits());

        let mut inflight: Option<Inflight> =
            if self.acked_protocol_features & VhostUserProtocolFeatures::INFLIGHT_SHMFD.bits() != 0
            {
                Some(Inflight::default())
            } else {
                None
            };

        self.vu
            .lock()
            .unwrap()
            .setup_vhost_user(
                &mem.memory(),
                queues.clone(),
                queue_evts.iter().map(|q| q.try_clone().unwrap()).collect(),
                &interrupt_cb,
                backend_acked_features,
                &slave_req_handler,
                inflight.as_mut(),
            )
            .map_err(ActivateError::VhostUserFsSetup)?;

        // Run a dedicated thread for handling potential reconnections with
        // the backend as well as requests initiated by the backend.
        let (kill_evt, pause_evt) = self.common.dup_eventfds();
        let mut handler: VhostUserEpollHandler<SlaveReqHandler> = VhostUserEpollHandler {
            vu: self.vu.clone(),
            mem,
            kill_evt,
            pause_evt,
            queues,
            queue_evts,
            virtio_interrupt: interrupt_cb,
            acked_features: backend_acked_features,
            acked_protocol_features: self.acked_protocol_features,
            socket_path: self.socket_path.clone(),
            server: false,
            slave_req_handler,
            inflight,
        };

        let paused = self.common.paused.clone();
        let paused_sync = self.common.paused_sync.clone();

        let virtio_vhost_fs_seccomp_filter =
            get_seccomp_filter(&self.seccomp_action, Thread::VirtioVhostFs)
                .map_err(ActivateError::CreateSeccompFilter)?;

        thread::Builder::new()
            .name(self.id.to_string())
            .spawn(move || {
                if let Err(e) = SeccompFilter::apply(virtio_vhost_fs_seccomp_filter) {
                    error!("Error applying seccomp filter: {:?}", e);
                } else if let Err(e) = handler.run(paused, paused_sync.unwrap()) {
                    error!("Error running vhost-user-fs worker: {:?}", e);
                }
            })
            .map(|thread| self.epoll_thread = Some(thread))
            .map_err(|e| {
                error!("failed to spawn vhost-user-fs worker thread: {}", e);
                ActivateError::BadActivate
            })?;

        event!("virtio-device", "activated", "id", &self.id);
        Ok(())
    }

    fn reset(&mut self) -> Option<Arc<dyn VirtioInterrupt>> {
        // We first must resume the virtio thread if it was paused.
        if self.common.pause_evt.take().is_some() {
            self.common.resume().ok()?;
        }

        if let Err(e) = self
            .vu
            .lock()
            .unwrap()
            .reset_vhost_user(self.common.queue_sizes.len())
        {
            error!("Failed to reset vhost-user daemon: {:?}", e);
            return None;
        }

        if let Some(kill_evt) = self.common.kill_evt.take() {
            // Ignore the result because there is nothing we can do about it.
            let _ = kill_evt.write(1);
        }

        event!("virtio-device", "reset", "id", &self.id);

        // Return the interrupt
        Some(self.common.interrupt_cb.take().unwrap())
    }

    fn shutdown(&mut self) {
        let _ = unsafe { libc::close(self.vu.lock().unwrap().socket_handle().as_raw_fd()) };
    }

    fn get_shm_regions(&self) -> Option<VirtioSharedMemoryList> {
        self.cache.as_ref().map(|cache| cache.0.clone())
    }

    fn set_shm_regions(
        &mut self,
        shm_regions: VirtioSharedMemoryList,
    ) -> std::result::Result<(), crate::Error> {
        if let Some(cache) = self.cache.as_mut() {
            cache.0 = shm_regions;
            Ok(())
        } else {
            Err(crate::Error::SetShmRegionsNotSupported)
        }
    }

    fn add_memory_region(
        &mut self,
        region: &Arc<GuestRegionMmap>,
    ) -> std::result::Result<(), crate::Error> {
        if self.acked_protocol_features & VhostUserProtocolFeatures::CONFIGURE_MEM_SLOTS.bits() != 0
        {
            self.vu
                .lock()
                .unwrap()
                .add_memory_region(region)
                .map_err(crate::Error::VhostUserAddMemoryRegion)
        } else if let Some(guest_memory) = &self.guest_memory {
            self.vu
                .lock()
                .unwrap()
                .update_mem_table(guest_memory.memory().deref())
                .map_err(crate::Error::VhostUserUpdateMemory)
        } else {
            Ok(())
        }
    }

    fn userspace_mappings(&self) -> Vec<UserspaceMapping> {
        let mut mappings = Vec::new();
        if let Some(cache) = self.cache.as_ref() {
            mappings.push(UserspaceMapping {
                host_addr: cache.0.host_addr,
                mem_slot: cache.0.mem_slot,
                addr: cache.0.addr,
                len: cache.0.len,
                mergeable: false,
            })
        }

        mappings
    }
}

impl Pausable for Fs {
    fn pause(&mut self) -> result::Result<(), MigratableError> {
        self.vu
            .lock()
            .unwrap()
            .pause_vhost_user(self.vu_num_queues)
            .map_err(|e| {
                MigratableError::Pause(anyhow!("Error pausing vhost-user-fs backend: {:?}", e))
            })?;

        self.common.pause()
    }

    fn resume(&mut self) -> result::Result<(), MigratableError> {
        self.common.resume()?;

        if let Some(epoll_thread) = &self.epoll_thread {
            epoll_thread.thread().unpark();
        }

        self.vu
            .lock()
            .unwrap()
            .resume_vhost_user(self.vu_num_queues)
            .map_err(|e| {
                MigratableError::Resume(anyhow!("Error resuming vhost-user-fs backend: {:?}", e))
            })
    }
}

impl Snapshottable for Fs {
    fn id(&self) -> String {
        self.id.clone()
    }

    fn snapshot(&mut self) -> std::result::Result<Snapshot, MigratableError> {
        Snapshot::new_from_versioned_state(&self.id(), &self.state())
    }

    fn restore(&mut self, snapshot: Snapshot) -> std::result::Result<(), MigratableError> {
        self.set_state(&snapshot.to_versioned_state(&self.id)?);
        Ok(())
    }
}

impl Transportable for Fs {}

impl Migratable for Fs {
    fn start_dirty_log(&mut self) -> std::result::Result<(), MigratableError> {
        if let Some(guest_memory) = &self.guest_memory {
            let last_ram_addr = guest_memory.memory().last_addr().raw_value();
            self.vu
                .lock()
                .unwrap()
                .start_dirty_log(last_ram_addr)
                .map_err(|e| {
                    MigratableError::MigrateStart(anyhow!(
                        "Error starting migration for vhost-user-fs backend: {:?}",
                        e
                    ))
                })
        } else {
            Err(MigratableError::MigrateStart(anyhow!(
                "Missing guest memory"
            )))
        }
    }

    fn stop_dirty_log(&mut self) -> std::result::Result<(), MigratableError> {
        self.vu.lock().unwrap().stop_dirty_log().map_err(|e| {
            MigratableError::MigrateStop(anyhow!(
                "Error stopping migration for vhost-user-fs backend: {:?}",
                e
            ))
        })
    }

    fn dirty_log(&mut self) -> std::result::Result<MemoryRangeTable, MigratableError> {
        if let Some(guest_memory) = &self.guest_memory {
            let last_ram_addr = guest_memory.memory().last_addr().raw_value();
            self.vu
                .lock()
                .unwrap()
                .dirty_log(last_ram_addr)
                .map_err(|e| {
                    MigratableError::MigrateDirtyRanges(anyhow!(
                        "Error retrieving dirty ranges from vhost-user-fs backend: {:?}",
                        e
                    ))
                })
        } else {
            Err(MigratableError::MigrateDirtyRanges(anyhow!(
                "Missing guest memory"
            )))
        }
    }
}
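
// Illustrative usage sketch (not part of the device implementation): it shows
// how a virtio-fs device is typically constructed. The socket path, tag and
// queue parameters below are hypothetical, and the test is ignored because
// Fs::new() connects to the vhost-user socket, so it requires an external
// virtiofsd-compatible backend already listening on that path.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    #[ignore]
    fn construct_fs_device_sketch() {
        // One request queue (plus the hiprio queue added internally), 1024
        // descriptors per queue, and no DAX cache window.
        let fs = Fs::new(
            "_fs0".to_owned(),
            "/tmp/virtiofs.sock",
            "myfs",
            1,
            1024,
            None,
            SeccompAction::Allow,
        );
        assert!(fs.is_ok());
    }
}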