1 // Copyright 2018 The Chromium OS Authors. All rights reserved. 2 // Use of this source code is governed by a BSD-style license that can be 3 // found in the LICENSE-BSD-3-Clause file. 4 // 5 // Copyright © 2019 Intel Corporation 6 // 7 // SPDX-License-Identifier: Apache-2.0 AND BSD-3-Clause 8 9 use std::any::Any; 10 use std::cmp; 11 use std::io::Write; 12 use std::ops::Deref; 13 use std::sync::atomic::{AtomicBool, AtomicU16, AtomicUsize, Ordering}; 14 use std::sync::{Arc, Barrier, Mutex}; 15 16 use anyhow::anyhow; 17 use libc::EFD_NONBLOCK; 18 use pci::{ 19 BarReprogrammingParams, MsixCap, MsixConfig, PciBarConfiguration, PciBarRegionType, 20 PciCapability, PciCapabilityId, PciClassCode, PciConfiguration, PciDevice, PciDeviceError, 21 PciHeaderType, PciMassStorageSubclass, PciNetworkControllerSubclass, PciSubclass, 22 }; 23 use serde::{Deserialize, Serialize}; 24 use thiserror::Error; 25 use virtio_queue::{Queue, QueueT}; 26 use vm_allocator::{AddressAllocator, SystemAllocator}; 27 use vm_device::dma_mapping::ExternalDmaMapping; 28 use vm_device::interrupt::{ 29 InterruptIndex, InterruptManager, InterruptSourceGroup, MsiIrqGroupConfig, 30 }; 31 use vm_device::{BusDevice, PciBarType, Resource}; 32 use vm_memory::{Address, ByteValued, GuestAddress, GuestAddressSpace, GuestMemoryAtomic, Le32}; 33 use vm_migration::{Migratable, MigratableError, Pausable, Snapshot, Snapshottable, Transportable}; 34 use vm_virtio::AccessPlatform; 35 use vmm_sys_util::eventfd::EventFd; 36 37 use super::pci_common_config::VirtioPciCommonConfigState; 38 use crate::transport::{VirtioPciCommonConfig, VirtioTransport, VIRTIO_PCI_COMMON_CONFIG_ID}; 39 use crate::{ 40 ActivateResult, GuestMemoryMmap, VirtioDevice, VirtioDeviceType, VirtioInterrupt, 41 VirtioInterruptType, DEVICE_ACKNOWLEDGE, DEVICE_DRIVER, DEVICE_DRIVER_OK, DEVICE_FAILED, 42 DEVICE_FEATURES_OK, DEVICE_INIT, 43 }; 44 45 /// Vector value used to disable MSI for a queue. 
const VIRTQ_MSI_NO_VECTOR: u16 = 0xffff;

// Values carried in the `cfg_type` field of a virtio PCI capability,
// identifying which configuration structure the capability points at
// (common config, notification area, ISR, device-specific config, the
// PCI config access window, or a shared memory region).
enum PciCapabilityType {
    Common = 1,
    Notify = 2,
    Isr = 3,
    Device = 4,
    Pci = 5,
    SharedMemory = 8,
}

// This offset represents the 2 bytes omitted from the VirtioPciCap structure
// as they are already handled through add_capability(). These 2 bytes are the
// fields cap_vndr (1 byte) and cap_next (1 byte) defined in the virtio spec.
const VIRTIO_PCI_CAP_OFFSET: usize = 2;

// Wire layout of a generic virtio PCI capability, minus the cap_vndr and
// cap_next bytes that add_capability() emits itself (see
// VIRTIO_PCI_CAP_OFFSET above). repr(packed) keeps the byte layout exactly
// as the guest expects to read it from config space.
#[allow(dead_code)]
#[repr(packed)]
#[derive(Clone, Copy, Default)]
struct VirtioPciCap {
    cap_len: u8,      // Generic PCI field: capability length
    cfg_type: u8,     // Identifies the structure.
    pci_bar: u8,      // Where to find it.
    id: u8,           // Multiple capabilities of the same type
    padding: [u8; 2], // Pad to full dword.
    offset: Le32,     // Offset within bar.
    length: Le32,     // Length of the structure, in bytes.
}
// SAFETY: All members are simple numbers and any value is valid.
unsafe impl ByteValued for VirtioPciCap {}

impl PciCapability for VirtioPciCap {
    fn bytes(&self) -> &[u8] {
        self.as_slice()
    }

    fn id(&self) -> PciCapabilityId {
        PciCapabilityId::VendorSpecific
    }
}

// The 2 bytes (cap_vndr, cap_next) not counted by size_of::<VirtioPciCap>()
// but included in the capability length reported to the guest in cap_len.
const VIRTIO_PCI_CAP_LEN_OFFSET: u8 = 2;

impl VirtioPciCap {
    /// Builds a capability of the given type pointing at `offset`/`length`
    /// within BAR `pci_bar`. `cap_len` is the full on-the-wire length,
    /// including the two header bytes emitted by add_capability().
    pub fn new(cfg_type: PciCapabilityType, pci_bar: u8, offset: u32, length: u32) -> Self {
        VirtioPciCap {
            cap_len: (std::mem::size_of::<VirtioPciCap>() as u8) + VIRTIO_PCI_CAP_LEN_OFFSET,
            cfg_type: cfg_type as u8,
            pci_bar,
            id: 0,
            padding: [0; 2],
            offset: Le32::from(offset),
            length: Le32::from(length),
        }
    }
}

// Notify capability: a generic virtio capability extended with the
// per-queue notification address multiplier.
#[allow(dead_code)]
#[repr(packed)]
#[derive(Clone, Copy, Default)]
struct VirtioPciNotifyCap {
    cap: VirtioPciCap,
    notify_off_multiplier: Le32,
}
// SAFETY: All members are simple numbers and any value is valid.
unsafe impl ByteValued for VirtioPciNotifyCap {}

impl PciCapability for VirtioPciNotifyCap {
    fn bytes(&self) -> &[u8] {
        self.as_slice()
    }

    fn id(&self) -> PciCapabilityId {
        PciCapabilityId::VendorSpecific
    }
}

impl VirtioPciNotifyCap {
    /// Builds a notify capability. `multiplier` is the per-queue stride
    /// (queue_notify_off * multiplier) added to `offset` within the BAR;
    /// `cap_len` covers this extended structure, including the two header
    /// bytes emitted by add_capability().
    pub fn new(
        cfg_type: PciCapabilityType,
        pci_bar: u8,
        offset: u32,
        length: u32,
        multiplier: Le32,
    ) -> Self {
        VirtioPciNotifyCap {
            cap: VirtioPciCap {
                cap_len: (std::mem::size_of::<VirtioPciNotifyCap>() as u8)
                    + VIRTIO_PCI_CAP_LEN_OFFSET,
                cfg_type: cfg_type as u8,
                pci_bar,
                id: 0,
                padding: [0; 2],
                offset: Le32::from(offset),
                length: Le32::from(length),
            },
            notify_off_multiplier: multiplier,
        }
    }
}

// 64-bit variant of the generic capability: the low 32 bits of offset and
// length live in `cap`, the high 32 bits in the extra fields below.
#[allow(dead_code)]
#[repr(packed)]
#[derive(Clone, Copy, Default)]
struct VirtioPciCap64 {
    cap: VirtioPciCap,
    offset_hi: Le32,
    length_hi: Le32,
}
// SAFETY: All members are simple numbers and any value is valid.
unsafe impl ByteValued for VirtioPciCap64 {}

impl PciCapability for VirtioPciCap64 {
    fn bytes(&self) -> &[u8] {
        self.as_slice()
    }

    fn id(&self) -> PciCapabilityId {
        PciCapabilityId::VendorSpecific
    }
}

impl VirtioPciCap64 {
    /// Builds a 64-bit capability, splitting `offset` and `length` into
    /// low (in `cap`) and high dwords. `id` distinguishes multiple
    /// capabilities of the same type (e.g. several shared memory regions).
    pub fn new(cfg_type: PciCapabilityType, pci_bar: u8, id: u8, offset: u64, length: u64) -> Self {
        VirtioPciCap64 {
            cap: VirtioPciCap {
                cap_len: (std::mem::size_of::<VirtioPciCap64>() as u8) + VIRTIO_PCI_CAP_LEN_OFFSET,
                cfg_type: cfg_type as u8,
                pci_bar,
                id,
                padding: [0; 2],
                // Low 32 bits only; the high halves go below.
                offset: Le32::from(offset as u32),
                length: Le32::from(length as u32),
            },
            offset_hi: Le32::from((offset >> 32) as u32),
            length_hi: Le32::from((length >> 32) as u32),
        }
    }
}

// VIRTIO_PCI_CAP_PCI_CFG capability: a window through which the guest can
// access the other capability regions via plain PCI config space, without
// mapping the BARs. `pci_cfg_data` carries the data being read/written.
#[allow(dead_code)]
#[repr(packed)]
#[derive(Clone, Copy, Default)]
struct VirtioPciCfgCap {
    cap: VirtioPciCap,
    pci_cfg_data: [u8; 4],
}
// SAFETY: All members are simple numbers and any value is valid.
unsafe impl ByteValued for VirtioPciCfgCap {}

impl PciCapability for VirtioPciCfgCap {
    fn bytes(&self) -> &[u8] {
        self.as_slice()
    }

    fn id(&self) -> PciCapabilityId {
        PciCapabilityId::VendorSpecific
    }
}

impl VirtioPciCfgCap {
    fn new() -> Self {
        VirtioPciCfgCap {
            cap: VirtioPciCap::new(PciCapabilityType::Pci, 0, 0, 0),
            ..Default::default()
        }
    }
}

// The cfg capability together with its byte offset within this device's
// PCI config space (filled in by add_pci_capabilities()).
#[derive(Clone, Copy, Default)]
struct VirtioPciCfgCapInfo {
    offset: usize,
    cap: VirtioPciCfgCap,
}

#[allow(dead_code)]
#[derive(Copy, Clone)]
pub enum PciVirtioSubclass {
    NonTransitionalBase = 0xff,
}

impl PciSubclass for PciVirtioSubclass {
    fn get_register_value(&self) -> u8 {
        *self as u8
    }
}

// Allocate one bar for the structs pointed to by the capability structures.
// As per the PCI specification, because the same BAR shares MSI-X and non
// MSI-X structures, it is recommended to use 8KiB alignment for all those
// structures.
//
// Layout of the single capability BAR (offsets are 8KiB-aligned slots):
const COMMON_CONFIG_BAR_OFFSET: u64 = 0x0000;
const COMMON_CONFIG_SIZE: u64 = 56;
const ISR_CONFIG_BAR_OFFSET: u64 = 0x2000;
const ISR_CONFIG_SIZE: u64 = 1;
const DEVICE_CONFIG_BAR_OFFSET: u64 = 0x4000;
const DEVICE_CONFIG_SIZE: u64 = 0x1000;
const NOTIFICATION_BAR_OFFSET: u64 = 0x6000;
const NOTIFICATION_SIZE: u64 = 0x1000;
const MSIX_TABLE_BAR_OFFSET: u64 = 0x8000;
// The size is 256KiB because the table can hold up to 2048 entries, with each
// entry being 128 bits (4 DWORDS).
const MSIX_TABLE_SIZE: u64 = 0x40000;
const MSIX_PBA_BAR_OFFSET: u64 = 0x48000;
// The size is 2KiB because the Pending Bit Array has one bit per vector and it
// can support up to 2048 vectors.
const MSIX_PBA_SIZE: u64 = 0x800;
// The BAR size must be a power of 2.
const CAPABILITY_BAR_SIZE: u64 = 0x80000;
// BAR 0 holds all the structures above; BAR 2 is used for virtio shared
// memory regions when the device exposes some.
const VIRTIO_COMMON_BAR_INDEX: usize = 0;
const VIRTIO_SHM_BAR_INDEX: usize = 2;

const NOTIFY_OFF_MULTIPLIER: u32 = 4; // A dword per notification address.

// Red Hat / virtio PCI vendor ID.
const VIRTIO_PCI_VENDOR_ID: u16 = 0x1af4;
const VIRTIO_PCI_DEVICE_ID_BASE: u16 = 0x1040; // Add to device type to get device ID.
// Serializable snapshot of one virtqueue's guest-visible state. Field names
// are part of the snapshot format; do not rename without a migration plan.
#[derive(Serialize, Deserialize)]
struct QueueState {
    max_size: u16,
    size: u16,
    ready: bool,
    desc_table: u64,
    avail_ring: u64,
    used_ring: u64,
}

// Serializable state of the whole PCI transport, produced by
// VirtioPciDevice::state() and consumed on restore in VirtioPciDevice::new().
#[derive(Serialize, Deserialize)]
pub struct VirtioPciDeviceState {
    device_activated: bool,
    queues: Vec<QueueState>,
    interrupt_status: usize,
    cap_pci_cfg_offset: usize,
    cap_pci_cfg: Vec<u8>,
}

// Everything needed to activate the virtio device outside the transport's
// own locks, either inline or deferred via `pending_activations`.
pub struct VirtioPciDeviceActivator {
    interrupt: Option<Arc<dyn VirtioInterrupt>>,
    memory: Option<GuestMemoryAtomic<GuestMemoryMmap>>,
    device: Arc<Mutex<dyn VirtioDevice>>,
    device_activated: Arc<AtomicBool>,
    queues: Option<Vec<(usize, Queue, EventFd)>>,
    barrier: Option<Arc<Barrier>>,
    id: String,
}

impl VirtioPciDeviceActivator {
    /// Hands memory, interrupt and queues to the device and marks it
    /// activated, then releases the optional barrier a waiting vCPU thread
    /// may be blocked on.
    ///
    /// NOTE: the `Option` fields are `take()`n here, so this must be called
    /// at most once per activator; a second call would panic on `unwrap()`.
    pub fn activate(&mut self) -> ActivateResult {
        self.device.lock().unwrap().activate(
            self.memory.take().unwrap(),
            self.interrupt.take().unwrap(),
            self.queues.take().unwrap(),
        )?;
        self.device_activated.store(true, Ordering::SeqCst);

        if let Some(barrier) = self.barrier.take() {
            info!("{}: Waiting for barrier", self.id);
            barrier.wait();
            info!("{}: Barrier released", self.id);
        }

        Ok(())
    }
}

#[derive(Error, Debug)]
pub enum VirtioPciDeviceError {
    #[error("Failed creating VirtioPciDevice: {0}")]
    CreateVirtioPciDevice(#[source] anyhow::Error),
}
pub type Result<T> = std::result::Result<T, VirtioPciDeviceError>;

// PCI transport wrapping a virtio device: owns the PCI config space, the
// virtio common config, MSI-X state, the virtqueues and their kick eventfds.
pub struct VirtioPciDevice {
    id: String,

    // PCI configuration registers.
    configuration: PciConfiguration,

    // virtio PCI common configuration
    common_config: VirtioPciCommonConfig,

    // MSI-X config
    msix_config: Option<Arc<Mutex<MsixConfig>>>,

    // Number of MSI-X vectors
    msix_num: u16,

    // Virtio device reference and status
    device: Arc<Mutex<dyn VirtioDevice>>,
    device_activated: Arc<AtomicBool>,

    // PCI interrupts.
    interrupt_status: Arc<AtomicUsize>,
    virtio_interrupt: Option<Arc<dyn VirtioInterrupt>>,
    interrupt_source_group: Arc<dyn InterruptSourceGroup>,

    // virtio queues
    queues: Vec<Queue>,
    queue_evts: Vec<EventFd>,

    // Guest memory
    memory: GuestMemoryAtomic<GuestMemoryMmap>,

    // Settings PCI BAR
    settings_bar: u8,

    // Whether to use 64-bit bar location or 32-bit
    use_64bit_bar: bool,

    // Add a dedicated structure to hold information about the very specific
    // virtio-pci capability VIRTIO_PCI_CAP_PCI_CFG. This is needed to support
    // the legacy/backward compatible mechanism of letting the guest access the
    // other virtio capabilities without mapping the PCI BARs. This can be
    // needed when the guest tries to early access the virtio configuration of
    // a device.
    cap_pci_cfg_info: VirtioPciCfgCapInfo,

    // Details of bar regions to free
    bar_regions: Vec<PciBarConfiguration>,

    // EventFd to signal on to request activation
    activate_evt: EventFd,

    // Optional DMA handler
    dma_handler: Option<Arc<dyn ExternalDmaMapping>>,

    // Pending activations
    pending_activations: Arc<Mutex<Vec<VirtioPciDeviceActivator>>>,
}

impl VirtioPciDevice {
    /// Constructs a new PCI transport for the given virtio device.
    #[allow(clippy::too_many_arguments)]
    pub fn new(
        id: String,
        memory: GuestMemoryAtomic<GuestMemoryMmap>,
        device: Arc<Mutex<dyn VirtioDevice>>,
        msix_num: u16,
        access_platform: Option<Arc<dyn AccessPlatform>>,
        interrupt_manager: &Arc<dyn InterruptManager<GroupConfig = MsiIrqGroupConfig>>,
        pci_device_bdf: u32,
        activate_evt: EventFd,
        use_64bit_bar: bool,
        dma_handler: Option<Arc<dyn ExternalDmaMapping>>,
        pending_activations: Arc<Mutex<Vec<VirtioPciDeviceActivator>>>,
        snapshot: Option<Snapshot>,
    ) -> Result<Self> {
        let mut locked_device = device.lock().unwrap();

        // One non-blocking eventfd per virtqueue; these are what the guest
        // kicks (see ioeventfds() mapping them to the notification region).
        let mut queue_evts = Vec::new();
        for _ in locked_device.queue_max_sizes().iter() {
            queue_evts.push(EventFd::new(EFD_NONBLOCK).map_err(|e| {
                VirtioPciDeviceError::CreateVirtioPciDevice(anyhow!(
                    "Failed creating eventfd: {}",
                    e
                ))
            })?)
        }
        let num_queues = locked_device.queue_max_sizes().len();

        if let Some(access_platform) = &access_platform {
            locked_device.set_access_platform(access_platform.clone());
        }

        // Fresh queues sized to the device's maximums; overwritten below
        // when restoring from a snapshot.
        let mut queues: Vec<Queue> = locked_device
            .queue_max_sizes()
            .iter()
            .map(|&s| Queue::new(s).unwrap())
            .collect();

        // Modern virtio-pci device ID = base + virtio device type.
        let pci_device_id = VIRTIO_PCI_DEVICE_ID_BASE + locked_device.device_type() as u16;

        let interrupt_source_group = interrupt_manager
            .create_group(MsiIrqGroupConfig {
                base: 0,
                count: msix_num as InterruptIndex,
            })
            .map_err(|e| {
                VirtioPciDeviceError::CreateVirtioPciDevice(anyhow!(
                    "Failed creating MSI interrupt group: {}",
                    e
                ))
            })?;

        // Fetch any saved MSI-X state from the snapshot (cold boot otherwise).
        let msix_state = vm_migration::state_from_id(snapshot.as_ref(), pci::MSIX_CONFIG_ID)
            .map_err(|e| {
                VirtioPciDeviceError::CreateVirtioPciDevice(anyhow!(
                    "Failed to get MsixConfigState from Snapshot: {}",
                    e
                ))
            })?;

        let (msix_config, msix_config_clone) = if msix_num > 0 {
            let msix_config = Arc::new(Mutex::new(
                MsixConfig::new(
                    msix_num,
                    interrupt_source_group.clone(),
                    pci_device_bdf,
                    msix_state,
                )
                .unwrap(),
            ));
            let msix_config_clone = msix_config.clone();
            (Some(msix_config), Some(msix_config_clone))
        } else {
            (None, None)
        };

        // Pick PCI class/subclass from the virtio device type; anything not
        // explicitly mapped falls back to Other/NonTransitionalBase.
        let (class, subclass) = match VirtioDeviceType::from(locked_device.device_type()) {
            VirtioDeviceType::Net => (
                PciClassCode::NetworkController,
                &PciNetworkControllerSubclass::EthernetController as &dyn PciSubclass,
            ),
            VirtioDeviceType::Block => (
                PciClassCode::MassStorage,
                &PciMassStorageSubclass::MassStorage as &dyn PciSubclass,
            ),
            _ => (
                PciClassCode::Other,
                &PciVirtioSubclass::NonTransitionalBase as &dyn PciSubclass,
            ),
        };

        let pci_configuration_state =
            vm_migration::state_from_id(snapshot.as_ref(), pci::PCI_CONFIGURATION_ID).map_err(
                |e| {
                    VirtioPciDeviceError::CreateVirtioPciDevice(anyhow!(
                        "Failed to get PciConfigurationState from Snapshot: {}",
                        e
                    ))
                },
            )?;

        let configuration = PciConfiguration::new(
            VIRTIO_PCI_VENDOR_ID,
            pci_device_id,
            0x1, // For modern virtio-PCI devices
            class,
            subclass,
            None,
            PciHeaderType::Device,
            VIRTIO_PCI_VENDOR_ID,
            pci_device_id,
            msix_config_clone,
            pci_configuration_state,
        );

        let common_config_state =
            vm_migration::state_from_id(snapshot.as_ref(), VIRTIO_PCI_COMMON_CONFIG_ID).map_err(
                |e| {
                    VirtioPciDeviceError::CreateVirtioPciDevice(anyhow!(
                        "Failed to get VirtioPciCommonConfigState from Snapshot: {}",
                        e
                    ))
                },
            )?;

        // Restore the common config if snapshotted, otherwise start with a
        // pristine one (all MSI-X vectors disabled via VIRTQ_MSI_NO_VECTOR).
        let common_config = if let Some(common_config_state) = common_config_state {
            VirtioPciCommonConfig::new(common_config_state, access_platform)
        } else {
            VirtioPciCommonConfig::new(
                VirtioPciCommonConfigState {
                    driver_status: 0,
                    config_generation: 0,
                    device_feature_select: 0,
                    driver_feature_select: 0,
                    queue_select: 0,
                    msix_config: VIRTQ_MSI_NO_VECTOR,
                    msix_queues: vec![VIRTQ_MSI_NO_VECTOR; num_queues],
                },
                access_platform,
            )
        };

        let state: Option<VirtioPciDeviceState> = snapshot
            .as_ref()
            .map(|s| s.to_state())
            .transpose()
            .map_err(|e| {
                VirtioPciDeviceError::CreateVirtioPciDevice(anyhow!(
                    "Failed to get VirtioPciDeviceState from Snapshot: {}",
                    e
                ))
            })?;

        let (device_activated, interrupt_status, cap_pci_cfg_info) = if let Some(state) = state {
            // Update virtqueues indexes for both available and used rings.
            // NOTE(review): indexing state.queues[i] assumes the snapshot
            // holds exactly one entry per device queue; a shorter vector
            // would panic here — TODO confirm the snapshot format guarantees
            // this.
            for (i, queue) in queues.iter_mut().enumerate() {
                queue.set_size(state.queues[i].size);
                queue.set_ready(state.queues[i].ready);
                queue
                    .try_set_desc_table_address(GuestAddress(state.queues[i].desc_table))
                    .unwrap();
                queue
                    .try_set_avail_ring_address(GuestAddress(state.queues[i].avail_ring))
                    .unwrap();
                queue
                    .try_set_used_ring_address(GuestAddress(state.queues[i].used_ring))
                    .unwrap();
                // Both next_avail and next_used are re-seeded from the used
                // index read back from guest memory — NOTE(review): this
                // presumes no descriptors were in flight when the snapshot
                // was taken; confirm against the snapshot/pause sequence.
                queue.set_next_avail(
                    queue
                        .used_idx(memory.memory().deref(), Ordering::Acquire)
                        .unwrap()
                        .0,
                );
                queue.set_next_used(
                    queue
                        .used_idx(memory.memory().deref(), Ordering::Acquire)
                        .unwrap()
                        .0,
                );
            }

            (
                state.device_activated,
                state.interrupt_status,
                VirtioPciCfgCapInfo {
                    offset: state.cap_pci_cfg_offset,
                    cap: *VirtioPciCfgCap::from_slice(&state.cap_pci_cfg).unwrap(),
                },
            )
        } else {
            (false, 0, VirtioPciCfgCapInfo::default())
        };

        // Dropping the MutexGuard to unlock the VirtioDevice. This is required
        // in the context of a restore given the device might require some
        // activation, meaning it will require locking. Dropping the lock
        // prevents from a subtle deadlock.
        std::mem::drop(locked_device);

        let mut virtio_pci_device = VirtioPciDevice {
            id,
            configuration,
            common_config,
            msix_config,
            msix_num,
            device,
            device_activated: Arc::new(AtomicBool::new(device_activated)),
            interrupt_status: Arc::new(AtomicUsize::new(interrupt_status)),
            virtio_interrupt: None,
            queues,
            queue_evts,
            memory,
            settings_bar: 0,
            use_64bit_bar,
            interrupt_source_group,
            cap_pci_cfg_info,
            bar_regions: vec![],
            activate_evt,
            dma_handler,
            pending_activations,
        };

        // Wire the MSI-X backed interrupt delivery path, sharing the vector
        // registers owned by the common config.
        if let Some(msix_config) = &virtio_pci_device.msix_config {
            virtio_pci_device.virtio_interrupt = Some(Arc::new(VirtioInterruptMsix::new(
                msix_config.clone(),
                virtio_pci_device.common_config.msix_config.clone(),
                virtio_pci_device.common_config.msix_queues.clone(),
                virtio_pci_device.interrupt_source_group.clone(),
            )));
        }

        // In case of a restore, we can activate the device, as we know at
        // this point the virtqueues are in the right state and the device is
        // ready to be activated, which will spawn each virtio worker thread.
        if virtio_pci_device.device_activated.load(Ordering::SeqCst)
            && virtio_pci_device.is_driver_ready()
        {
            virtio_pci_device.activate().map_err(|e| {
                VirtioPciDeviceError::CreateVirtioPciDevice(anyhow!(
                    "Failed activating the device: {}",
                    e
                ))
            })?;
        }

        Ok(virtio_pci_device)
    }

    /// Captures the transport state (activation flag, ISR status, per-queue
    /// ring state and the cfg capability bytes) for snapshotting.
    fn state(&self) -> VirtioPciDeviceState {
        VirtioPciDeviceState {
            device_activated: self.device_activated.load(Ordering::Acquire),
            interrupt_status: self.interrupt_status.load(Ordering::Acquire),
            queues: self
                .queues
                .iter()
                .map(|q| QueueState {
                    max_size: q.max_size(),
                    size: q.size(),
                    ready: q.ready(),
                    desc_table: q.desc_table(),
                    avail_ring: q.avail_ring(),
                    used_ring: q.used_ring(),
                })
                .collect(),
            cap_pci_cfg_offset: self.cap_pci_cfg_info.offset,
            cap_pci_cfg: self.cap_pci_cfg_info.cap.bytes().to_vec(),
        }
    }

    /// Gets the list of queue events that must be triggered whenever the VM writes to
    /// `virtio::NOTIFY_REG_OFFSET` past the MMIO base. Each event must be triggered when the
    /// value being written equals the index of the event in this list.
    fn queue_evts(&self) -> &[EventFd] {
        self.queue_evts.as_slice()
    }

    /// True once the driver has completed feature negotiation and set
    /// DRIVER_OK without failing.
    fn is_driver_ready(&self) -> bool {
        let ready_bits =
            (DEVICE_ACKNOWLEDGE | DEVICE_DRIVER | DEVICE_DRIVER_OK | DEVICE_FEATURES_OK) as u8;
        // NOTE(review): if driver_status == ready_bits, the DEVICE_FAILED
        // check below is redundant unless DEVICE_FAILED overlaps the ready
        // bits — kept as a defensive double-check.
        self.common_config.driver_status == ready_bits
            && self.common_config.driver_status & DEVICE_FAILED as u8 == 0
    }

    /// Determines if the driver has requested the device (re)init / reset itself
    fn is_driver_init(&self) -> bool {
        self.common_config.driver_status == DEVICE_INIT as u8
    }

    /// Current guest address of the settings (capability) BAR.
    pub fn config_bar_addr(&self) -> u64 {
        self.configuration.get_bar_addr(self.settings_bar as usize)
    }

    /// Publishes all the virtio capabilities (common/ISR/device/notify/cfg,
    /// plus MSI-X when enabled) into PCI config space, all pointing into
    /// `settings_bar`, and records the cfg capability offset for the
    /// VIRTIO_PCI_CAP_PCI_CFG access window.
    fn add_pci_capabilities(
        &mut self,
        settings_bar: u8,
    ) -> std::result::Result<(), PciDeviceError> {
        // Add pointers to the different configuration structures from the PCI capabilities.
        let common_cap = VirtioPciCap::new(
            PciCapabilityType::Common,
            settings_bar,
            COMMON_CONFIG_BAR_OFFSET as u32,
            COMMON_CONFIG_SIZE as u32,
        );
        self.configuration
            .add_capability(&common_cap)
            .map_err(PciDeviceError::CapabilitiesSetup)?;

        let isr_cap = VirtioPciCap::new(
            PciCapabilityType::Isr,
            settings_bar,
            ISR_CONFIG_BAR_OFFSET as u32,
            ISR_CONFIG_SIZE as u32,
        );
        self.configuration
            .add_capability(&isr_cap)
            .map_err(PciDeviceError::CapabilitiesSetup)?;

        // TODO(dgreid) - set based on device's configuration size?
        let device_cap = VirtioPciCap::new(
            PciCapabilityType::Device,
            settings_bar,
            DEVICE_CONFIG_BAR_OFFSET as u32,
            DEVICE_CONFIG_SIZE as u32,
        );
        self.configuration
            .add_capability(&device_cap)
            .map_err(PciDeviceError::CapabilitiesSetup)?;

        let notify_cap = VirtioPciNotifyCap::new(
            PciCapabilityType::Notify,
            settings_bar,
            NOTIFICATION_BAR_OFFSET as u32,
            NOTIFICATION_SIZE as u32,
            Le32::from(NOTIFY_OFF_MULTIPLIER),
        );
        self.configuration
            .add_capability(&notify_cap)
            .map_err(PciDeviceError::CapabilitiesSetup)?;

        // Remember where the cfg capability body starts in config space
        // (skipping the 2 header bytes, see VIRTIO_PCI_CAP_OFFSET), so
        // read/write_config_register() can route accesses to it.
        let configuration_cap = VirtioPciCfgCap::new();
        self.cap_pci_cfg_info.offset = self
            .configuration
            .add_capability(&configuration_cap)
            .map_err(PciDeviceError::CapabilitiesSetup)?
            + VIRTIO_PCI_CAP_OFFSET;
        self.cap_pci_cfg_info.cap = configuration_cap;

        // MSI-X table and PBA both live in the same settings BAR.
        if self.msix_config.is_some() {
            let msix_cap = MsixCap::new(
                settings_bar,
                self.msix_num,
                MSIX_TABLE_BAR_OFFSET as u32,
                settings_bar,
                MSIX_PBA_BAR_OFFSET as u32,
            );
            self.configuration
                .add_capability(&msix_cap)
                .map_err(PciDeviceError::CapabilitiesSetup)?;
        }

        self.settings_bar = settings_bar;
        Ok(())
    }

    /// Handles a guest read through the VIRTIO_PCI_CAP_PCI_CFG window:
    /// reads within the capability header come from the cached capability
    /// bytes; reads at the data field are forwarded to the BAR at the
    /// offset programmed in the capability.
    fn read_cap_pci_cfg(&mut self, offset: usize, mut data: &mut [u8]) {
        let cap_slice = self.cap_pci_cfg_info.cap.as_slice();
        let data_len = data.len();
        let cap_len = cap_slice.len();
        if offset + data_len > cap_len {
            error!("Failed to read cap_pci_cfg from config space");
            return;
        }

        if offset < std::mem::size_of::<VirtioPciCap>() {
            if let Some(end) = offset.checked_add(data_len) {
                // This write can't fail, offset and end are checked against cap_len above.
                data.write_all(&cap_slice[offset..cmp::min(end, cap_len)])
                    .unwrap();
            }
        } else {
            let bar_offset: u32 =
                // SAFETY: we know self.cap_pci_cfg_info.cap.cap.offset is 32bits long.
                // NOTE(review): the transmute reinterprets the Le32's raw
                // little-endian bytes as a native u32, which is only the
                // programmed offset on little-endian hosts — confirm no
                // big-endian target is supported.
                unsafe { std::mem::transmute(self.cap_pci_cfg_info.cap.cap.offset) };
            self.read_bar(0, bar_offset as u64, data)
        }
    }

    /// Write counterpart of read_cap_pci_cfg(): header writes update the
    /// cached capability (e.g. programming bar/offset/length), data-field
    /// writes are forwarded to the BAR.
    fn write_cap_pci_cfg(&mut self, offset: usize, data: &[u8]) -> Option<Arc<Barrier>> {
        let cap_slice = self.cap_pci_cfg_info.cap.as_mut_slice();
        let data_len = data.len();
        let cap_len = cap_slice.len();
        if offset + data_len > cap_len {
            error!("Failed to write cap_pci_cfg to config space");
            return None;
        }

        if offset < std::mem::size_of::<VirtioPciCap>() {
            let (_, right) = cap_slice.split_at_mut(offset);
            right[..data_len].copy_from_slice(data);
            None
        } else {
            let bar_offset: u32 =
                // SAFETY: we know self.cap_pci_cfg_info.cap.cap.offset is 32bits long.
                // NOTE(review): same little-endian-host assumption as in
                // read_cap_pci_cfg() above.
                unsafe { std::mem::transmute(self.cap_pci_cfg_info.cap.cap.offset) };
            self.write_bar(0, bar_offset as u64, data)
        }
    }

    pub fn virtio_device(&self) -> Arc<Mutex<dyn VirtioDevice>> {
        self.device.clone()
    }

    /// Packages the ready queues (cloned, with cloned kick eventfds), guest
    /// memory and the interrupt object into an activator that can run the
    /// activation now or later from another thread.
    fn prepare_activator(&mut self, barrier: Option<Arc<Barrier>>) -> VirtioPciDeviceActivator {
        let mut queues = Vec::new();

        for (queue_index, queue) in self.queues.iter().enumerate() {
            if !queue.ready() {
                continue;
            }

            // NOTE(review): an invalid queue is only logged and is still
            // handed to the device below — confirm this is intentional.
            if !queue.is_valid(self.memory.memory().deref()) {
                error!("Queue {} is not valid", queue_index);
            }

            queues.push((
                queue_index,
                vm_virtio::clone_queue(queue),
                self.queue_evts[queue_index].try_clone().unwrap(),
            ));
        }

        VirtioPciDeviceActivator {
            // The interrupt object is moved out; it is handed back to the
            // device through VirtioDevice::activate().
            interrupt: self.virtio_interrupt.take(),
            memory: Some(self.memory.clone()),
            device: self.device.clone(),
            queues: Some(queues),
            device_activated: self.device_activated.clone(),
            barrier,
            id: self.id.clone(),
        }
    }

    /// Synchronous activation (no barrier to release).
    fn activate(&mut self) -> ActivateResult {
        self.prepare_activator(None).activate()
    }

    /// True when the driver is ready but the device has not been activated yet.
    fn needs_activation(&self) -> bool {
        !self.device_activated.load(Ordering::SeqCst) && self.is_driver_ready()
    }
    pub fn dma_handler(&self) -> Option<&Arc<dyn ExternalDmaMapping>> {
        self.dma_handler.as_ref()
    }
}

impl VirtioTransport for VirtioPciDevice {
    /// Maps each queue's kick eventfd to its doorbell address: the
    /// notification region base plus queue index times the notify
    /// multiplier. Used to register ioeventfds with the hypervisor.
    fn ioeventfds(&self, base_addr: u64) -> Vec<(&EventFd, u64)> {
        let notify_base = base_addr + NOTIFICATION_BAR_OFFSET;
        self.queue_evts()
            .iter()
            .enumerate()
            .map(|(i, event)| {
                (
                    event,
                    notify_base + i as u64 * u64::from(NOTIFY_OFF_MULTIPLIER),
                )
            })
            .collect()
    }
}

// MSI-X implementation of VirtioInterrupt: resolves the vector for a
// config-change or queue interrupt and injects it through the interrupt
// source group, honoring per-vector and function-wide masking.
pub struct VirtioInterruptMsix {
    msix_config: Arc<Mutex<MsixConfig>>,
    // Vector selected for config-change interrupts (shared with the common
    // config registers written by the guest).
    config_vector: Arc<AtomicU16>,
    // Per-queue vectors, indexed by queue number.
    queues_vectors: Arc<Mutex<Vec<u16>>>,
    interrupt_source_group: Arc<dyn InterruptSourceGroup>,
}

impl VirtioInterruptMsix {
    pub fn new(
        msix_config: Arc<Mutex<MsixConfig>>,
        config_vector: Arc<AtomicU16>,
        queues_vectors: Arc<Mutex<Vec<u16>>>,
        interrupt_source_group: Arc<dyn InterruptSourceGroup>,
    ) -> Self {
        VirtioInterruptMsix {
            msix_config,
            config_vector,
            queues_vectors,
            interrupt_source_group,
        }
    }
}

impl VirtioInterrupt for VirtioInterruptMsix {
    fn trigger(&self, int_type: VirtioInterruptType) -> std::result::Result<(), std::io::Error> {
        // Resolve which MSI-X vector this interrupt type is routed to.
        let vector = match int_type {
            VirtioInterruptType::Config => self.config_vector.load(Ordering::Acquire),
            VirtioInterruptType::Queue(queue_index) => {
                self.queues_vectors.lock().unwrap()[queue_index as usize]
            }
        };

        // The driver disabled this interrupt; silently drop it.
        if vector == VIRTQ_MSI_NO_VECTOR {
            return Ok(());
        }

        let config = &mut self.msix_config.lock().unwrap();
        let entry = &config.table_entries[vector as usize];
        // In case the vector control register associated with the entry
        // has its first bit set, this means the vector is masked and the
        // device should not inject the interrupt.
        // Instead, the Pending Bit Array table is updated to reflect there
        // is a pending interrupt for this specific vector.
        // NOTE(review): the second argument to set_pba_bit() presumably
        // selects set (false) vs. reset — verify against MsixConfig's API.
        if config.masked() || entry.masked() {
            config.set_pba_bit(vector, false);
            return Ok(());
        }

        self.interrupt_source_group
            .trigger(vector as InterruptIndex)
    }

    /// Returns the eventfd backing the vector for `int_type`, if any, so
    /// callers can bypass trigger() (e.g. for irqfd-style delivery).
    fn notifier(&self, int_type: VirtioInterruptType) -> Option<EventFd> {
        let vector = match int_type {
            VirtioInterruptType::Config => self.config_vector.load(Ordering::Acquire),
            VirtioInterruptType::Queue(queue_index) => {
                self.queues_vectors.lock().unwrap()[queue_index as usize]
            }
        };

        self.interrupt_source_group
            .notifier(vector as InterruptIndex)
    }
}

impl PciDevice for VirtioPciDevice {
    fn write_config_register(
        &mut self,
        reg_idx: usize,
        offset: u64,
        data: &[u8],
    ) -> Option<Arc<Barrier>> {
        // Handle the special case where the capability VIRTIO_PCI_CAP_PCI_CFG
        // is accessed. This capability has a special meaning as it allows the
        // guest to access other capabilities without mapping the PCI BAR.
        // NOTE(review): until add_pci_capabilities() runs,
        // cap_pci_cfg_info.offset is 0 and this window test could match
        // ordinary low config-space writes — assumes capabilities are set up
        // before the guest accesses config space; confirm.
        let base = reg_idx * 4;
        if base + offset as usize >= self.cap_pci_cfg_info.offset
            && base + offset as usize + data.len()
                <= self.cap_pci_cfg_info.offset + self.cap_pci_cfg_info.cap.bytes().len()
        {
            let offset = base + offset as usize - self.cap_pci_cfg_info.offset;
            self.write_cap_pci_cfg(offset, data)
        } else {
            self.configuration
                .write_config_register(reg_idx, offset, data);
            None
        }
    }

    fn read_config_register(&mut self, reg_idx: usize) -> u32 {
        // Handle the special case where the capability VIRTIO_PCI_CAP_PCI_CFG
        // is accessed. This capability has a special meaning as it allows the
        // guest to access other capabilities without mapping the PCI BAR.
        let base = reg_idx * 4;
        if base >= self.cap_pci_cfg_info.offset
            && base + 4 <= self.cap_pci_cfg_info.offset + self.cap_pci_cfg_info.cap.bytes().len()
        {
            let offset = base - self.cap_pci_cfg_info.offset;
            let mut data = [0u8; 4];
            self.read_cap_pci_cfg(offset, &mut data);
            // Config space registers are little-endian.
            u32::from_le_bytes(data)
        } else {
            self.configuration.read_reg(reg_idx)
        }
    }

    fn detect_bar_reprogramming(
        &mut self,
        reg_idx: usize,
        data: &[u8],
    ) -> Option<BarReprogrammingParams> {
        self.configuration.detect_bar_reprogramming(reg_idx, data)
    }

    /// Allocates the capability BAR (and the optional shared-memory BAR).
    /// On restore (`resources` provided) the previously assigned addresses
    /// are reused and the config-space BAR/capability registration is
    /// skipped, since the restored config space already contains them.
    fn allocate_bars(
        &mut self,
        _allocator: &Arc<Mutex<SystemAllocator>>,
        mmio32_allocator: &mut AddressAllocator,
        mmio64_allocator: &mut AddressAllocator,
        resources: Option<Vec<Resource>>,
    ) -> std::result::Result<Vec<PciBarConfiguration>, PciDeviceError> {
        let mut bars = Vec::new();
        let device_clone = self.device.clone();
        let device = device_clone.lock().unwrap();

        // When restoring, recover the previous BAR address and width from
        // the saved resources.
        let mut settings_bar_addr = None;
        let mut use_64bit_bar = self.use_64bit_bar;
        let restoring = resources.is_some();
        if let Some(resources) = resources {
            for resource in resources {
                if let Resource::PciBar {
                    index, base, type_, ..
                } = resource
                {
                    if index == VIRTIO_COMMON_BAR_INDEX {
                        settings_bar_addr = Some(GuestAddress(base));
                        use_64bit_bar = match type_ {
                            PciBarType::Io => {
                                return Err(PciDeviceError::InvalidResource(resource))
                            }
                            PciBarType::Mmio32 => false,
                            PciBarType::Mmio64 => true,
                        };
                        break;
                    }
                }
            }
            // Error out if no resource was matching the BAR id.
            if settings_bar_addr.is_none() {
                return Err(PciDeviceError::MissingResource);
            }
        }

        // Allocate the virtio-pci capability BAR.
        // See http://docs.oasis-open.org/virtio/virtio/v1.0/cs04/virtio-v1.0-cs04.html#x1-740004
        let (virtio_pci_bar_addr, region_type) = if use_64bit_bar {
            let region_type = PciBarRegionType::Memory64BitRegion;
            let addr = mmio64_allocator
                .allocate(
                    settings_bar_addr,
                    CAPABILITY_BAR_SIZE,
                    Some(CAPABILITY_BAR_SIZE),
                )
                .ok_or(PciDeviceError::IoAllocationFailed(CAPABILITY_BAR_SIZE))?;
            (addr, region_type)
        } else {
            let region_type = PciBarRegionType::Memory32BitRegion;
            let addr = mmio32_allocator
                .allocate(
                    settings_bar_addr,
                    CAPABILITY_BAR_SIZE,
                    Some(CAPABILITY_BAR_SIZE),
                )
                .ok_or(PciDeviceError::IoAllocationFailed(CAPABILITY_BAR_SIZE))?;
            (addr, region_type)
        };

        let bar = PciBarConfiguration::default()
            .set_index(VIRTIO_COMMON_BAR_INDEX)
            .set_address(virtio_pci_bar_addr.raw_value())
            .set_size(CAPABILITY_BAR_SIZE)
            .set_region_type(region_type);

        // The creation of the PCI BAR and its associated capabilities must
        // happen only during the creation of a brand new VM. When a VM is
        // restored from a known state, the BARs are already created with the
        // right content, therefore we don't need to go through this codepath.
        if !restoring {
            self.configuration.add_pci_bar(&bar).map_err(|e| {
                PciDeviceError::IoRegistrationFailed(virtio_pci_bar_addr.raw_value(), e)
            })?;

            // Once the BARs are allocated, the capabilities can be added to the PCI configuration.
            self.add_pci_capabilities(VIRTIO_COMMON_BAR_INDEX as u8)?;
        }

        bars.push(bar);

        // Allocate a dedicated BAR if there are some shared memory regions.
        if let Some(shm_list) = device.get_shm_regions() {
            let bar = PciBarConfiguration::default()
                .set_index(VIRTIO_SHM_BAR_INDEX)
                .set_address(shm_list.addr.raw_value())
                .set_size(shm_list.len);

            // The creation of the PCI BAR and its associated capabilities must
            // happen only during the creation of a brand new VM. When a VM is
            // restored from a known state, the BARs are already created with the
            // right content, therefore we don't need to go through this codepath.
            if !restoring {
                self.configuration.add_pci_bar(&bar).map_err(|e| {
                    PciDeviceError::IoRegistrationFailed(shm_list.addr.raw_value(), e)
                })?;

                // One SharedMemory capability per region, distinguished by id.
                for (idx, shm) in shm_list.region_list.iter().enumerate() {
                    let shm_cap = VirtioPciCap64::new(
                        PciCapabilityType::SharedMemory,
                        VIRTIO_SHM_BAR_INDEX as u8,
                        idx as u8,
                        shm.offset,
                        shm.len,
                    );
                    self.configuration
                        .add_capability(&shm_cap)
                        .map_err(PciDeviceError::CapabilitiesSetup)?;
                }
            }

            bars.push(bar);
        }

        // Remember the regions so free_bars()/move_bar() can manage them later.
        self.bar_regions.clone_from(&bars);

        Ok(bars)
    }

    /// Returns every allocated BAR region to the proper MMIO allocator.
    fn free_bars(
        &mut self,
        _allocator: &mut SystemAllocator,
        mmio32_allocator: &mut AddressAllocator,
        mmio64_allocator: &mut AddressAllocator,
    ) -> std::result::Result<(), PciDeviceError> {
        for bar in self.bar_regions.drain(..) {
            match bar.region_type() {
                PciBarRegionType::Memory32BitRegion => {
                    mmio32_allocator.free(GuestAddress(bar.addr()), bar.size());
                }
                PciBarRegionType::Memory64BitRegion => {
                    mmio64_allocator.free(GuestAddress(bar.addr()), bar.size());
                }
                _ => error!("Unexpected PCI bar type"),
            }
        }
        Ok(())
    }

    fn move_bar(
        &mut self,
        old_base: u64,
        new_base: u64,
    ) -> std::result::Result<(), std::io::Error> {
        // We only update our idea of the bar in order to support free_bars() above.
1110 // The majority of the reallocation is done inside DeviceManager. 1111 for bar in self.bar_regions.iter_mut() { 1112 if bar.addr() == old_base { 1113 *bar = bar.set_address(new_base); 1114 } 1115 } 1116 1117 Ok(()) 1118 } 1119 1120 fn read_bar(&mut self, _base: u64, offset: u64, data: &mut [u8]) { 1121 match offset { 1122 o if o < COMMON_CONFIG_BAR_OFFSET + COMMON_CONFIG_SIZE => self.common_config.read( 1123 o - COMMON_CONFIG_BAR_OFFSET, 1124 data, 1125 &self.queues, 1126 self.device.clone(), 1127 ), 1128 o if (ISR_CONFIG_BAR_OFFSET..ISR_CONFIG_BAR_OFFSET + ISR_CONFIG_SIZE).contains(&o) => { 1129 if let Some(v) = data.get_mut(0) { 1130 // Reading this register resets it to 0. 1131 *v = self.interrupt_status.swap(0, Ordering::AcqRel) as u8; 1132 } 1133 } 1134 o if (DEVICE_CONFIG_BAR_OFFSET..DEVICE_CONFIG_BAR_OFFSET + DEVICE_CONFIG_SIZE) 1135 .contains(&o) => 1136 { 1137 let device = self.device.lock().unwrap(); 1138 device.read_config(o - DEVICE_CONFIG_BAR_OFFSET, data); 1139 } 1140 o if (NOTIFICATION_BAR_OFFSET..NOTIFICATION_BAR_OFFSET + NOTIFICATION_SIZE) 1141 .contains(&o) => 1142 { 1143 // Handled with ioeventfds. 
1144 } 1145 o if (MSIX_TABLE_BAR_OFFSET..MSIX_TABLE_BAR_OFFSET + MSIX_TABLE_SIZE).contains(&o) => { 1146 if let Some(msix_config) = &self.msix_config { 1147 msix_config 1148 .lock() 1149 .unwrap() 1150 .read_table(o - MSIX_TABLE_BAR_OFFSET, data); 1151 } 1152 } 1153 o if (MSIX_PBA_BAR_OFFSET..MSIX_PBA_BAR_OFFSET + MSIX_PBA_SIZE).contains(&o) => { 1154 if let Some(msix_config) = &self.msix_config { 1155 msix_config 1156 .lock() 1157 .unwrap() 1158 .read_pba(o - MSIX_PBA_BAR_OFFSET, data); 1159 } 1160 } 1161 _ => (), 1162 } 1163 } 1164 1165 fn write_bar(&mut self, _base: u64, offset: u64, data: &[u8]) -> Option<Arc<Barrier>> { 1166 match offset { 1167 o if o < COMMON_CONFIG_BAR_OFFSET + COMMON_CONFIG_SIZE => self.common_config.write( 1168 o - COMMON_CONFIG_BAR_OFFSET, 1169 data, 1170 &mut self.queues, 1171 self.device.clone(), 1172 ), 1173 o if (ISR_CONFIG_BAR_OFFSET..ISR_CONFIG_BAR_OFFSET + ISR_CONFIG_SIZE).contains(&o) => { 1174 if let Some(v) = data.first() { 1175 self.interrupt_status 1176 .fetch_and(!(*v as usize), Ordering::AcqRel); 1177 } 1178 } 1179 o if (DEVICE_CONFIG_BAR_OFFSET..DEVICE_CONFIG_BAR_OFFSET + DEVICE_CONFIG_SIZE) 1180 .contains(&o) => 1181 { 1182 let mut device = self.device.lock().unwrap(); 1183 device.write_config(o - DEVICE_CONFIG_BAR_OFFSET, data); 1184 } 1185 o if (NOTIFICATION_BAR_OFFSET..NOTIFICATION_BAR_OFFSET + NOTIFICATION_SIZE) 1186 .contains(&o) => 1187 { 1188 #[cfg(feature = "sev_snp")] 1189 for (_event, _addr) in self.ioeventfds(_base) { 1190 if _addr == _base + offset { 1191 _event.write(1).unwrap(); 1192 } 1193 } 1194 // Handled with ioeventfds. 
1195 #[cfg(not(feature = "sev_snp"))] 1196 error!("Unexpected write to notification BAR: offset = 0x{:x}", o); 1197 } 1198 o if (MSIX_TABLE_BAR_OFFSET..MSIX_TABLE_BAR_OFFSET + MSIX_TABLE_SIZE).contains(&o) => { 1199 if let Some(msix_config) = &self.msix_config { 1200 msix_config 1201 .lock() 1202 .unwrap() 1203 .write_table(o - MSIX_TABLE_BAR_OFFSET, data); 1204 } 1205 } 1206 o if (MSIX_PBA_BAR_OFFSET..MSIX_PBA_BAR_OFFSET + MSIX_PBA_SIZE).contains(&o) => { 1207 if let Some(msix_config) = &self.msix_config { 1208 msix_config 1209 .lock() 1210 .unwrap() 1211 .write_pba(o - MSIX_PBA_BAR_OFFSET, data); 1212 } 1213 } 1214 _ => (), 1215 }; 1216 1217 // Try and activate the device if the driver status has changed 1218 if self.needs_activation() { 1219 let barrier = Arc::new(Barrier::new(2)); 1220 let activator = self.prepare_activator(Some(barrier.clone())); 1221 self.pending_activations.lock().unwrap().push(activator); 1222 info!( 1223 "{}: Needs activation; writing to activate event fd", 1224 self.id 1225 ); 1226 self.activate_evt.write(1).ok(); 1227 info!("{}: Needs activation; returning barrier", self.id); 1228 return Some(barrier); 1229 } 1230 1231 // Device has been reset by the driver 1232 if self.device_activated.load(Ordering::SeqCst) && self.is_driver_init() { 1233 let mut device = self.device.lock().unwrap(); 1234 if let Some(virtio_interrupt) = device.reset() { 1235 // Upon reset the device returns its interrupt EventFD 1236 self.virtio_interrupt = Some(virtio_interrupt); 1237 self.device_activated.store(false, Ordering::SeqCst); 1238 1239 // Reset queue readiness (changes queue_enable), queue sizes 1240 // and selected_queue as per spec for reset 1241 self.queues.iter_mut().for_each(Queue::reset); 1242 self.common_config.queue_select = 0; 1243 } else { 1244 error!("Attempt to reset device when not implemented in underlying device"); 1245 self.common_config.driver_status = crate::DEVICE_FAILED as u8; 1246 } 1247 } 1248 1249 None 1250 } 1251 1252 fn as_any(&mut 
self) -> &mut dyn Any { 1253 self 1254 } 1255 1256 fn id(&self) -> Option<String> { 1257 Some(self.id.clone()) 1258 } 1259 } 1260 1261 impl BusDevice for VirtioPciDevice { 1262 fn read(&mut self, base: u64, offset: u64, data: &mut [u8]) { 1263 self.read_bar(base, offset, data) 1264 } 1265 1266 fn write(&mut self, base: u64, offset: u64, data: &[u8]) -> Option<Arc<Barrier>> { 1267 self.write_bar(base, offset, data) 1268 } 1269 } 1270 1271 impl Pausable for VirtioPciDevice { 1272 fn pause(&mut self) -> std::result::Result<(), MigratableError> { 1273 Ok(()) 1274 } 1275 1276 fn resume(&mut self) -> std::result::Result<(), MigratableError> { 1277 Ok(()) 1278 } 1279 } 1280 1281 impl Snapshottable for VirtioPciDevice { 1282 fn id(&self) -> String { 1283 self.id.clone() 1284 } 1285 1286 fn snapshot(&mut self) -> std::result::Result<Snapshot, MigratableError> { 1287 let mut virtio_pci_dev_snapshot = Snapshot::new_from_state(&self.state())?; 1288 1289 // Snapshot PciConfiguration 1290 virtio_pci_dev_snapshot 1291 .add_snapshot(self.configuration.id(), self.configuration.snapshot()?); 1292 1293 // Snapshot VirtioPciCommonConfig 1294 virtio_pci_dev_snapshot 1295 .add_snapshot(self.common_config.id(), self.common_config.snapshot()?); 1296 1297 // Snapshot MSI-X 1298 if let Some(msix_config) = &self.msix_config { 1299 let mut msix_config = msix_config.lock().unwrap(); 1300 virtio_pci_dev_snapshot.add_snapshot(msix_config.id(), msix_config.snapshot()?); 1301 } 1302 1303 Ok(virtio_pci_dev_snapshot) 1304 } 1305 } 1306 impl Transportable for VirtioPciDevice {} 1307 impl Migratable for VirtioPciDevice {} 1308