1 // Copyright 2018 The Chromium OS Authors. All rights reserved. 2 // Use of this source code is governed by a BSD-style license that can be 3 // found in the LICENSE-BSD-3-Clause file. 4 // 5 // Copyright © 2019 Intel Corporation 6 // 7 // SPDX-License-Identifier: Apache-2.0 AND BSD-3-Clause 8 9 use std::any::Any; 10 use std::cmp; 11 use std::io::Write; 12 use std::ops::Deref; 13 use std::sync::atomic::{AtomicBool, AtomicU16, AtomicUsize, Ordering}; 14 use std::sync::{Arc, Barrier, Mutex}; 15 16 use anyhow::anyhow; 17 use libc::EFD_NONBLOCK; 18 use pci::{ 19 BarReprogrammingParams, MsixCap, MsixConfig, PciBarConfiguration, PciBarRegionType, 20 PciCapability, PciCapabilityId, PciClassCode, PciConfiguration, PciDevice, PciDeviceError, 21 PciHeaderType, PciMassStorageSubclass, PciNetworkControllerSubclass, PciSubclass, 22 }; 23 use serde::{Deserialize, Serialize}; 24 use thiserror::Error; 25 use virtio_queue::{Queue, QueueT}; 26 use vm_allocator::{AddressAllocator, SystemAllocator}; 27 use vm_device::dma_mapping::ExternalDmaMapping; 28 use vm_device::interrupt::{ 29 InterruptIndex, InterruptManager, InterruptSourceGroup, MsiIrqGroupConfig, 30 }; 31 use vm_device::{BusDevice, PciBarType, Resource}; 32 use vm_memory::{Address, ByteValued, GuestAddress, GuestAddressSpace, GuestMemoryAtomic, Le32}; 33 use vm_migration::{Migratable, MigratableError, Pausable, Snapshot, Snapshottable, Transportable}; 34 use vm_virtio::AccessPlatform; 35 use vmm_sys_util::eventfd::EventFd; 36 37 use super::pci_common_config::VirtioPciCommonConfigState; 38 use crate::transport::{VirtioPciCommonConfig, VirtioTransport, VIRTIO_PCI_COMMON_CONFIG_ID}; 39 use crate::{ 40 ActivateResult, GuestMemoryMmap, VirtioDevice, VirtioDeviceType, VirtioInterrupt, 41 VirtioInterruptType, DEVICE_ACKNOWLEDGE, DEVICE_DRIVER, DEVICE_DRIVER_OK, DEVICE_FAILED, 42 DEVICE_FEATURES_OK, DEVICE_INIT, 43 }; 44 45 /// Vector value used to disable MSI for a queue. 
const VIRTQ_MSI_NO_VECTOR: u16 = 0xffff;

// Values carried in the `cfg_type` field of a virtio PCI capability,
// identifying which configuration structure the capability points at
// (VIRTIO_PCI_CAP_* in the virtio specification).
enum PciCapabilityType {
    Common = 1,
    Notify = 2,
    Isr = 3,
    Device = 4,
    Pci = 5,
    SharedMemory = 8,
}

// This offset represents the 2 bytes omitted from the VirtioPciCap structure
// as they are already handled through add_capability(). These 2 bytes are the
// fields cap_vndr (1 byte) and cap_next (1 byte) defined in the virtio spec.
const VIRTIO_PCI_CAP_OFFSET: usize = 2;

// Generic virtio PCI capability ("struct virtio_pci_cap" in the virtio spec).
// The layout is guest-visible, hence the packed C representation.
#[allow(dead_code)]
#[repr(C, packed)]
#[derive(Clone, Copy, Default)]
struct VirtioPciCap {
    cap_len: u8,      // Generic PCI field: capability length
    cfg_type: u8,     // Identifies the structure.
    pci_bar: u8,      // Where to find it.
    id: u8,           // Multiple capabilities of the same type
    padding: [u8; 2], // Pad to full dword.
    offset: Le32,     // Offset within bar.
    length: Le32,     // Length of the structure, in bytes.
}
// SAFETY: All members are simple numbers and any value is valid.
unsafe impl ByteValued for VirtioPciCap {}

impl PciCapability for VirtioPciCap {
    fn bytes(&self) -> &[u8] {
        self.as_slice()
    }

    fn id(&self) -> PciCapabilityId {
        PciCapabilityId::VendorSpecific
    }
}

// The cap_vndr/cap_next bytes prepended by add_capability() are counted in
// cap_len even though they are not part of the Rust struct.
const VIRTIO_PCI_CAP_LEN_OFFSET: u8 = 2;

impl VirtioPciCap {
    pub fn new(cfg_type: PciCapabilityType, pci_bar: u8, offset: u32, length: u32) -> Self {
        VirtioPciCap {
            // cap_len covers the whole capability, including the 2 header
            // bytes handled by add_capability().
            cap_len: (std::mem::size_of::<VirtioPciCap>() as u8) + VIRTIO_PCI_CAP_LEN_OFFSET,
            cfg_type: cfg_type as u8,
            pci_bar,
            id: 0,
            padding: [0; 2],
            offset: Le32::from(offset),
            length: Le32::from(length),
        }
    }
}

// Notification capability ("struct virtio_pci_notify_cap"): a VirtioPciCap
// extended with the queue notification offset multiplier.
#[allow(dead_code)]
#[repr(C, packed)]
#[derive(Clone, Copy, Default)]
struct VirtioPciNotifyCap {
    cap: VirtioPciCap,
    notify_off_multiplier: Le32,
}
// SAFETY: All members are simple numbers and any value is valid.
unsafe impl ByteValued for VirtioPciNotifyCap {}

impl PciCapability for VirtioPciNotifyCap {
    fn bytes(&self) -> &[u8] {
        self.as_slice()
    }

    fn id(&self) -> PciCapabilityId {
        PciCapabilityId::VendorSpecific
    }
}

impl VirtioPciNotifyCap {
    pub fn new(
        cfg_type: PciCapabilityType,
        pci_bar: u8,
        offset: u32,
        length: u32,
        multiplier: Le32,
    ) -> Self {
        VirtioPciNotifyCap {
            cap: VirtioPciCap {
                cap_len: (std::mem::size_of::<VirtioPciNotifyCap>() as u8)
                    + VIRTIO_PCI_CAP_LEN_OFFSET,
                cfg_type: cfg_type as u8,
                pci_bar,
                id: 0,
                padding: [0; 2],
                offset: Le32::from(offset),
                length: Le32::from(length),
            },
            notify_off_multiplier: multiplier,
        }
    }
}

// 64-bit variant ("struct virtio_pci_cap64") used for regions whose offset or
// length does not fit in 32 bits (e.g. shared memory regions).
#[allow(dead_code)]
#[repr(C, packed)]
#[derive(Clone, Copy, Default)]
struct VirtioPciCap64 {
    cap: VirtioPciCap,
    offset_hi: Le32,
    length_hi: Le32,
}
// SAFETY: All members are simple numbers and any value is valid.
unsafe impl ByteValued for VirtioPciCap64 {}

impl PciCapability for VirtioPciCap64 {
    fn bytes(&self) -> &[u8] {
        self.as_slice()
    }

    fn id(&self) -> PciCapabilityId {
        PciCapabilityId::VendorSpecific
    }
}

impl VirtioPciCap64 {
    pub fn new(cfg_type: PciCapabilityType, pci_bar: u8, id: u8, offset: u64, length: u64) -> Self {
        VirtioPciCap64 {
            cap: VirtioPciCap {
                cap_len: (std::mem::size_of::<VirtioPciCap64>() as u8) + VIRTIO_PCI_CAP_LEN_OFFSET,
                cfg_type: cfg_type as u8,
                pci_bar,
                id,
                padding: [0; 2],
                // Low 32 bits live in the base capability; high 32 bits below.
                offset: Le32::from(offset as u32),
                length: Le32::from(length as u32),
            },
            offset_hi: Le32::from((offset >> 32) as u32),
            length_hi: Le32::from((length >> 32) as u32),
        }
    }
}

// PCI configuration access capability ("struct virtio_pci_cfg_cap"): lets the
// guest reach the other capability regions through PCI config space without
// mapping the BARs.
#[allow(dead_code)]
#[repr(C, packed)]
#[derive(Clone, Copy, Default)]
struct VirtioPciCfgCap {
    cap: VirtioPciCap,
    pci_cfg_data: [u8; 4],
}
// SAFETY: All members are simple numbers and any value is valid.
unsafe impl ByteValued for VirtioPciCfgCap {}

impl PciCapability for VirtioPciCfgCap {
    fn bytes(&self) -> &[u8] {
        self.as_slice()
    }

    fn id(&self) -> PciCapabilityId {
        PciCapabilityId::VendorSpecific
    }
}

impl VirtioPciCfgCap {
    fn new() -> Self {
        VirtioPciCfgCap {
            cap: VirtioPciCap::new(PciCapabilityType::Pci, 0, 0, 0),
            ..Default::default()
        }
    }
}

// Cached copy of the VIRTIO_PCI_CAP_PCI_CFG capability together with its
// offset within the PCI configuration space.
#[derive(Clone, Copy, Default)]
struct VirtioPciCfgCapInfo {
    offset: usize,
    cap: VirtioPciCfgCap,
}

#[allow(dead_code)]
#[derive(Copy, Clone)]
pub enum PciVirtioSubclass {
    NonTransitionalBase = 0xff,
}

impl PciSubclass for PciVirtioSubclass {
    fn get_register_value(&self) -> u8 {
        *self as u8
    }
}

// Allocate one bar for the structs pointed to by the capability structures.
// As per the PCI specification, because the same BAR shares MSI-X and non
// MSI-X structures, it is recommended to use 8KiB alignment for all those
// structures.
//
// Layout of the capability BAR; offsets below are all within that single BAR.
const COMMON_CONFIG_BAR_OFFSET: u64 = 0x0000;
const COMMON_CONFIG_SIZE: u64 = 56;
const ISR_CONFIG_BAR_OFFSET: u64 = 0x2000;
const ISR_CONFIG_SIZE: u64 = 1;
const DEVICE_CONFIG_BAR_OFFSET: u64 = 0x4000;
const DEVICE_CONFIG_SIZE: u64 = 0x1000;
const NOTIFICATION_BAR_OFFSET: u64 = 0x6000;
const NOTIFICATION_SIZE: u64 = 0x1000;
const MSIX_TABLE_BAR_OFFSET: u64 = 0x8000;
// The size is 256KiB because the table can hold up to 2048 entries, with each
// entry being 128 bits (4 DWORDS).
const MSIX_TABLE_SIZE: u64 = 0x40000;
const MSIX_PBA_BAR_OFFSET: u64 = 0x48000;
// The size is 2KiB because the Pending Bit Array has one bit per vector and it
// can support up to 2048 vectors.
const MSIX_PBA_SIZE: u64 = 0x800;
// The BAR size must be a power of 2.
const CAPABILITY_BAR_SIZE: u64 = 0x80000;
// BAR index holding all the structures above, and BAR index used for the
// optional virtio shared memory regions.
const VIRTIO_COMMON_BAR_INDEX: usize = 0;
const VIRTIO_SHM_BAR_INDEX: usize = 2;

const NOTIFY_OFF_MULTIPLIER: u32 = 4; // A dword per notification address.

// 0x1af4 is the virtio (Red Hat) PCI vendor ID.
const VIRTIO_PCI_VENDOR_ID: u16 = 0x1af4;
const VIRTIO_PCI_DEVICE_ID_BASE: u16 = 0x1040; // Add to device type to get device ID.
// Saved state of a single virtqueue, serialized on snapshot.
#[derive(Serialize, Deserialize)]
struct QueueState {
    max_size: u16,
    size: u16,
    ready: bool,
    desc_table: u64,
    avail_ring: u64,
    used_ring: u64,
}

// Saved state of the whole virtio-pci transport, serialized on snapshot.
#[derive(Serialize, Deserialize)]
pub struct VirtioPciDeviceState {
    device_activated: bool,
    queues: Vec<QueueState>,
    interrupt_status: usize,
    cap_pci_cfg_offset: usize,
    cap_pci_cfg: Vec<u8>,
}

// Everything needed to activate the underlying virtio device, detached from
// the VirtioPciDevice itself so the activation can be deferred and performed
// from another context (see the pending_activations list).
pub struct VirtioPciDeviceActivator {
    interrupt: Option<Arc<dyn VirtioInterrupt>>,
    memory: Option<GuestMemoryAtomic<GuestMemoryMmap>>,
    device: Arc<Mutex<dyn VirtioDevice>>,
    device_activated: Arc<AtomicBool>,
    queues: Option<Vec<(usize, Queue, EventFd)>>,
    barrier: Option<Arc<Barrier>>,
    id: String,
}

impl VirtioPciDeviceActivator {
    /// Activates the virtio device with the captured memory, interrupt and
    /// queues, then marks the transport as activated. If a barrier was
    /// provided, joins it so the other party waiting on the activation is
    /// released once activation completed.
    pub fn activate(&mut self) -> ActivateResult {
        self.device.lock().unwrap().activate(
            self.memory.take().unwrap(),
            self.interrupt.take().unwrap(),
            self.queues.take().unwrap(),
        )?;
        self.device_activated.store(true, Ordering::SeqCst);

        if let Some(barrier) = self.barrier.take() {
            info!("{}: Waiting for barrier", self.id);
            barrier.wait();
            info!("{}: Barrier released", self.id);
        }

        Ok(())
    }
}

#[derive(Error, Debug)]
pub enum VirtioPciDeviceError {
    #[error("Failed creating VirtioPciDevice: {0}")]
    CreateVirtioPciDevice(#[source] anyhow::Error),
}
pub type Result<T> = std::result::Result<T, VirtioPciDeviceError>;

/// PCI transport wrapping a virtio device: exposes the device to the guest
/// through a PCI configuration space, capability structures and BARs.
pub struct VirtioPciDevice {
    id: String,

    // PCI configuration registers.
    configuration: PciConfiguration,

    // virtio PCI common configuration
    common_config: VirtioPciCommonConfig,

    // MSI-X config
    msix_config: Option<Arc<Mutex<MsixConfig>>>,

    // Number of MSI-X vectors
    msix_num: u16,

    // Virtio device reference and status
    device: Arc<Mutex<dyn VirtioDevice>>,
    device_activated: Arc<AtomicBool>,

    // PCI interrupts.
    interrupt_status: Arc<AtomicUsize>,
    virtio_interrupt: Option<Arc<dyn VirtioInterrupt>>,
    interrupt_source_group: Arc<dyn InterruptSourceGroup>,

    // virtio queues
    queues: Vec<Queue>,
    queue_evts: Vec<EventFd>,

    // Guest memory
    memory: GuestMemoryAtomic<GuestMemoryMmap>,

    // Settings PCI BAR
    settings_bar: u8,

    // Whether to use 64-bit bar location or 32-bit
    use_64bit_bar: bool,

    // Add a dedicated structure to hold information about the very specific
    // virtio-pci capability VIRTIO_PCI_CAP_PCI_CFG. This is needed to support
    // the legacy/backward compatible mechanism of letting the guest access the
    // other virtio capabilities without mapping the PCI BARs. This can be
    // needed when the guest tries to early access the virtio configuration of
    // a device.
    cap_pci_cfg_info: VirtioPciCfgCapInfo,

    // Details of bar regions to free
    bar_regions: Vec<PciBarConfiguration>,

    // EventFd to signal on to request activation
    activate_evt: EventFd,

    // Optional DMA handler
    dma_handler: Option<Arc<dyn ExternalDmaMapping>>,

    // Pending activations
    pending_activations: Arc<Mutex<Vec<VirtioPciDeviceActivator>>>,
}

impl VirtioPciDevice {
    /// Constructs a new PCI transport for the given virtio device.
    #[allow(clippy::too_many_arguments)]
    pub fn new(
        id: String,
        memory: GuestMemoryAtomic<GuestMemoryMmap>,
        device: Arc<Mutex<dyn VirtioDevice>>,
        msix_num: u16,
        access_platform: Option<Arc<dyn AccessPlatform>>,
        interrupt_manager: &Arc<dyn InterruptManager<GroupConfig = MsiIrqGroupConfig>>,
        pci_device_bdf: u32,
        activate_evt: EventFd,
        use_64bit_bar: bool,
        dma_handler: Option<Arc<dyn ExternalDmaMapping>>,
        pending_activations: Arc<Mutex<Vec<VirtioPciDeviceActivator>>>,
        snapshot: Option<Snapshot>,
    ) -> Result<Self> {
        let mut locked_device = device.lock().unwrap();
        // One eventfd per virtqueue, signaled when the guest notifies a queue.
        let mut queue_evts = Vec::new();
        for _ in locked_device.queue_max_sizes().iter() {
            queue_evts.push(EventFd::new(EFD_NONBLOCK).map_err(|e| {
                VirtioPciDeviceError::CreateVirtioPciDevice(anyhow!(
                    "Failed creating eventfd: {}",
                    e
                ))
            })?)
        }
        let num_queues = locked_device.queue_max_sizes().len();

        if let Some(access_platform) = &access_platform {
            locked_device.set_access_platform(access_platform.clone());
        }

        let mut queues: Vec<Queue> = locked_device
            .queue_max_sizes()
            .iter()
            .map(|&s| Queue::new(s).unwrap())
            .collect();

        let pci_device_id = VIRTIO_PCI_DEVICE_ID_BASE + locked_device.device_type() as u16;

        // One MSI vector group covering all the device's vectors.
        let interrupt_source_group = interrupt_manager
            .create_group(MsiIrqGroupConfig {
                base: 0,
                count: msix_num as InterruptIndex,
            })
            .map_err(|e| {
                VirtioPciDeviceError::CreateVirtioPciDevice(anyhow!(
                    "Failed creating MSI interrupt group: {}",
                    e
                ))
            })?;

        // When restoring from a snapshot, the MSI-X state is pulled out of it;
        // otherwise this is None and MsixConfig starts from scratch.
        let msix_state = vm_migration::state_from_id(snapshot.as_ref(), pci::MSIX_CONFIG_ID)
            .map_err(|e| {
                VirtioPciDeviceError::CreateVirtioPciDevice(anyhow!(
                    "Failed to get MsixConfigState from Snapshot: {}",
                    e
                ))
            })?;

        let (msix_config, msix_config_clone) = if msix_num > 0 {
            let msix_config = Arc::new(Mutex::new(
                MsixConfig::new(
                    msix_num,
                    interrupt_source_group.clone(),
                    pci_device_bdf,
                    msix_state,
                )
                .unwrap(),
            ));
            let msix_config_clone = msix_config.clone();
            (Some(msix_config), Some(msix_config_clone))
        } else {
            (None, None)
        };

        // Pick a PCI class matching the virtio device type; everything else
        // falls back to the generic non-transitional base class.
        let (class, subclass) = match VirtioDeviceType::from(locked_device.device_type()) {
            VirtioDeviceType::Net => (
                PciClassCode::NetworkController,
                &PciNetworkControllerSubclass::EthernetController as &dyn PciSubclass,
            ),
            VirtioDeviceType::Block => (
                PciClassCode::MassStorage,
                &PciMassStorageSubclass::MassStorage as &dyn PciSubclass,
            ),
            _ => (
                PciClassCode::Other,
                &PciVirtioSubclass::NonTransitionalBase as &dyn PciSubclass,
            ),
        };

        let pci_configuration_state =
            vm_migration::state_from_id(snapshot.as_ref(), pci::PCI_CONFIGURATION_ID).map_err(
                |e| {
                    VirtioPciDeviceError::CreateVirtioPciDevice(anyhow!(
                        "Failed to get PciConfigurationState from Snapshot: {}",
                        e
                    ))
                },
            )?;

        let configuration = PciConfiguration::new(
            VIRTIO_PCI_VENDOR_ID,
            pci_device_id,
            0x1, // For modern virtio-PCI devices
            class,
            subclass,
            None,
            PciHeaderType::Device,
            VIRTIO_PCI_VENDOR_ID,
            pci_device_id,
            msix_config_clone,
            pci_configuration_state,
        );

        let common_config_state =
            vm_migration::state_from_id(snapshot.as_ref(), VIRTIO_PCI_COMMON_CONFIG_ID).map_err(
                |e| {
                    VirtioPciDeviceError::CreateVirtioPciDevice(anyhow!(
                        "Failed to get VirtioPciCommonConfigState from Snapshot: {}",
                        e
                    ))
                },
            )?;

        let common_config = if let Some(common_config_state) = common_config_state {
            VirtioPciCommonConfig::new(common_config_state, access_platform)
        } else {
            // Fresh device: everything zeroed and MSI disabled on all queues.
            VirtioPciCommonConfig::new(
                VirtioPciCommonConfigState {
                    driver_status: 0,
                    config_generation: 0,
                    device_feature_select: 0,
                    driver_feature_select: 0,
                    queue_select: 0,
                    msix_config: VIRTQ_MSI_NO_VECTOR,
                    msix_queues: vec![VIRTQ_MSI_NO_VECTOR; num_queues],
                },
                access_platform,
            )
        };

        let state: Option<VirtioPciDeviceState> = snapshot
            .as_ref()
            .map(|s| s.to_state())
            .transpose()
            .map_err(|e| {
                VirtioPciDeviceError::CreateVirtioPciDevice(anyhow!(
                    "Failed to get VirtioPciDeviceState from Snapshot: {}",
                    e
                ))
            })?;

        let (device_activated, interrupt_status, cap_pci_cfg_info) = if let Some(state) = state {
            // Update virtqueues indexes for both available and used rings.
            for (i, queue) in queues.iter_mut().enumerate() {
                queue.set_size(state.queues[i].size);
                queue.set_ready(state.queues[i].ready);
                queue
                    .try_set_desc_table_address(GuestAddress(state.queues[i].desc_table))
                    .unwrap();
                queue
                    .try_set_avail_ring_address(GuestAddress(state.queues[i].avail_ring))
                    .unwrap();
                queue
                    .try_set_used_ring_address(GuestAddress(state.queues[i].used_ring))
                    .unwrap();
                // Both internal indexes are rebuilt from the used ring index
                // found in guest memory.
                queue.set_next_avail(
                    queue
                        .used_idx(memory.memory().deref(), Ordering::Acquire)
                        .unwrap()
                        .0,
                );
                queue.set_next_used(
                    queue
                        .used_idx(memory.memory().deref(), Ordering::Acquire)
                        .unwrap()
                        .0,
                );
            }

            (
                state.device_activated,
                state.interrupt_status,
                VirtioPciCfgCapInfo {
                    offset: state.cap_pci_cfg_offset,
                    cap: *VirtioPciCfgCap::from_slice(&state.cap_pci_cfg).unwrap(),
                },
            )
        } else {
            (false, 0, VirtioPciCfgCapInfo::default())
        };

        // Dropping the MutexGuard to unlock the VirtioDevice. This is required
        // in the context of a restore given the device might require some
        // activation, meaning it will require locking. Dropping the lock
        // prevents from a subtle deadlock.
        std::mem::drop(locked_device);

        let mut virtio_pci_device = VirtioPciDevice {
            id,
            configuration,
            common_config,
            msix_config,
            msix_num,
            device,
            device_activated: Arc::new(AtomicBool::new(device_activated)),
            interrupt_status: Arc::new(AtomicUsize::new(interrupt_status)),
            virtio_interrupt: None,
            queues,
            queue_evts,
            memory,
            settings_bar: 0,
            use_64bit_bar,
            interrupt_source_group,
            cap_pci_cfg_info,
            bar_regions: vec![],
            activate_evt,
            dma_handler,
            pending_activations,
        };

        // Wire the MSI-X based interrupt delivery path when MSI-X is enabled.
        if let Some(msix_config) = &virtio_pci_device.msix_config {
            virtio_pci_device.virtio_interrupt = Some(Arc::new(VirtioInterruptMsix::new(
                msix_config.clone(),
                virtio_pci_device.common_config.msix_config.clone(),
                virtio_pci_device.common_config.msix_queues.clone(),
                virtio_pci_device.interrupt_source_group.clone(),
            )));
        }

        // In case of a restore, we can activate the device, as we know at
        // this point the virtqueues are in the right state and the device is
        // ready to be activated, which will spawn each virtio worker thread.
        if virtio_pci_device.device_activated.load(Ordering::SeqCst)
            && virtio_pci_device.is_driver_ready()
        {
            virtio_pci_device.activate().map_err(|e| {
                VirtioPciDeviceError::CreateVirtioPciDevice(anyhow!(
                    "Failed activating the device: {}",
                    e
                ))
            })?;
        }

        Ok(virtio_pci_device)
    }

    /// Captures the transport state (activation flag, interrupt status, queue
    /// parameters and the cached PCI-cfg capability) for snapshotting.
    fn state(&self) -> VirtioPciDeviceState {
        VirtioPciDeviceState {
            device_activated: self.device_activated.load(Ordering::Acquire),
            interrupt_status: self.interrupt_status.load(Ordering::Acquire),
            queues: self
                .queues
                .iter()
                .map(|q| QueueState {
                    max_size: q.max_size(),
                    size: q.size(),
                    ready: q.ready(),
                    desc_table: q.desc_table(),
                    avail_ring: q.avail_ring(),
                    used_ring: q.used_ring(),
                })
                .collect(),
            cap_pci_cfg_offset: self.cap_pci_cfg_info.offset,
            cap_pci_cfg: self.cap_pci_cfg_info.cap.bytes().to_vec(),
        }
    }

    /// Gets the list of queue events that must be triggered whenever the VM writes to
    /// `virtio::NOTIFY_REG_OFFSET` past the MMIO base. Each event must be triggered when the
    /// value being written equals the index of the event in this list.
649 fn queue_evts(&self) -> &[EventFd] { 650 self.queue_evts.as_slice() 651 } 652 653 fn is_driver_ready(&self) -> bool { 654 let ready_bits = 655 (DEVICE_ACKNOWLEDGE | DEVICE_DRIVER | DEVICE_DRIVER_OK | DEVICE_FEATURES_OK) as u8; 656 self.common_config.driver_status == ready_bits 657 && self.common_config.driver_status & DEVICE_FAILED as u8 == 0 658 } 659 660 /// Determines if the driver has requested the device (re)init / reset itself 661 fn is_driver_init(&self) -> bool { 662 self.common_config.driver_status == DEVICE_INIT as u8 663 } 664 665 pub fn config_bar_addr(&self) -> u64 { 666 self.configuration.get_bar_addr(self.settings_bar as usize) 667 } 668 669 fn add_pci_capabilities( 670 &mut self, 671 settings_bar: u8, 672 ) -> std::result::Result<(), PciDeviceError> { 673 // Add pointers to the different configuration structures from the PCI capabilities. 674 let common_cap = VirtioPciCap::new( 675 PciCapabilityType::Common, 676 settings_bar, 677 COMMON_CONFIG_BAR_OFFSET as u32, 678 COMMON_CONFIG_SIZE as u32, 679 ); 680 self.configuration 681 .add_capability(&common_cap) 682 .map_err(PciDeviceError::CapabilitiesSetup)?; 683 684 let isr_cap = VirtioPciCap::new( 685 PciCapabilityType::Isr, 686 settings_bar, 687 ISR_CONFIG_BAR_OFFSET as u32, 688 ISR_CONFIG_SIZE as u32, 689 ); 690 self.configuration 691 .add_capability(&isr_cap) 692 .map_err(PciDeviceError::CapabilitiesSetup)?; 693 694 // TODO(dgreid) - set based on device's configuration size? 
695 let device_cap = VirtioPciCap::new( 696 PciCapabilityType::Device, 697 settings_bar, 698 DEVICE_CONFIG_BAR_OFFSET as u32, 699 DEVICE_CONFIG_SIZE as u32, 700 ); 701 self.configuration 702 .add_capability(&device_cap) 703 .map_err(PciDeviceError::CapabilitiesSetup)?; 704 705 let notify_cap = VirtioPciNotifyCap::new( 706 PciCapabilityType::Notify, 707 settings_bar, 708 NOTIFICATION_BAR_OFFSET as u32, 709 NOTIFICATION_SIZE as u32, 710 Le32::from(NOTIFY_OFF_MULTIPLIER), 711 ); 712 self.configuration 713 .add_capability(¬ify_cap) 714 .map_err(PciDeviceError::CapabilitiesSetup)?; 715 716 let configuration_cap = VirtioPciCfgCap::new(); 717 self.cap_pci_cfg_info.offset = self 718 .configuration 719 .add_capability(&configuration_cap) 720 .map_err(PciDeviceError::CapabilitiesSetup)? 721 + VIRTIO_PCI_CAP_OFFSET; 722 self.cap_pci_cfg_info.cap = configuration_cap; 723 724 if self.msix_config.is_some() { 725 let msix_cap = MsixCap::new( 726 settings_bar, 727 self.msix_num, 728 MSIX_TABLE_BAR_OFFSET as u32, 729 settings_bar, 730 MSIX_PBA_BAR_OFFSET as u32, 731 ); 732 self.configuration 733 .add_capability(&msix_cap) 734 .map_err(PciDeviceError::CapabilitiesSetup)?; 735 } 736 737 self.settings_bar = settings_bar; 738 Ok(()) 739 } 740 741 fn read_cap_pci_cfg(&mut self, offset: usize, mut data: &mut [u8]) { 742 let cap_slice = self.cap_pci_cfg_info.cap.as_slice(); 743 let data_len = data.len(); 744 let cap_len = cap_slice.len(); 745 if offset + data_len > cap_len { 746 error!("Failed to read cap_pci_cfg from config space"); 747 return; 748 } 749 750 if offset < std::mem::size_of::<VirtioPciCap>() { 751 if let Some(end) = offset.checked_add(data_len) { 752 // This write can't fail, offset and end are checked against config_len. 753 data.write_all(&cap_slice[offset..cmp::min(end, cap_len)]) 754 .unwrap(); 755 } 756 } else { 757 let bar_offset: u32 = 758 // SAFETY: we know self.cap_pci_cfg_info.cap.cap.offset is 32bits long. 
759 unsafe { std::mem::transmute(self.cap_pci_cfg_info.cap.cap.offset) }; 760 self.read_bar(0, bar_offset as u64, data) 761 } 762 } 763 764 fn write_cap_pci_cfg(&mut self, offset: usize, data: &[u8]) -> Option<Arc<Barrier>> { 765 let cap_slice = self.cap_pci_cfg_info.cap.as_mut_slice(); 766 let data_len = data.len(); 767 let cap_len = cap_slice.len(); 768 if offset + data_len > cap_len { 769 error!("Failed to write cap_pci_cfg to config space"); 770 return None; 771 } 772 773 if offset < std::mem::size_of::<VirtioPciCap>() { 774 let (_, right) = cap_slice.split_at_mut(offset); 775 right[..data_len].copy_from_slice(data); 776 None 777 } else { 778 let bar_offset: u32 = 779 // SAFETY: we know self.cap_pci_cfg_info.cap.cap.offset is 32bits long. 780 unsafe { std::mem::transmute(self.cap_pci_cfg_info.cap.cap.offset) }; 781 self.write_bar(0, bar_offset as u64, data) 782 } 783 } 784 785 pub fn virtio_device(&self) -> Arc<Mutex<dyn VirtioDevice>> { 786 self.device.clone() 787 } 788 789 fn prepare_activator(&mut self, barrier: Option<Arc<Barrier>>) -> VirtioPciDeviceActivator { 790 let mut queues = Vec::new(); 791 792 for (queue_index, queue) in self.queues.iter().enumerate() { 793 if !queue.ready() { 794 continue; 795 } 796 797 if !queue.is_valid(self.memory.memory().deref()) { 798 error!("Queue {} is not valid", queue_index); 799 } 800 801 queues.push(( 802 queue_index, 803 vm_virtio::clone_queue(queue), 804 self.queue_evts[queue_index].try_clone().unwrap(), 805 )); 806 } 807 808 VirtioPciDeviceActivator { 809 interrupt: self.virtio_interrupt.take(), 810 memory: Some(self.memory.clone()), 811 device: self.device.clone(), 812 queues: Some(queues), 813 device_activated: self.device_activated.clone(), 814 barrier, 815 id: self.id.clone(), 816 } 817 } 818 819 fn activate(&mut self) -> ActivateResult { 820 self.prepare_activator(None).activate() 821 } 822 823 fn needs_activation(&self) -> bool { 824 !self.device_activated.load(Ordering::SeqCst) && self.is_driver_ready() 825 } 
    /// Returns the optional external DMA mapping handler.
    pub fn dma_handler(&self) -> Option<&Arc<dyn ExternalDmaMapping>> {
        self.dma_handler.as_ref()
    }
}

impl VirtioTransport for VirtioPciDevice {
    // Pairs each queue eventfd with the guest address of its notification
    // doorbell within the notification region of the capability BAR.
    fn ioeventfds(&self, base_addr: u64) -> impl Iterator<Item = (&EventFd, u64)> {
        let notify_base = base_addr + NOTIFICATION_BAR_OFFSET;
        self.queue_evts().iter().enumerate().map(move |(i, event)| {
            (
                event,
                notify_base + i as u64 * u64::from(NOTIFY_OFF_MULTIPLIER),
            )
        })
    }
}

// MSI-X backed implementation of VirtioInterrupt: maps config/queue interrupt
// requests to the MSI-X vectors the driver assigned, honoring masking.
pub struct VirtioInterruptMsix {
    msix_config: Arc<Mutex<MsixConfig>>,
    config_vector: Arc<AtomicU16>,
    queues_vectors: Arc<Mutex<Vec<u16>>>,
    interrupt_source_group: Arc<dyn InterruptSourceGroup>,
}

impl VirtioInterruptMsix {
    pub fn new(
        msix_config: Arc<Mutex<MsixConfig>>,
        config_vector: Arc<AtomicU16>,
        queues_vectors: Arc<Mutex<Vec<u16>>>,
        interrupt_source_group: Arc<dyn InterruptSourceGroup>,
    ) -> Self {
        VirtioInterruptMsix {
            msix_config,
            config_vector,
            queues_vectors,
            interrupt_source_group,
        }
    }
}

impl VirtioInterrupt for VirtioInterruptMsix {
    fn trigger(&self, int_type: VirtioInterruptType) -> std::result::Result<(), std::io::Error> {
        // Resolve the MSI-X vector the driver assigned to this interrupt.
        let vector = match int_type {
            VirtioInterruptType::Config => self.config_vector.load(Ordering::Acquire),
            VirtioInterruptType::Queue(queue_index) => {
                self.queues_vectors.lock().unwrap()[queue_index as usize]
            }
        };

        // The driver disabled MSI for this interrupt; nothing to deliver.
        if vector == VIRTQ_MSI_NO_VECTOR {
            return Ok(());
        }

        let config = &mut self.msix_config.lock().unwrap();
        let entry = &config.table_entries[vector as usize];
        // In case the vector control register associated with the entry
        // has its first bit set, this means the vector is masked and the
        // device should not inject the interrupt.
        // Instead, the Pending Bit Array table is updated to reflect there
        // is a pending interrupt for this specific vector.
        if config.masked() || entry.masked() {
            config.set_pba_bit(vector, false);
            return Ok(());
        }

        self.interrupt_source_group
            .trigger(vector as InterruptIndex)
    }

    // Exposes the eventfd backing the vector assigned to this interrupt, so
    // it can be signaled directly (e.g. registered with the hypervisor).
    fn notifier(&self, int_type: VirtioInterruptType) -> Option<EventFd> {
        let vector = match int_type {
            VirtioInterruptType::Config => self.config_vector.load(Ordering::Acquire),
            VirtioInterruptType::Queue(queue_index) => {
                self.queues_vectors.lock().unwrap()[queue_index as usize]
            }
        };

        self.interrupt_source_group
            .notifier(vector as InterruptIndex)
    }
}

impl PciDevice for VirtioPciDevice {
    fn write_config_register(
        &mut self,
        reg_idx: usize,
        offset: u64,
        data: &[u8],
    ) -> (Vec<BarReprogrammingParams>, Option<Arc<Barrier>>) {
        // Handle the special case where the capability VIRTIO_PCI_CAP_PCI_CFG
        // is accessed. This capability has a special meaning as it allows the
        // guest to access other capabilities without mapping the PCI BAR.
        let base = reg_idx * 4;
        if base + offset as usize >= self.cap_pci_cfg_info.offset
            && base + offset as usize + data.len()
                <= self.cap_pci_cfg_info.offset + self.cap_pci_cfg_info.cap.bytes().len()
        {
            let offset = base + offset as usize - self.cap_pci_cfg_info.offset;
            (Vec::new(), self.write_cap_pci_cfg(offset, data))
        } else {
            (
                self.configuration
                    .write_config_register(reg_idx, offset, data),
                None,
            )
        }
    }

    fn read_config_register(&mut self, reg_idx: usize) -> u32 {
        // Handle the special case where the capability VIRTIO_PCI_CAP_PCI_CFG
        // is accessed. This capability has a special meaning as it allows the
        // guest to access other capabilities without mapping the PCI BAR.
        let base = reg_idx * 4;
        if base >= self.cap_pci_cfg_info.offset
            && base + 4 <= self.cap_pci_cfg_info.offset + self.cap_pci_cfg_info.cap.bytes().len()
        {
            let offset = base - self.cap_pci_cfg_info.offset;
            let mut data = [0u8; 4];
            self.read_cap_pci_cfg(offset, &mut data);
            u32::from_le_bytes(data)
        } else {
            self.configuration.read_reg(reg_idx)
        }
    }

    fn allocate_bars(
        &mut self,
        _allocator: &Arc<Mutex<SystemAllocator>>,
        mmio32_allocator: &mut AddressAllocator,
        mmio64_allocator: &mut AddressAllocator,
        resources: Option<Vec<Resource>>,
    ) -> std::result::Result<Vec<PciBarConfiguration>, PciDeviceError> {
        let mut bars = Vec::new();
        let device_clone = self.device.clone();
        let device = device_clone.lock().unwrap();

        // When restoring (resources provided), reuse the previously assigned
        // BAR address and region type instead of allocating fresh ones.
        let mut settings_bar_addr = None;
        let mut use_64bit_bar = self.use_64bit_bar;
        let restoring = resources.is_some();
        if let Some(resources) = resources {
            for resource in resources {
                if let Resource::PciBar {
                    index, base, type_, ..
                } = resource
                {
                    if index == VIRTIO_COMMON_BAR_INDEX {
                        settings_bar_addr = Some(GuestAddress(base));
                        use_64bit_bar = match type_ {
                            PciBarType::Io => {
                                return Err(PciDeviceError::InvalidResource(resource))
                            }
                            PciBarType::Mmio32 => false,
                            PciBarType::Mmio64 => true,
                        };
                        break;
                    }
                }
            }
            // Error out if no resource was matching the BAR id.
            if settings_bar_addr.is_none() {
                return Err(PciDeviceError::MissingResource);
            }
        }

        // Allocate the virtio-pci capability BAR.
        // See http://docs.oasis-open.org/virtio/virtio/v1.0/cs04/virtio-v1.0-cs04.html#x1-740004
        let (virtio_pci_bar_addr, region_type) = if use_64bit_bar {
            let region_type = PciBarRegionType::Memory64BitRegion;
            let addr = mmio64_allocator
                .allocate(
                    settings_bar_addr,
                    CAPABILITY_BAR_SIZE,
                    Some(CAPABILITY_BAR_SIZE),
                )
                .ok_or(PciDeviceError::IoAllocationFailed(CAPABILITY_BAR_SIZE))?;
            (addr, region_type)
        } else {
            let region_type = PciBarRegionType::Memory32BitRegion;
            let addr = mmio32_allocator
                .allocate(
                    settings_bar_addr,
                    CAPABILITY_BAR_SIZE,
                    Some(CAPABILITY_BAR_SIZE),
                )
                .ok_or(PciDeviceError::IoAllocationFailed(CAPABILITY_BAR_SIZE))?;
            (addr, region_type)
        };

        let bar = PciBarConfiguration::default()
            .set_index(VIRTIO_COMMON_BAR_INDEX)
            .set_address(virtio_pci_bar_addr.raw_value())
            .set_size(CAPABILITY_BAR_SIZE)
            .set_region_type(region_type);

        // The creation of the PCI BAR and its associated capabilities must
        // happen only during the creation of a brand new VM. When a VM is
        // restored from a known state, the BARs are already created with the
        // right content, therefore we don't need to go through this codepath.
        if !restoring {
            self.configuration.add_pci_bar(&bar).map_err(|e| {
                PciDeviceError::IoRegistrationFailed(virtio_pci_bar_addr.raw_value(), e)
            })?;

            // Once the BARs are allocated, the capabilities can be added to the PCI configuration.
            self.add_pci_capabilities(VIRTIO_COMMON_BAR_INDEX as u8)?;
        }

        bars.push(bar);

        // Allocate a dedicated BAR if there are some shared memory regions.
        if let Some(shm_list) = device.get_shm_regions() {
            let bar = PciBarConfiguration::default()
                .set_index(VIRTIO_SHM_BAR_INDEX)
                .set_address(shm_list.addr.raw_value())
                .set_size(shm_list.len);

            // The creation of the PCI BAR and its associated capabilities must
            // happen only during the creation of a brand new VM. When a VM is
            // restored from a known state, the BARs are already created with the
            // right content, therefore we don't need to go through this codepath.
            if !restoring {
                self.configuration.add_pci_bar(&bar).map_err(|e| {
                    PciDeviceError::IoRegistrationFailed(shm_list.addr.raw_value(), e)
                })?;

                // One SharedMemory capability per region, identified by `idx`.
                for (idx, shm) in shm_list.region_list.iter().enumerate() {
                    let shm_cap = VirtioPciCap64::new(
                        PciCapabilityType::SharedMemory,
                        VIRTIO_SHM_BAR_INDEX as u8,
                        idx as u8,
                        shm.offset,
                        shm.len,
                    );
                    self.configuration
                        .add_capability(&shm_cap)
                        .map_err(PciDeviceError::CapabilitiesSetup)?;
                }
            }

            bars.push(bar);
        }

        // Remember the BAR regions so free_bars() can release them later.
        self.bar_regions.clone_from(&bars);

        Ok(bars)
    }

    fn free_bars(
        &mut self,
        _allocator: &mut SystemAllocator,
        mmio32_allocator: &mut AddressAllocator,
        mmio64_allocator: &mut AddressAllocator,
    ) -> std::result::Result<(), PciDeviceError> {
        // Return each BAR range to the allocator it was carved out of.
        for bar in self.bar_regions.drain(..) {
            match bar.region_type() {
                PciBarRegionType::Memory32BitRegion => {
                    mmio32_allocator.free(GuestAddress(bar.addr()), bar.size());
                }
                PciBarRegionType::Memory64BitRegion => {
                    mmio64_allocator.free(GuestAddress(bar.addr()), bar.size());
                }
                _ => error!("Unexpected PCI bar type"),
            }
        }
        Ok(())
    }

    fn move_bar(
        &mut self,
        old_base: u64,
        new_base: u64,
    ) -> std::result::Result<(), std::io::Error> {
        // We only update our idea of the bar in order to support free_bars() above.
1100 // The majority of the reallocation is done inside DeviceManager. 1101 for bar in self.bar_regions.iter_mut() { 1102 if bar.addr() == old_base { 1103 *bar = bar.set_address(new_base); 1104 } 1105 } 1106 1107 Ok(()) 1108 } 1109 1110 fn read_bar(&mut self, _base: u64, offset: u64, data: &mut [u8]) { 1111 match offset { 1112 o if o < COMMON_CONFIG_BAR_OFFSET + COMMON_CONFIG_SIZE => self.common_config.read( 1113 o - COMMON_CONFIG_BAR_OFFSET, 1114 data, 1115 &self.queues, 1116 self.device.clone(), 1117 ), 1118 o if (ISR_CONFIG_BAR_OFFSET..ISR_CONFIG_BAR_OFFSET + ISR_CONFIG_SIZE).contains(&o) => { 1119 if let Some(v) = data.get_mut(0) { 1120 // Reading this register resets it to 0. 1121 *v = self.interrupt_status.swap(0, Ordering::AcqRel) as u8; 1122 } 1123 } 1124 o if (DEVICE_CONFIG_BAR_OFFSET..DEVICE_CONFIG_BAR_OFFSET + DEVICE_CONFIG_SIZE) 1125 .contains(&o) => 1126 { 1127 let device = self.device.lock().unwrap(); 1128 device.read_config(o - DEVICE_CONFIG_BAR_OFFSET, data); 1129 } 1130 o if (NOTIFICATION_BAR_OFFSET..NOTIFICATION_BAR_OFFSET + NOTIFICATION_SIZE) 1131 .contains(&o) => 1132 { 1133 // Handled with ioeventfds. 
1134 } 1135 o if (MSIX_TABLE_BAR_OFFSET..MSIX_TABLE_BAR_OFFSET + MSIX_TABLE_SIZE).contains(&o) => { 1136 if let Some(msix_config) = &self.msix_config { 1137 msix_config 1138 .lock() 1139 .unwrap() 1140 .read_table(o - MSIX_TABLE_BAR_OFFSET, data); 1141 } 1142 } 1143 o if (MSIX_PBA_BAR_OFFSET..MSIX_PBA_BAR_OFFSET + MSIX_PBA_SIZE).contains(&o) => { 1144 if let Some(msix_config) = &self.msix_config { 1145 msix_config 1146 .lock() 1147 .unwrap() 1148 .read_pba(o - MSIX_PBA_BAR_OFFSET, data); 1149 } 1150 } 1151 _ => (), 1152 } 1153 } 1154 1155 fn write_bar(&mut self, _base: u64, offset: u64, data: &[u8]) -> Option<Arc<Barrier>> { 1156 match offset { 1157 o if o < COMMON_CONFIG_BAR_OFFSET + COMMON_CONFIG_SIZE => self.common_config.write( 1158 o - COMMON_CONFIG_BAR_OFFSET, 1159 data, 1160 &mut self.queues, 1161 self.device.clone(), 1162 ), 1163 o if (ISR_CONFIG_BAR_OFFSET..ISR_CONFIG_BAR_OFFSET + ISR_CONFIG_SIZE).contains(&o) => { 1164 if let Some(v) = data.first() { 1165 self.interrupt_status 1166 .fetch_and(!(*v as usize), Ordering::AcqRel); 1167 } 1168 } 1169 o if (DEVICE_CONFIG_BAR_OFFSET..DEVICE_CONFIG_BAR_OFFSET + DEVICE_CONFIG_SIZE) 1170 .contains(&o) => 1171 { 1172 let mut device = self.device.lock().unwrap(); 1173 device.write_config(o - DEVICE_CONFIG_BAR_OFFSET, data); 1174 } 1175 o if (NOTIFICATION_BAR_OFFSET..NOTIFICATION_BAR_OFFSET + NOTIFICATION_SIZE) 1176 .contains(&o) => 1177 { 1178 #[cfg(feature = "sev_snp")] 1179 for (event, addr) in self.ioeventfds(_base) { 1180 if addr == _base + offset { 1181 event.write(1).unwrap(); 1182 } 1183 } 1184 // Handled with ioeventfds. 
1185 #[cfg(not(feature = "sev_snp"))] 1186 error!("Unexpected write to notification BAR: offset = 0x{:x}", o); 1187 } 1188 o if (MSIX_TABLE_BAR_OFFSET..MSIX_TABLE_BAR_OFFSET + MSIX_TABLE_SIZE).contains(&o) => { 1189 if let Some(msix_config) = &self.msix_config { 1190 msix_config 1191 .lock() 1192 .unwrap() 1193 .write_table(o - MSIX_TABLE_BAR_OFFSET, data); 1194 } 1195 } 1196 o if (MSIX_PBA_BAR_OFFSET..MSIX_PBA_BAR_OFFSET + MSIX_PBA_SIZE).contains(&o) => { 1197 if let Some(msix_config) = &self.msix_config { 1198 msix_config 1199 .lock() 1200 .unwrap() 1201 .write_pba(o - MSIX_PBA_BAR_OFFSET, data); 1202 } 1203 } 1204 _ => (), 1205 }; 1206 1207 // Try and activate the device if the driver status has changed 1208 if self.needs_activation() { 1209 let barrier = Arc::new(Barrier::new(2)); 1210 let activator = self.prepare_activator(Some(barrier.clone())); 1211 self.pending_activations.lock().unwrap().push(activator); 1212 info!( 1213 "{}: Needs activation; writing to activate event fd", 1214 self.id 1215 ); 1216 self.activate_evt.write(1).ok(); 1217 info!("{}: Needs activation; returning barrier", self.id); 1218 return Some(barrier); 1219 } 1220 1221 // Device has been reset by the driver 1222 if self.device_activated.load(Ordering::SeqCst) && self.is_driver_init() { 1223 let mut device = self.device.lock().unwrap(); 1224 if let Some(virtio_interrupt) = device.reset() { 1225 // Upon reset the device returns its interrupt EventFD 1226 self.virtio_interrupt = Some(virtio_interrupt); 1227 self.device_activated.store(false, Ordering::SeqCst); 1228 1229 // Reset queue readiness (changes queue_enable), queue sizes 1230 // and selected_queue as per spec for reset 1231 self.queues.iter_mut().for_each(Queue::reset); 1232 self.common_config.queue_select = 0; 1233 } else { 1234 error!("Attempt to reset device when not implemented in underlying device"); 1235 self.common_config.driver_status = crate::DEVICE_FAILED as u8; 1236 } 1237 } 1238 1239 None 1240 } 1241 1242 fn 
as_any_mut(&mut self) -> &mut dyn Any { 1243 self 1244 } 1245 1246 fn id(&self) -> Option<String> { 1247 Some(self.id.clone()) 1248 } 1249 } 1250 1251 impl BusDevice for VirtioPciDevice { 1252 fn read(&mut self, base: u64, offset: u64, data: &mut [u8]) { 1253 self.read_bar(base, offset, data) 1254 } 1255 1256 fn write(&mut self, base: u64, offset: u64, data: &[u8]) -> Option<Arc<Barrier>> { 1257 self.write_bar(base, offset, data) 1258 } 1259 } 1260 1261 impl Pausable for VirtioPciDevice { 1262 fn pause(&mut self) -> std::result::Result<(), MigratableError> { 1263 Ok(()) 1264 } 1265 1266 fn resume(&mut self) -> std::result::Result<(), MigratableError> { 1267 Ok(()) 1268 } 1269 } 1270 1271 impl Snapshottable for VirtioPciDevice { 1272 fn id(&self) -> String { 1273 self.id.clone() 1274 } 1275 1276 fn snapshot(&mut self) -> std::result::Result<Snapshot, MigratableError> { 1277 let mut virtio_pci_dev_snapshot = Snapshot::new_from_state(&self.state())?; 1278 1279 // Snapshot PciConfiguration 1280 virtio_pci_dev_snapshot 1281 .add_snapshot(self.configuration.id(), self.configuration.snapshot()?); 1282 1283 // Snapshot VirtioPciCommonConfig 1284 virtio_pci_dev_snapshot 1285 .add_snapshot(self.common_config.id(), self.common_config.snapshot()?); 1286 1287 // Snapshot MSI-X 1288 if let Some(msix_config) = &self.msix_config { 1289 let mut msix_config = msix_config.lock().unwrap(); 1290 virtio_pci_dev_snapshot.add_snapshot(msix_config.id(), msix_config.snapshot()?); 1291 } 1292 1293 Ok(virtio_pci_dev_snapshot) 1294 } 1295 } 1296 impl Transportable for VirtioPciDevice {} 1297 impl Migratable for VirtioPciDevice {} 1298