xref: /cloud-hypervisor/virtio-devices/src/transport/pci_device.rs (revision 3ce0fef7fd546467398c914dbc74d8542e45cf6f)
1 // Copyright 2018 The Chromium OS Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE-BSD-3-Clause file.
4 //
5 // Copyright © 2019 Intel Corporation
6 //
7 // SPDX-License-Identifier: Apache-2.0 AND BSD-3-Clause
8 
9 use crate::transport::{VirtioPciCommonConfig, VirtioTransport, VIRTIO_PCI_COMMON_CONFIG_ID};
10 use crate::GuestMemoryMmap;
11 use crate::{
12     ActivateResult, VirtioDevice, VirtioDeviceType, VirtioInterrupt, VirtioInterruptType,
13     DEVICE_ACKNOWLEDGE, DEVICE_DRIVER, DEVICE_DRIVER_OK, DEVICE_FAILED, DEVICE_FEATURES_OK,
14     DEVICE_INIT,
15 };
16 use anyhow::anyhow;
17 use libc::EFD_NONBLOCK;
18 use pci::{
19     BarReprogrammingParams, MsixCap, MsixConfig, PciBarConfiguration, PciBarRegionType,
20     PciCapability, PciCapabilityId, PciClassCode, PciConfiguration, PciDevice, PciDeviceError,
21     PciHeaderType, PciMassStorageSubclass, PciNetworkControllerSubclass, PciSubclass,
22 };
23 use std::any::Any;
24 use std::cmp;
25 use std::io::Write;
26 use std::ops::Deref;
27 use std::sync::atomic::{AtomicBool, AtomicU16, AtomicUsize, Ordering};
28 use std::sync::{Arc, Barrier, Mutex};
29 use thiserror::Error;
30 use versionize::{VersionMap, Versionize, VersionizeResult};
31 use versionize_derive::Versionize;
32 use virtio_queue::{Queue, QueueT};
33 use vm_allocator::{AddressAllocator, SystemAllocator};
34 use vm_device::dma_mapping::ExternalDmaMapping;
35 use vm_device::interrupt::{
36     InterruptIndex, InterruptManager, InterruptSourceGroup, MsiIrqGroupConfig,
37 };
38 use vm_device::{BusDevice, PciBarType, Resource};
39 use vm_memory::{Address, ByteValued, GuestAddress, GuestAddressSpace, GuestMemoryAtomic, Le32};
40 use vm_migration::{
41     Migratable, MigratableError, Pausable, Snapshot, Snapshottable, Transportable, VersionMapped,
42 };
43 use vm_virtio::AccessPlatform;
44 use vmm_sys_util::eventfd::EventFd;
45 
46 use super::pci_common_config::VirtioPciCommonConfigState;
47 
48 /// Vector value used to disable MSI for a queue.
49 const VIRTQ_MSI_NO_VECTOR: u16 = 0xffff;
50 
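// Type of configuration structure referenced by a vendor-specific virtio PCI
// capability. The discriminants match the cfg_type values defined by the virtio
// specification (common = 1, notify = 2, ISR = 3, device = 4, PCI = 5,
// shared memory = 8).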
51 enum PciCapabilityType {
52     Common = 1,
53     Notify = 2,
54     Isr = 3,
55     Device = 4,
56     Pci = 5,
57     SharedMemory = 8,
58 }
59 
60 // This offset represents the 2 bytes omitted from the VirtioPciCap structure
61 // as they are already handled through add_capability(). These 2 bytes are the
62 // fields cap_vndr (1 byte) and cap_next (1 byte) defined in the virtio spec.
63 const VIRTIO_PCI_CAP_OFFSET: usize = 2;
64 
65 #[allow(dead_code)]
66 #[repr(packed)]
67 #[derive(Clone, Copy, Default)]
68 struct VirtioPciCap {
69     cap_len: u8,      // Generic PCI field: capability length
70     cfg_type: u8,     // Identifies the structure.
71     pci_bar: u8,      // Where to find it.
72     id: u8,           // Multiple capabilities of the same type
73     padding: [u8; 2], // Pad to full dword.
74     offset: Le32,     // Offset within bar.
75     length: Le32,     // Length of the structure, in bytes.
76 }
77 // SAFETY: All members are simple numbers and any value is valid.
78 unsafe impl ByteValued for VirtioPciCap {}
79 
80 impl PciCapability for VirtioPciCap {
81     fn bytes(&self) -> &[u8] {
82         self.as_slice()
83     }
84 
85     fn id(&self) -> PciCapabilityId {
86         PciCapabilityId::VendorSpecific
87     }
88 }
89 
90 const VIRTIO_PCI_CAP_LEN_OFFSET: u8 = 2;
91 
92 impl VirtioPciCap {
93     pub fn new(cfg_type: PciCapabilityType, pci_bar: u8, offset: u32, length: u32) -> Self {
94         VirtioPciCap {
95             cap_len: (std::mem::size_of::<VirtioPciCap>() as u8) + VIRTIO_PCI_CAP_LEN_OFFSET,
96             cfg_type: cfg_type as u8,
97             pci_bar,
98             id: 0,
99             padding: [0; 2],
100             offset: Le32::from(offset),
101             length: Le32::from(length),
102         }
103     }
104 }
105 
106 #[allow(dead_code)]
107 #[repr(packed)]
108 #[derive(Clone, Copy, Default)]
109 struct VirtioPciNotifyCap {
110     cap: VirtioPciCap,
111     notify_off_multiplier: Le32,
112 }
113 // SAFETY: All members are simple numbers and any value is valid.
114 unsafe impl ByteValued for VirtioPciNotifyCap {}
115 
116 impl PciCapability for VirtioPciNotifyCap {
117     fn bytes(&self) -> &[u8] {
118         self.as_slice()
119     }
120 
121     fn id(&self) -> PciCapabilityId {
122         PciCapabilityId::VendorSpecific
123     }
124 }
125 
126 impl VirtioPciNotifyCap {
127     pub fn new(
128         cfg_type: PciCapabilityType,
129         pci_bar: u8,
130         offset: u32,
131         length: u32,
132         multiplier: Le32,
133     ) -> Self {
134         VirtioPciNotifyCap {
135             cap: VirtioPciCap {
136                 cap_len: (std::mem::size_of::<VirtioPciNotifyCap>() as u8)
137                     + VIRTIO_PCI_CAP_LEN_OFFSET,
138                 cfg_type: cfg_type as u8,
139                 pci_bar,
140                 id: 0,
141                 padding: [0; 2],
142                 offset: Le32::from(offset),
143                 length: Le32::from(length),
144             },
145             notify_off_multiplier: multiplier,
146         }
147     }
148 }
149 
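// Variant of VirtioPciCap carrying 64-bit offset and length, used for structures
// such as shared memory regions that may not fit within a 32-bit range.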
150 #[allow(dead_code)]
151 #[repr(packed)]
152 #[derive(Clone, Copy, Default)]
153 struct VirtioPciCap64 {
154     cap: VirtioPciCap,
155     offset_hi: Le32,
156     length_hi: Le32,
157 }
158 // SAFETY: All members are simple numbers and any value is valid.
159 unsafe impl ByteValued for VirtioPciCap64 {}
160 
161 impl PciCapability for VirtioPciCap64 {
162     fn bytes(&self) -> &[u8] {
163         self.as_slice()
164     }
165 
166     fn id(&self) -> PciCapabilityId {
167         PciCapabilityId::VendorSpecific
168     }
169 }
170 
171 impl VirtioPciCap64 {
172     pub fn new(cfg_type: PciCapabilityType, pci_bar: u8, id: u8, offset: u64, length: u64) -> Self {
173         VirtioPciCap64 {
174             cap: VirtioPciCap {
175                 cap_len: (std::mem::size_of::<VirtioPciCap64>() as u8) + VIRTIO_PCI_CAP_LEN_OFFSET,
176                 cfg_type: cfg_type as u8,
177                 pci_bar,
178                 id,
179                 padding: [0; 2],
180                 offset: Le32::from(offset as u32),
181                 length: Le32::from(length as u32),
182             },
183             offset_hi: Le32::from((offset >> 32) as u32),
184             length_hi: Le32::from((length >> 32) as u32),
185         }
186     }
187 }
188 
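// The VIRTIO_PCI_CAP_PCI_CFG capability. Its pci_cfg_data window lets the guest
// reach the other capability structures through the PCI configuration space,
// without having to map the BARs first.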
189 #[allow(dead_code)]
190 #[repr(packed)]
191 #[derive(Clone, Copy, Default)]
192 struct VirtioPciCfgCap {
193     cap: VirtioPciCap,
194     pci_cfg_data: [u8; 4],
195 }
196 // SAFETY: All members are simple numbers and any value is valid.
197 unsafe impl ByteValued for VirtioPciCfgCap {}
198 
199 impl PciCapability for VirtioPciCfgCap {
200     fn bytes(&self) -> &[u8] {
201         self.as_slice()
202     }
203 
204     fn id(&self) -> PciCapabilityId {
205         PciCapabilityId::VendorSpecific
206     }
207 }
208 
209 impl VirtioPciCfgCap {
210     fn new() -> Self {
211         VirtioPciCfgCap {
212             cap: VirtioPciCap::new(PciCapabilityType::Pci, 0, 0, 0),
213             ..Default::default()
214         }
215     }
216 }
217 
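// Bookkeeping for the VIRTIO_PCI_CAP_PCI_CFG capability: where it sits in the PCI
// configuration space and its current contents.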
218 #[derive(Clone, Copy, Default)]
219 struct VirtioPciCfgCapInfo {
220     offset: usize,
221     cap: VirtioPciCfgCap,
222 }
223 
224 #[allow(dead_code)]
225 #[derive(Copy, Clone)]
226 pub enum PciVirtioSubclass {
227     NonTransitionalBase = 0xff,
228 }
229 
230 impl PciSubclass for PciVirtioSubclass {
231     fn get_register_value(&self) -> u8 {
232         *self as u8
233     }
234 }
235 
236 // Allocate one BAR for the structures pointed to by the capability structures.
237 // Because the same BAR is shared between MSI-X and non MSI-X structures, the
238 // PCI specification recommends isolating them with 8KiB-aligned ranges, hence
239 // the offsets used below.
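//
// Layout of the capability BAR, derived from the constants below:
//   0x0000  - 0x0038   common configuration
//   0x2000  - 0x2001   ISR status
//   0x4000  - 0x5000   device-specific configuration
//   0x6000  - 0x7000   notification area
//   0x8000  - 0x48000  MSI-X table
//   0x48000 - 0x48800  MSI-X PBA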
240 const COMMON_CONFIG_BAR_OFFSET: u64 = 0x0000;
241 const COMMON_CONFIG_SIZE: u64 = 56;
242 const ISR_CONFIG_BAR_OFFSET: u64 = 0x2000;
243 const ISR_CONFIG_SIZE: u64 = 1;
244 const DEVICE_CONFIG_BAR_OFFSET: u64 = 0x4000;
245 const DEVICE_CONFIG_SIZE: u64 = 0x1000;
246 const NOTIFICATION_BAR_OFFSET: u64 = 0x6000;
247 const NOTIFICATION_SIZE: u64 = 0x1000;
248 const MSIX_TABLE_BAR_OFFSET: u64 = 0x8000;
249 // The table can hold up to 2048 entries, with each entry being 128 bits
250 // (4 DWORDS), i.e. at most 32KiB; the region is generously sized at 256KiB.
251 const MSIX_TABLE_SIZE: u64 = 0x40000;
252 const MSIX_PBA_BAR_OFFSET: u64 = 0x48000;
253 // The Pending Bit Array has one bit per vector and supports up to 2048 vectors,
254 // i.e. at most 256 bytes; the region is generously sized at 2KiB.
255 const MSIX_PBA_SIZE: u64 = 0x800;
256 // The BAR size must be a power of 2.
257 const CAPABILITY_BAR_SIZE: u64 = 0x80000;
258 const VIRTIO_COMMON_BAR_INDEX: usize = 0;
259 const VIRTIO_SHM_BAR_INDEX: usize = 2;
260 
261 const NOTIFY_OFF_MULTIPLIER: u32 = 4; // A dword per notification address.
262 
263 const VIRTIO_PCI_VENDOR_ID: u16 = 0x1af4;
264 const VIRTIO_PCI_DEVICE_ID_BASE: u16 = 0x1040; // Add to device type to get device ID.
265 
266 #[derive(Versionize)]
267 struct QueueState {
268     max_size: u16,
269     size: u16,
270     ready: bool,
271     desc_table: u64,
272     avail_ring: u64,
273     used_ring: u64,
274 }
275 
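// Versioned transport state saved into snapshots: whether the device was
// activated, the per-queue configuration and the latched interrupt status.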
276 #[derive(Versionize)]
277 pub struct VirtioPciDeviceState {
278     device_activated: bool,
279     queues: Vec<QueueState>,
280     interrupt_status: usize,
281 }
282 
283 impl VersionMapped for VirtioPciDeviceState {}
284 
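// Captures everything required to activate the underlying VirtioDevice at a later
// point: the guest memory, the virtio interrupt, and a clone of every ready queue
// with its eventfd. write_bar() builds one of these and queues it in
// pending_activations so the activation can be performed outside of the BAR write
// path; the optional barrier is used to release the writer once activate() is done.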
285 pub struct VirtioPciDeviceActivator {
286     interrupt: Option<Arc<dyn VirtioInterrupt>>,
287     memory: Option<GuestMemoryAtomic<GuestMemoryMmap>>,
288     device: Arc<Mutex<dyn VirtioDevice>>,
289     device_activated: Arc<AtomicBool>,
290     queues: Option<Vec<(usize, Queue, EventFd)>>,
291     barrier: Option<Arc<Barrier>>,
292     id: String,
293 }
294 
295 impl VirtioPciDeviceActivator {
296     pub fn activate(&mut self) -> ActivateResult {
297         self.device.lock().unwrap().activate(
298             self.memory.take().unwrap(),
299             self.interrupt.take().unwrap(),
300             self.queues.take().unwrap(),
301         )?;
302         self.device_activated.store(true, Ordering::SeqCst);
303 
304         if let Some(barrier) = self.barrier.take() {
305             info!("{}: Waiting for barrier", self.id);
306             barrier.wait();
307             info!("{}: Barrier released", self.id);
308         }
309 
310         Ok(())
311     }
312 }
313 
314 #[derive(Error, Debug)]
315 pub enum VirtioPciDeviceError {
316     #[error("Failed creating VirtioPciDevice: {0}")]
317     CreateVirtioPciDevice(#[source] anyhow::Error),
318 }
319 pub type Result<T> = std::result::Result<T, VirtioPciDeviceError>;
320 
321 pub struct VirtioPciDevice {
322     id: String,
323 
324     // PCI configuration registers.
325     configuration: PciConfiguration,
326 
327     // virtio PCI common configuration
328     common_config: VirtioPciCommonConfig,
329 
330     // MSI-X config
331     msix_config: Option<Arc<Mutex<MsixConfig>>>,
332 
333     // Number of MSI-X vectors
334     msix_num: u16,
335 
336     // Virtio device reference and status
337     device: Arc<Mutex<dyn VirtioDevice>>,
338     device_activated: Arc<AtomicBool>,
339 
340     // PCI interrupts.
341     interrupt_status: Arc<AtomicUsize>,
342     virtio_interrupt: Option<Arc<dyn VirtioInterrupt>>,
343     interrupt_source_group: Arc<dyn InterruptSourceGroup>,
344 
345     // virtio queues
346     queues: Vec<Queue>,
347     queue_evts: Vec<EventFd>,
348 
349     // Guest memory
350     memory: GuestMemoryAtomic<GuestMemoryMmap>,
351 
352     // Settings PCI BAR
353     settings_bar: u8,
354 
355     // Whether to use 64-bit bar location or 32-bit
356     use_64bit_bar: bool,
357 
358     // Dedicated structure holding information about the very specific virtio-pci
359     // capability VIRTIO_PCI_CAP_PCI_CFG. This is needed to support the
360     // legacy/backward compatible mechanism of letting the guest access the other
361     // virtio capabilities without mapping the PCI BARs, which can be necessary
362     // when the guest accesses the virtio configuration of a device early, before
363     // the BARs have been mapped.
364     cap_pci_cfg_info: VirtioPciCfgCapInfo,
365 
366     // Details of bar regions to free
367     bar_regions: Vec<PciBarConfiguration>,
368 
369     // EventFd to signal on to request activation
370     activate_evt: EventFd,
371 
372     // Optional DMA handler
373     dma_handler: Option<Arc<dyn ExternalDmaMapping>>,
374 
375     // Pending activations
376     pending_activations: Arc<Mutex<Vec<VirtioPciDeviceActivator>>>,
377 }
378 
379 impl VirtioPciDevice {
380     /// Constructs a new PCI transport for the given virtio device.
381     #[allow(clippy::too_many_arguments)]
382     pub fn new(
383         id: String,
384         memory: GuestMemoryAtomic<GuestMemoryMmap>,
385         device: Arc<Mutex<dyn VirtioDevice>>,
386         msix_num: u16,
387         access_platform: Option<Arc<dyn AccessPlatform>>,
388         interrupt_manager: &Arc<dyn InterruptManager<GroupConfig = MsiIrqGroupConfig>>,
389         pci_device_bdf: u32,
390         activate_evt: EventFd,
391         use_64bit_bar: bool,
392         dma_handler: Option<Arc<dyn ExternalDmaMapping>>,
393         pending_activations: Arc<Mutex<Vec<VirtioPciDeviceActivator>>>,
394         snapshot: Option<Snapshot>,
395     ) -> Result<Self> {
396         let mut locked_device = device.lock().unwrap();
397         let mut queue_evts = Vec::new();
398         for _ in locked_device.queue_max_sizes().iter() {
399             queue_evts.push(EventFd::new(EFD_NONBLOCK).map_err(|e| {
400                 VirtioPciDeviceError::CreateVirtioPciDevice(anyhow!(
401                     "Failed creating eventfd: {}",
402                     e
403                 ))
404             })?)
405         }
406         let num_queues = locked_device.queue_max_sizes().len();
407 
408         if let Some(access_platform) = &access_platform {
409             locked_device.set_access_platform(access_platform.clone());
410         }
411 
412         let mut queues: Vec<Queue> = locked_device
413             .queue_max_sizes()
414             .iter()
415             .map(|&s| Queue::new(s).unwrap())
416             .collect();
417 
418         let pci_device_id = VIRTIO_PCI_DEVICE_ID_BASE + locked_device.device_type() as u16;
419 
420         let interrupt_source_group = interrupt_manager
421             .create_group(MsiIrqGroupConfig {
422                 base: 0,
423                 count: msix_num as InterruptIndex,
424             })
425             .map_err(|e| {
426                 VirtioPciDeviceError::CreateVirtioPciDevice(anyhow!(
427                     "Failed creating MSI interrupt group: {}",
428                     e
429                 ))
430             })?;
431 
432         let msix_state =
433             vm_migration::versioned_state_from_id(snapshot.as_ref(), pci::MSIX_CONFIG_ID).map_err(
434                 |e| {
435                     VirtioPciDeviceError::CreateVirtioPciDevice(anyhow!(
436                         "Failed to get MsixConfigState from Snapshot: {}",
437                         e
438                     ))
439                 },
440             )?;
441 
442         let (msix_config, msix_config_clone) = if msix_num > 0 {
443             let msix_config = Arc::new(Mutex::new(
444                 MsixConfig::new(
445                     msix_num,
446                     interrupt_source_group.clone(),
447                     pci_device_bdf,
448                     msix_state,
449                 )
450                 .unwrap(),
451             ));
452             let msix_config_clone = msix_config.clone();
453             (Some(msix_config), Some(msix_config_clone))
454         } else {
455             (None, None)
456         };
457 
458         let (class, subclass) = match VirtioDeviceType::from(locked_device.device_type()) {
459             VirtioDeviceType::Net => (
460                 PciClassCode::NetworkController,
461                 &PciNetworkControllerSubclass::EthernetController as &dyn PciSubclass,
462             ),
463             VirtioDeviceType::Block => (
464                 PciClassCode::MassStorage,
465                 &PciMassStorageSubclass::MassStorage as &dyn PciSubclass,
466             ),
467             _ => (
468                 PciClassCode::Other,
469                 &PciVirtioSubclass::NonTransitionalBase as &dyn PciSubclass,
470             ),
471         };
472 
473         let pci_configuration_state =
474             vm_migration::versioned_state_from_id(snapshot.as_ref(), pci::PCI_CONFIGURATION_ID)
475                 .map_err(|e| {
476                     VirtioPciDeviceError::CreateVirtioPciDevice(anyhow!(
477                         "Failed to get PciConfigurationState from Snapshot: {}",
478                         e
479                     ))
480                 })?;
481 
482         let configuration = PciConfiguration::new(
483             VIRTIO_PCI_VENDOR_ID,
484             pci_device_id,
485             0x1, // For modern virtio-PCI devices
486             class,
487             subclass,
488             None,
489             PciHeaderType::Device,
490             VIRTIO_PCI_VENDOR_ID,
491             pci_device_id,
492             msix_config_clone,
493             pci_configuration_state,
494         );
495 
496         let common_config_state =
497             vm_migration::versioned_state_from_id(snapshot.as_ref(), VIRTIO_PCI_COMMON_CONFIG_ID)
498                 .map_err(|e| {
499                 VirtioPciDeviceError::CreateVirtioPciDevice(anyhow!(
500                     "Failed to get VirtioPciCommonConfigState from Snapshot: {}",
501                     e
502                 ))
503             })?;
504 
505         let common_config = if let Some(common_config_state) = common_config_state {
506             VirtioPciCommonConfig::new(common_config_state, access_platform)
507         } else {
508             VirtioPciCommonConfig::new(
509                 VirtioPciCommonConfigState {
510                     driver_status: 0,
511                     config_generation: 0,
512                     device_feature_select: 0,
513                     driver_feature_select: 0,
514                     queue_select: 0,
515                     msix_config: VIRTQ_MSI_NO_VECTOR,
516                     msix_queues: vec![VIRTQ_MSI_NO_VECTOR; num_queues],
517                 },
518                 access_platform,
519             )
520         };
521 
522         let state: Option<VirtioPciDeviceState> = snapshot
523             .as_ref()
524             .map(|s| s.to_versioned_state())
525             .transpose()
526             .map_err(|e| {
527                 VirtioPciDeviceError::CreateVirtioPciDevice(anyhow!(
528                     "Failed to get VirtioPciDeviceState from Snapshot: {}",
529                     e
530                 ))
531             })?;
532 
533         let (device_activated, interrupt_status) = if let Some(state) = state {
534             // Update virtqueues indexes for both available and used rings.
535             for (i, queue) in queues.iter_mut().enumerate() {
536                 queue.set_size(state.queues[i].size);
537                 queue.set_ready(state.queues[i].ready);
538                 queue
539                     .try_set_desc_table_address(GuestAddress(state.queues[i].desc_table))
540                     .unwrap();
541                 queue
542                     .try_set_avail_ring_address(GuestAddress(state.queues[i].avail_ring))
543                     .unwrap();
544                 queue
545                     .try_set_used_ring_address(GuestAddress(state.queues[i].used_ring))
546                     .unwrap();
547                 queue.set_next_avail(
548                     queue
549                         .used_idx(memory.memory().deref(), Ordering::Acquire)
550                         .unwrap()
551                         .0,
552                 );
553                 queue.set_next_used(
554                     queue
555                         .used_idx(memory.memory().deref(), Ordering::Acquire)
556                         .unwrap()
557                         .0,
558                 );
559             }
560 
561             (state.device_activated, state.interrupt_status)
562         } else {
563             (false, 0)
564         };
565 
566         // Drop the MutexGuard to unlock the VirtioDevice. This is required in the
567         // context of a restore, given the device might need to be activated right
568         // away, which means taking the lock again. Dropping the lock here prevents
569         // a subtle deadlock.
570         std::mem::drop(locked_device);
571 
572         let mut virtio_pci_device = VirtioPciDevice {
573             id,
574             configuration,
575             common_config,
576             msix_config,
577             msix_num,
578             device,
579             device_activated: Arc::new(AtomicBool::new(device_activated)),
580             interrupt_status: Arc::new(AtomicUsize::new(interrupt_status)),
581             virtio_interrupt: None,
582             queues,
583             queue_evts,
584             memory,
585             settings_bar: 0,
586             use_64bit_bar,
587             interrupt_source_group,
588             cap_pci_cfg_info: VirtioPciCfgCapInfo::default(),
589             bar_regions: vec![],
590             activate_evt,
591             dma_handler,
592             pending_activations,
593         };
594 
595         if let Some(msix_config) = &virtio_pci_device.msix_config {
596             virtio_pci_device.virtio_interrupt = Some(Arc::new(VirtioInterruptMsix::new(
597                 msix_config.clone(),
598                 virtio_pci_device.common_config.msix_config.clone(),
599                 virtio_pci_device.common_config.msix_queues.clone(),
600                 virtio_pci_device.interrupt_source_group.clone(),
601             )));
602         }
603 
604         // In case of a restore, we can activate the device, as we know at
605         // this point the virtqueues are in the right state and the device is
606         // ready to be activated, which will spawn each virtio worker thread.
607         if virtio_pci_device.device_activated.load(Ordering::SeqCst)
608             && virtio_pci_device.is_driver_ready()
609         {
610             virtio_pci_device.activate().map_err(|e| {
611                 VirtioPciDeviceError::CreateVirtioPciDevice(anyhow!(
612                     "Failed activating the device: {}",
613                     e
614                 ))
615             })?;
616         }
617 
618         Ok(virtio_pci_device)
619     }
620 
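    // Builds the versioned transport state (activation flag, interrupt status and
    // per-queue configuration) used by the Snapshottable implementation below.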
621     fn state(&self) -> VirtioPciDeviceState {
622         VirtioPciDeviceState {
623             device_activated: self.device_activated.load(Ordering::Acquire),
624             interrupt_status: self.interrupt_status.load(Ordering::Acquire),
625             queues: self
626                 .queues
627                 .iter()
628                 .map(|q| QueueState {
629                     max_size: q.max_size(),
630                     size: q.size(),
631                     ready: q.ready(),
632                     desc_table: q.desc_table(),
633                     avail_ring: q.avail_ring(),
634                     used_ring: q.used_ring(),
635                 })
636                 .collect(),
637         }
638     }
639 
640     /// Gets the list of queue events that must be triggered whenever the guest writes to
641     /// the notification region of the capability BAR. The event at index `i` corresponds
642     /// to queue `i`, at offset `NOTIFICATION_BAR_OFFSET + i * NOTIFY_OFF_MULTIPLIER`.
643     fn queue_evts(&self) -> &[EventFd] {
644         self.queue_evts.as_slice()
645     }
646 
647     fn is_driver_ready(&self) -> bool {
648         let ready_bits =
649             (DEVICE_ACKNOWLEDGE | DEVICE_DRIVER | DEVICE_DRIVER_OK | DEVICE_FEATURES_OK) as u8;
650         self.common_config.driver_status == ready_bits
651             && self.common_config.driver_status & DEVICE_FAILED as u8 == 0
652     }
653 
654     /// Determines whether the driver has reset the device (status register set back to DEVICE_INIT)
655     fn is_driver_init(&self) -> bool {
656         self.common_config.driver_status == DEVICE_INIT as u8
657     }
658 
659     pub fn config_bar_addr(&self) -> u64 {
660         self.configuration.get_bar_addr(self.settings_bar as usize)
661     }
662 
663     fn add_pci_capabilities(
664         &mut self,
665         settings_bar: u8,
666     ) -> std::result::Result<(), PciDeviceError> {
667         // Add pointers to the different configuration structures from the PCI capabilities.
668         let common_cap = VirtioPciCap::new(
669             PciCapabilityType::Common,
670             settings_bar,
671             COMMON_CONFIG_BAR_OFFSET as u32,
672             COMMON_CONFIG_SIZE as u32,
673         );
674         self.configuration
675             .add_capability(&common_cap)
676             .map_err(PciDeviceError::CapabilitiesSetup)?;
677 
678         let isr_cap = VirtioPciCap::new(
679             PciCapabilityType::Isr,
680             settings_bar,
681             ISR_CONFIG_BAR_OFFSET as u32,
682             ISR_CONFIG_SIZE as u32,
683         );
684         self.configuration
685             .add_capability(&isr_cap)
686             .map_err(PciDeviceError::CapabilitiesSetup)?;
687 
688         // TODO(dgreid) - set based on device's configuration size?
689         let device_cap = VirtioPciCap::new(
690             PciCapabilityType::Device,
691             settings_bar,
692             DEVICE_CONFIG_BAR_OFFSET as u32,
693             DEVICE_CONFIG_SIZE as u32,
694         );
695         self.configuration
696             .add_capability(&device_cap)
697             .map_err(PciDeviceError::CapabilitiesSetup)?;
698 
699         let notify_cap = VirtioPciNotifyCap::new(
700             PciCapabilityType::Notify,
701             settings_bar,
702             NOTIFICATION_BAR_OFFSET as u32,
703             NOTIFICATION_SIZE as u32,
704             Le32::from(NOTIFY_OFF_MULTIPLIER),
705         );
706         self.configuration
707             .add_capability(&notify_cap)
708             .map_err(PciDeviceError::CapabilitiesSetup)?;
709 
710         let configuration_cap = VirtioPciCfgCap::new();
711         self.cap_pci_cfg_info.offset = self
712             .configuration
713             .add_capability(&configuration_cap)
714             .map_err(PciDeviceError::CapabilitiesSetup)?
715             + VIRTIO_PCI_CAP_OFFSET;
716         self.cap_pci_cfg_info.cap = configuration_cap;
717 
718         if self.msix_config.is_some() {
719             let msix_cap = MsixCap::new(
720                 settings_bar,
721                 self.msix_num,
722                 MSIX_TABLE_BAR_OFFSET as u32,
723                 settings_bar,
724                 MSIX_PBA_BAR_OFFSET as u32,
725             );
726             self.configuration
727                 .add_capability(&msix_cap)
728                 .map_err(PciDeviceError::CapabilitiesSetup)?;
729         }
730 
731         self.settings_bar = settings_bar;
732         Ok(())
733     }
734 
735     fn read_cap_pci_cfg(&mut self, offset: usize, mut data: &mut [u8]) {
736         let cap_slice = self.cap_pci_cfg_info.cap.as_slice();
737         let data_len = data.len();
738         let cap_len = cap_slice.len();
739         if offset + data_len > cap_len {
740             error!("Failed to read cap_pci_cfg from config space");
741             return;
742         }
743 
744         if offset < std::mem::size_of::<VirtioPciCap>() {
745             if let Some(end) = offset.checked_add(data_len) {
746                 // This write can't fail, offset and end are checked against cap_len.
747                 data.write_all(&cap_slice[offset..cmp::min(end, cap_len)])
748                     .unwrap();
749             }
750         } else {
751             let bar_offset: u32 =
752                 // SAFETY: we know self.cap_pci_cfg_info.cap.cap.offset is 32bits long.
753                 unsafe { std::mem::transmute(self.cap_pci_cfg_info.cap.cap.offset) };
754             self.read_bar(0, bar_offset as u64, data)
755         }
756     }
757 
758     fn write_cap_pci_cfg(&mut self, offset: usize, data: &[u8]) -> Option<Arc<Barrier>> {
759         let cap_slice = self.cap_pci_cfg_info.cap.as_mut_slice();
760         let data_len = data.len();
761         let cap_len = cap_slice.len();
762         if offset + data_len > cap_len {
763             error!("Failed to write cap_pci_cfg to config space");
764             return None;
765         }
766 
767         if offset < std::mem::size_of::<VirtioPciCap>() {
768             let (_, right) = cap_slice.split_at_mut(offset);
769             right[..data_len].copy_from_slice(data);
770             None
771         } else {
772             let bar_offset: u32 =
773                 // SAFETY: we know self.cap_pci_cfg_info.cap.cap.offset is 32bits long.
774                 unsafe { std::mem::transmute(self.cap_pci_cfg_info.cap.cap.offset) };
775             self.write_bar(0, bar_offset as u64, data)
776         }
777     }
778 
779     pub fn virtio_device(&self) -> Arc<Mutex<dyn VirtioDevice>> {
780         self.device.clone()
781     }
782 
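    // Clones every queue the driver marked ready (together with its eventfd) and
    // hands over the virtio interrupt, producing an activator that can call
    // VirtioDevice::activate() later, possibly from another thread.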
783     fn prepare_activator(&mut self, barrier: Option<Arc<Barrier>>) -> VirtioPciDeviceActivator {
784         let mut queues = Vec::new();
785 
786         for (queue_index, queue) in self.queues.iter().enumerate() {
787             if !queue.ready() {
788                 continue;
789             }
790 
791             if !queue.is_valid(self.memory.memory().deref()) {
792                 error!("Queue {} is not valid", queue_index);
793             }
794 
795             queues.push((
796                 queue_index,
797                 vm_virtio::clone_queue(queue),
798                 self.queue_evts[queue_index].try_clone().unwrap(),
799             ));
800         }
801 
802         VirtioPciDeviceActivator {
803             interrupt: self.virtio_interrupt.take(),
804             memory: Some(self.memory.clone()),
805             device: self.device.clone(),
806             queues: Some(queues),
807             device_activated: self.device_activated.clone(),
808             barrier,
809             id: self.id.clone(),
810         }
811     }
812 
813     fn activate(&mut self) -> ActivateResult {
814         self.prepare_activator(None).activate()
815     }
816 
817     fn needs_activation(&self) -> bool {
818         !self.device_activated.load(Ordering::SeqCst) && self.is_driver_ready()
819     }
820 
821     pub fn dma_handler(&self) -> Option<&Arc<dyn ExternalDmaMapping>> {
822         self.dma_handler.as_ref()
823     }
824 }
825 
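// Maps each queue event to the guest address the driver writes to when kicking the
// queue: NOTIFICATION_BAR_OFFSET plus one dword (NOTIFY_OFF_MULTIPLIER) per queue
// index. These pairs are meant to be registered as ioeventfds, which is why the
// notification range is a no-op in read_bar()/write_bar().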
826 impl VirtioTransport for VirtioPciDevice {
827     fn ioeventfds(&self, base_addr: u64) -> Vec<(&EventFd, u64)> {
828         let notify_base = base_addr + NOTIFICATION_BAR_OFFSET;
829         self.queue_evts()
830             .iter()
831             .enumerate()
832             .map(|(i, event)| {
833                 (
834                     event,
835                     notify_base + i as u64 * u64::from(NOTIFY_OFF_MULTIPLIER),
836                 )
837             })
838             .collect()
839     }
840 }
841 
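// VirtioInterrupt implementation backed by MSI-X: a config change or queue event is
// translated into the MSI-X vector selected by the guest for that source, honouring
// the function and per-vector mask bits.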
842 pub struct VirtioInterruptMsix {
843     msix_config: Arc<Mutex<MsixConfig>>,
844     config_vector: Arc<AtomicU16>,
845     queues_vectors: Arc<Mutex<Vec<u16>>>,
846     interrupt_source_group: Arc<dyn InterruptSourceGroup>,
847 }
848 
849 impl VirtioInterruptMsix {
850     pub fn new(
851         msix_config: Arc<Mutex<MsixConfig>>,
852         config_vector: Arc<AtomicU16>,
853         queues_vectors: Arc<Mutex<Vec<u16>>>,
854         interrupt_source_group: Arc<dyn InterruptSourceGroup>,
855     ) -> Self {
856         VirtioInterruptMsix {
857             msix_config,
858             config_vector,
859             queues_vectors,
860             interrupt_source_group,
861         }
862     }
863 }
864 
865 impl VirtioInterrupt for VirtioInterruptMsix {
866     fn trigger(&self, int_type: VirtioInterruptType) -> std::result::Result<(), std::io::Error> {
867         let vector = match int_type {
868             VirtioInterruptType::Config => self.config_vector.load(Ordering::Acquire),
869             VirtioInterruptType::Queue(queue_index) => {
870                 self.queues_vectors.lock().unwrap()[queue_index as usize]
871             }
872         };
873 
874         if vector == VIRTQ_MSI_NO_VECTOR {
875             return Ok(());
876         }
877 
878         let config = &mut self.msix_config.lock().unwrap();
879         let entry = &config.table_entries[vector as usize];
880         // If the MSI-X function is masked, or if the Mask bit (bit 0) of the vector
881         // control register associated with the entry is set, the vector is masked
882         // and the device must not inject the interrupt.
883         // Instead, the Pending Bit Array is updated to record that there is a
884         // pending interrupt for this specific vector.
885         if config.masked() || entry.masked() {
886             config.set_pba_bit(vector, false);
887             return Ok(());
888         }
889 
890         self.interrupt_source_group
891             .trigger(vector as InterruptIndex)
892     }
893 
894     fn notifier(&self, int_type: VirtioInterruptType) -> Option<EventFd> {
895         let vector = match int_type {
896             VirtioInterruptType::Config => self.config_vector.load(Ordering::Acquire),
897             VirtioInterruptType::Queue(queue_index) => {
898                 self.queues_vectors.lock().unwrap()[queue_index as usize]
899             }
900         };
901 
902         self.interrupt_source_group
903             .notifier(vector as InterruptIndex)
904     }
905 }
906 
907 impl PciDevice for VirtioPciDevice {
908     fn write_config_register(
909         &mut self,
910         reg_idx: usize,
911         offset: u64,
912         data: &[u8],
913     ) -> Option<Arc<Barrier>> {
914         // Handle the special case where the capability VIRTIO_PCI_CAP_PCI_CFG
915         // is accessed. This capability has a special meaning as it allows the
916         // guest to access other capabilities without mapping the PCI BAR.
917         let base = reg_idx * 4;
918         if base + offset as usize >= self.cap_pci_cfg_info.offset
919             && base + offset as usize + data.len()
920                 <= self.cap_pci_cfg_info.offset + self.cap_pci_cfg_info.cap.bytes().len()
921         {
922             let offset = base + offset as usize - self.cap_pci_cfg_info.offset;
923             self.write_cap_pci_cfg(offset, data)
924         } else {
925             self.configuration
926                 .write_config_register(reg_idx, offset, data);
927             None
928         }
929     }
930 
931     fn read_config_register(&mut self, reg_idx: usize) -> u32 {
932         // Handle the special case where the capability VIRTIO_PCI_CAP_PCI_CFG
933         // is accessed. This capability has a special meaning as it allows the
934         // guest to access other capabilities without mapping the PCI BAR.
935         let base = reg_idx * 4;
936         if base >= self.cap_pci_cfg_info.offset
937             && base + 4 <= self.cap_pci_cfg_info.offset + self.cap_pci_cfg_info.cap.bytes().len()
938         {
939             let offset = base - self.cap_pci_cfg_info.offset;
940             let mut data = [0u8; 4];
941             self.read_cap_pci_cfg(offset, &mut data);
942             u32::from_le_bytes(data)
943         } else {
944             self.configuration.read_reg(reg_idx)
945         }
946     }
947 
948     fn detect_bar_reprogramming(
949         &mut self,
950         reg_idx: usize,
951         data: &[u8],
952     ) -> Option<BarReprogrammingParams> {
953         self.configuration.detect_bar_reprogramming(reg_idx, data)
954     }
955 
956     fn allocate_bars(
957         &mut self,
958         _allocator: &Arc<Mutex<SystemAllocator>>,
959         mmio32_allocator: &mut AddressAllocator,
960         mmio64_allocator: &mut AddressAllocator,
961         resources: Option<Vec<Resource>>,
962     ) -> std::result::Result<Vec<PciBarConfiguration>, PciDeviceError> {
963         let mut bars = Vec::new();
964         let device_clone = self.device.clone();
965         let device = device_clone.lock().unwrap();
966 
967         let mut settings_bar_addr = None;
968         let mut use_64bit_bar = self.use_64bit_bar;
969         let restoring = resources.is_some();
970         if let Some(resources) = resources {
971             for resource in resources {
972                 if let Resource::PciBar {
973                     index, base, type_, ..
974                 } = resource
975                 {
976                     if index == VIRTIO_COMMON_BAR_INDEX {
977                         settings_bar_addr = Some(GuestAddress(base));
978                         use_64bit_bar = match type_ {
979                             PciBarType::Io => {
980                                 return Err(PciDeviceError::InvalidResource(resource))
981                             }
982                             PciBarType::Mmio32 => false,
983                             PciBarType::Mmio64 => true,
984                         };
985                         break;
986                     }
987                 }
988             }
989             // Error out if no resource matched the BAR index.
990             if settings_bar_addr.is_none() {
991                 return Err(PciDeviceError::MissingResource);
992             }
993         }
994 
995         // Allocate the virtio-pci capability BAR.
996         // See http://docs.oasis-open.org/virtio/virtio/v1.0/cs04/virtio-v1.0-cs04.html#x1-740004
997         let (virtio_pci_bar_addr, region_type) = if use_64bit_bar {
998             let region_type = PciBarRegionType::Memory64BitRegion;
999             let addr = mmio64_allocator
1000                 .allocate(
1001                     settings_bar_addr,
1002                     CAPABILITY_BAR_SIZE,
1003                     Some(CAPABILITY_BAR_SIZE),
1004                 )
1005                 .ok_or(PciDeviceError::IoAllocationFailed(CAPABILITY_BAR_SIZE))?;
1006             (addr, region_type)
1007         } else {
1008             let region_type = PciBarRegionType::Memory32BitRegion;
1009             let addr = mmio32_allocator
1010                 .allocate(
1011                     settings_bar_addr,
1012                     CAPABILITY_BAR_SIZE,
1013                     Some(CAPABILITY_BAR_SIZE),
1014                 )
1015                 .ok_or(PciDeviceError::IoAllocationFailed(CAPABILITY_BAR_SIZE))?;
1016             (addr, region_type)
1017         };
1018 
1019         let bar = PciBarConfiguration::default()
1020             .set_index(VIRTIO_COMMON_BAR_INDEX)
1021             .set_address(virtio_pci_bar_addr.raw_value())
1022             .set_size(CAPABILITY_BAR_SIZE)
1023             .set_region_type(region_type);
1024 
1025         // The creation of the PCI BAR and its associated capabilities must
1026         // happen only during the creation of a brand new VM. When a VM is
1027         // restored from a known state, the BARs are already created with the
1028         // right content, therefore we don't need to go through this codepath.
1029         if !restoring {
1030             self.configuration.add_pci_bar(&bar).map_err(|e| {
1031                 PciDeviceError::IoRegistrationFailed(virtio_pci_bar_addr.raw_value(), e)
1032             })?;
1033 
1034             // Once the BARs are allocated, the capabilities can be added to the PCI configuration.
1035             self.add_pci_capabilities(VIRTIO_COMMON_BAR_INDEX as u8)?;
1036         }
1037 
1038         bars.push(bar);
1039 
1040         // Allocate a dedicated BAR if there are some shared memory regions.
1041         if let Some(shm_list) = device.get_shm_regions() {
1042             let bar = PciBarConfiguration::default()
1043                 .set_index(VIRTIO_SHM_BAR_INDEX)
1044                 .set_address(shm_list.addr.raw_value())
1045                 .set_size(shm_list.len);
1046 
1047             // The creation of the PCI BAR and its associated capabilities must
1048             // happen only during the creation of a brand new VM. When a VM is
1049             // restored from a known state, the BARs are already created with the
1050             // right content, therefore we don't need to go through this codepath.
1051             if !restoring {
1052                 self.configuration.add_pci_bar(&bar).map_err(|e| {
1053                     PciDeviceError::IoRegistrationFailed(shm_list.addr.raw_value(), e)
1054                 })?;
1055 
1056                 for (idx, shm) in shm_list.region_list.iter().enumerate() {
1057                     let shm_cap = VirtioPciCap64::new(
1058                         PciCapabilityType::SharedMemory,
1059                         VIRTIO_SHM_BAR_INDEX as u8,
1060                         idx as u8,
1061                         shm.offset,
1062                         shm.len,
1063                     );
1064                     self.configuration
1065                         .add_capability(&shm_cap)
1066                         .map_err(PciDeviceError::CapabilitiesSetup)?;
1067                 }
1068             }
1069 
1070             bars.push(bar);
1071         }
1072 
1073         self.bar_regions = bars.clone();
1074 
1075         Ok(bars)
1076     }
1077 
1078     fn free_bars(
1079         &mut self,
1080         _allocator: &mut SystemAllocator,
1081         mmio32_allocator: &mut AddressAllocator,
1082         mmio64_allocator: &mut AddressAllocator,
1083     ) -> std::result::Result<(), PciDeviceError> {
1084         for bar in self.bar_regions.drain(..) {
1085             match bar.region_type() {
1086                 PciBarRegionType::Memory32BitRegion => {
1087                     mmio32_allocator.free(GuestAddress(bar.addr()), bar.size());
1088                 }
1089                 PciBarRegionType::Memory64BitRegion => {
1090                     mmio64_allocator.free(GuestAddress(bar.addr()), bar.size());
1091                 }
1092                 _ => error!("Unexpected PCI bar type"),
1093             }
1094         }
1095         Ok(())
1096     }
1097 
1098     fn move_bar(
1099         &mut self,
1100         old_base: u64,
1101         new_base: u64,
1102     ) -> std::result::Result<(), std::io::Error> {
1103         // We only update our idea of the bar in order to support free_bars() above.
1104         // The majority of the reallocation is done inside DeviceManager.
1105         for bar in self.bar_regions.iter_mut() {
1106             if bar.addr() == old_base {
1107                 *bar = bar.set_address(new_base);
1108             }
1109         }
1110 
1111         Ok(())
1112     }
1113 
1114     fn read_bar(&mut self, _base: u64, offset: u64, data: &mut [u8]) {
1115         match offset {
1116             o if o < COMMON_CONFIG_BAR_OFFSET + COMMON_CONFIG_SIZE => self.common_config.read(
1117                 o - COMMON_CONFIG_BAR_OFFSET,
1118                 data,
1119                 &self.queues,
1120                 self.device.clone(),
1121             ),
1122             o if (ISR_CONFIG_BAR_OFFSET..ISR_CONFIG_BAR_OFFSET + ISR_CONFIG_SIZE).contains(&o) => {
1123                 if let Some(v) = data.get_mut(0) {
1124                     // Reading this register resets it to 0.
1125                     *v = self.interrupt_status.swap(0, Ordering::AcqRel) as u8;
1126                 }
1127             }
1128             o if (DEVICE_CONFIG_BAR_OFFSET..DEVICE_CONFIG_BAR_OFFSET + DEVICE_CONFIG_SIZE)
1129                 .contains(&o) =>
1130             {
1131                 let device = self.device.lock().unwrap();
1132                 device.read_config(o - DEVICE_CONFIG_BAR_OFFSET, data);
1133             }
1134             o if (NOTIFICATION_BAR_OFFSET..NOTIFICATION_BAR_OFFSET + NOTIFICATION_SIZE)
1135                 .contains(&o) =>
1136             {
1137                 // Handled with ioeventfds.
1138             }
1139             o if (MSIX_TABLE_BAR_OFFSET..MSIX_TABLE_BAR_OFFSET + MSIX_TABLE_SIZE).contains(&o) => {
1140                 if let Some(msix_config) = &self.msix_config {
1141                     msix_config
1142                         .lock()
1143                         .unwrap()
1144                         .read_table(o - MSIX_TABLE_BAR_OFFSET, data);
1145                 }
1146             }
1147             o if (MSIX_PBA_BAR_OFFSET..MSIX_PBA_BAR_OFFSET + MSIX_PBA_SIZE).contains(&o) => {
1148                 if let Some(msix_config) = &self.msix_config {
1149                     msix_config
1150                         .lock()
1151                         .unwrap()
1152                         .read_pba(o - MSIX_PBA_BAR_OFFSET, data);
1153                 }
1154             }
1155             _ => (),
1156         }
1157     }
1158 
1159     fn write_bar(&mut self, _base: u64, offset: u64, data: &[u8]) -> Option<Arc<Barrier>> {
1160         match offset {
1161             o if o < COMMON_CONFIG_BAR_OFFSET + COMMON_CONFIG_SIZE => self.common_config.write(
1162                 o - COMMON_CONFIG_BAR_OFFSET,
1163                 data,
1164                 &mut self.queues,
1165                 self.device.clone(),
1166             ),
1167             o if (ISR_CONFIG_BAR_OFFSET..ISR_CONFIG_BAR_OFFSET + ISR_CONFIG_SIZE).contains(&o) => {
1168                 if let Some(v) = data.first() {
1169                     self.interrupt_status
1170                         .fetch_and(!(*v as usize), Ordering::AcqRel);
1171                 }
1172             }
1173             o if (DEVICE_CONFIG_BAR_OFFSET..DEVICE_CONFIG_BAR_OFFSET + DEVICE_CONFIG_SIZE)
1174                 .contains(&o) =>
1175             {
1176                 let mut device = self.device.lock().unwrap();
1177                 device.write_config(o - DEVICE_CONFIG_BAR_OFFSET, data);
1178             }
1179             o if (NOTIFICATION_BAR_OFFSET..NOTIFICATION_BAR_OFFSET + NOTIFICATION_SIZE)
1180                 .contains(&o) =>
1181             {
1182                 // Handled with ioeventfds.
1183                 error!("Unexpected write to notification BAR: offset = 0x{:x}", o);
1184             }
1185             o if (MSIX_TABLE_BAR_OFFSET..MSIX_TABLE_BAR_OFFSET + MSIX_TABLE_SIZE).contains(&o) => {
1186                 if let Some(msix_config) = &self.msix_config {
1187                     msix_config
1188                         .lock()
1189                         .unwrap()
1190                         .write_table(o - MSIX_TABLE_BAR_OFFSET, data);
1191                 }
1192             }
1193             o if (MSIX_PBA_BAR_OFFSET..MSIX_PBA_BAR_OFFSET + MSIX_PBA_SIZE).contains(&o) => {
1194                 if let Some(msix_config) = &self.msix_config {
1195                     msix_config
1196                         .lock()
1197                         .unwrap()
1198                         .write_pba(o - MSIX_PBA_BAR_OFFSET, data);
1199                 }
1200             }
1201             _ => (),
1202         };
1203 
1204         // Try and activate the device if the driver status has changed
1205         if self.needs_activation() {
1206             let barrier = Arc::new(Barrier::new(2));
1207             let activator = self.prepare_activator(Some(barrier.clone()));
1208             self.pending_activations.lock().unwrap().push(activator);
1209             info!(
1210                 "{}: Needs activation; writing to activate event fd",
1211                 self.id
1212             );
1213             self.activate_evt.write(1).ok();
1214             info!("{}: Needs activation; returning barrier", self.id);
1215             return Some(barrier);
1216         }
1217 
1218         // Device has been reset by the driver
1219         if self.device_activated.load(Ordering::SeqCst) && self.is_driver_init() {
1220             let mut device = self.device.lock().unwrap();
1221             if let Some(virtio_interrupt) = device.reset() {
1222                 // Upon reset the device hands back its VirtioInterrupt object
1223                 self.virtio_interrupt = Some(virtio_interrupt);
1224                 self.device_activated.store(false, Ordering::SeqCst);
1225 
1226                 // Reset queue readiness (which changes queue_enable), the queue sizes
1227                 // and queue_select, as required by the spec on device reset
1228                 self.queues.iter_mut().for_each(Queue::reset);
1229                 self.common_config.queue_select = 0;
1230             } else {
1231                 error!("Attempt to reset device when not implemented in underlying device");
1232                 self.common_config.driver_status = crate::DEVICE_FAILED as u8;
1233             }
1234         }
1235 
1236         None
1237     }
1238 
1239     fn as_any(&mut self) -> &mut dyn Any {
1240         self
1241     }
1242 
1243     fn id(&self) -> Option<String> {
1244         Some(self.id.clone())
1245     }
1246 }
1247 
1248 impl BusDevice for VirtioPciDevice {
1249     fn read(&mut self, base: u64, offset: u64, data: &mut [u8]) {
1250         self.read_bar(base, offset, data)
1251     }
1252 
1253     fn write(&mut self, base: u64, offset: u64, data: &[u8]) -> Option<Arc<Barrier>> {
1254         self.write_bar(base, offset, data)
1255     }
1256 }
1257 
1258 impl Pausable for VirtioPciDevice {
1259     fn pause(&mut self) -> std::result::Result<(), MigratableError> {
1260         Ok(())
1261     }
1262 
1263     fn resume(&mut self) -> std::result::Result<(), MigratableError> {
1264         Ok(())
1265     }
1266 }
1267 
1268 impl Snapshottable for VirtioPciDevice {
1269     fn id(&self) -> String {
1270         self.id.clone()
1271     }
1272 
1273     fn snapshot(&mut self) -> std::result::Result<Snapshot, MigratableError> {
1274         let mut virtio_pci_dev_snapshot = Snapshot::new_from_versioned_state(&self.state())?;
1275 
1276         // Snapshot PciConfiguration
1277         virtio_pci_dev_snapshot
1278             .add_snapshot(self.configuration.id(), self.configuration.snapshot()?);
1279 
1280         // Snapshot VirtioPciCommonConfig
1281         virtio_pci_dev_snapshot
1282             .add_snapshot(self.common_config.id(), self.common_config.snapshot()?);
1283 
1284         // Snapshot MSI-X
1285         if let Some(msix_config) = &self.msix_config {
1286             let mut msix_config = msix_config.lock().unwrap();
1287             virtio_pci_dev_snapshot.add_snapshot(msix_config.id(), msix_config.snapshot()?);
1288         }
1289 
1290         Ok(virtio_pci_dev_snapshot)
1291     }
1292 }
1293 impl Transportable for VirtioPciDevice {}
1294 impl Migratable for VirtioPciDevice {}
1295