xref: /cloud-hypervisor/virtio-devices/src/transport/pci_device.rs (revision fa7a000dbe9637eb256af18ae8c3c4a8d5bf9c8f)
1 // Copyright 2018 The Chromium OS Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE-BSD-3-Clause file.
4 //
5 // Copyright © 2019 Intel Corporation
6 //
7 // SPDX-License-Identifier: Apache-2.0 AND BSD-3-Clause
8 
9 use crate::transport::{VirtioPciCommonConfig, VirtioTransport, VIRTIO_PCI_COMMON_CONFIG_ID};
10 use crate::GuestMemoryMmap;
11 use crate::{
12     ActivateResult, VirtioDevice, VirtioDeviceType, VirtioInterrupt, VirtioInterruptType,
13     DEVICE_ACKNOWLEDGE, DEVICE_DRIVER, DEVICE_DRIVER_OK, DEVICE_FAILED, DEVICE_FEATURES_OK,
14     DEVICE_INIT,
15 };
16 use anyhow::anyhow;
17 use libc::EFD_NONBLOCK;
18 use pci::{
19     BarReprogrammingParams, MsixCap, MsixConfig, PciBarConfiguration, PciBarRegionType,
20     PciCapability, PciCapabilityId, PciClassCode, PciConfiguration, PciDevice, PciDeviceError,
21     PciHeaderType, PciMassStorageSubclass, PciNetworkControllerSubclass, PciSubclass,
22 };
23 use std::any::Any;
24 use std::cmp;
25 use std::io::Write;
26 use std::ops::Deref;
27 use std::sync::atomic::{AtomicBool, AtomicU16, AtomicUsize, Ordering};
28 use std::sync::{Arc, Barrier, Mutex};
29 use thiserror::Error;
30 use versionize::{VersionMap, Versionize, VersionizeResult};
31 use versionize_derive::Versionize;
32 use virtio_queue::{Queue, QueueT};
33 use vm_allocator::{AddressAllocator, SystemAllocator};
34 use vm_device::dma_mapping::ExternalDmaMapping;
35 use vm_device::interrupt::{
36     InterruptIndex, InterruptManager, InterruptSourceGroup, MsiIrqGroupConfig,
37 };
38 use vm_device::{BusDevice, PciBarType, Resource};
39 use vm_memory::{Address, ByteValued, GuestAddress, GuestAddressSpace, GuestMemoryAtomic, Le32};
40 use vm_migration::{
41     Migratable, MigratableError, Pausable, Snapshot, Snapshottable, Transportable, VersionMapped,
42 };
43 use vm_virtio::AccessPlatform;
44 use vmm_sys_util::eventfd::EventFd;
45 
46 use super::pci_common_config::VirtioPciCommonConfigState;
47 
48 /// Vector value used to disable MSI for a queue.
49 const VIRTQ_MSI_NO_VECTOR: u16 = 0xffff;
50 
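// The values below match the virtio specification's capability cfg_type
// constants: VIRTIO_PCI_CAP_COMMON_CFG (1), VIRTIO_PCI_CAP_NOTIFY_CFG (2),
// VIRTIO_PCI_CAP_ISR_CFG (3), VIRTIO_PCI_CAP_DEVICE_CFG (4),
// VIRTIO_PCI_CAP_PCI_CFG (5) and VIRTIO_PCI_CAP_SHARED_MEMORY_CFG (8).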
51 enum PciCapabilityType {
52     Common = 1,
53     Notify = 2,
54     Isr = 3,
55     Device = 4,
56     Pci = 5,
57     SharedMemory = 8,
58 }
59 
60 // This offset represents the 2 bytes omitted from the VirtioPciCap structure
61 // as they are already handled through add_capability(). These 2 bytes are the
62 // fields cap_vndr (1 byte) and cap_next (1 byte) defined in the virtio spec.
63 const VIRTIO_PCI_CAP_OFFSET: usize = 2;
64 
65 #[allow(dead_code)]
66 #[repr(packed)]
67 #[derive(Clone, Copy, Default)]
68 struct VirtioPciCap {
69     cap_len: u8,      // Generic PCI field: capability length
70     cfg_type: u8,     // Identifies the structure.
71     pci_bar: u8,      // Where to find it.
72     id: u8,           // Multiple capabilities of the same type
73     padding: [u8; 2], // Pad to full dword.
74     offset: Le32,     // Offset within bar.
75     length: Le32,     // Length of the structure, in bytes.
76 }
77 // SAFETY: All members are simple numbers and any value is valid.
78 unsafe impl ByteValued for VirtioPciCap {}
79 
80 impl PciCapability for VirtioPciCap {
81     fn bytes(&self) -> &[u8] {
82         self.as_slice()
83     }
84 
85     fn id(&self) -> PciCapabilityId {
86         PciCapabilityId::VendorSpecific
87     }
88 }
89 
90 const VIRTIO_PCI_CAP_LEN_OFFSET: u8 = 2;
91 
92 impl VirtioPciCap {
93     pub fn new(cfg_type: PciCapabilityType, pci_bar: u8, offset: u32, length: u32) -> Self {
94         VirtioPciCap {
95             cap_len: (std::mem::size_of::<VirtioPciCap>() as u8) + VIRTIO_PCI_CAP_LEN_OFFSET,
96             cfg_type: cfg_type as u8,
97             pci_bar,
98             id: 0,
99             padding: [0; 2],
100             offset: Le32::from(offset),
101             length: Le32::from(length),
102         }
103     }
104 }
105 
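// Per the virtio specification, the driver computes the notification
// (doorbell) address for a queue as:
//
//     cap.offset + queue_notify_off * notify_off_multiplier
//
// within the BAR referenced by the capability. With NOTIFY_OFF_MULTIPLIER set
// to 4 further down, every queue gets its own dword-sized doorbell, matching
// the per-queue ioeventfd addresses reported by ioeventfds().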
106 #[allow(dead_code)]
107 #[repr(packed)]
108 #[derive(Clone, Copy, Default)]
109 struct VirtioPciNotifyCap {
110     cap: VirtioPciCap,
111     notify_off_multiplier: Le32,
112 }
113 // SAFETY: All members are simple numbers and any value is valid.
114 unsafe impl ByteValued for VirtioPciNotifyCap {}
115 
116 impl PciCapability for VirtioPciNotifyCap {
117     fn bytes(&self) -> &[u8] {
118         self.as_slice()
119     }
120 
121     fn id(&self) -> PciCapabilityId {
122         PciCapabilityId::VendorSpecific
123     }
124 }
125 
126 impl VirtioPciNotifyCap {
127     pub fn new(
128         cfg_type: PciCapabilityType,
129         pci_bar: u8,
130         offset: u32,
131         length: u32,
132         multiplier: Le32,
133     ) -> Self {
134         VirtioPciNotifyCap {
135             cap: VirtioPciCap {
136                 cap_len: (std::mem::size_of::<VirtioPciNotifyCap>() as u8)
137                     + VIRTIO_PCI_CAP_LEN_OFFSET,
138                 cfg_type: cfg_type as u8,
139                 pci_bar,
140                 id: 0,
141                 padding: [0; 2],
142                 offset: Le32::from(offset),
143                 length: Le32::from(length),
144             },
145             notify_off_multiplier: multiplier,
146         }
147     }
148 }
149 
150 #[allow(dead_code)]
151 #[repr(packed)]
152 #[derive(Clone, Copy, Default)]
153 struct VirtioPciCap64 {
154     cap: VirtioPciCap,
155     offset_hi: Le32,
156     length_hi: Le32,
157 }
158 // SAFETY: All members are simple numbers and any value is valid.
159 unsafe impl ByteValued for VirtioPciCap64 {}
160 
161 impl PciCapability for VirtioPciCap64 {
162     fn bytes(&self) -> &[u8] {
163         self.as_slice()
164     }
165 
166     fn id(&self) -> PciCapabilityId {
167         PciCapabilityId::VendorSpecific
168     }
169 }
170 
171 impl VirtioPciCap64 {
172     pub fn new(cfg_type: PciCapabilityType, pci_bar: u8, id: u8, offset: u64, length: u64) -> Self {
173         VirtioPciCap64 {
174             cap: VirtioPciCap {
175                 cap_len: (std::mem::size_of::<VirtioPciCap64>() as u8) + VIRTIO_PCI_CAP_LEN_OFFSET,
176                 cfg_type: cfg_type as u8,
177                 pci_bar,
178                 id,
179                 padding: [0; 2],
180                 offset: Le32::from(offset as u32),
181                 length: Le32::from(length as u32),
182             },
183             offset_hi: Le32::from((offset >> 32) as u32),
184             length_hi: Le32::from((length >> 32) as u32),
185         }
186     }
187 }
188 
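// VIRTIO_PCI_CAP_PCI_CFG provides an alternative access method: the guest
// programs the capability's bar/offset/length fields through plain PCI
// configuration space writes, then reads or writes pci_cfg_data, which this
// implementation forwards to the settings BAR at the programmed offset (see
// read_cap_pci_cfg() and write_cap_pci_cfg() further down).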
189 #[allow(dead_code)]
190 #[repr(packed)]
191 #[derive(Clone, Copy, Default)]
192 struct VirtioPciCfgCap {
193     cap: VirtioPciCap,
194     pci_cfg_data: [u8; 4],
195 }
196 // SAFETY: All members are simple numbers and any value is valid.
197 unsafe impl ByteValued for VirtioPciCfgCap {}
198 
199 impl PciCapability for VirtioPciCfgCap {
200     fn bytes(&self) -> &[u8] {
201         self.as_slice()
202     }
203 
204     fn id(&self) -> PciCapabilityId {
205         PciCapabilityId::VendorSpecific
206     }
207 }
208 
209 impl VirtioPciCfgCap {
210     fn new() -> Self {
211         VirtioPciCfgCap {
212             cap: VirtioPciCap::new(PciCapabilityType::Pci, 0, 0, 0),
213             ..Default::default()
214         }
215     }
216 }
217 
218 #[derive(Clone, Copy, Default)]
219 struct VirtioPciCfgCapInfo {
220     offset: usize,
221     cap: VirtioPciCfgCap,
222 }
223 
224 #[allow(dead_code)]
225 #[derive(Copy, Clone)]
226 pub enum PciVirtioSubclass {
227     NonTransitionalBase = 0xff,
228 }
229 
230 impl PciSubclass for PciVirtioSubclass {
231     fn get_register_value(&self) -> u8 {
232         *self as u8
233     }
234 }
235 
236 // Allocate one BAR for the structures pointed to by the capability structures.
237 // As per the PCI specification, when MSI-X and non-MSI-X structures share the
238 // same BAR, it is recommended to align all of those structures to an 8KiB
239 // boundary.
240 const COMMON_CONFIG_BAR_OFFSET: u64 = 0x0000;
241 const COMMON_CONFIG_SIZE: u64 = 56;
242 const ISR_CONFIG_BAR_OFFSET: u64 = 0x2000;
243 const ISR_CONFIG_SIZE: u64 = 1;
244 const DEVICE_CONFIG_BAR_OFFSET: u64 = 0x4000;
245 const DEVICE_CONFIG_SIZE: u64 = 0x1000;
246 const NOTIFICATION_BAR_OFFSET: u64 = 0x6000;
247 const NOTIFICATION_SIZE: u64 = 0x1000;
248 const MSIX_TABLE_BAR_OFFSET: u64 = 0x8000;
249 // 256KiB are reserved for the MSI-X table, which can hold up to 2048 entries,
250 // each entry being 128 bits (4 DWORDs), i.e. 32KiB of actual table data.
251 const MSIX_TABLE_SIZE: u64 = 0x40000;
252 const MSIX_PBA_BAR_OFFSET: u64 = 0x48000;
253 // 2KiB are reserved for the Pending Bit Array, which needs one bit per vector
254 // and can support up to 2048 vectors (256 bytes of actual data).
255 const MSIX_PBA_SIZE: u64 = 0x800;
256 // The BAR size must be a power of 2.
257 const CAPABILITY_BAR_SIZE: u64 = 0x80000;
258 const VIRTIO_COMMON_BAR_INDEX: usize = 0;
259 const VIRTIO_SHM_BAR_INDEX: usize = 2;
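// Resulting layout of the settings BAR, derived from the constants above:
//
//   0x00000..=0x00037  common configuration (56 bytes)
//   0x02000..=0x02000  ISR status (1 byte)
//   0x04000..=0x04fff  device-specific configuration
//   0x06000..=0x06fff  notification (doorbell) region
//   0x08000..=0x47fff  MSI-X table
//   0x48000..=0x487ff  MSI-X PBA
//
// with the whole BAR spanning CAPABILITY_BAR_SIZE (0x80000) bytes.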
260 
261 const NOTIFY_OFF_MULTIPLIER: u32 = 4; // A dword per notification address.
262 
263 const VIRTIO_PCI_VENDOR_ID: u16 = 0x1af4;
264 const VIRTIO_PCI_DEVICE_ID_BASE: u16 = 0x1040; // Add to device type to get device ID.
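// For example, a virtio-net device (device type 1) is exposed to the guest
// with vendor ID 0x1af4 and device ID 0x1040 + 1 = 0x1041.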
265 
266 #[derive(Versionize)]
267 struct QueueState {
268     max_size: u16,
269     size: u16,
270     ready: bool,
271     desc_table: u64,
272     avail_ring: u64,
273     used_ring: u64,
274 }
275 
276 #[derive(Versionize)]
277 pub struct VirtioPciDeviceState {
278     device_activated: bool,
279     queues: Vec<QueueState>,
280     interrupt_status: usize,
281     cap_pci_cfg_offset: usize,
282     cap_pci_cfg: Vec<u8>,
283 }
284 
285 impl VersionMapped for VirtioPciDeviceState {}
286 
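// VirtioPciDeviceActivator decouples the guest's activation request (a write
// to the common configuration handled in write_bar()) from the actual call
// into VirtioDevice::activate(): write_bar() queues an activator on
// `pending_activations`, signals `activate_evt` and returns a Barrier to the
// caller, and the thread servicing `activate_evt` (outside this file) is
// expected to run activate(), which releases the Barrier once the device is
// up. During restore, the activator is used directly through
// VirtioPciDevice::activate().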
287 pub struct VirtioPciDeviceActivator {
288     interrupt: Option<Arc<dyn VirtioInterrupt>>,
289     memory: Option<GuestMemoryAtomic<GuestMemoryMmap>>,
290     device: Arc<Mutex<dyn VirtioDevice>>,
291     device_activated: Arc<AtomicBool>,
292     queues: Option<Vec<(usize, Queue, EventFd)>>,
293     barrier: Option<Arc<Barrier>>,
294     id: String,
295 }
296 
297 impl VirtioPciDeviceActivator {
298     pub fn activate(&mut self) -> ActivateResult {
299         self.device.lock().unwrap().activate(
300             self.memory.take().unwrap(),
301             self.interrupt.take().unwrap(),
302             self.queues.take().unwrap(),
303         )?;
304         self.device_activated.store(true, Ordering::SeqCst);
305 
306         if let Some(barrier) = self.barrier.take() {
307             info!("{}: Waiting for barrier", self.id);
308             barrier.wait();
309             info!("{}: Barrier released", self.id);
310         }
311 
312         Ok(())
313     }
314 }
315 
316 #[derive(Error, Debug)]
317 pub enum VirtioPciDeviceError {
318     #[error("Failed creating VirtioPciDevice: {0}")]
319     CreateVirtioPciDevice(#[source] anyhow::Error),
320 }
321 pub type Result<T> = std::result::Result<T, VirtioPciDeviceError>;
322 
323 pub struct VirtioPciDevice {
324     id: String,
325 
326     // PCI configuration registers.
327     configuration: PciConfiguration,
328 
329     // virtio PCI common configuration
330     common_config: VirtioPciCommonConfig,
331 
332     // MSI-X config
333     msix_config: Option<Arc<Mutex<MsixConfig>>>,
334 
335     // Number of MSI-X vectors
336     msix_num: u16,
337 
338     // Virtio device reference and status
339     device: Arc<Mutex<dyn VirtioDevice>>,
340     device_activated: Arc<AtomicBool>,
341 
342     // PCI interrupts.
343     interrupt_status: Arc<AtomicUsize>,
344     virtio_interrupt: Option<Arc<dyn VirtioInterrupt>>,
345     interrupt_source_group: Arc<dyn InterruptSourceGroup>,
346 
347     // virtio queues
348     queues: Vec<Queue>,
349     queue_evts: Vec<EventFd>,
350 
351     // Guest memory
352     memory: GuestMemoryAtomic<GuestMemoryMmap>,
353 
354     // Settings PCI BAR
355     settings_bar: u8,
356 
357     // Whether to place the settings BAR in 64-bit or 32-bit MMIO space
358     use_64bit_bar: bool,
359 
360     // Dedicated structure holding information about the specific virtio-pci
361     // capability VIRTIO_PCI_CAP_PCI_CFG. It is needed to support the
362     // legacy/backward compatible mechanism that lets the guest access the
363     // other virtio capabilities without mapping the PCI BARs. This can be
364     // needed when the guest tries to access the virtio configuration of a
365     // device early, before the BARs have been mapped.
366     cap_pci_cfg_info: VirtioPciCfgCapInfo,
367 
368     // Details of bar regions to free
369     bar_regions: Vec<PciBarConfiguration>,
370 
371     // EventFd to signal on to request activation
372     activate_evt: EventFd,
373 
374     // Optional DMA handler
375     dma_handler: Option<Arc<dyn ExternalDmaMapping>>,
376 
377     // Pending activations
378     pending_activations: Arc<Mutex<Vec<VirtioPciDeviceActivator>>>,
379 }
380 
381 impl VirtioPciDevice {
382     /// Constructs a new PCI transport for the given virtio device.
383     #[allow(clippy::too_many_arguments)]
384     pub fn new(
385         id: String,
386         memory: GuestMemoryAtomic<GuestMemoryMmap>,
387         device: Arc<Mutex<dyn VirtioDevice>>,
388         msix_num: u16,
389         access_platform: Option<Arc<dyn AccessPlatform>>,
390         interrupt_manager: &Arc<dyn InterruptManager<GroupConfig = MsiIrqGroupConfig>>,
391         pci_device_bdf: u32,
392         activate_evt: EventFd,
393         use_64bit_bar: bool,
394         dma_handler: Option<Arc<dyn ExternalDmaMapping>>,
395         pending_activations: Arc<Mutex<Vec<VirtioPciDeviceActivator>>>,
396         snapshot: Option<Snapshot>,
397     ) -> Result<Self> {
398         let mut locked_device = device.lock().unwrap();
399         let mut queue_evts = Vec::new();
400         for _ in locked_device.queue_max_sizes().iter() {
401             queue_evts.push(EventFd::new(EFD_NONBLOCK).map_err(|e| {
402                 VirtioPciDeviceError::CreateVirtioPciDevice(anyhow!(
403                     "Failed creating eventfd: {}",
404                     e
405                 ))
406             })?)
407         }
408         let num_queues = locked_device.queue_max_sizes().len();
409 
410         if let Some(access_platform) = &access_platform {
411             locked_device.set_access_platform(access_platform.clone());
412         }
413 
414         let mut queues: Vec<Queue> = locked_device
415             .queue_max_sizes()
416             .iter()
417             .map(|&s| Queue::new(s).unwrap())
418             .collect();
419 
420         let pci_device_id = VIRTIO_PCI_DEVICE_ID_BASE + locked_device.device_type() as u16;
421 
422         let interrupt_source_group = interrupt_manager
423             .create_group(MsiIrqGroupConfig {
424                 base: 0,
425                 count: msix_num as InterruptIndex,
426             })
427             .map_err(|e| {
428                 VirtioPciDeviceError::CreateVirtioPciDevice(anyhow!(
429                     "Failed creating MSI interrupt group: {}",
430                     e
431                 ))
432             })?;
433 
434         let msix_state =
435             vm_migration::versioned_state_from_id(snapshot.as_ref(), pci::MSIX_CONFIG_ID).map_err(
436                 |e| {
437                     VirtioPciDeviceError::CreateVirtioPciDevice(anyhow!(
438                         "Failed to get MsixConfigState from Snapshot: {}",
439                         e
440                     ))
441                 },
442             )?;
443 
444         let (msix_config, msix_config_clone) = if msix_num > 0 {
445             let msix_config = Arc::new(Mutex::new(
446                 MsixConfig::new(
447                     msix_num,
448                     interrupt_source_group.clone(),
449                     pci_device_bdf,
450                     msix_state,
451                 )
452                 .unwrap(),
453             ));
454             let msix_config_clone = msix_config.clone();
455             (Some(msix_config), Some(msix_config_clone))
456         } else {
457             (None, None)
458         };
459 
460         let (class, subclass) = match VirtioDeviceType::from(locked_device.device_type()) {
461             VirtioDeviceType::Net => (
462                 PciClassCode::NetworkController,
463                 &PciNetworkControllerSubclass::EthernetController as &dyn PciSubclass,
464             ),
465             VirtioDeviceType::Block => (
466                 PciClassCode::MassStorage,
467                 &PciMassStorageSubclass::MassStorage as &dyn PciSubclass,
468             ),
469             _ => (
470                 PciClassCode::Other,
471                 &PciVirtioSubclass::NonTransitionalBase as &dyn PciSubclass,
472             ),
473         };
474 
475         let pci_configuration_state =
476             vm_migration::versioned_state_from_id(snapshot.as_ref(), pci::PCI_CONFIGURATION_ID)
477                 .map_err(|e| {
478                     VirtioPciDeviceError::CreateVirtioPciDevice(anyhow!(
479                         "Failed to get PciConfigurationState from Snapshot: {}",
480                         e
481                     ))
482                 })?;
483 
484         let configuration = PciConfiguration::new(
485             VIRTIO_PCI_VENDOR_ID,
486             pci_device_id,
487             0x1, // For modern virtio-PCI devices
488             class,
489             subclass,
490             None,
491             PciHeaderType::Device,
492             VIRTIO_PCI_VENDOR_ID,
493             pci_device_id,
494             msix_config_clone,
495             pci_configuration_state,
496         );
497 
498         let common_config_state =
499             vm_migration::versioned_state_from_id(snapshot.as_ref(), VIRTIO_PCI_COMMON_CONFIG_ID)
500                 .map_err(|e| {
501                 VirtioPciDeviceError::CreateVirtioPciDevice(anyhow!(
502                     "Failed to get VirtioPciCommonConfigState from Snapshot: {}",
503                     e
504                 ))
505             })?;
506 
507         let common_config = if let Some(common_config_state) = common_config_state {
508             VirtioPciCommonConfig::new(common_config_state, access_platform)
509         } else {
510             VirtioPciCommonConfig::new(
511                 VirtioPciCommonConfigState {
512                     driver_status: 0,
513                     config_generation: 0,
514                     device_feature_select: 0,
515                     driver_feature_select: 0,
516                     queue_select: 0,
517                     msix_config: VIRTQ_MSI_NO_VECTOR,
518                     msix_queues: vec![VIRTQ_MSI_NO_VECTOR; num_queues],
519                 },
520                 access_platform,
521             )
522         };
523 
524         let state: Option<VirtioPciDeviceState> = snapshot
525             .as_ref()
526             .map(|s| s.to_versioned_state())
527             .transpose()
528             .map_err(|e| {
529                 VirtioPciDeviceError::CreateVirtioPciDevice(anyhow!(
530                     "Failed to get VirtioPciDeviceState from Snapshot: {}",
531                     e
532                 ))
533             })?;
534 
535         let (device_activated, interrupt_status, cap_pci_cfg_info) = if let Some(state) = state {
536             // Restore each virtqueue's configuration and its available/used ring indexes.
537             for (i, queue) in queues.iter_mut().enumerate() {
538                 queue.set_size(state.queues[i].size);
539                 queue.set_ready(state.queues[i].ready);
540                 queue
541                     .try_set_desc_table_address(GuestAddress(state.queues[i].desc_table))
542                     .unwrap();
543                 queue
544                     .try_set_avail_ring_address(GuestAddress(state.queues[i].avail_ring))
545                     .unwrap();
546                 queue
547                     .try_set_used_ring_address(GuestAddress(state.queues[i].used_ring))
548                     .unwrap();
549                 queue.set_next_avail(
550                     queue
551                         .used_idx(memory.memory().deref(), Ordering::Acquire)
552                         .unwrap()
553                         .0,
554                 );
555                 queue.set_next_used(
556                     queue
557                         .used_idx(memory.memory().deref(), Ordering::Acquire)
558                         .unwrap()
559                         .0,
560                 );
561             }
562 
563             (
564                 state.device_activated,
565                 state.interrupt_status,
566                 VirtioPciCfgCapInfo {
567                     offset: state.cap_pci_cfg_offset,
568                     cap: *VirtioPciCfgCap::from_slice(&state.cap_pci_cfg).unwrap(),
569                 },
570             )
571         } else {
572             (false, 0, VirtioPciCfgCapInfo::default())
573         };
574 
575         // Drop the MutexGuard to unlock the VirtioDevice. This is required in
576         // the context of a restore, given the device might need to be activated,
577         // which in turn requires taking the lock. Dropping the lock here
578         // prevents a subtle deadlock.
579         std::mem::drop(locked_device);
580 
581         let mut virtio_pci_device = VirtioPciDevice {
582             id,
583             configuration,
584             common_config,
585             msix_config,
586             msix_num,
587             device,
588             device_activated: Arc::new(AtomicBool::new(device_activated)),
589             interrupt_status: Arc::new(AtomicUsize::new(interrupt_status)),
590             virtio_interrupt: None,
591             queues,
592             queue_evts,
593             memory,
594             settings_bar: 0,
595             use_64bit_bar,
596             interrupt_source_group,
597             cap_pci_cfg_info,
598             bar_regions: vec![],
599             activate_evt,
600             dma_handler,
601             pending_activations,
602         };
603 
604         if let Some(msix_config) = &virtio_pci_device.msix_config {
605             virtio_pci_device.virtio_interrupt = Some(Arc::new(VirtioInterruptMsix::new(
606                 msix_config.clone(),
607                 virtio_pci_device.common_config.msix_config.clone(),
608                 virtio_pci_device.common_config.msix_queues.clone(),
609                 virtio_pci_device.interrupt_source_group.clone(),
610             )));
611         }
612 
613         // In case of a restore, we can activate the device, as we know at
614         // this point the virtqueues are in the right state and the device is
615         // ready to be activated, which will spawn each virtio worker thread.
616         if virtio_pci_device.device_activated.load(Ordering::SeqCst)
617             && virtio_pci_device.is_driver_ready()
618         {
619             virtio_pci_device.activate().map_err(|e| {
620                 VirtioPciDeviceError::CreateVirtioPciDevice(anyhow!(
621                     "Failed activating the device: {}",
622                     e
623                 ))
624             })?;
625         }
626 
627         Ok(virtio_pci_device)
628     }
629 
630     fn state(&self) -> VirtioPciDeviceState {
631         VirtioPciDeviceState {
632             device_activated: self.device_activated.load(Ordering::Acquire),
633             interrupt_status: self.interrupt_status.load(Ordering::Acquire),
634             queues: self
635                 .queues
636                 .iter()
637                 .map(|q| QueueState {
638                     max_size: q.max_size(),
639                     size: q.size(),
640                     ready: q.ready(),
641                     desc_table: q.desc_table(),
642                     avail_ring: q.avail_ring(),
643                     used_ring: q.used_ring(),
644                 })
645                 .collect(),
646             cap_pci_cfg_offset: self.cap_pci_cfg_info.offset,
647             cap_pci_cfg: self.cap_pci_cfg_info.cap.bytes().to_vec(),
648         }
649     }
650 
651     /// Gets the list of queue events that must be triggered whenever the VM writes to
652     /// the device's notification region. The event at index `i` corresponds to queue `i`
653     /// and is meant to be wired to the notification address reported by `ioeventfds()`.
654     fn queue_evts(&self) -> &[EventFd] {
655         self.queue_evts.as_slice()
656     }
657 
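    // With the standard virtio status bit values (ACKNOWLEDGE = 1, DRIVER = 2,
    // DRIVER_OK = 4, FEATURES_OK = 8, FAILED = 128), a driver that has completed
    // initialization without failure leaves the status register at exactly 0x0f.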
658     fn is_driver_ready(&self) -> bool {
659         let ready_bits =
660             (DEVICE_ACKNOWLEDGE | DEVICE_DRIVER | DEVICE_DRIVER_OK | DEVICE_FEATURES_OK) as u8;
661         self.common_config.driver_status == ready_bits
662             && self.common_config.driver_status & DEVICE_FAILED as u8 == 0
663     }
664 
665     /// Determines if the driver has requested the device (re)init / reset itself
666     /// Determines whether the driver has requested that the device reset / reinitialize itself
667         self.common_config.driver_status == DEVICE_INIT as u8
668     }
669 
670     pub fn config_bar_addr(&self) -> u64 {
671         self.configuration.get_bar_addr(self.settings_bar as usize)
672     }
673 
674     fn add_pci_capabilities(
675         &mut self,
676         settings_bar: u8,
677     ) -> std::result::Result<(), PciDeviceError> {
678         // Add pointers to the different configuration structures from the PCI capabilities.
679         let common_cap = VirtioPciCap::new(
680             PciCapabilityType::Common,
681             settings_bar,
682             COMMON_CONFIG_BAR_OFFSET as u32,
683             COMMON_CONFIG_SIZE as u32,
684         );
685         self.configuration
686             .add_capability(&common_cap)
687             .map_err(PciDeviceError::CapabilitiesSetup)?;
688 
689         let isr_cap = VirtioPciCap::new(
690             PciCapabilityType::Isr,
691             settings_bar,
692             ISR_CONFIG_BAR_OFFSET as u32,
693             ISR_CONFIG_SIZE as u32,
694         );
695         self.configuration
696             .add_capability(&isr_cap)
697             .map_err(PciDeviceError::CapabilitiesSetup)?;
698 
699         // TODO(dgreid) - set based on device's configuration size?
700         let device_cap = VirtioPciCap::new(
701             PciCapabilityType::Device,
702             settings_bar,
703             DEVICE_CONFIG_BAR_OFFSET as u32,
704             DEVICE_CONFIG_SIZE as u32,
705         );
706         self.configuration
707             .add_capability(&device_cap)
708             .map_err(PciDeviceError::CapabilitiesSetup)?;
709 
710         let notify_cap = VirtioPciNotifyCap::new(
711             PciCapabilityType::Notify,
712             settings_bar,
713             NOTIFICATION_BAR_OFFSET as u32,
714             NOTIFICATION_SIZE as u32,
715             Le32::from(NOTIFY_OFF_MULTIPLIER),
716         );
717         self.configuration
718             .add_capability(&notify_cap)
719             .map_err(PciDeviceError::CapabilitiesSetup)?;
720 
721         let configuration_cap = VirtioPciCfgCap::new();
722         self.cap_pci_cfg_info.offset = self
723             .configuration
724             .add_capability(&configuration_cap)
725             .map_err(PciDeviceError::CapabilitiesSetup)?
726             + VIRTIO_PCI_CAP_OFFSET;
727         self.cap_pci_cfg_info.cap = configuration_cap;
728 
729         if self.msix_config.is_some() {
730             let msix_cap = MsixCap::new(
731                 settings_bar,
732                 self.msix_num,
733                 MSIX_TABLE_BAR_OFFSET as u32,
734                 settings_bar,
735                 MSIX_PBA_BAR_OFFSET as u32,
736             );
737             self.configuration
738                 .add_capability(&msix_cap)
739                 .map_err(PciDeviceError::CapabilitiesSetup)?;
740         }
741 
742         self.settings_bar = settings_bar;
743         Ok(())
744     }
745 
746     fn read_cap_pci_cfg(&mut self, offset: usize, mut data: &mut [u8]) {
747         let cap_slice = self.cap_pci_cfg_info.cap.as_slice();
748         let data_len = data.len();
749         let cap_len = cap_slice.len();
750         if offset + data_len > cap_len {
751             error!("Failed to read cap_pci_cfg from config space");
752             return;
753         }
754 
755         if offset < std::mem::size_of::<VirtioPciCap>() {
756             if let Some(end) = offset.checked_add(data_len) {
757                 // This write can't fail, offset and end are checked against cap_len.
758                 data.write_all(&cap_slice[offset..cmp::min(end, cap_len)])
759                     .unwrap();
760             }
761         } else {
762             let bar_offset: u32 =
763                 // SAFETY: we know self.cap_pci_cfg_info.cap.cap.offset is 32bits long.
764                 unsafe { std::mem::transmute(self.cap_pci_cfg_info.cap.cap.offset) };
765             self.read_bar(0, bar_offset as u64, data)
766         }
767     }
768 
769     fn write_cap_pci_cfg(&mut self, offset: usize, data: &[u8]) -> Option<Arc<Barrier>> {
770         let cap_slice = self.cap_pci_cfg_info.cap.as_mut_slice();
771         let data_len = data.len();
772         let cap_len = cap_slice.len();
773         if offset + data_len > cap_len {
774             error!("Failed to write cap_pci_cfg to config space");
775             return None;
776         }
777 
778         if offset < std::mem::size_of::<VirtioPciCap>() {
779             let (_, right) = cap_slice.split_at_mut(offset);
780             right[..data_len].copy_from_slice(data);
781             None
782         } else {
783             let bar_offset: u32 =
784                 // SAFETY: we know self.cap_pci_cfg_info.cap.cap.offset is 32bits long.
785                 unsafe { std::mem::transmute(self.cap_pci_cfg_info.cap.cap.offset) };
786             self.write_bar(0, bar_offset as u64, data)
787         }
788     }
789 
790     pub fn virtio_device(&self) -> Arc<Mutex<dyn VirtioDevice>> {
791         self.device.clone()
792     }
793 
794     fn prepare_activator(&mut self, barrier: Option<Arc<Barrier>>) -> VirtioPciDeviceActivator {
795         let mut queues = Vec::new();
796 
797         for (queue_index, queue) in self.queues.iter().enumerate() {
798             if !queue.ready() {
799                 continue;
800             }
801 
802             if !queue.is_valid(self.memory.memory().deref()) {
803                 error!("Queue {} is not valid", queue_index);
804             }
805 
806             queues.push((
807                 queue_index,
808                 vm_virtio::clone_queue(queue),
809                 self.queue_evts[queue_index].try_clone().unwrap(),
810             ));
811         }
812 
813         VirtioPciDeviceActivator {
814             interrupt: self.virtio_interrupt.take(),
815             memory: Some(self.memory.clone()),
816             device: self.device.clone(),
817             queues: Some(queues),
818             device_activated: self.device_activated.clone(),
819             barrier,
820             id: self.id.clone(),
821         }
822     }
823 
824     fn activate(&mut self) -> ActivateResult {
825         self.prepare_activator(None).activate()
826     }
827 
828     fn needs_activation(&self) -> bool {
829         !self.device_activated.load(Ordering::SeqCst) && self.is_driver_ready()
830     }
831 
832     pub fn dma_handler(&self) -> Option<&Arc<dyn ExternalDmaMapping>> {
833         self.dma_handler.as_ref()
834     }
835 }
836 
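// With the layout above, a device whose settings BAR is mapped at `base_addr`
// registers queue i's kick eventfd at
//
//     base_addr + NOTIFICATION_BAR_OFFSET + i * NOTIFY_OFF_MULTIPLIER
//
// e.g. queue 0 at base_addr + 0x6000, queue 1 at base_addr + 0x6004, and so on.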
837 impl VirtioTransport for VirtioPciDevice {
838     fn ioeventfds(&self, base_addr: u64) -> Vec<(&EventFd, u64)> {
839         let notify_base = base_addr + NOTIFICATION_BAR_OFFSET;
840         self.queue_evts()
841             .iter()
842             .enumerate()
843             .map(|(i, event)| {
844                 (
845                     event,
846                     notify_base + i as u64 * u64::from(NOTIFY_OFF_MULTIPLIER),
847                 )
848             })
849             .collect()
850     }
851 }
852 
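// VirtioInterruptMsix maps a VirtioInterruptType (configuration change or a
// queue index) to the MSI-X vector the guest programmed for it and delivers
// the interrupt through the InterruptSourceGroup. When the function or the
// entry is masked, it only sets the corresponding Pending Bit Array bit.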
853 pub struct VirtioInterruptMsix {
854     msix_config: Arc<Mutex<MsixConfig>>,
855     config_vector: Arc<AtomicU16>,
856     queues_vectors: Arc<Mutex<Vec<u16>>>,
857     interrupt_source_group: Arc<dyn InterruptSourceGroup>,
858 }
859 
860 impl VirtioInterruptMsix {
861     pub fn new(
862         msix_config: Arc<Mutex<MsixConfig>>,
863         config_vector: Arc<AtomicU16>,
864         queues_vectors: Arc<Mutex<Vec<u16>>>,
865         interrupt_source_group: Arc<dyn InterruptSourceGroup>,
866     ) -> Self {
867         VirtioInterruptMsix {
868             msix_config,
869             config_vector,
870             queues_vectors,
871             interrupt_source_group,
872         }
873     }
874 }
875 
876 impl VirtioInterrupt for VirtioInterruptMsix {
877     fn trigger(&self, int_type: VirtioInterruptType) -> std::result::Result<(), std::io::Error> {
878         let vector = match int_type {
879             VirtioInterruptType::Config => self.config_vector.load(Ordering::Acquire),
880             VirtioInterruptType::Queue(queue_index) => {
881                 self.queues_vectors.lock().unwrap()[queue_index as usize]
882             }
883         };
884 
885         if vector == VIRTQ_MSI_NO_VECTOR {
886             return Ok(());
887         }
888 
889         let config = &mut self.msix_config.lock().unwrap();
890         let entry = &config.table_entries[vector as usize];
891         // If the function is masked, or if the Vector Control register associated
892         // with the entry has its Mask bit (bit 0) set, the vector is masked and the
893         // device must not inject the interrupt.
894         // Instead, the Pending Bit Array is updated to reflect that there is a
895         // pending interrupt for this specific vector.
896         if config.masked() || entry.masked() {
897             config.set_pba_bit(vector, false);
898             return Ok(());
899         }
900 
901         self.interrupt_source_group
902             .trigger(vector as InterruptIndex)
903     }
904 
905     fn notifier(&self, int_type: VirtioInterruptType) -> Option<EventFd> {
906         let vector = match int_type {
907             VirtioInterruptType::Config => self.config_vector.load(Ordering::Acquire),
908             VirtioInterruptType::Queue(queue_index) => {
909                 self.queues_vectors.lock().unwrap()[queue_index as usize]
910             }
911         };
912 
913         self.interrupt_source_group
914             .notifier(vector as InterruptIndex)
915     }
916 }
917 
918 impl PciDevice for VirtioPciDevice {
919     fn write_config_register(
920         &mut self,
921         reg_idx: usize,
922         offset: u64,
923         data: &[u8],
924     ) -> Option<Arc<Barrier>> {
925         // Handle the special case where the capability VIRTIO_PCI_CAP_PCI_CFG
926         // is accessed. This capability has a special meaning as it allows the
927         // guest to access other capabilities without mapping the PCI BAR.
928         let base = reg_idx * 4;
929         if base + offset as usize >= self.cap_pci_cfg_info.offset
930             && base + offset as usize + data.len()
931                 <= self.cap_pci_cfg_info.offset + self.cap_pci_cfg_info.cap.bytes().len()
932         {
933             let offset = base + offset as usize - self.cap_pci_cfg_info.offset;
934             self.write_cap_pci_cfg(offset, data)
935         } else {
936             self.configuration
937                 .write_config_register(reg_idx, offset, data);
938             None
939         }
940     }
941 
942     fn read_config_register(&mut self, reg_idx: usize) -> u32 {
943         // Handle the special case where the capability VIRTIO_PCI_CAP_PCI_CFG
944         // is accessed. This capability has a special meaning as it allows the
945         // guest to access other capabilities without mapping the PCI BAR.
946         let base = reg_idx * 4;
947         if base >= self.cap_pci_cfg_info.offset
948             && base + 4 <= self.cap_pci_cfg_info.offset + self.cap_pci_cfg_info.cap.bytes().len()
949         {
950             let offset = base - self.cap_pci_cfg_info.offset;
951             let mut data = [0u8; 4];
952             self.read_cap_pci_cfg(offset, &mut data);
953             u32::from_le_bytes(data)
954         } else {
955             self.configuration.read_reg(reg_idx)
956         }
957     }
958 
959     fn detect_bar_reprogramming(
960         &mut self,
961         reg_idx: usize,
962         data: &[u8],
963     ) -> Option<BarReprogrammingParams> {
964         self.configuration.detect_bar_reprogramming(reg_idx, data)
965     }
966 
967     fn allocate_bars(
968         &mut self,
969         _allocator: &Arc<Mutex<SystemAllocator>>,
970         mmio32_allocator: &mut AddressAllocator,
971         mmio64_allocator: &mut AddressAllocator,
972         resources: Option<Vec<Resource>>,
973     ) -> std::result::Result<Vec<PciBarConfiguration>, PciDeviceError> {
974         let mut bars = Vec::new();
975         let device_clone = self.device.clone();
976         let device = device_clone.lock().unwrap();
977 
978         let mut settings_bar_addr = None;
979         let mut use_64bit_bar = self.use_64bit_bar;
980         let restoring = resources.is_some();
981         if let Some(resources) = resources {
982             for resource in resources {
983                 if let Resource::PciBar {
984                     index, base, type_, ..
985                 } = resource
986                 {
987                     if index == VIRTIO_COMMON_BAR_INDEX {
988                         settings_bar_addr = Some(GuestAddress(base));
989                         use_64bit_bar = match type_ {
990                             PciBarType::Io => {
991                                 return Err(PciDeviceError::InvalidResource(resource))
992                             }
993                             PciBarType::Mmio32 => false,
994                             PciBarType::Mmio64 => true,
995                         };
996                         break;
997                     }
998                 }
999             }
1000             // Error out if no resource matched the BAR index.
1001             if settings_bar_addr.is_none() {
1002                 return Err(PciDeviceError::MissingResource);
1003             }
1004         }
1005 
1006         // Allocate the virtio-pci capability BAR.
1007         // See http://docs.oasis-open.org/virtio/virtio/v1.0/cs04/virtio-v1.0-cs04.html#x1-740004
1008         let (virtio_pci_bar_addr, region_type) = if use_64bit_bar {
1009             let region_type = PciBarRegionType::Memory64BitRegion;
1010             let addr = mmio64_allocator
1011                 .allocate(
1012                     settings_bar_addr,
1013                     CAPABILITY_BAR_SIZE,
1014                     Some(CAPABILITY_BAR_SIZE),
1015                 )
1016                 .ok_or(PciDeviceError::IoAllocationFailed(CAPABILITY_BAR_SIZE))?;
1017             (addr, region_type)
1018         } else {
1019             let region_type = PciBarRegionType::Memory32BitRegion;
1020             let addr = mmio32_allocator
1021                 .allocate(
1022                     settings_bar_addr,
1023                     CAPABILITY_BAR_SIZE,
1024                     Some(CAPABILITY_BAR_SIZE),
1025                 )
1026                 .ok_or(PciDeviceError::IoAllocationFailed(CAPABILITY_BAR_SIZE))?;
1027             (addr, region_type)
1028         };
1029 
1030         let bar = PciBarConfiguration::default()
1031             .set_index(VIRTIO_COMMON_BAR_INDEX)
1032             .set_address(virtio_pci_bar_addr.raw_value())
1033             .set_size(CAPABILITY_BAR_SIZE)
1034             .set_region_type(region_type);
1035 
1036         // The creation of the PCI BAR and its associated capabilities must
1037         // happen only during the creation of a brand new VM. When a VM is
1038         // restored from a known state, the BARs are already created with the
1039         // right content, therefore we don't need to go through this codepath.
1040         if !restoring {
1041             self.configuration.add_pci_bar(&bar).map_err(|e| {
1042                 PciDeviceError::IoRegistrationFailed(virtio_pci_bar_addr.raw_value(), e)
1043             })?;
1044 
1045             // Once the BARs are allocated, the capabilities can be added to the PCI configuration.
1046             self.add_pci_capabilities(VIRTIO_COMMON_BAR_INDEX as u8)?;
1047         }
1048 
1049         bars.push(bar);
1050 
1051         // Allocate a dedicated BAR if there are some shared memory regions.
1052         if let Some(shm_list) = device.get_shm_regions() {
1053             let bar = PciBarConfiguration::default()
1054                 .set_index(VIRTIO_SHM_BAR_INDEX)
1055                 .set_address(shm_list.addr.raw_value())
1056                 .set_size(shm_list.len);
1057 
1058             // The creation of the PCI BAR and its associated capabilities must
1059             // happen only during the creation of a brand new VM. When a VM is
1060             // restored from a known state, the BARs are already created with the
1061             // right content, therefore we don't need to go through this codepath.
1062             if !restoring {
1063                 self.configuration.add_pci_bar(&bar).map_err(|e| {
1064                     PciDeviceError::IoRegistrationFailed(shm_list.addr.raw_value(), e)
1065                 })?;
1066 
1067                 for (idx, shm) in shm_list.region_list.iter().enumerate() {
1068                     let shm_cap = VirtioPciCap64::new(
1069                         PciCapabilityType::SharedMemory,
1070                         VIRTIO_SHM_BAR_INDEX as u8,
1071                         idx as u8,
1072                         shm.offset,
1073                         shm.len,
1074                     );
1075                     self.configuration
1076                         .add_capability(&shm_cap)
1077                         .map_err(PciDeviceError::CapabilitiesSetup)?;
1078                 }
1079             }
1080 
1081             bars.push(bar);
1082         }
1083 
1084         self.bar_regions.clone_from(&bars);
1085 
1086         Ok(bars)
1087     }
1088 
1089     fn free_bars(
1090         &mut self,
1091         _allocator: &mut SystemAllocator,
1092         mmio32_allocator: &mut AddressAllocator,
1093         mmio64_allocator: &mut AddressAllocator,
1094     ) -> std::result::Result<(), PciDeviceError> {
1095         for bar in self.bar_regions.drain(..) {
1096             match bar.region_type() {
1097                 PciBarRegionType::Memory32BitRegion => {
1098                     mmio32_allocator.free(GuestAddress(bar.addr()), bar.size());
1099                 }
1100                 PciBarRegionType::Memory64BitRegion => {
1101                     mmio64_allocator.free(GuestAddress(bar.addr()), bar.size());
1102                 }
1103                 _ => error!("Unexpected PCI bar type"),
1104             }
1105         }
1106         Ok(())
1107     }
1108 
1109     fn move_bar(
1110         &mut self,
1111         old_base: u64,
1112         new_base: u64,
1113     ) -> std::result::Result<(), std::io::Error> {
1114         // We only update our idea of the bar in order to support free_bars() above.
1115         // The majority of the reallocation is done inside DeviceManager.
1116         for bar in self.bar_regions.iter_mut() {
1117             if bar.addr() == old_base {
1118                 *bar = bar.set_address(new_base);
1119             }
1120         }
1121 
1122         Ok(())
1123     }
1124 
1125     fn read_bar(&mut self, _base: u64, offset: u64, data: &mut [u8]) {
1126         match offset {
1127             o if o < COMMON_CONFIG_BAR_OFFSET + COMMON_CONFIG_SIZE => self.common_config.read(
1128                 o - COMMON_CONFIG_BAR_OFFSET,
1129                 data,
1130                 &self.queues,
1131                 self.device.clone(),
1132             ),
1133             o if (ISR_CONFIG_BAR_OFFSET..ISR_CONFIG_BAR_OFFSET + ISR_CONFIG_SIZE).contains(&o) => {
1134                 if let Some(v) = data.get_mut(0) {
1135                     // Reading this register resets it to 0.
1136                     *v = self.interrupt_status.swap(0, Ordering::AcqRel) as u8;
1137                 }
1138             }
1139             o if (DEVICE_CONFIG_BAR_OFFSET..DEVICE_CONFIG_BAR_OFFSET + DEVICE_CONFIG_SIZE)
1140                 .contains(&o) =>
1141             {
1142                 let device = self.device.lock().unwrap();
1143                 device.read_config(o - DEVICE_CONFIG_BAR_OFFSET, data);
1144             }
1145             o if (NOTIFICATION_BAR_OFFSET..NOTIFICATION_BAR_OFFSET + NOTIFICATION_SIZE)
1146                 .contains(&o) =>
1147             {
1148                 // Handled with ioeventfds.
1149             }
1150             o if (MSIX_TABLE_BAR_OFFSET..MSIX_TABLE_BAR_OFFSET + MSIX_TABLE_SIZE).contains(&o) => {
1151                 if let Some(msix_config) = &self.msix_config {
1152                     msix_config
1153                         .lock()
1154                         .unwrap()
1155                         .read_table(o - MSIX_TABLE_BAR_OFFSET, data);
1156                 }
1157             }
1158             o if (MSIX_PBA_BAR_OFFSET..MSIX_PBA_BAR_OFFSET + MSIX_PBA_SIZE).contains(&o) => {
1159                 if let Some(msix_config) = &self.msix_config {
1160                     msix_config
1161                         .lock()
1162                         .unwrap()
1163                         .read_pba(o - MSIX_PBA_BAR_OFFSET, data);
1164                 }
1165             }
1166             _ => (),
1167         }
1168     }
1169 
1170     fn write_bar(&mut self, _base: u64, offset: u64, data: &[u8]) -> Option<Arc<Barrier>> {
1171         match offset {
1172             o if o < COMMON_CONFIG_BAR_OFFSET + COMMON_CONFIG_SIZE => self.common_config.write(
1173                 o - COMMON_CONFIG_BAR_OFFSET,
1174                 data,
1175                 &mut self.queues,
1176                 self.device.clone(),
1177             ),
1178             o if (ISR_CONFIG_BAR_OFFSET..ISR_CONFIG_BAR_OFFSET + ISR_CONFIG_SIZE).contains(&o) => {
1179                 if let Some(v) = data.first() {
1180                     self.interrupt_status
1181                         .fetch_and(!(*v as usize), Ordering::AcqRel);
1182                 }
1183             }
1184             o if (DEVICE_CONFIG_BAR_OFFSET..DEVICE_CONFIG_BAR_OFFSET + DEVICE_CONFIG_SIZE)
1185                 .contains(&o) =>
1186             {
1187                 let mut device = self.device.lock().unwrap();
1188                 device.write_config(o - DEVICE_CONFIG_BAR_OFFSET, data);
1189             }
1190             o if (NOTIFICATION_BAR_OFFSET..NOTIFICATION_BAR_OFFSET + NOTIFICATION_SIZE)
1191                 .contains(&o) =>
1192             {
1193                 #[cfg(feature = "sev_snp")]
1194                 for (_event, _addr) in self.ioeventfds(_base) {
1195                     if _addr == _base + offset {
1196                         _event.write(1).unwrap();
1197                     }
1198                 }
1199                 // Handled with ioeventfds.
1200                 #[cfg(not(feature = "sev_snp"))]
1201                 error!("Unexpected write to notification BAR: offset = 0x{:x}", o);
1202             }
1203             o if (MSIX_TABLE_BAR_OFFSET..MSIX_TABLE_BAR_OFFSET + MSIX_TABLE_SIZE).contains(&o) => {
1204                 if let Some(msix_config) = &self.msix_config {
1205                     msix_config
1206                         .lock()
1207                         .unwrap()
1208                         .write_table(o - MSIX_TABLE_BAR_OFFSET, data);
1209                 }
1210             }
1211             o if (MSIX_PBA_BAR_OFFSET..MSIX_PBA_BAR_OFFSET + MSIX_PBA_SIZE).contains(&o) => {
1212                 if let Some(msix_config) = &self.msix_config {
1213                     msix_config
1214                         .lock()
1215                         .unwrap()
1216                         .write_pba(o - MSIX_PBA_BAR_OFFSET, data);
1217                 }
1218             }
1219             _ => (),
1220         };
1221 
1222         // Try to activate the device if the driver status has changed.
1223         if self.needs_activation() {
1224             let barrier = Arc::new(Barrier::new(2));
1225             let activator = self.prepare_activator(Some(barrier.clone()));
1226             self.pending_activations.lock().unwrap().push(activator);
1227             info!(
1228                 "{}: Needs activation; writing to activate event fd",
1229                 self.id
1230             );
1231             self.activate_evt.write(1).ok();
1232             info!("{}: Needs activation; returning barrier", self.id);
1233             return Some(barrier);
1234         }
1235 
1236         // Device has been reset by the driver
1237         if self.device_activated.load(Ordering::SeqCst) && self.is_driver_init() {
1238             let mut device = self.device.lock().unwrap();
1239             if let Some(virtio_interrupt) = device.reset() {
1240                 // Upon reset the device hands its VirtioInterrupt back to the transport
1241                 self.virtio_interrupt = Some(virtio_interrupt);
1242                 self.device_activated.store(false, Ordering::SeqCst);
1243 
1244                 // Reset queue readiness (changes queue_enable), queue sizes
1245                 // and selected_queue as per spec for reset
1246                 self.queues.iter_mut().for_each(Queue::reset);
1247                 self.common_config.queue_select = 0;
1248             } else {
1249                 error!("Attempt to reset device when not implemented in underlying device");
1250                 self.common_config.driver_status = crate::DEVICE_FAILED as u8;
1251             }
1252         }
1253 
1254         None
1255     }
1256 
1257     fn as_any(&mut self) -> &mut dyn Any {
1258         self
1259     }
1260 
1261     fn id(&self) -> Option<String> {
1262         Some(self.id.clone())
1263     }
1264 }
1265 
1266 impl BusDevice for VirtioPciDevice {
1267     fn read(&mut self, base: u64, offset: u64, data: &mut [u8]) {
1268         self.read_bar(base, offset, data)
1269     }
1270 
1271     fn write(&mut self, base: u64, offset: u64, data: &[u8]) -> Option<Arc<Barrier>> {
1272         self.write_bar(base, offset, data)
1273     }
1274 }
1275 
1276 impl Pausable for VirtioPciDevice {
1277     fn pause(&mut self) -> std::result::Result<(), MigratableError> {
1278         Ok(())
1279     }
1280 
1281     fn resume(&mut self) -> std::result::Result<(), MigratableError> {
1282         Ok(())
1283     }
1284 }
1285 
1286 impl Snapshottable for VirtioPciDevice {
1287     fn id(&self) -> String {
1288         self.id.clone()
1289     }
1290 
1291     fn snapshot(&mut self) -> std::result::Result<Snapshot, MigratableError> {
1292         let mut virtio_pci_dev_snapshot = Snapshot::new_from_versioned_state(&self.state())?;
1293 
1294         // Snapshot PciConfiguration
1295         virtio_pci_dev_snapshot
1296             .add_snapshot(self.configuration.id(), self.configuration.snapshot()?);
1297 
1298         // Snapshot VirtioPciCommonConfig
1299         virtio_pci_dev_snapshot
1300             .add_snapshot(self.common_config.id(), self.common_config.snapshot()?);
1301 
1302         // Snapshot MSI-X
1303         if let Some(msix_config) = &self.msix_config {
1304             let mut msix_config = msix_config.lock().unwrap();
1305             virtio_pci_dev_snapshot.add_snapshot(msix_config.id(), msix_config.snapshot()?);
1306         }
1307 
1308         Ok(virtio_pci_dev_snapshot)
1309     }
1310 }
1311 impl Transportable for VirtioPciDevice {}
1312 impl Migratable for VirtioPciDevice {}
1313
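// A minimal sketch of a sanity check for the fixed settings-BAR layout defined
// near the top of this file. It only relies on constants defined in this
// module; the module and test names below are arbitrary.
#[cfg(test)]
mod capability_bar_layout_sketch {
    use super::*;

    #[test]
    fn regions_do_not_overlap_and_fit_in_the_bar() {
        // Each region must end before the next one starts...
        assert!(COMMON_CONFIG_BAR_OFFSET + COMMON_CONFIG_SIZE <= ISR_CONFIG_BAR_OFFSET);
        assert!(ISR_CONFIG_BAR_OFFSET + ISR_CONFIG_SIZE <= DEVICE_CONFIG_BAR_OFFSET);
        assert!(DEVICE_CONFIG_BAR_OFFSET + DEVICE_CONFIG_SIZE <= NOTIFICATION_BAR_OFFSET);
        assert!(NOTIFICATION_BAR_OFFSET + NOTIFICATION_SIZE <= MSIX_TABLE_BAR_OFFSET);
        assert!(MSIX_TABLE_BAR_OFFSET + MSIX_TABLE_SIZE <= MSIX_PBA_BAR_OFFSET);
        // ...and everything must fit in the BAR, whose size is a power of 2.
        assert!(MSIX_PBA_BAR_OFFSET + MSIX_PBA_SIZE <= CAPABILITY_BAR_SIZE);
        assert!(CAPABILITY_BAR_SIZE.is_power_of_two());
    }
}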