// Copyright 2018 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE-BSD-3-Clause file.
//
// Copyright © 2019 Intel Corporation
//
// SPDX-License-Identifier: Apache-2.0 AND BSD-3-Clause

use crate::VirtioDevice;
use byteorder::{ByteOrder, LittleEndian};
use serde::{Deserialize, Serialize};
use std::sync::atomic::{AtomicU16, Ordering};
use std::sync::{Arc, Mutex};
use virtio_queue::{Queue, QueueT};
use vm_migration::{MigratableError, Pausable, Snapshot, Snapshottable};
use vm_virtio::AccessPlatform;

pub const VIRTIO_PCI_COMMON_CONFIG_ID: &str = "virtio_pci_common_config";

#[derive(Clone, Serialize, Deserialize)]
pub struct VirtioPciCommonConfigState {
    pub driver_status: u8,
    pub config_generation: u8,
    pub device_feature_select: u32,
    pub driver_feature_select: u32,
    pub queue_select: u16,
    pub msix_config: u16,
    pub msix_queues: Vec<u16>,
}

/* The standard layout for the ring is a continuous chunk of memory which looks
 * like this.  We assume num is a power of 2.
 *
 * struct vring
 * {
 *	// The actual descriptors (16 bytes each)
 *	struct vring_desc desc[num];
 *
 *	// A ring of available descriptor heads with free-running index.
 *	__virtio16 avail_flags;
 *	__virtio16 avail_idx;
 *	__virtio16 available[num];
 *	__virtio16 used_event_idx;
 *
 *	// Padding to the next align boundary.
 *	char pad[];
 *
 *	// A ring of used descriptor heads with free-running index.
 *	__virtio16 used_flags;
 *	__virtio16 used_idx;
 *	struct vring_used_elem used[num];
 *	__virtio16 avail_event_idx;
 * };
 * struct vring_desc {
 *	__virtio64 addr;
 *	__virtio32 len;
 *	__virtio16 flags;
 *	__virtio16 next;
 * };
 *
 * struct vring_avail {
 *	__virtio16 flags;
 *	__virtio16 idx;
 *	__virtio16 ring[];
 * };
 *
 * // u32 is used here for ids for padding reasons.
 * struct vring_used_elem {
 *	// Index of start of used descriptor chain.
 *	__virtio32 id;
 *	// Total length of the descriptor chain which was used (written to)
 *	__virtio32 len;
 * };
 *
 * Kernel header used for this reference: include/uapi/linux/virtio_ring.h
 * Virtio Spec: https://docs.oasis-open.org/virtio/virtio/v1.2/csd01/virtio-v1.2-csd01.html
 *
 */
const VRING_DESC_ELEMENT_SIZE: usize = 16;
const VRING_AVAIL_ELEMENT_SIZE: usize = 2;
const VRING_USED_ELEMENT_SIZE: usize = 8;
pub enum VringType {
    Desc,
    Avail,
    Used,
}

pub fn get_vring_size(t: VringType, queue_size: u16) -> u64 {
    let (length_except_ring, element_size) = match t {
        VringType::Desc => (0, VRING_DESC_ELEMENT_SIZE),
        VringType::Avail => (6, VRING_AVAIL_ELEMENT_SIZE),
        VringType::Used => (6, VRING_USED_ELEMENT_SIZE),
    };
    (length_except_ring + element_size * queue_size as usize) as u64
}
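
// Worked example (editorial note, not part of the original source): for a
// 256-entry queue, get_vring_size() returns
//   Desc:  16 * 256    = 4096 bytes (the descriptor table),
//   Avail: 6 + 2 * 256 =  518 bytes (flags, idx, used_event plus the ring),
//   Used:  6 + 8 * 256 = 2054 bytes (flags, idx, avail_event plus the ring).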

/// Contains the data for reading and writing the common configuration structure of a virtio PCI
/// device.
///
/// * Registers:
///
/// ** About the whole device.
///    le32 device_feature_select;     // 0x00 // read-write
///    le32 device_feature;            // 0x04 // read-only for driver
///    le32 driver_feature_select;     // 0x08 // read-write
///    le32 driver_feature;            // 0x0C // read-write
///    le16 msix_config;               // 0x10 // read-write
///    le16 num_queues;                // 0x12 // read-only for driver
///    u8 device_status;               // 0x14 // read-write (driver_status)
///    u8 config_generation;           // 0x15 // read-only for driver
///
/// ** About a specific virtqueue.
///    le16 queue_select;              // 0x16 // read-write
///    le16 queue_size;                // 0x18 // read-write, power of 2, or 0.
///    le16 queue_msix_vector;         // 0x1A // read-write
///    le16 queue_enable;              // 0x1C // read-write (Ready)
///    le16 queue_notify_off;          // 0x1E // read-only for driver
///    le64 queue_desc;                // 0x20 // read-write
///    le64 queue_avail;               // 0x28 // read-write
///    le64 queue_used;                // 0x30 // read-write
pub struct VirtioPciCommonConfig {
    pub access_platform: Option<Arc<dyn AccessPlatform>>,
    pub driver_status: u8,
    pub config_generation: u8,
    pub device_feature_select: u32,
    pub driver_feature_select: u32,
    pub queue_select: u16,
    pub msix_config: Arc<AtomicU16>,
    pub msix_queues: Arc<Mutex<Vec<u16>>>,
}

impl VirtioPciCommonConfig {
    pub fn new(
        state: VirtioPciCommonConfigState,
        access_platform: Option<Arc<dyn AccessPlatform>>,
    ) -> Self {
        VirtioPciCommonConfig {
            access_platform,
            driver_status: state.driver_status,
            config_generation: state.config_generation,
            device_feature_select: state.device_feature_select,
            driver_feature_select: state.driver_feature_select,
            queue_select: state.queue_select,
            msix_config: Arc::new(AtomicU16::new(state.msix_config)),
            msix_queues: Arc::new(Mutex::new(state.msix_queues)),
        }
    }

    fn state(&self) -> VirtioPciCommonConfigState {
        VirtioPciCommonConfigState {
            driver_status: self.driver_status,
            config_generation: self.config_generation,
            device_feature_select: self.device_feature_select,
            driver_feature_select: self.driver_feature_select,
            queue_select: self.queue_select,
            msix_config: self.msix_config.load(Ordering::Acquire),
            msix_queues: self.msix_queues.lock().unwrap().clone(),
        }
    }

    pub fn read(
        &mut self,
        offset: u64,
        data: &mut [u8],
        queues: &[Queue],
        device: Arc<Mutex<dyn VirtioDevice>>,
    ) {
        assert!(data.len() <= 8);

        match data.len() {
            1 => {
                let v = self.read_common_config_byte(offset);
                data[0] = v;
            }
            2 => {
                let v = self.read_common_config_word(offset, queues);
                LittleEndian::write_u16(data, v);
            }
            4 => {
                let v = self.read_common_config_dword(offset, device);
                LittleEndian::write_u32(data, v);
            }
            8 => {
                let v = self.read_common_config_qword(offset);
                LittleEndian::write_u64(data, v);
            }
            _ => error!("invalid data length for virtio read: len {}", data.len()),
        }
    }

    pub fn write(
        &mut self,
        offset: u64,
        data: &[u8],
        queues: &mut [Queue],
        device: Arc<Mutex<dyn VirtioDevice>>,
    ) {
        assert!(data.len() <= 8);

        match data.len() {
            1 => self.write_common_config_byte(offset, data[0]),
            2 => self.write_common_config_word(offset, LittleEndian::read_u16(data), queues),
            4 => {
                self.write_common_config_dword(offset, LittleEndian::read_u32(data), queues, device)
            }
            8 => self.write_common_config_qword(offset, LittleEndian::read_u64(data), queues),
            _ => error!("invalid data length for virtio write: len {}", data.len()),
        }
    }

    fn read_common_config_byte(&self, offset: u64) -> u8 {
        debug!("read_common_config_byte: offset 0x{:x}", offset);
        // The driver is only allowed to do aligned, properly sized access.
        match offset {
            0x14 => self.driver_status,
            0x15 => self.config_generation,
            _ => {
                warn!("invalid virtio config byte read: 0x{:x}", offset);
                0
            }
        }
    }

    fn write_common_config_byte(&mut self, offset: u64, value: u8) {
        debug!("write_common_config_byte: offset 0x{:x}", offset);
        match offset {
            0x14 => self.driver_status = value,
            _ => {
                warn!("invalid virtio config byte write: 0x{:x}", offset);
            }
        }
    }

    fn read_common_config_word(&self, offset: u64, queues: &[Queue]) -> u16 {
        debug!("read_common_config_word: offset 0x{:x}", offset);
        match offset {
            0x10 => self.msix_config.load(Ordering::Acquire),
            0x12 => queues.len() as u16, // num_queues
            0x16 => self.queue_select,
            0x18 => self.with_queue(queues, |q| q.size()).unwrap_or(0),
            0x1a => self.msix_queues.lock().unwrap()[self.queue_select as usize],
            0x1c => u16::from(self.with_queue(queues, |q| q.ready()).unwrap_or(false)),
            0x1e => self.queue_select, // notify_off
            _ => {
                warn!("invalid virtio register word read: 0x{:x}", offset);
                0
            }
        }
    }

    fn write_common_config_word(&mut self, offset: u64, value: u16, queues: &mut [Queue]) {
        debug!("write_common_config_word: offset 0x{:x}", offset);
        match offset {
            0x10 => self.msix_config.store(value, Ordering::Release),
            0x16 => self.queue_select = value,
            0x18 => self.with_queue_mut(queues, |q| q.set_size(value)),
            0x1a => self.msix_queues.lock().unwrap()[self.queue_select as usize] = value,
            0x1c => self.with_queue_mut(queues, |q| {
                let ready = value == 1;
                q.set_ready(ready);
                // Translate address of descriptor table and vrings.
                if let Some(access_platform) = &self.access_platform {
                    if ready {
                        let desc_table = access_platform
                            .translate_gva(
                                q.desc_table(),
                                get_vring_size(VringType::Desc, q.size()),
                            )
                            .unwrap();
                        let avail_ring = access_platform
                            .translate_gva(
                                q.avail_ring(),
                                get_vring_size(VringType::Avail, q.size()),
                            )
                            .unwrap();
                        let used_ring = access_platform
                            .translate_gva(q.used_ring(), get_vring_size(VringType::Used, q.size()))
                            .unwrap();
                        q.set_desc_table_address(
                            Some((desc_table & 0xffff_ffff) as u32),
                            Some((desc_table >> 32) as u32),
                        );
                        q.set_avail_ring_address(
                            Some((avail_ring & 0xffff_ffff) as u32),
                            Some((avail_ring >> 32) as u32),
                        );
                        q.set_used_ring_address(
                            Some((used_ring & 0xffff_ffff) as u32),
                            Some((used_ring >> 32) as u32),
                        );
                    }
                }
            }),
            _ => {
                warn!("invalid virtio register word write: 0x{:x}", offset);
            }
        }
    }

    fn read_common_config_dword(&self, offset: u64, device: Arc<Mutex<dyn VirtioDevice>>) -> u32 {
        debug!("read_common_config_dword: offset 0x{:x}", offset);
        match offset {
            0x00 => self.device_feature_select,
            0x04 => {
                let locked_device = device.lock().unwrap();
                // Only 64 bits of features (2 pages) are defined for now, so limit
                // device_feature_select to avoid shifting by 64 or more bits.
                if self.device_feature_select < 2 {
                    (locked_device.features() >> (self.device_feature_select * 32)) as u32
                } else {
                    0
                }
            }
            0x08 => self.driver_feature_select,
            _ => {
                warn!("invalid virtio register dword read: 0x{:x}", offset);
                0
            }
        }
    }

    fn write_common_config_dword(
        &mut self,
        offset: u64,
        value: u32,
        queues: &mut [Queue],
        device: Arc<Mutex<dyn VirtioDevice>>,
    ) {
        debug!("write_common_config_dword: offset 0x{:x}", offset);

        match offset {
            0x00 => self.device_feature_select = value,
            0x08 => self.driver_feature_select = value,
            0x0c => {
                if self.driver_feature_select < 2 {
                    let mut locked_device = device.lock().unwrap();
                    locked_device
                        .ack_features(u64::from(value) << (self.driver_feature_select * 32));
                } else {
                    warn!(
                        "invalid ack_features (page {}, value 0x{:x})",
                        self.driver_feature_select, value
                    );
                }
            }
            0x20 => self.with_queue_mut(queues, |q| q.set_desc_table_address(Some(value), None)),
            0x24 => self.with_queue_mut(queues, |q| q.set_desc_table_address(None, Some(value))),
            0x28 => self.with_queue_mut(queues, |q| q.set_avail_ring_address(Some(value), None)),
            0x2c => self.with_queue_mut(queues, |q| q.set_avail_ring_address(None, Some(value))),
            0x30 => self.with_queue_mut(queues, |q| q.set_used_ring_address(Some(value), None)),
            0x34 => self.with_queue_mut(queues, |q| q.set_used_ring_address(None, Some(value))),
            _ => {
                warn!("invalid virtio register dword write: 0x{:x}", offset);
            }
        }
    }

    fn read_common_config_qword(&self, _offset: u64) -> u64 {
        debug!("read_common_config_qword: offset 0x{:x}", _offset);
        0 // Assume the guest has no reason to read write-only registers.
    }

    fn write_common_config_qword(&mut self, offset: u64, value: u64, queues: &mut [Queue]) {
        debug!("write_common_config_qword: offset 0x{:x}", offset);

        let low = Some((value & 0xffff_ffff) as u32);
        let high = Some((value >> 32) as u32);

        match offset {
            0x20 => self.with_queue_mut(queues, |q| q.set_desc_table_address(low, high)),
            0x28 => self.with_queue_mut(queues, |q| q.set_avail_ring_address(low, high)),
            0x30 => self.with_queue_mut(queues, |q| q.set_used_ring_address(low, high)),
            _ => {
                warn!("invalid virtio register qword write: 0x{:x}", offset);
            }
        }
    }

    fn with_queue<U, F>(&self, queues: &[Queue], f: F) -> Option<U>
    where
        F: FnOnce(&Queue) -> U,
    {
        queues.get(self.queue_select as usize).map(f)
    }

    fn with_queue_mut<F: FnOnce(&mut Queue)>(&self, queues: &mut [Queue], f: F) {
        if let Some(queue) = queues.get_mut(self.queue_select as usize) {
            f(queue);
        }
    }
}

impl Pausable for VirtioPciCommonConfig {}

impl Snapshottable for VirtioPciCommonConfig {
    fn id(&self) -> String {
        String::from(VIRTIO_PCI_COMMON_CONFIG_ID)
    }

    fn snapshot(&mut self) -> std::result::Result<Snapshot, MigratableError> {
        Snapshot::new_from_state(&self.state())
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::GuestMemoryMmap;
    use crate::{ActivateResult, VirtioInterrupt};
    use vm_memory::GuestMemoryAtomic;
    use vmm_sys_util::eventfd::EventFd;

    struct DummyDevice(u32);
    const QUEUE_SIZE: u16 = 256;
    const QUEUE_SIZES: &[u16] = &[QUEUE_SIZE];
    const DUMMY_FEATURES: u64 = 0x5555_aaaa;
    impl VirtioDevice for DummyDevice {
        fn device_type(&self) -> u32 {
            self.0
        }
        fn queue_max_sizes(&self) -> &[u16] {
            QUEUE_SIZES
        }
        fn activate(
            &mut self,
            _mem: GuestMemoryAtomic<GuestMemoryMmap>,
            _interrupt_evt: Arc<dyn VirtioInterrupt>,
            _queues: Vec<(usize, Queue, EventFd)>,
        ) -> ActivateResult {
            Ok(())
        }

        fn features(&self) -> u64 {
            DUMMY_FEATURES
        }

        fn ack_features(&mut self, _value: u64) {}

        fn read_config(&self, _offset: u64, _data: &mut [u8]) {}

        fn write_config(&mut self, _offset: u64, _data: &[u8]) {}
    }

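    // Editor-added sketch (not in the original file): shows how
    // `device_feature_select` pages the 64-bit device feature word through the
    // 32-bit window at offset 0x04; selects beyond page 1 read back as zero.
    #[test]
    fn feature_pages_are_gated() {
        let mut regs = VirtioPciCommonConfig::new(
            VirtioPciCommonConfigState {
                driver_status: 0,
                config_generation: 0,
                device_feature_select: 0,
                driver_feature_select: 0,
                queue_select: 0,
                msix_config: 0,
                msix_queues: vec![0; 1],
            },
            None,
        );
        let dev = Arc::new(Mutex::new(DummyDevice(0)));
        let mut queues: Vec<Queue> = Vec::new();

        // Page 0 exposes the low 32 bits of the device features.
        let mut read_back = vec![0u8; 4];
        regs.read(0x04, &mut read_back, &queues, dev.clone());
        assert_eq!(LittleEndian::read_u32(&read_back), DUMMY_FEATURES as u32);

        // Page 1 exposes the high 32 bits (all zero for DUMMY_FEATURES).
        regs.write(0x00, &[1, 0, 0, 0], &mut queues, dev.clone());
        regs.read(0x04, &mut read_back, &queues, dev.clone());
        assert_eq!(LittleEndian::read_u32(&read_back), (DUMMY_FEATURES >> 32) as u32);

        // Pages beyond 1 are rejected by the device model and read as zero.
        regs.write(0x00, &[2, 0, 0, 0], &mut queues, dev.clone());
        regs.read(0x04, &mut read_back, &queues, dev);
        assert_eq!(LittleEndian::read_u32(&read_back), 0);
    }
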
    #[test]
    fn write_base_regs() {
        let mut regs = VirtioPciCommonConfig {
            access_platform: None,
            driver_status: 0xaa,
            config_generation: 0x55,
            device_feature_select: 0x0,
            driver_feature_select: 0x0,
            queue_select: 0xff,
            msix_config: Arc::new(AtomicU16::new(0)),
            msix_queues: Arc::new(Mutex::new(vec![0; 3])),
        };

        let dev = Arc::new(Mutex::new(DummyDevice(0)));
        let mut queues = Vec::new();

        // Can set all bits of driver_status.
        regs.write(0x14, &[0x55], &mut queues, dev.clone());
        let mut read_back = vec![0x00];
        regs.read(0x14, &mut read_back, &queues, dev.clone());
        assert_eq!(read_back[0], 0x55);

        // The config generation register is read only.
        regs.write(0x15, &[0xaa], &mut queues, dev.clone());
        let mut read_back = vec![0x00];
        regs.read(0x15, &mut read_back, &queues, dev.clone());
        assert_eq!(read_back[0], 0x55);

        // The device_feature register is read-only and passed through from the device.
        regs.write(0x04, &[0, 0, 0, 0], &mut queues, dev.clone());
        let mut read_back = vec![0, 0, 0, 0];
        regs.read(0x04, &mut read_back, &queues, dev.clone());
        assert_eq!(LittleEndian::read_u32(&read_back), DUMMY_FEATURES as u32);

        // Feature select registers are read/write.
        regs.write(0x00, &[1, 2, 3, 4], &mut queues, dev.clone());
        let mut read_back = vec![0, 0, 0, 0];
        regs.read(0x00, &mut read_back, &queues, dev.clone());
        assert_eq!(LittleEndian::read_u32(&read_back), 0x0403_0201);
        regs.write(0x08, &[1, 2, 3, 4], &mut queues, dev.clone());
        let mut read_back = vec![0, 0, 0, 0];
        regs.read(0x08, &mut read_back, &queues, dev.clone());
        assert_eq!(LittleEndian::read_u32(&read_back), 0x0403_0201);

        // 'queue_select' can be read and written.
        regs.write(0x16, &[0xaa, 0x55], &mut queues, dev.clone());
        let mut read_back = vec![0x00, 0x00];
        regs.read(0x16, &mut read_back, &queues, dev);
        assert_eq!(read_back[0], 0xaa);
        assert_eq!(read_back[1], 0x55);
    }
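
    // Editor-added sketch (not in the original file; assumes virtio-queue's
    // `QueueT::try_new` constructor and that a freshly built queue reports its
    // maximum size): drive the word-sized register path against a real queue.
    #[test]
    fn queue_word_registers() {
        let mut regs = VirtioPciCommonConfig::new(
            VirtioPciCommonConfigState {
                driver_status: 0,
                config_generation: 0,
                device_feature_select: 0,
                driver_feature_select: 0,
                queue_select: 0,
                msix_config: 0,
                msix_queues: vec![0; 1],
            },
            None,
        );
        let dev = Arc::new(Mutex::new(DummyDevice(0)));
        let mut queues = vec![Queue::try_new(QUEUE_SIZE).unwrap()];

        // num_queues (0x12) reflects how many queues the transport exposes.
        let mut read_back = vec![0u8; 2];
        regs.read(0x12, &mut read_back, &queues, dev.clone());
        assert_eq!(LittleEndian::read_u16(&read_back), 1);

        // queue_size (0x18) of the selected queue.
        regs.read(0x18, &mut read_back, &queues, dev.clone());
        assert_eq!(LittleEndian::read_u16(&read_back), QUEUE_SIZE);

        // Writing 1 to queue_enable (0x1c) marks the selected queue ready.
        regs.write(0x1c, &[1, 0], &mut queues, dev);
        assert!(queues[0].ready());
    }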
}