// Copyright 2018 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE-BSD-3-Clause file.
//
// Copyright © 2019 Intel Corporation
//
// SPDX-License-Identifier: Apache-2.0 AND BSD-3-Clause

use crate::{GuestMemoryMmap, VirtioDevice};
use byteorder::{ByteOrder, LittleEndian};
use std::sync::atomic::{AtomicU16, Ordering};
use std::sync::{Arc, Mutex};
use versionize::{VersionMap, Versionize, VersionizeResult};
use versionize_derive::Versionize;
use virtio_queue::Queue;
use vm_memory::{GuestAddress, GuestMemoryAtomic};
use vm_migration::{MigratableError, Pausable, Snapshot, Snapshottable, VersionMapped};
use vm_virtio::AccessPlatform;

#[derive(Clone, Versionize)]
pub struct VirtioPciCommonConfigState {
    pub driver_status: u8,
    pub config_generation: u8,
    pub device_feature_select: u32,
    pub driver_feature_select: u32,
    pub queue_select: u16,
    pub msix_config: u16,
    pub msix_queues: Vec<u16>,
}

impl VersionMapped for VirtioPciCommonConfigState {}

/// Contains the data for reading and writing the common configuration structure of a virtio PCI
/// device.
///
/// * Registers:
/// ** About the whole device.
/// le32 device_feature_select;     // 0x00 // read-write
/// le32 device_feature;            // 0x04 // read-only for driver
/// le32 driver_feature_select;     // 0x08 // read-write
/// le32 driver_feature;            // 0x0C // read-write
/// le16 msix_config;               // 0x10 // read-write
/// le16 num_queues;                // 0x12 // read-only for driver
/// u8 device_status;               // 0x14 // read-write (driver_status)
/// u8 config_generation;           // 0x15 // read-only for driver
/// ** About a specific virtqueue.
/// le16 queue_select;              // 0x16 // read-write
/// le16 queue_size;                // 0x18 // read-write, power of 2, or 0.
/// le16 queue_msix_vector;         // 0x1A // read-write
/// le16 queue_enable;              // 0x1C // read-write (Ready)
/// le16 queue_notify_off;          // 0x1E // read-only for driver
/// le64 queue_desc;                // 0x20 // read-write
/// le64 queue_avail;               // 0x28 // read-write
/// le64 queue_used;                // 0x30 // read-write
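///
/// Accesses are dispatched first on size (1, 2, 4 or 8 bytes), then on the offsets above. A
/// minimal sketch of a driver-style read, assuming `cfg`, `queues` and `device` have already
/// been set up by the transport (the names here are illustrative only):
///
/// ```ignore
/// // Read the le16 `num_queues` register at offset 0x12.
/// let mut buf = [0u8; 2];
/// cfg.read(0x12, &mut buf, &mut queues, device.clone());
/// let num_queues = u16::from_le_bytes(buf);
/// ```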
pub struct VirtioPciCommonConfig {
    pub access_platform: Option<Arc<dyn AccessPlatform>>,
    pub driver_status: u8,
    pub config_generation: u8,
    pub device_feature_select: u32,
    pub driver_feature_select: u32,
    pub queue_select: u16,
    pub msix_config: Arc<AtomicU16>,
    pub msix_queues: Arc<Mutex<Vec<u16>>>,
}

impl VirtioPciCommonConfig {
    fn state(&self) -> VirtioPciCommonConfigState {
        VirtioPciCommonConfigState {
            driver_status: self.driver_status,
            config_generation: self.config_generation,
            device_feature_select: self.device_feature_select,
            driver_feature_select: self.driver_feature_select,
            queue_select: self.queue_select,
            msix_config: self.msix_config.load(Ordering::Acquire),
            msix_queues: self.msix_queues.lock().unwrap().clone(),
        }
    }

    fn set_state(&mut self, state: &VirtioPciCommonConfigState) {
        self.driver_status = state.driver_status;
        self.config_generation = state.config_generation;
        self.device_feature_select = state.device_feature_select;
        self.driver_feature_select = state.driver_feature_select;
        self.queue_select = state.queue_select;
        self.msix_config.store(state.msix_config, Ordering::Release);
        *(self.msix_queues.lock().unwrap()) = state.msix_queues.clone();
    }

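    /// Handles a driver read of `data.len()` bytes (1, 2, 4 or 8) from the common
    /// configuration structure at `offset`, dispatching to the size-specific helpers below.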
    pub fn read(
        &mut self,
        offset: u64,
        data: &mut [u8],
        queues: &mut [Queue<GuestMemoryAtomic<GuestMemoryMmap>>],
        device: Arc<Mutex<dyn VirtioDevice>>,
    ) {
        assert!(data.len() <= 8);

        match data.len() {
            1 => {
                let v = self.read_common_config_byte(offset);
                data[0] = v;
            }
            2 => {
                let v = self.read_common_config_word(offset, queues);
                LittleEndian::write_u16(data, v);
            }
            4 => {
                let v = self.read_common_config_dword(offset, device);
                LittleEndian::write_u32(data, v);
            }
            8 => {
                let v = self.read_common_config_qword(offset);
                LittleEndian::write_u64(data, v);
            }
            _ => error!("invalid data length for virtio read: len {}", data.len()),
        }
    }

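    /// Handles a driver write of `data.len()` bytes (1, 2, 4 or 8) to the common
    /// configuration structure at `offset`, dispatching to the size-specific helpers below.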
    pub fn write(
        &mut self,
        offset: u64,
        data: &[u8],
        queues: &mut [Queue<GuestMemoryAtomic<GuestMemoryMmap>>],
        device: Arc<Mutex<dyn VirtioDevice>>,
    ) {
        assert!(data.len() <= 8);

        match data.len() {
            1 => self.write_common_config_byte(offset, data[0]),
            2 => self.write_common_config_word(offset, LittleEndian::read_u16(data), queues),
            4 => {
                self.write_common_config_dword(offset, LittleEndian::read_u32(data), queues, device)
            }
            8 => self.write_common_config_qword(offset, LittleEndian::read_u64(data), queues),
            _ => error!("invalid data length for virtio write: len {}", data.len()),
        }
    }

    fn read_common_config_byte(&self, offset: u64) -> u8 {
        debug!("read_common_config_byte: offset 0x{:x}", offset);
        // The driver is only allowed to do aligned, properly sized access.
        match offset {
            0x14 => self.driver_status,
            0x15 => self.config_generation,
            _ => {
                warn!("invalid virtio config byte read: 0x{:x}", offset);
                0
            }
        }
    }

    fn write_common_config_byte(&mut self, offset: u64, value: u8) {
        debug!("write_common_config_byte: offset 0x{:x}", offset);
        match offset {
            0x14 => self.driver_status = value,
            _ => {
                warn!("invalid virtio config byte write: 0x{:x}", offset);
            }
        }
    }

    fn read_common_config_word(
        &self,
        offset: u64,
        queues: &[Queue<GuestMemoryAtomic<GuestMemoryMmap>>],
    ) -> u16 {
        debug!("read_common_config_word: offset 0x{:x}", offset);
        match offset {
            0x10 => self.msix_config.load(Ordering::Acquire),
            0x12 => queues.len() as u16, // num_queues
            0x16 => self.queue_select,
            0x18 => self.with_queue(queues, |q| q.state.size).unwrap_or(0),
            0x1a => self.msix_queues.lock().unwrap()[self.queue_select as usize],
            0x1c => {
                if self.with_queue(queues, |q| q.state.ready).unwrap_or(false) {
                    1
                } else {
                    0
                }
            }
            0x1e => self.queue_select, // notify_off
            _ => {
                warn!("invalid virtio register word read: 0x{:x}", offset);
                0
            }
        }
    }

    fn write_common_config_word(
        &mut self,
        offset: u64,
        value: u16,
        queues: &mut [Queue<GuestMemoryAtomic<GuestMemoryMmap>>],
    ) {
        debug!("write_common_config_word: offset 0x{:x}", offset);
        match offset {
            0x10 => self.msix_config.store(value, Ordering::Release),
            0x16 => self.queue_select = value,
            0x18 => self.with_queue_mut(queues, |q| q.state.size = value),
            0x1a => self.msix_queues.lock().unwrap()[self.queue_select as usize] = value,
            0x1c => self.with_queue_mut(queues, |q| {
                let ready = value == 1;
                q.set_ready(ready);
                // Translate address of descriptor table and vrings.
                if let Some(access_platform) = &self.access_platform {
                    if ready {
                        let desc_table = access_platform
                            .translate_gva(q.state.desc_table.0, 0)
                            .unwrap();
                        let avail_ring = access_platform
                            .translate_gva(q.state.avail_ring.0, 0)
                            .unwrap();
                        let used_ring = access_platform
                            .translate_gva(q.state.used_ring.0, 0)
                            .unwrap();
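                        // The queue setters take each 64-bit address as two 32-bit
                        // halves, so split the translated addresses accordingly.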
                        q.set_desc_table_address(
                            Some((desc_table & 0xffff_ffff) as u32),
                            Some((desc_table >> 32) as u32),
                        );
                        q.set_avail_ring_address(
                            Some((avail_ring & 0xffff_ffff) as u32),
                            Some((avail_ring >> 32) as u32),
                        );
                        q.set_used_ring_address(
                            Some((used_ring & 0xffff_ffff) as u32),
                            Some((used_ring >> 32) as u32),
                        );
                    }
                }
            }),
            _ => {
                warn!("invalid virtio register word write: 0x{:x}", offset);
            }
        }
    }

    fn read_common_config_dword(&self, offset: u64, device: Arc<Mutex<dyn VirtioDevice>>) -> u32 {
        debug!("read_common_config_dword: offset 0x{:x}", offset);
        match offset {
            0x00 => self.device_feature_select,
            0x04 => {
                let locked_device = device.lock().unwrap();
                // Only 64 bits of features (2 pages) are defined for now, so limit
                // device_feature_select to avoid shifting by 64 or more bits.
                if self.device_feature_select < 2 {
                    (locked_device.features() >> (self.device_feature_select * 32)) as u32
                } else {
                    0
                }
            }
            0x08 => self.driver_feature_select,
            _ => {
                warn!("invalid virtio register dword read: 0x{:x}", offset);
                0
            }
        }
    }

    fn write_common_config_dword(
        &mut self,
        offset: u64,
        value: u32,
        queues: &mut [Queue<GuestMemoryAtomic<GuestMemoryMmap>>],
        device: Arc<Mutex<dyn VirtioDevice>>,
    ) {
        debug!("write_common_config_dword: offset 0x{:x}", offset);
        fn hi(v: &mut GuestAddress, x: u32) {
            *v = (*v & 0xffff_ffff) | ((u64::from(x)) << 32)
        }

        fn lo(v: &mut GuestAddress, x: u32) {
            *v = (*v & !0xffff_ffff) | (u64::from(x))
        }

        match offset {
            0x00 => self.device_feature_select = value,
            0x08 => self.driver_feature_select = value,
            0x0c => {
                if self.driver_feature_select < 2 {
                    let mut locked_device = device.lock().unwrap();
                    locked_device
                        .ack_features(u64::from(value) << (self.driver_feature_select * 32));
                } else {
                    warn!(
                        "invalid ack_features (page {}, value 0x{:x})",
                        self.driver_feature_select, value
                    );
                }
            }
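            // The 64-bit queue addresses are exposed to the driver as pairs of 32-bit
            // registers; `lo`/`hi` above patch the corresponding half of the stored address.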
            0x20 => self.with_queue_mut(queues, |q| lo(&mut q.state.desc_table, value)),
            0x24 => self.with_queue_mut(queues, |q| hi(&mut q.state.desc_table, value)),
            0x28 => self.with_queue_mut(queues, |q| lo(&mut q.state.avail_ring, value)),
            0x2c => self.with_queue_mut(queues, |q| hi(&mut q.state.avail_ring, value)),
            0x30 => self.with_queue_mut(queues, |q| lo(&mut q.state.used_ring, value)),
            0x34 => self.with_queue_mut(queues, |q| hi(&mut q.state.used_ring, value)),
            _ => {
                warn!("invalid virtio register dword write: 0x{:x}", offset);
            }
        }
    }

    fn read_common_config_qword(&self, _offset: u64) -> u64 {
        debug!("read_common_config_qword: offset 0x{:x}", _offset);
        // Reading the queue address registers back is not supported; a well-behaved
        // driver only writes them, so return 0 for any 8-byte read.
        0
    }

    fn write_common_config_qword(
        &mut self,
        offset: u64,
        value: u64,
        queues: &mut [Queue<GuestMemoryAtomic<GuestMemoryMmap>>],
    ) {
        debug!("write_common_config_qword: offset 0x{:x}", offset);
        match offset {
            0x20 => self.with_queue_mut(queues, |q| q.state.desc_table = GuestAddress(value)),
            0x28 => self.with_queue_mut(queues, |q| q.state.avail_ring = GuestAddress(value)),
            0x30 => self.with_queue_mut(queues, |q| q.state.used_ring = GuestAddress(value)),
            _ => {
                warn!("invalid virtio register qword write: 0x{:x}", offset);
            }
        }
    }

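    // Runs `f` on the queue currently addressed by `queue_select`, returning `None` if
    // the selected index is out of range.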
    fn with_queue<U, F>(
        &self,
        queues: &[Queue<GuestMemoryAtomic<GuestMemoryMmap>>],
        f: F,
    ) -> Option<U>
    where
        F: FnOnce(&Queue<GuestMemoryAtomic<GuestMemoryMmap>>) -> U,
    {
        queues.get(self.queue_select as usize).map(f)
    }

    fn with_queue_mut<F: FnOnce(&mut Queue<GuestMemoryAtomic<GuestMemoryMmap>>)>(
        &self,
        queues: &mut [Queue<GuestMemoryAtomic<GuestMemoryMmap>>],
        f: F,
    ) {
        if let Some(queue) = queues.get_mut(self.queue_select as usize) {
            f(queue);
        }
    }
}

impl Pausable for VirtioPciCommonConfig {}

impl Snapshottable for VirtioPciCommonConfig {
    fn id(&self) -> String {
        String::from("virtio_pci_common_config")
    }

    fn snapshot(&mut self) -> std::result::Result<Snapshot, MigratableError> {
        Snapshot::new_from_versioned_state(&self.id(), &self.state())
    }

    fn restore(&mut self, snapshot: Snapshot) -> std::result::Result<(), MigratableError> {
        self.set_state(&snapshot.to_versioned_state(&self.id())?);
        Ok(())
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::GuestMemoryMmap;
    use crate::{ActivateResult, VirtioInterrupt};
    use std::sync::Arc;
    use virtio_queue::Queue;
    use vm_memory::GuestMemoryAtomic;
    use vmm_sys_util::eventfd::EventFd;

    struct DummyDevice(u32);
    const QUEUE_SIZE: u16 = 256;
    const QUEUE_SIZES: &[u16] = &[QUEUE_SIZE];
    const DUMMY_FEATURES: u64 = 0x5555_aaaa;
    impl VirtioDevice for DummyDevice {
        fn device_type(&self) -> u32 {
            self.0
        }
        fn queue_max_sizes(&self) -> &[u16] {
            QUEUE_SIZES
        }
        fn activate(
            &mut self,
            _mem: GuestMemoryAtomic<GuestMemoryMmap>,
            _interrupt_evt: Arc<dyn VirtioInterrupt>,
            _queues: Vec<Queue<GuestMemoryAtomic<GuestMemoryMmap>>>,
            _queue_evts: Vec<EventFd>,
        ) -> ActivateResult {
            Ok(())
        }

        fn features(&self) -> u64 {
            DUMMY_FEATURES
        }

        fn ack_features(&mut self, _value: u64) {}

        fn read_config(&self, _offset: u64, _data: &mut [u8]) {}

        fn write_config(&mut self, _offset: u64, _data: &[u8]) {}
    }

    #[test]
    fn write_base_regs() {
        let mut regs = VirtioPciCommonConfig {
            access_platform: None,
            driver_status: 0xaa,
            config_generation: 0x55,
            device_feature_select: 0x0,
            driver_feature_select: 0x0,
            queue_select: 0xff,
            msix_config: Arc::new(AtomicU16::new(0)),
            msix_queues: Arc::new(Mutex::new(vec![0; 3])),
        };

        let dev = Arc::new(Mutex::new(DummyDevice(0)));
        let mut queues = Vec::new();

        // Can set all bits of driver_status.
        regs.write(0x14, &[0x55], &mut queues, dev.clone());
        let mut read_back = vec![0x00];
        regs.read(0x14, &mut read_back, &mut queues, dev.clone());
        assert_eq!(read_back[0], 0x55);

        // The config generation register is read only.
        regs.write(0x15, &[0xaa], &mut queues, dev.clone());
        let mut read_back = vec![0x00];
        regs.read(0x15, &mut read_back, &mut queues, dev.clone());
        assert_eq!(read_back[0], 0x55);

        // The device feature register is read-only and passed through from the device.
        regs.write(0x04, &[0, 0, 0, 0], &mut queues, dev.clone());
        let mut read_back = vec![0, 0, 0, 0];
        regs.read(0x04, &mut read_back, &mut queues, dev.clone());
        assert_eq!(LittleEndian::read_u32(&read_back), DUMMY_FEATURES as u32);

        // Feature select registers are read/write.
        regs.write(0x00, &[1, 2, 3, 4], &mut queues, dev.clone());
        let mut read_back = vec![0, 0, 0, 0];
        regs.read(0x00, &mut read_back, &mut queues, dev.clone());
        assert_eq!(LittleEndian::read_u32(&read_back), 0x0403_0201);
        regs.write(0x08, &[1, 2, 3, 4], &mut queues, dev.clone());
        let mut read_back = vec![0, 0, 0, 0];
        regs.read(0x08, &mut read_back, &mut queues, dev.clone());
        assert_eq!(LittleEndian::read_u32(&read_back), 0x0403_0201);

        // 'queue_select' can be read and written.
        regs.write(0x16, &[0xaa, 0x55], &mut queues, dev.clone());
        let mut read_back = vec![0x00, 0x00];
        regs.read(0x16, &mut read_back, &mut queues, dev);
        assert_eq!(read_back[0], 0xaa);
        assert_eq!(read_back[1], 0x55);
    }
}