xref: /cloud-hypervisor/virtio-devices/src/device.rs (revision 88a9f799449c04180c6b9a21d3b9c0c4b57e2bd6)
// Copyright 2018 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE-BSD-3-Clause file.
//
// Copyright © 2019 Intel Corporation
//
// SPDX-License-Identifier: Apache-2.0 AND BSD-3-Clause

use std::collections::HashMap;
use std::io::Write;
use std::num::Wrapping;
use std::sync::{
    atomic::{AtomicBool, Ordering},
    Arc, Barrier,
};
use std::thread;

use libc::EFD_NONBLOCK;
use virtio_queue::Queue;
use vm_memory::{GuestAddress, GuestMemoryAtomic, GuestUsize};
use vm_migration::{MigratableError, Pausable};
use vm_virtio::AccessPlatform;
use vm_virtio::VirtioDeviceType;
use vmm_sys_util::eventfd::EventFd;

use crate::{
    ActivateError, ActivateResult, Error, GuestMemoryMmap, GuestRegionMmap,
    VIRTIO_F_RING_INDIRECT_DESC,
};

/// Type of interrupt a virtio device can trigger: either a configuration
/// change notification or a notification for a specific queue.
pub enum VirtioInterruptType {
    Config,
    Queue(u16),
}

/// Interrupt interface provided by the virtio transport so that a device can
/// notify the guest driver.
pub trait VirtioInterrupt: Send + Sync {
    /// Trigger an interrupt of the given type.
    fn trigger(&self, int_type: VirtioInterruptType) -> std::result::Result<(), std::io::Error>;
    /// Return the `EventFd` backing the interrupt of the given type, if any.
    fn notifier(&self, _int_type: VirtioInterruptType) -> Option<EventFd> {
        None
    }
}

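// A minimal sketch, not part of the original file, of a `VirtioInterrupt`
// implementation that signals every interrupt through a single EventFd, purely to
// illustrate the trait contract. Real transports route configuration and queue
// interrupts to dedicated vectors; the `EventFdInterrupt` name is an assumption.
#[allow(dead_code)]
struct EventFdInterrupt {
    evt: EventFd,
}

impl VirtioInterrupt for EventFdInterrupt {
    fn trigger(&self, _int_type: VirtioInterruptType) -> std::result::Result<(), std::io::Error> {
        // Any interrupt type ends up as a write to the same eventfd.
        self.evt.write(1)
    }

    fn notifier(&self, _int_type: VirtioInterruptType) -> Option<EventFd> {
        self.evt.try_clone().ok()
    }
}
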
/// Userspace mapping exposed by a device, associating a host virtual address
/// with a guest address range.
#[derive(Clone)]
pub struct UserspaceMapping {
    pub host_addr: u64,
    pub mem_slot: u32,
    pub addr: GuestAddress,
    pub len: GuestUsize,
    pub mergeable: bool,
}

/// A single shared memory region, described by its offset and length within
/// the device's shared memory area.
#[derive(Clone)]
pub struct VirtioSharedMemory {
    pub offset: u64,
    pub len: u64,
}

/// List of shared memory regions exposed by a device, together with the host
/// mapping backing them.
#[derive(Clone)]
pub struct VirtioSharedMemoryList {
    pub host_addr: u64,
    pub mem_slot: u32,
    pub addr: GuestAddress,
    pub len: GuestUsize,
    pub region_list: Vec<VirtioSharedMemory>,
}

/// Trait for virtio devices to be driven by a virtio transport.
///
/// A virtio device is moved into a virtio transport, which then queries it on behalf of the
/// guest driver. Once the guest driver has configured the device, `VirtioDevice::activate` will
/// be called and all the events, memory, and queues for device operation will be moved into the
/// device. Optionally, a virtio device can implement device reset, in which case it returns said
/// resources and resets its internal state. A minimal implementation sketch follows this trait
/// definition.
pub trait VirtioDevice: Send {
    /// The virtio device type.
    fn device_type(&self) -> u32;

    /// The maximum size of each queue that this device supports.
    fn queue_max_sizes(&self) -> &[u16];

    /// The set of feature bits that this device supports.
    fn features(&self) -> u64 {
        0
    }

    /// Acknowledges that this set of features should be enabled.
    fn ack_features(&mut self, value: u64) {
        let _ = value;
    }

    /// Reads this device's configuration space at `offset`.
    fn read_config(&self, _offset: u64, _data: &mut [u8]) {
        warn!(
            "No readable configuration fields for {}",
            VirtioDeviceType::from(self.device_type())
        );
    }

    /// Writes to this device's configuration space at `offset`.
    fn write_config(&mut self, _offset: u64, _data: &[u8]) {
        warn!(
            "No writable configuration fields for {}",
            VirtioDeviceType::from(self.device_type())
        );
    }

    /// Activates this device for real usage.
    fn activate(
        &mut self,
        mem: GuestMemoryAtomic<GuestMemoryMmap>,
        interrupt_evt: Arc<dyn VirtioInterrupt>,
        queues: Vec<(usize, Queue, EventFd)>,
    ) -> ActivateResult;

    /// Optionally deactivates this device. If the device was activated, this returns
    /// ownership of the virtio interrupt object back to the caller.
    fn reset(&mut self) -> Option<Arc<dyn VirtioInterrupt>> {
        None
    }

    /// Returns the list of shared memory regions required by the device.
    fn get_shm_regions(&self) -> Option<VirtioSharedMemoryList> {
        None
    }

    /// Updates the list of shared memory regions required by the device.
    fn set_shm_regions(
        &mut self,
        _shm_regions: VirtioSharedMemoryList,
    ) -> std::result::Result<(), Error> {
        std::unimplemented!()
    }

    /// Some devices may need to do some explicit shutdown work. This method
    /// may be implemented to do this. The VMM should call shutdown() on
    /// every device as part of shutting down the VM. Acting on the device
    /// after a shutdown() can lead to unpredictable results.
    fn shutdown(&mut self) {}

    /// Updates the device with a newly added guest memory region.
    fn add_memory_region(
        &mut self,
        _region: &Arc<GuestRegionMmap>,
    ) -> std::result::Result<(), Error> {
        Ok(())
    }

    /// Returns the list of userspace mappings associated with this device.
    fn userspace_mappings(&self) -> Vec<UserspaceMapping> {
        Vec::new()
    }

    /// Returns the counters that this device exposes.
    fn counters(&self) -> Option<HashMap<&'static str, Wrapping<u64>>> {
        None
    }

    /// Helper to allow a common implementation of `read_config`.
    fn read_config_from_slice(&self, config: &[u8], offset: u64, mut data: &mut [u8]) {
        let config_len = config.len() as u64;
        let data_len = data.len() as u64;
        if offset + data_len > config_len {
            error!(
                "Out-of-bound access to configuration: config_len = {} offset = {:x} length = {} for {}",
                config_len,
                offset,
                data_len,
                self.device_type()
            );
            return;
        }
        // checked_add() guards against the offset + length computation overflowing u64.
        if let Some(end) = offset.checked_add(data.len() as u64) {
            data.write_all(&config[offset as usize..std::cmp::min(end, config_len) as usize])
                .unwrap();
        }
    }

    /// Set the access platform trait to let the device perform address
    /// translations if needed.
    fn set_access_platform(&mut self, _access_platform: Arc<dyn AccessPlatform>) {}
}

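// A minimal implementation sketch, not part of the original file: it shows the three
// required `VirtioDevice` methods plus `read_config` delegating to the
// `read_config_from_slice` helper. The `NullDevice` type, its fields, and the device
// type value 0 are assumptions made purely for illustration.
#[allow(dead_code)]
struct NullDevice {
    queue_sizes: Vec<u16>,
    config: Vec<u8>,
}

impl VirtioDevice for NullDevice {
    fn device_type(&self) -> u32 {
        // Hypothetical device type; real devices return a proper virtio device ID.
        0
    }

    fn queue_max_sizes(&self) -> &[u16] {
        &self.queue_sizes
    }

    fn read_config(&self, offset: u64, data: &mut [u8]) {
        // Reuse the trait-provided helper against this device's config space.
        self.read_config_from_slice(self.config.as_slice(), offset, data)
    }

    fn activate(
        &mut self,
        _mem: GuestMemoryAtomic<GuestMemoryMmap>,
        _interrupt_evt: Arc<dyn VirtioInterrupt>,
        _queues: Vec<(usize, Queue, EventFd)>,
    ) -> ActivateResult {
        // A real device would spawn its worker thread(s) here.
        Ok(())
    }
}
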
/// Trait to define address translation for devices managed by virtio-iommu
///
/// This trait provides address translation the same way a physical DMA remapping
/// table would provide translation between an IOVA and a physical address.
/// It is meant to be used by virtio devices to perform address translation
/// before they try to read from the guest physical address. The implementation
/// itself should be provided by the code emulating the IOMMU for the guest.
pub trait DmaRemapping {
    /// Provide a way to translate a GVA into a GPA.
    fn translate_gva(&self, id: u32, addr: u64) -> std::result::Result<u64, std::io::Error>;
    /// Provide a way to translate a GPA into a GVA.
    fn translate_gpa(&self, id: u32, addr: u64) -> std::result::Result<u64, std::io::Error>;
}

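// A minimal sketch, not part of the original file, of a `DmaRemapping` implementation
// that applies a fixed offset between the IOVA and GPA spaces, only to illustrate the
// trait contract. The `FixedOffsetRemapping` name and the single-offset model are
// assumptions; the real implementation is provided by the virtio-iommu emulation.
#[allow(dead_code)]
struct FixedOffsetRemapping {
    // Hypothetical per-endpoint offset between the IOVA and GPA spaces.
    offset: u64,
}

impl DmaRemapping for FixedOffsetRemapping {
    fn translate_gva(&self, _id: u32, addr: u64) -> std::result::Result<u64, std::io::Error> {
        addr.checked_add(self.offset).ok_or_else(|| {
            std::io::Error::new(std::io::ErrorKind::InvalidInput, "IOVA translation overflow")
        })
    }

    fn translate_gpa(&self, _id: u32, addr: u64) -> std::result::Result<u64, std::io::Error> {
        addr.checked_sub(self.offset).ok_or_else(|| {
            std::io::Error::new(std::io::ErrorKind::InvalidInput, "GPA translation underflow")
        })
    }
}
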
/// Structure to handle device state common to all devices
#[derive(Default)]
pub struct VirtioCommon {
    pub avail_features: u64,
    pub acked_features: u64,
    pub kill_evt: Option<EventFd>,
    pub interrupt_cb: Option<Arc<dyn VirtioInterrupt>>,
    pub pause_evt: Option<EventFd>,
    pub paused: Arc<AtomicBool>,
    pub paused_sync: Option<Arc<Barrier>>,
    pub epoll_threads: Option<Vec<thread::JoinHandle<()>>>,
    pub queue_sizes: Vec<u16>,
    pub device_type: u32,
    pub min_queues: u16,
    pub access_platform: Option<Arc<dyn AccessPlatform>>,
}

impl VirtioCommon {
    pub fn feature_acked(&self, feature: u64) -> bool {
        self.acked_features & (1 << feature) == (1 << feature)
    }

    pub fn ack_features(&mut self, value: u64) {
        let mut v = value;
        // Check if the guest is ACK'ing a feature that we didn't claim to have.
        let unrequested_features = v & !self.avail_features;
        if unrequested_features != 0 {
            warn!("Received acknowledge request for unknown feature.");

            // Don't count these features as acked.
            v &= !unrequested_features;
        }
        self.acked_features |= v;
    }

    pub fn activate(
        &mut self,
        queues: &[(usize, Queue, EventFd)],
        interrupt_cb: &Arc<dyn VirtioInterrupt>,
    ) -> ActivateResult {
        if queues.len() < self.min_queues.into() {
            error!(
                "Number of enabled queues lower than min: {} vs {}",
                queues.len(),
                self.min_queues
            );
            return Err(ActivateError::BadActivate);
        }

        let kill_evt = EventFd::new(EFD_NONBLOCK).map_err(|e| {
            error!("failed creating kill EventFd: {}", e);
            ActivateError::BadActivate
        })?;
        self.kill_evt = Some(kill_evt);

        let pause_evt = EventFd::new(EFD_NONBLOCK).map_err(|e| {
            error!("failed creating pause EventFd: {}", e);
            ActivateError::BadActivate
        })?;
        self.pause_evt = Some(pause_evt);

        // Save the interrupt callback as we need to return it on reset,
        // but clone it to pass into the thread.
        self.interrupt_cb = Some(interrupt_cb.clone());

        Ok(())
    }

    pub fn reset(&mut self) -> Option<Arc<dyn VirtioInterrupt>> {
        // We first must resume the virtio thread if it was paused.
        if self.pause_evt.take().is_some() {
            self.resume().ok()?;
        }

        if let Some(kill_evt) = self.kill_evt.take() {
            // Ignore the result because there is nothing we can do about it.
            let _ = kill_evt.write(1);
        }

        if let Some(mut threads) = self.epoll_threads.take() {
            for t in threads.drain(..) {
                if let Err(e) = t.join() {
                    error!("Error joining thread: {:?}", e);
                }
            }
        }

        // Return the interrupt
        Some(self.interrupt_cb.take().unwrap())
    }

    // Wait for the worker threads to finish and return.
    pub fn wait_for_epoll_threads(&mut self) {
        if let Some(mut threads) = self.epoll_threads.take() {
            for t in threads.drain(..) {
                if let Err(e) = t.join() {
                    error!("Error joining thread: {:?}", e);
                }
            }
        }
    }

    pub fn dup_eventfds(&self) -> (EventFd, EventFd) {
        (
            self.kill_evt.as_ref().unwrap().try_clone().unwrap(),
            self.pause_evt.as_ref().unwrap().try_clone().unwrap(),
        )
    }

    pub fn set_access_platform(&mut self, access_platform: Arc<dyn AccessPlatform>) {
        self.access_platform = Some(access_platform);
        // The indirect descriptors feature is not supported when the device
        // requires the addresses held by the descriptors to be translated.
        self.avail_features &= !(1 << VIRTIO_F_RING_INDIRECT_DESC);
    }
}

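// A minimal usage sketch, not part of the original file, exercising the feature
// negotiation helpers above. It relies only on items already defined or imported in
// this module; the module and test names are illustrative.
#[cfg(test)]
mod virtio_common_feature_tests {
    use super::*;

    #[test]
    fn ack_only_advertised_features() {
        let mut common = VirtioCommon {
            avail_features: 1u64 << VIRTIO_F_RING_INDIRECT_DESC,
            ..Default::default()
        };

        // The guest acknowledges one advertised feature bit and one that was
        // never advertised.
        common.ack_features((1u64 << VIRTIO_F_RING_INDIRECT_DESC) | (1u64 << 63));

        // Only the advertised feature is recorded as acked.
        assert!(common.feature_acked(VIRTIO_F_RING_INDIRECT_DESC as u64));
        assert_eq!(common.acked_features, 1u64 << VIRTIO_F_RING_INDIRECT_DESC);
    }
}
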
impl Pausable for VirtioCommon {
    fn pause(&mut self) -> std::result::Result<(), MigratableError> {
        info!(
            "Pausing virtio-{}",
            VirtioDeviceType::from(self.device_type)
        );
        self.paused.store(true, Ordering::SeqCst);
        if let Some(pause_evt) = &self.pause_evt {
            pause_evt
                .write(1)
                .map_err(|e| MigratableError::Pause(e.into()))?;

            // Wait for all threads to acknowledge the pause before going
            // any further. This is exclusively performed when pause_evt
            // eventfd is Some(), as this means the virtio device has been
            // activated. One specific case where the device can be paused
            // while it hasn't yet been activated is snapshot/restore.
            self.paused_sync.as_ref().unwrap().wait();
        }

        Ok(())
    }

    fn resume(&mut self) -> std::result::Result<(), MigratableError> {
        info!(
            "Resuming virtio-{}",
            VirtioDeviceType::from(self.device_type)
        );
        self.paused.store(false, Ordering::SeqCst);
        if let Some(epoll_threads) = &self.epoll_threads {
            for t in epoll_threads.iter() {
                t.thread().unpark();
            }
        }

        Ok(())
    }
}
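
// Sketch, not part of the original file, of the pause point a device worker thread is
// expected to hit in order to cooperate with pause()/resume() above: it acknowledges
// the pause through the paused_sync barrier and then parks until resume() clears the
// paused flag and unparks the thread. The free-standing function and its name are
// assumptions; real workers implement this inside their epoll loop.
#[allow(dead_code)]
fn worker_pause_point(paused: &AtomicBool, paused_sync: &Barrier) {
    if paused.load(Ordering::SeqCst) {
        // Acknowledge the pause so VirtioCommon::pause() can return.
        paused_sync.wait();
        // Stay parked until VirtioCommon::resume() unparks this thread.
        while paused.load(Ordering::SeqCst) {
            thread::park();
        }
    }
}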