xref: /cloud-hypervisor/virtio-devices/src/device.rs (revision d90fa96bb70492dfa8cf7419120dab5051e768ed)
// Copyright 2018 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE-BSD-3-Clause file.
//
// Copyright © 2019 Intel Corporation
//
// SPDX-License-Identifier: Apache-2.0 AND BSD-3-Clause

use crate::{
    ActivateError, ActivateResult, Error, GuestMemoryMmap, GuestRegionMmap,
    VIRTIO_F_RING_INDIRECT_DESC,
};
use libc::EFD_NONBLOCK;
use std::collections::HashMap;
use std::io::Write;
use std::num::Wrapping;
use std::sync::{
    atomic::{AtomicBool, Ordering},
    Arc, Barrier,
};
use std::thread;
use virtio_queue::Queue;
use vm_memory::{GuestAddress, GuestMemoryAtomic, GuestUsize};
use vm_migration::{MigratableError, Pausable};
use vm_virtio::AccessPlatform;
use vm_virtio::VirtioDeviceType;
use vmm_sys_util::eventfd::EventFd;

pub enum VirtioInterruptType {
    Config,
    Queue(u16),
}

pub trait VirtioInterrupt: Send + Sync {
    fn trigger(&self, int_type: VirtioInterruptType) -> std::result::Result<(), std::io::Error>;
    fn notifier(&self, _int_type: VirtioInterruptType) -> Option<EventFd> {
        None
    }
}
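
// Illustrative sketch (not part of the upstream file): a minimal
// `VirtioInterrupt` implementation backed by a single `EventFd`. The struct
// name and the single-eventfd layout are assumptions made for this example
// only; real transports (e.g. virtio-pci) typically keep one notifier per
// queue plus one for configuration changes.
#[allow(dead_code)]
struct SingleEventFdInterrupt {
    evt: EventFd,
}

impl VirtioInterrupt for SingleEventFdInterrupt {
    fn trigger(&self, _int_type: VirtioInterruptType) -> std::result::Result<(), std::io::Error> {
        // With only one notification channel, queue and config interrupts are
        // signaled the same way.
        self.evt.write(1)
    }

    fn notifier(&self, _int_type: VirtioInterruptType) -> Option<EventFd> {
        self.evt.try_clone().ok()
    }
}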

#[derive(Clone)]
pub struct UserspaceMapping {
    pub host_addr: u64,
    pub mem_slot: u32,
    pub addr: GuestAddress,
    pub len: GuestUsize,
    pub mergeable: bool,
}

#[derive(Clone)]
pub struct VirtioSharedMemory {
    pub offset: u64,
    pub len: u64,
}

#[derive(Clone)]
pub struct VirtioSharedMemoryList {
    pub host_addr: u64,
    pub mem_slot: u32,
    pub addr: GuestAddress,
    pub len: GuestUsize,
    pub region_list: Vec<VirtioSharedMemory>,
}

/// Trait for virtio devices to be driven by a virtio transport.
///
/// A virtio device is moved into a virtio transport, which then queries it on behalf of the
/// guest. Once the guest driver has configured the device, `VirtioDevice::activate` is called
/// and all the events, memory, and queues needed for device operation are moved into the device.
/// Optionally, a virtio device can implement device reset, in which case it returns those
/// resources and resets its internal state.
pub trait VirtioDevice: Send {
    /// The virtio device type.
    fn device_type(&self) -> u32;

    /// The maximum size of each queue that this device supports.
    fn queue_max_sizes(&self) -> &[u16];

    /// The set of feature bits that this device supports.
    fn features(&self) -> u64 {
        0
    }

    /// Acknowledges that this set of features should be enabled.
    fn ack_features(&mut self, value: u64) {
        let _ = value;
    }

    /// Reads this device's configuration space at `offset`.
    fn read_config(&self, _offset: u64, _data: &mut [u8]) {
        warn!(
            "No readable configuration fields for {}",
            VirtioDeviceType::from(self.device_type())
        );
    }

    /// Writes to this device's configuration space at `offset`.
    fn write_config(&mut self, _offset: u64, _data: &[u8]) {
        warn!(
            "No writable configuration fields for {}",
            VirtioDeviceType::from(self.device_type())
        );
    }

    /// Activates this device for real usage.
    fn activate(
        &mut self,
        mem: GuestMemoryAtomic<GuestMemoryMmap>,
        interrupt_evt: Arc<dyn VirtioInterrupt>,
        queues: Vec<(usize, Queue, EventFd)>,
    ) -> ActivateResult;

    /// Optionally deactivates this device and returns ownership of the guest memory map, interrupt
    /// event, and queue events.
    fn reset(&mut self) -> Option<Arc<dyn VirtioInterrupt>> {
        None
    }

    /// Returns the list of shared memory regions required by the device.
    fn get_shm_regions(&self) -> Option<VirtioSharedMemoryList> {
        None
    }

    /// Updates the list of shared memory regions required by the device.
    fn set_shm_regions(
        &mut self,
        _shm_regions: VirtioSharedMemoryList,
    ) -> std::result::Result<(), Error> {
        std::unimplemented!()
    }

    /// Some devices may need to do some explicit shutdown work. This method
    /// may be implemented to do this. The VMM should call shutdown() on
    /// every device as part of shutting down the VM. Acting on the device
    /// after a shutdown() can lead to unpredictable results.
    fn shutdown(&mut self) {}

    /// Adds a new guest memory region to the device.
    fn add_memory_region(
        &mut self,
        _region: &Arc<GuestRegionMmap>,
    ) -> std::result::Result<(), Error> {
        Ok(())
    }

    /// Returns the list of userspace mappings associated with this device.
    fn userspace_mappings(&self) -> Vec<UserspaceMapping> {
        Vec::new()
    }

    /// Returns the counters that this device exposes.
    fn counters(&self) -> Option<HashMap<&'static str, Wrapping<u64>>> {
        None
    }

    /// Helper to allow a common implementation of `read_config`.
    fn read_config_from_slice(&self, config: &[u8], offset: u64, mut data: &mut [u8]) {
        let config_len = config.len() as u64;
        let data_len = data.len() as u64;
        if offset + data_len > config_len {
            error!(
                "Out-of-bound access to configuration: config_len = {} offset = {:x} length = {} for {}",
                config_len,
                offset,
                data_len,
                self.device_type()
            );
            return;
        }
        if let Some(end) = offset.checked_add(data.len() as u64) {
            data.write_all(&config[offset as usize..std::cmp::min(end, config_len) as usize])
                .unwrap();
        }
    }

    /// Set the access platform trait to let the device perform address
    /// translations if needed.
    fn set_access_platform(&mut self, _access_platform: Arc<dyn AccessPlatform>) {}
}
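
// Illustrative sketch (not part of the upstream file): a minimal
// `VirtioDevice` implementation relying on the default method bodies above.
// The struct name, queue size and hard-coded device type (4, the virtio
// entropy device) are placeholders for the example only; a real device would
// spawn a worker thread in `activate` and hand the queues, memory and
// interrupt callback over to it.
#[allow(dead_code)]
struct ExampleDevice {
    queue_sizes: Vec<u16>,
}

impl VirtioDevice for ExampleDevice {
    fn device_type(&self) -> u32 {
        4 // virtio entropy (rng) device ID, used here purely as an example
    }

    fn queue_max_sizes(&self) -> &[u16] {
        &self.queue_sizes
    }

    fn activate(
        &mut self,
        _mem: GuestMemoryAtomic<GuestMemoryMmap>,
        _interrupt_evt: Arc<dyn VirtioInterrupt>,
        _queues: Vec<(usize, Queue, EventFd)>,
    ) -> ActivateResult {
        Ok(())
    }
}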

/// Trait to define address translation for devices managed by virtio-iommu
///
/// This trait provides address translation the same way a physical DMA remapping
/// table would, translating between an IOVA and a physical address. It is meant to
/// be used by virtio devices to translate addresses before they access guest
/// memory, while the implementation itself is provided by the code emulating the
/// IOMMU for the guest.
pub trait DmaRemapping {
    /// Translate a GVA into a GPA.
    fn translate_gva(&self, id: u32, addr: u64) -> std::result::Result<u64, std::io::Error>;
    /// Translate a GPA into a GVA.
    fn translate_gpa(&self, id: u32, addr: u64) -> std::result::Result<u64, std::io::Error>;
}
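
// Illustrative sketch (not part of the upstream file): the simplest possible
// `DmaRemapping` implementation, a pass-through that maps every address to
// itself regardless of the endpoint `id`. A real implementation would look
// the address up in the mappings programmed by the guest through virtio-iommu.
#[allow(dead_code)]
struct IdentityRemapping;

impl DmaRemapping for IdentityRemapping {
    fn translate_gva(&self, _id: u32, addr: u64) -> std::result::Result<u64, std::io::Error> {
        Ok(addr)
    }

    fn translate_gpa(&self, _id: u32, addr: u64) -> std::result::Result<u64, std::io::Error> {
        Ok(addr)
    }
}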

/// Structure to handle device state common to all devices
#[derive(Default)]
pub struct VirtioCommon {
    pub avail_features: u64,
    pub acked_features: u64,
    pub kill_evt: Option<EventFd>,
    pub interrupt_cb: Option<Arc<dyn VirtioInterrupt>>,
    pub pause_evt: Option<EventFd>,
    pub paused: Arc<AtomicBool>,
    pub paused_sync: Option<Arc<Barrier>>,
    pub epoll_threads: Option<Vec<thread::JoinHandle<()>>>,
    pub queue_sizes: Vec<u16>,
    pub device_type: u32,
    pub min_queues: u16,
    pub access_platform: Option<Arc<dyn AccessPlatform>>,
}

impl VirtioCommon {
    pub fn feature_acked(&self, feature: u64) -> bool {
        self.acked_features & (1 << feature) == (1 << feature)
    }

    pub fn ack_features(&mut self, value: u64) {
        let mut v = value;
        // Check if the guest is ACK'ing a feature that we didn't claim to have.
        let unrequested_features = v & !self.avail_features;
        if unrequested_features != 0 {
            warn!("Received acknowledge request for unknown feature.");

            // Don't count these features as acked.
            v &= !unrequested_features;
        }
        self.acked_features |= v;
    }

    pub fn activate(
        &mut self,
        queues: &[(usize, Queue, EventFd)],
        interrupt_cb: &Arc<dyn VirtioInterrupt>,
    ) -> ActivateResult {
        if queues.len() < self.min_queues.into() {
            error!(
                "Number of enabled queues lower than min: {} vs {}",
                queues.len(),
                self.min_queues
            );
            return Err(ActivateError::BadActivate);
        }

        let kill_evt = EventFd::new(EFD_NONBLOCK).map_err(|e| {
            error!("failed creating kill EventFd: {}", e);
            ActivateError::BadActivate
        })?;
        self.kill_evt = Some(kill_evt);

        let pause_evt = EventFd::new(EFD_NONBLOCK).map_err(|e| {
            error!("failed creating pause EventFd: {}", e);
            ActivateError::BadActivate
        })?;
        self.pause_evt = Some(pause_evt);

        // Save the interrupt callback as we need to return it on reset,
        // but clone it to pass into the thread.
        self.interrupt_cb = Some(interrupt_cb.clone());

        Ok(())
    }

    pub fn reset(&mut self) -> Option<Arc<dyn VirtioInterrupt>> {
        // We first must resume the virtio thread if it was paused.
        if self.pause_evt.take().is_some() {
            self.resume().ok()?;
        }

        if let Some(kill_evt) = self.kill_evt.take() {
            // Ignore the result because there is nothing we can do about it.
            let _ = kill_evt.write(1);
        }

        if let Some(mut threads) = self.epoll_threads.take() {
            for t in threads.drain(..) {
                if let Err(e) = t.join() {
                    error!("Error joining thread: {:?}", e);
                }
            }
        }

        // Return the interrupt
        Some(self.interrupt_cb.take().unwrap())
    }

    // Wait for the worker threads to finish and return
    pub fn wait_for_epoll_threads(&mut self) {
        if let Some(mut threads) = self.epoll_threads.take() {
            for t in threads.drain(..) {
                if let Err(e) = t.join() {
                    error!("Error joining thread: {:?}", e);
                }
            }
        }
    }

    pub fn dup_eventfds(&self) -> (EventFd, EventFd) {
        (
            self.kill_evt.as_ref().unwrap().try_clone().unwrap(),
            self.pause_evt.as_ref().unwrap().try_clone().unwrap(),
        )
    }

    pub fn set_access_platform(&mut self, access_platform: Arc<dyn AccessPlatform>) {
        self.access_platform = Some(access_platform);
        // Indirect descriptors feature is not supported when the device
        // requires the addresses held by the descriptors to be translated.
        self.avail_features &= !(1 << VIRTIO_F_RING_INDIRECT_DESC);
    }
}
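
// Illustrative sketch (not part of the upstream file): the typical feature
// negotiation flow for a device embedding `VirtioCommon`. The function name
// is hypothetical, and `VIRTIO_F_RING_INDIRECT_DESC` is used only as an
// example of a feature bit the device might advertise.
#[allow(dead_code)]
fn example_feature_negotiation(guest_acked: u64) -> bool {
    let mut common = VirtioCommon {
        avail_features: 1 << VIRTIO_F_RING_INDIRECT_DESC,
        ..Default::default()
    };
    // Bits the guest acknowledges but the device never advertised are dropped.
    common.ack_features(guest_acked);
    // The device can later check individual bits before relying on them.
    common.feature_acked(VIRTIO_F_RING_INDIRECT_DESC.into())
}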

impl Pausable for VirtioCommon {
    fn pause(&mut self) -> std::result::Result<(), MigratableError> {
        info!(
            "Pausing virtio-{}",
            VirtioDeviceType::from(self.device_type)
        );
        self.paused.store(true, Ordering::SeqCst);
        if let Some(pause_evt) = &self.pause_evt {
            pause_evt
                .write(1)
                .map_err(|e| MigratableError::Pause(e.into()))?;

            // Wait for all threads to acknowledge the pause before going
            // any further. This is exclusively performed when the pause_evt
            // eventfd is Some(), as this means the virtio device has been
            // activated. One specific case where the device can be paused
            // while it hasn't yet been activated is snapshot/restore.
            self.paused_sync.as_ref().unwrap().wait();
        }

        Ok(())
    }

    fn resume(&mut self) -> std::result::Result<(), MigratableError> {
        info!(
            "Resuming virtio-{}",
            VirtioDeviceType::from(self.device_type)
        );
        self.paused.store(false, Ordering::SeqCst);
        if let Some(epoll_threads) = &self.epoll_threads {
            for t in epoll_threads.iter() {
                t.thread().unpark();
            }
        }

        Ok(())
    }
}