xref: /cloud-hypervisor/virtio-devices/src/device.rs (revision 80b2c98a68d4c68f372f849e8d26f7cae5867000)
// Copyright 2018 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE-BSD-3-Clause file.
//
// Copyright © 2019 Intel Corporation
//
// SPDX-License-Identifier: Apache-2.0 AND BSD-3-Clause

use std::collections::HashMap;
use std::io::Write;
use std::num::Wrapping;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::{Arc, Barrier};
use std::thread;

use libc::EFD_NONBLOCK;
use virtio_queue::Queue;
use vm_memory::{GuestAddress, GuestMemoryAtomic, GuestUsize};
use vm_migration::{MigratableError, Pausable};
use vm_virtio::{AccessPlatform, VirtioDeviceType};
use vmm_sys_util::eventfd::EventFd;

use crate::{
    ActivateError, ActivateResult, Error, GuestMemoryMmap, GuestRegionMmap,
    VIRTIO_F_RING_INDIRECT_DESC,
};

pub enum VirtioInterruptType {
    Config,
    Queue(u16),
}

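/// Interrupt interface used by a virtio device to signal the guest, either
/// about a configuration change or about activity on one of its queues.
///
/// A minimal sketch of an implementation backed by a single `EventFd` (the
/// `EventFdInterrupt` type below is purely illustrative and not part of this
/// crate):
///
/// ```ignore
/// struct EventFdInterrupt {
///     evt: EventFd,
/// }
///
/// impl VirtioInterrupt for EventFdInterrupt {
///     fn trigger(&self, _int_type: VirtioInterruptType) -> std::io::Result<()> {
///         // Signal the guest irrespective of the interrupt type.
///         self.evt.write(1)
///     }
/// }
/// ```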
pub trait VirtioInterrupt: Send + Sync {
    /// Deliver an interrupt of the given type to the guest.
    fn trigger(&self, int_type: VirtioInterruptType) -> std::result::Result<(), std::io::Error>;

    /// Optionally return the `EventFd` backing the given interrupt, allowing
    /// it to be signalled directly rather than through `trigger()`.
    fn notifier(&self, _int_type: VirtioInterruptType) -> Option<EventFd> {
        None
    }
}

#[derive(Clone)]
pub struct UserspaceMapping {
    pub host_addr: u64,
    pub mem_slot: u32,
    pub addr: GuestAddress,
    pub len: GuestUsize,
    pub mergeable: bool,
}

#[derive(Clone)]
pub struct VirtioSharedMemory {
    pub offset: u64,
    pub len: u64,
}

#[derive(Clone)]
pub struct VirtioSharedMemoryList {
    pub host_addr: u64,
    pub mem_slot: u32,
    pub addr: GuestAddress,
    pub len: GuestUsize,
    pub region_list: Vec<VirtioSharedMemory>,
}

/// Trait for virtio devices to be driven by a virtio transport.
///
/// The lifecycle of a virtio device is to be moved to a virtio transport, which will then query the
/// device. Once the guest driver has configured the device, `VirtioDevice::activate` will be called
/// and all the events, memory, and queues for device operation will be moved into the device.
/// Optionally, a virtio device can implement device reset, in which case it returns said resources
/// and resets its internal state.
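///
/// A rough sketch of that lifecycle from the transport's point of view (the
/// `device`, `mem`, `interrupt` and `queues` values are assumed to be handed
/// over by the transport layer):
///
/// ```ignore
/// let features = device.features();
/// device.ack_features(features);
/// device.activate(mem, interrupt, queues)?;
/// // ... the guest driver operates the device through its queues ...
/// let _interrupt = device.reset(); // resources are handed back, if supported
/// ```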
pub trait VirtioDevice: Send {
    /// The virtio device type.
    fn device_type(&self) -> u32;

    /// The maximum size of each queue that this device supports.
    fn queue_max_sizes(&self) -> &[u16];

    /// The set of feature bits that this device supports.
    fn features(&self) -> u64 {
        0
    }

    /// Acknowledges that this set of features should be enabled.
    fn ack_features(&mut self, value: u64) {
        let _ = value;
    }

    /// Reads this device's configuration space at `offset`.
    fn read_config(&self, _offset: u64, _data: &mut [u8]) {
        warn!(
            "No readable configuration fields for {}",
            VirtioDeviceType::from(self.device_type())
        );
    }

    /// Writes to this device's configuration space at `offset`.
    fn write_config(&mut self, _offset: u64, _data: &[u8]) {
        warn!(
            "No writable configuration fields for {}",
            VirtioDeviceType::from(self.device_type())
        );
    }

    /// Activates this device for real usage.
    fn activate(
        &mut self,
        mem: GuestMemoryAtomic<GuestMemoryMmap>,
        interrupt_evt: Arc<dyn VirtioInterrupt>,
        queues: Vec<(usize, Queue, EventFd)>,
    ) -> ActivateResult;

    /// Optionally deactivates this device and returns ownership of the guest memory map, interrupt
    /// event, and queue events.
    fn reset(&mut self) -> Option<Arc<dyn VirtioInterrupt>> {
        None
    }

    /// Returns the list of shared memory regions required by the device.
    fn get_shm_regions(&self) -> Option<VirtioSharedMemoryList> {
        None
    }

    /// Updates the list of shared memory regions required by the device.
    fn set_shm_regions(
        &mut self,
        _shm_regions: VirtioSharedMemoryList,
    ) -> std::result::Result<(), Error> {
        std::unimplemented!()
    }

    /// Some devices may need to do some explicit shutdown work. This method
    /// may be implemented to do this. The VMM should call shutdown() on
    /// every device as part of shutting down the VM. Acting on the device
    /// after a shutdown() can lead to unpredictable results.
    fn shutdown(&mut self) {}

    /// Notifies the device that a new guest memory region has been added.
    fn add_memory_region(
        &mut self,
        _region: &Arc<GuestRegionMmap>,
    ) -> std::result::Result<(), Error> {
        Ok(())
    }

    /// Returns the list of userspace mappings associated with this device.
    fn userspace_mappings(&self) -> Vec<UserspaceMapping> {
        Vec::new()
    }

    /// Returns the counters that this device exposes.
    fn counters(&self) -> Option<HashMap<&'static str, Wrapping<u64>>> {
        None
    }

    /// Helper to allow a common implementation of `read_config`.
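    ///
    /// For example, a device that keeps its configuration in a byte buffer can
    /// forward to it directly (the `self.config` field below is an assumption
    /// about the implementing device, not part of this trait):
    ///
    /// ```ignore
    /// fn read_config(&self, offset: u64, data: &mut [u8]) {
    ///     self.read_config_from_slice(self.config.as_slice(), offset, data);
    /// }
    /// ```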
    fn read_config_from_slice(&self, config: &[u8], offset: u64, mut data: &mut [u8]) {
        let config_len = config.len() as u64;
        let data_len = data.len() as u64;
        if offset + data_len > config_len {
            error!(
                "Out-of-bounds access to configuration: config_len = {} offset = {:x} length = {} for {}",
                config_len,
                offset,
                data_len,
                self.device_type()
            );
            return;
        }
        if let Some(end) = offset.checked_add(data.len() as u64) {
            data.write_all(&config[offset as usize..std::cmp::min(end, config_len) as usize])
                .unwrap();
        }
    }

    /// Set the access platform trait to let the device perform address
    /// translations if needed.
    fn set_access_platform(&mut self, _access_platform: Arc<dyn AccessPlatform>) {}
}

/// Trait to define address translation for devices managed by virtio-iommu
///
/// This trait provides address translation the same way a physical DMA remapping
/// table would provide translation between an IOVA and a physical address.
/// It is meant to be used by virtio devices to perform address translation
/// before they access guest memory, while the implementation itself is
/// expected to be provided by the code emulating the IOMMU for the guest.
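///
/// A minimal identity-mapping sketch (the `IdentityRemapping` type below is
/// purely illustrative and not part of this crate):
///
/// ```ignore
/// struct IdentityRemapping;
///
/// impl DmaRemapping for IdentityRemapping {
///     fn translate_gva(&self, _id: u32, addr: u64) -> std::io::Result<u64> {
///         Ok(addr)
///     }
///     fn translate_gpa(&self, _id: u32, addr: u64) -> std::io::Result<u64> {
///         Ok(addr)
///     }
/// }
/// ```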
pub trait DmaRemapping {
    /// Provide a way to translate GVA addresses into GPAs.
    fn translate_gva(&self, id: u32, addr: u64) -> std::result::Result<u64, std::io::Error>;
    /// Provide a way to translate GPA addresses into GVAs.
    fn translate_gpa(&self, id: u32, addr: u64) -> std::result::Result<u64, std::io::Error>;
}

/// Structure to handle device state common to all devices
#[derive(Default)]
pub struct VirtioCommon {
    pub avail_features: u64,
    pub acked_features: u64,
    pub kill_evt: Option<EventFd>,
    pub interrupt_cb: Option<Arc<dyn VirtioInterrupt>>,
    pub pause_evt: Option<EventFd>,
    pub paused: Arc<AtomicBool>,
    pub paused_sync: Option<Arc<Barrier>>,
    pub epoll_threads: Option<Vec<thread::JoinHandle<()>>>,
    pub queue_sizes: Vec<u16>,
    pub device_type: u32,
    pub min_queues: u16,
    pub access_platform: Option<Arc<dyn AccessPlatform>>,
}

impl VirtioCommon {
    pub fn feature_acked(&self, feature: u64) -> bool {
        self.acked_features & (1 << feature) == (1 << feature)
    }

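    /// Records the feature bits acknowledged by the guest, ignoring any bit
    /// that was never offered. A short sketch of that behaviour (the bit
    /// values are purely illustrative):
    ///
    /// ```ignore
    /// let mut common = VirtioCommon {
    ///     avail_features: 0b01,
    ///     ..Default::default()
    /// };
    /// common.ack_features(0b11); // bit 1 was never offered
    /// assert_eq!(common.acked_features, 0b01);
    /// ```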
    pub fn ack_features(&mut self, value: u64) {
        let mut v = value;
        // Check if the guest is ACK'ing a feature that we didn't claim to have.
        let unrequested_features = v & !self.avail_features;
        if unrequested_features != 0 {
            warn!("Received acknowledge request for unknown feature.");

            // Don't count these features as acked.
            v &= !unrequested_features;
        }
        self.acked_features |= v;
    }

    pub fn activate(
        &mut self,
        queues: &[(usize, Queue, EventFd)],
        interrupt_cb: &Arc<dyn VirtioInterrupt>,
    ) -> ActivateResult {
        if queues.len() < self.min_queues.into() {
            error!(
                "Number of enabled queues lower than min: {} vs {}",
                queues.len(),
                self.min_queues
            );
            return Err(ActivateError::BadActivate);
        }

        let kill_evt = EventFd::new(EFD_NONBLOCK).map_err(|e| {
            error!("failed creating kill EventFd: {}", e);
            ActivateError::BadActivate
        })?;
        self.kill_evt = Some(kill_evt);

        let pause_evt = EventFd::new(EFD_NONBLOCK).map_err(|e| {
            error!("failed creating pause EventFd: {}", e);
            ActivateError::BadActivate
        })?;
        self.pause_evt = Some(pause_evt);

        // Save the interrupt callback as we need to return it on reset,
        // but clone it to pass into the thread.
        self.interrupt_cb = Some(interrupt_cb.clone());

        Ok(())
    }

    pub fn reset(&mut self) -> Option<Arc<dyn VirtioInterrupt>> {
        // We must first resume the virtio threads if they were paused.
        if self.pause_evt.take().is_some() {
            self.resume().ok()?;
        }

        if let Some(kill_evt) = self.kill_evt.take() {
            // Ignore the result because there is nothing we can do about it.
            let _ = kill_evt.write(1);
        }

        if let Some(mut threads) = self.epoll_threads.take() {
            for t in threads.drain(..) {
                if let Err(e) = t.join() {
                    error!("Error joining thread: {:?}", e);
                }
            }
        }

        // Return the interrupt callback.
        Some(self.interrupt_cb.take().unwrap())
    }

    // Wait for the worker threads to finish and return.
    pub fn wait_for_epoll_threads(&mut self) {
        if let Some(mut threads) = self.epoll_threads.take() {
            for t in threads.drain(..) {
                if let Err(e) = t.join() {
                    error!("Error joining thread: {:?}", e);
                }
            }
        }
    }

    pub fn dup_eventfds(&self) -> (EventFd, EventFd) {
        (
            self.kill_evt.as_ref().unwrap().try_clone().unwrap(),
            self.pause_evt.as_ref().unwrap().try_clone().unwrap(),
        )
    }

    pub fn set_access_platform(&mut self, access_platform: Arc<dyn AccessPlatform>) {
        self.access_platform = Some(access_platform);
        // Indirect descriptors feature is not supported when the device
        // requires the addresses held by the descriptors to be translated.
        self.avail_features &= !(1 << VIRTIO_F_RING_INDIRECT_DESC);
    }
}

impl Pausable for VirtioCommon {
    fn pause(&mut self) -> std::result::Result<(), MigratableError> {
        info!(
            "Pausing virtio-{}",
            VirtioDeviceType::from(self.device_type)
        );
        self.paused.store(true, Ordering::SeqCst);
        if let Some(pause_evt) = &self.pause_evt {
            pause_evt
                .write(1)
                .map_err(|e| MigratableError::Pause(e.into()))?;

            // Wait for all threads to acknowledge the pause before going
            // any further. This is exclusively performed when the pause_evt
            // eventfd is Some(), as this means the virtio device has been
            // activated. One specific case where the device can be paused
            // while it hasn't yet been activated is snapshot/restore.
            self.paused_sync.as_ref().unwrap().wait();
        }

        Ok(())
    }

    fn resume(&mut self) -> std::result::Result<(), MigratableError> {
        info!(
            "Resuming virtio-{}",
            VirtioDeviceType::from(self.device_type)
        );
        self.paused.store(false, Ordering::SeqCst);
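        // Wake up the epoll threads so they can observe the cleared `paused`
        // flag and resume processing events.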
        if let Some(epoll_threads) = &self.epoll_threads {
            for t in epoll_threads.iter() {
                t.thread().unpark();
            }
        }

        Ok(())
    }
}