xref: /cloud-hypervisor/virtio-devices/src/device.rs (revision f67b3f79ea19c9a66e04074cbbf5d292f6529e43)
1 // Copyright 2018 The Chromium OS Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE-BSD-3-Clause file.
4 //
5 // Copyright © 2019 Intel Corporation
6 //
7 // SPDX-License-Identifier: Apache-2.0 AND BSD-3-Clause
8 
9 use crate::{ActivateError, ActivateResult, Error, Queue};
10 use crate::{GuestMemoryMmap, GuestRegionMmap};
11 use libc::EFD_NONBLOCK;
12 use std::collections::HashMap;
13 use std::io::Write;
14 use std::num::Wrapping;
15 use std::sync::{
16     atomic::{AtomicBool, Ordering},
17     Arc, Barrier,
18 };
19 use std::thread;
20 use vm_memory::{GuestAddress, GuestMemoryAtomic, GuestUsize};
21 use vm_migration::{MigratableError, Pausable};
22 use vm_virtio::VirtioDeviceType;
23 use vmm_sys_util::eventfd::EventFd;
24 
/// Kind of interrupt a virtio device can raise towards the guest.
pub enum VirtioInterruptType {
    /// Configuration-change interrupt.
    Config,
    /// Used-ring (queue) interrupt.
    Queue,
}
29 
/// Interrupt-delivery callback implemented by the virtio transport and handed
/// to devices at activation time.
pub trait VirtioInterrupt: Send + Sync {
    /// Triggers an interrupt of the given type into the guest. For
    /// `VirtioInterruptType::Queue` interrupts, `queue` identifies the queue
    /// the notification relates to.
    fn trigger(
        &self,
        int_type: &VirtioInterruptType,
        queue: Option<&Queue>,
    ) -> std::result::Result<(), std::io::Error>;
    /// Optionally returns an `EventFd` backing the given interrupt, or `None`
    /// (the default) if the transport does not expose one.
    fn notifier(&self, _int_type: &VirtioInterruptType, _queue: Option<&Queue>) -> Option<EventFd> {
        None
    }
}
40 
/// Describes a userspace memory mapping a device exposes to the VMM.
#[derive(Clone)]
pub struct UserspaceMapping {
    // Host virtual address of the mapping.
    pub host_addr: u64,
    // Memory slot number associated with this mapping.
    pub mem_slot: u32,
    // Guest physical address the mapping is placed at.
    pub addr: GuestAddress,
    // Length of the mapping in bytes.
    pub len: GuestUsize,
    // Whether this mapping can be merged with adjacent ones
    // (presumably for madvise/mergeable semantics — confirm at call sites).
    pub mergeable: bool,
}
49 
/// A single shared memory region, expressed as an offset/length pair within a
/// `VirtioSharedMemoryList`.
#[derive(Clone)]
pub struct VirtioSharedMemory {
    // Offset of the region within the shared memory list.
    pub offset: u64,
    // Length of the region in bytes.
    pub len: u64,
}
55 
/// The full set of shared memory regions backing a device, together with the
/// host mapping and guest placement of the containing window.
#[derive(Clone)]
pub struct VirtioSharedMemoryList {
    // Host virtual address of the shared memory window.
    pub host_addr: u64,
    // Memory slot number associated with the window.
    pub mem_slot: u32,
    // Guest physical address of the window.
    pub addr: GuestAddress,
    // Total length of the window in bytes.
    pub len: GuestUsize,
    // Individual regions carved out of the window.
    pub region_list: Vec<VirtioSharedMemory>,
}
64 
65 /// Trait for virtio devices to be driven by a virtio transport.
66 ///
67 /// The lifecycle of a virtio device is to be moved to a virtio transport, which will then query the
68 /// device. Once the guest driver has configured the device, `VirtioDevice::activate` will be called
69 /// and all the events, memory, and queues for device operation will be moved into the device.
70 /// Optionally, a virtio device can implement device reset in which it returns said resources and
71 /// resets its internal.
72 pub trait VirtioDevice: Send {
73     /// The virtio device type.
74     fn device_type(&self) -> u32;
75 
76     /// The maximum size of each queue that this device supports.
77     fn queue_max_sizes(&self) -> &[u16];
78 
79     /// The set of feature bits that this device supports.
80     fn features(&self) -> u64 {
81         0
82     }
83 
84     /// Acknowledges that this set of features should be enabled.
85     fn ack_features(&mut self, value: u64) {
86         let _ = value;
87     }
88 
89     /// Reads this device configuration space at `offset`.
90     fn read_config(&self, _offset: u64, _data: &mut [u8]) {
91         warn!(
92             "No readable configuration fields for {}",
93             VirtioDeviceType::from(self.device_type())
94         );
95     }
96 
97     /// Writes to this device configuration space at `offset`.
98     fn write_config(&mut self, _offset: u64, _data: &[u8]) {
99         warn!(
100             "No writable configuration fields for {}",
101             VirtioDeviceType::from(self.device_type())
102         );
103     }
104 
105     /// Activates this device for real usage.
106     fn activate(
107         &mut self,
108         mem: GuestMemoryAtomic<GuestMemoryMmap>,
109         interrupt_evt: Arc<dyn VirtioInterrupt>,
110         queues: Vec<Queue>,
111         queue_evts: Vec<EventFd>,
112     ) -> ActivateResult;
113 
114     /// Optionally deactivates this device and returns ownership of the guest memory map, interrupt
115     /// event, and queue events.
116     fn reset(&mut self) -> Option<Arc<dyn VirtioInterrupt>> {
117         None
118     }
119 
120     /// Returns the list of shared memory regions required by the device.
121     fn get_shm_regions(&self) -> Option<VirtioSharedMemoryList> {
122         None
123     }
124 
125     /// Updates the list of shared memory regions required by the device.
126     fn set_shm_regions(
127         &mut self,
128         _shm_regions: VirtioSharedMemoryList,
129     ) -> std::result::Result<(), Error> {
130         std::unimplemented!()
131     }
132 
133     fn iommu_translate(&self, addr: u64) -> u64 {
134         addr
135     }
136 
137     /// Some devices may need to do some explicit shutdown work. This method
138     /// may be implemented to do this. The VMM should call shutdown() on
139     /// every device as part of shutting down the VM. Acting on the device
140     /// after a shutdown() can lead to unpredictable results.
141     fn shutdown(&mut self) {}
142 
143     fn add_memory_region(
144         &mut self,
145         _region: &Arc<GuestRegionMmap>,
146     ) -> std::result::Result<(), Error> {
147         Ok(())
148     }
149 
150     /// Returns the list of userspace mappings associated with this device.
151     fn userspace_mappings(&self) -> Vec<UserspaceMapping> {
152         Vec::new()
153     }
154 
155     /// Return the counters that this device exposes
156     fn counters(&self) -> Option<HashMap<&'static str, Wrapping<u64>>> {
157         None
158     }
159 
160     /// Helper to allow common implementation of read_config
161     fn read_config_from_slice(&self, config: &[u8], offset: u64, mut data: &mut [u8]) {
162         let config_len = config.len() as u64;
163         let data_len = data.len() as u64;
164         if offset + data_len > config_len {
165             error!(
166                 "Out-of-bound access to configuration: config_len = {} offset = {:x} length = {} for {}",
167                 config_len,
168                 offset,
169                 data_len,
170                 self.device_type()
171             );
172             return;
173         }
174         if let Some(end) = offset.checked_add(data.len() as u64) {
175             data.write_all(&config[offset as usize..std::cmp::min(end, config_len) as usize])
176                 .unwrap();
177         }
178     }
179 
180     /// Helper to allow common implementation of write_config
181     fn write_config_helper(&self, config: &mut [u8], offset: u64, data: &[u8]) {
182         let config_len = config.len() as u64;
183         let data_len = data.len() as u64;
184         if offset + data_len > config_len {
185             error!(
186                     "Out-of-bound access to configuration: config_len = {} offset = {:x} length = {} for {}",
187                     config_len,
188                     offset,
189                     data_len,
190                     self.device_type()
191                 );
192             return;
193         }
194 
195         if let Some(end) = offset.checked_add(config.len() as u64) {
196             let mut offset_config =
197                 &mut config[offset as usize..std::cmp::min(end, config_len) as usize];
198             offset_config.write_all(data).unwrap();
199         }
200     }
201 }
202 
/// Trait providing address translation the same way a physical DMA remapping
/// table would provide translation between an IOVA and a physical address.
/// The goal of this trait is to be used by virtio devices to perform the
/// address translation before they try to read from the guest physical address.
/// On the other side, the implementation itself should be provided by the code
/// emulating the IOMMU for the guest.
pub trait DmaRemapping: Send + Sync {
    /// Translates `addr` on behalf of the device identified by `id`, returning
    /// the translated address or an I/O error on failure.
    fn translate(&self, id: u32, addr: u64) -> std::result::Result<u64, std::io::Error>;
}
212 
/// Structure to handle device state common to all devices
#[derive(Default)]
pub struct VirtioCommon {
    // Feature bits the device offers to the guest.
    pub avail_features: u64,
    // Feature bits the guest has acknowledged (always a subset of
    // avail_features, enforced by ack_features()).
    pub acked_features: u64,
    // EventFd written on reset() to ask the epoll threads to exit.
    pub kill_evt: Option<EventFd>,
    // Interrupt callback saved at activation; returned to the transport on
    // reset().
    pub interrupt_cb: Option<Arc<dyn VirtioInterrupt>>,
    // Clones of the per-queue EventFds saved at activation.
    pub queue_evts: Option<Vec<EventFd>>,
    // EventFd written to request a pause; Some() only once activated.
    pub pause_evt: Option<EventFd>,
    // Flag observed by worker threads indicating the paused state.
    pub paused: Arc<AtomicBool>,
    // Barrier used to wait for all threads to acknowledge a pause.
    pub paused_sync: Option<Arc<Barrier>>,
    // Join handles of the device's epoll worker threads.
    pub epoll_threads: Option<Vec<thread::JoinHandle<()>>>,
    // Sizes of the device's queues — NOTE(review): not read in this file;
    // presumably consumed by the transport. Confirm at call sites.
    pub queue_sizes: Vec<u16>,
    // Virtio device type, used for logging.
    pub device_type: u32,
    // Minimum number of enabled queues required by activate().
    pub min_queues: u16,
}
229 
230 impl VirtioCommon {
231     pub fn feature_acked(&self, feature: u64) -> bool {
232         self.acked_features & 1 << feature == 1 << feature
233     }
234 
235     pub fn ack_features(&mut self, value: u64) {
236         let mut v = value;
237         // Check if the guest is ACK'ing a feature that we didn't claim to have.
238         let unrequested_features = v & !self.avail_features;
239         if unrequested_features != 0 {
240             warn!("Received acknowledge request for unknown feature.");
241 
242             // Don't count these features as acked.
243             v &= !unrequested_features;
244         }
245         self.acked_features |= v;
246     }
247 
248     pub fn activate(
249         &mut self,
250         queues: &[Queue],
251         queue_evts: &[EventFd],
252         interrupt_cb: &Arc<dyn VirtioInterrupt>,
253     ) -> ActivateResult {
254         if queues.len() != queue_evts.len() {
255             error!(
256                 "Cannot activate: length mismatch: queue_evts={} queues={}",
257                 queue_evts.len(),
258                 queues.len()
259             );
260             return Err(ActivateError::BadActivate);
261         }
262 
263         if queues.len() < self.min_queues.into() {
264             error!(
265                 "Number of enabled queues lower than min: {} vs {}",
266                 queues.len(),
267                 self.min_queues
268             );
269             return Err(ActivateError::BadActivate);
270         }
271 
272         let kill_evt = EventFd::new(EFD_NONBLOCK).map_err(|e| {
273             error!("failed creating kill EventFd: {}", e);
274             ActivateError::BadActivate
275         })?;
276         self.kill_evt = Some(kill_evt);
277 
278         let pause_evt = EventFd::new(EFD_NONBLOCK).map_err(|e| {
279             error!("failed creating pause EventFd: {}", e);
280             ActivateError::BadActivate
281         })?;
282         self.pause_evt = Some(pause_evt);
283 
284         // Save the interrupt EventFD as we need to return it on reset
285         // but clone it to pass into the thread.
286         self.interrupt_cb = Some(interrupt_cb.clone());
287 
288         let mut tmp_queue_evts: Vec<EventFd> = Vec::new();
289         for queue_evt in queue_evts.iter() {
290             // Save the queue EventFD as we need to return it on reset
291             // but clone it to pass into the thread.
292             tmp_queue_evts.push(queue_evt.try_clone().map_err(|e| {
293                 error!("failed to clone queue EventFd: {}", e);
294                 ActivateError::BadActivate
295             })?);
296         }
297         self.queue_evts = Some(tmp_queue_evts);
298         Ok(())
299     }
300 
301     pub fn reset(&mut self) -> Option<Arc<dyn VirtioInterrupt>> {
302         // We first must resume the virtio thread if it was paused.
303         if self.pause_evt.take().is_some() {
304             self.resume().ok()?;
305         }
306 
307         if let Some(kill_evt) = self.kill_evt.take() {
308             // Ignore the result because there is nothing we can do about it.
309             let _ = kill_evt.write(1);
310         }
311 
312         if let Some(mut threads) = self.epoll_threads.take() {
313             for t in threads.drain(..) {
314                 if let Err(e) = t.join() {
315                     error!("Error joining thread: {:?}", e);
316                 }
317             }
318         }
319 
320         // Return the interrupt
321         Some(self.interrupt_cb.take().unwrap())
322     }
323 
324     pub fn dup_eventfds(&self) -> (EventFd, EventFd) {
325         (
326             self.kill_evt.as_ref().unwrap().try_clone().unwrap(),
327             self.pause_evt.as_ref().unwrap().try_clone().unwrap(),
328         )
329     }
330 }
331 
impl Pausable for VirtioCommon {
    /// Pauses the device's worker threads.
    ///
    /// The `paused` flag is stored *before* signalling `pause_evt`, so that a
    /// thread woken by the eventfd is guaranteed to observe the flag. The
    /// barrier wait then blocks until every thread has acknowledged.
    fn pause(&mut self) -> std::result::Result<(), MigratableError> {
        info!(
            "Pausing virtio-{}",
            VirtioDeviceType::from(self.device_type)
        );
        self.paused.store(true, Ordering::SeqCst);
        if let Some(pause_evt) = &self.pause_evt {
            pause_evt
                .write(1)
                .map_err(|e| MigratableError::Pause(e.into()))?;

            // Wait for all threads to acknowledge the pause before going
            // any further. This is exclusively performed when pause_evt
            // eventfd is Some(), as this means the virtio device has been
            // activated. One specific case where the device can be paused
            // while it hasn't been yet activated is snapshot/restore.
            self.paused_sync.as_ref().unwrap().wait();
        }

        Ok(())
    }

    /// Resumes the device by clearing the `paused` flag and unparking every
    /// epoll worker thread. A no-op (beyond the flag store) if the device has
    /// no threads, e.g. when it was never activated.
    fn resume(&mut self) -> std::result::Result<(), MigratableError> {
        info!(
            "Resuming virtio-{}",
            VirtioDeviceType::from(self.device_type)
        );
        self.paused.store(false, Ordering::SeqCst);
        if let Some(epoll_threads) = &self.epoll_threads {
            for t in epoll_threads.iter() {
                t.thread().unpark();
            }
        }

        Ok(())
    }
}
370