xref: /cloud-hypervisor/virtio-devices/src/device.rs (revision 7d7bfb2034001d4cb15df2ddc56d2d350c8da30f)
// Copyright 2018 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE-BSD-3-Clause file.
//
// Copyright © 2019 Intel Corporation
//
// SPDX-License-Identifier: Apache-2.0 AND BSD-3-Clause

use crate::{
    ActivateError, ActivateResult, Error, GuestMemoryMmap, GuestRegionMmap,
    VIRTIO_F_RING_INDIRECT_DESC,
};
use libc::EFD_NONBLOCK;
use std::collections::HashMap;
use std::io::Write;
use std::num::Wrapping;
use std::sync::{
    atomic::{AtomicBool, Ordering},
    Arc, Barrier,
};
use std::thread;
use virtio_queue::Queue;
use vm_memory::{GuestAddress, GuestMemoryAtomic, GuestUsize};
use vm_migration::{MigratableError, Pausable};
use vm_virtio::AccessPlatform;
use vm_virtio::VirtioDeviceType;
use vmm_sys_util::eventfd::EventFd;

pub enum VirtioInterruptType {
    Config,
    Queue(u16),
}

pub trait VirtioInterrupt: Send + Sync {
    fn trigger(&self, int_type: VirtioInterruptType) -> std::result::Result<(), std::io::Error>;
    fn notifier(&self, _int_type: VirtioInterruptType) -> Option<EventFd> {
        None
    }
}
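
// A minimal sketch of a `VirtioInterrupt` implementation backed by a single
// `EventFd`. The `SingleEventFdInterrupt` name is hypothetical and not part
// of this file; a real transport (e.g. virtio-pci) would typically route
// `Config` and each `Queue(n)` interrupt to its own MSI-X vector rather than
// collapsing them onto one eventfd.
struct SingleEventFdInterrupt {
    evt: EventFd,
}

impl VirtioInterrupt for SingleEventFdInterrupt {
    fn trigger(&self, _int_type: VirtioInterruptType) -> std::result::Result<(), std::io::Error> {
        // Signal the interrupt by incrementing the eventfd counter.
        self.evt.write(1)
    }

    fn notifier(&self, _int_type: VirtioInterruptType) -> Option<EventFd> {
        // Hand out a clone so the caller can wire the eventfd up directly
        // (e.g. as a KVM irqfd) and bypass `trigger` entirely.
        self.evt.try_clone().ok()
    }
}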

#[derive(Clone)]
pub struct UserspaceMapping {
    pub host_addr: u64,
    pub mem_slot: u32,
    pub addr: GuestAddress,
    pub len: GuestUsize,
    pub mergeable: bool,
}

#[derive(Clone)]
pub struct VirtioSharedMemory {
    pub offset: u64,
    pub len: u64,
}

#[derive(Clone)]
pub struct VirtioSharedMemoryList {
    pub host_addr: u64,
    pub mem_slot: u32,
    pub addr: GuestAddress,
    pub len: GuestUsize,
    pub region_list: Vec<VirtioSharedMemory>,
}

/// Trait for virtio devices to be driven by a virtio transport.
///
/// The lifecycle of a virtio device starts with it being moved into a virtio transport, which will
/// then query the device. Once the guest driver has configured the device, `VirtioDevice::activate`
/// will be called, and all the events, memory, and queues required for device operation will be
/// moved into the device. Optionally, a virtio device can implement device reset, in which case it
/// returns said resources and resets its internal state.
pub trait VirtioDevice: Send {
    /// The virtio device type.
    fn device_type(&self) -> u32;

    /// The maximum size of each queue that this device supports.
    fn queue_max_sizes(&self) -> &[u16];

    /// The set of feature bits that this device supports.
    fn features(&self) -> u64 {
        0
    }

    /// Acknowledges that this set of features should be enabled.
    fn ack_features(&mut self, value: u64) {
        let _ = value;
    }

    /// Reads this device's configuration space at `offset`.
    fn read_config(&self, _offset: u64, _data: &mut [u8]) {
        warn!(
            "No readable configuration fields for {}",
            VirtioDeviceType::from(self.device_type())
        );
    }

    /// Writes to this device's configuration space at `offset`.
    fn write_config(&mut self, _offset: u64, _data: &[u8]) {
        warn!(
            "No writable configuration fields for {}",
            VirtioDeviceType::from(self.device_type())
        );
    }

    /// Activates this device for real usage.
    fn activate(
        &mut self,
        mem: GuestMemoryAtomic<GuestMemoryMmap>,
        interrupt_evt: Arc<dyn VirtioInterrupt>,
        queues: Vec<Queue<GuestMemoryAtomic<GuestMemoryMmap>>>,
        queue_evts: Vec<EventFd>,
    ) -> ActivateResult;

    /// Optionally deactivates this device and returns ownership of the guest memory map, interrupt
    /// event, and queue events.
    fn reset(&mut self) -> Option<Arc<dyn VirtioInterrupt>> {
        None
    }

    /// Returns the list of shared memory regions required by the device.
    fn get_shm_regions(&self) -> Option<VirtioSharedMemoryList> {
        None
    }

    /// Updates the list of shared memory regions required by the device.
    fn set_shm_regions(
        &mut self,
        _shm_regions: VirtioSharedMemoryList,
    ) -> std::result::Result<(), Error> {
        std::unimplemented!()
    }

    /// Some devices may need to do some explicit shutdown work. This method
    /// may be implemented to do this. The VMM should call shutdown() on
    /// every device as part of shutting down the VM. Acting on the device
    /// after a shutdown() can lead to unpredictable results.
    fn shutdown(&mut self) {}

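    /// Adds a new guest memory region to the device, e.g. after guest memory
    /// has been hotplugged. The default implementation is a no-op.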
    fn add_memory_region(
        &mut self,
        _region: &Arc<GuestRegionMmap>,
    ) -> std::result::Result<(), Error> {
        Ok(())
    }

    /// Returns the list of userspace mappings associated with this device.
    fn userspace_mappings(&self) -> Vec<UserspaceMapping> {
        Vec::new()
    }

    /// Returns the counters that this device exposes.
    fn counters(&self) -> Option<HashMap<&'static str, Wrapping<u64>>> {
        None
    }

    /// Helper to allow a common implementation of `read_config`.
    fn read_config_from_slice(&self, config: &[u8], offset: u64, mut data: &mut [u8]) {
        let config_len = config.len() as u64;
        let data_len = data.len() as u64;
        // Reject the access if the requested range overflows or runs past the
        // end of the configuration space.
        if offset.checked_add(data_len).map_or(true, |end| end > config_len) {
            error!(
                "Out-of-bound access to configuration: config_len = {} offset = {:x} length = {} for {}",
                config_len,
                offset,
                data_len,
                self.device_type()
            );
            return;
        }
        if let Some(end) = offset.checked_add(data_len) {
            // The bounds check above guarantees this write cannot fail.
            data.write_all(&config[offset as usize..std::cmp::min(end, config_len) as usize])
                .unwrap();
        }
    }

    /// Helper to allow a common implementation of `write_config`.
    fn write_config_helper(&self, config: &mut [u8], offset: u64, data: &[u8]) {
        let config_len = config.len() as u64;
        let data_len = data.len() as u64;
        // Reject the access if the requested range overflows or runs past the
        // end of the configuration space.
        if offset.checked_add(data_len).map_or(true, |end| end > config_len) {
            error!(
                "Out-of-bound access to configuration: config_len = {} offset = {:x} length = {} for {}",
                config_len,
                offset,
                data_len,
                self.device_type()
            );
            return;
        }

        if let Some(end) = offset.checked_add(data_len) {
            // The bounds check above guarantees this write cannot fail.
            let mut offset_config =
                &mut config[offset as usize..std::cmp::min(end, config_len) as usize];
            offset_config.write_all(data).unwrap();
        }
    }

    /// Set the access platform trait to let the device perform address
    /// translations if needed.
    fn set_access_platform(&mut self, _access_platform: Arc<dyn AccessPlatform>) {}
}
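
// A minimal sketch of a device built on top of this trait, assuming a
// fixed-size config space and reuse of the `VirtioCommon` helper defined
// below. `ExampleDevice` and its fields are hypothetical and not part of
// this file; a real device (virtio-net, virtio-blk, ...) would additionally
// keep the queues and spawn epoll threads from `activate` to service them.
struct ExampleDevice {
    common: VirtioCommon,
    config: [u8; 8],
}

impl VirtioDevice for ExampleDevice {
    fn device_type(&self) -> u32 {
        self.common.device_type
    }

    fn queue_max_sizes(&self) -> &[u16] {
        &self.common.queue_sizes
    }

    fn features(&self) -> u64 {
        self.common.avail_features
    }

    fn ack_features(&mut self, value: u64) {
        self.common.ack_features(value)
    }

    fn read_config(&self, offset: u64, data: &mut [u8]) {
        // Delegate bounds checking to the shared helper.
        self.read_config_from_slice(&self.config, offset, data)
    }

    fn activate(
        &mut self,
        _mem: GuestMemoryAtomic<GuestMemoryMmap>,
        interrupt_cb: Arc<dyn VirtioInterrupt>,
        queues: Vec<Queue<GuestMemoryAtomic<GuestMemoryMmap>>>,
        queue_evts: Vec<EventFd>,
    ) -> ActivateResult {
        // Validate the queues and stash clones of the eventfds and interrupt
        // callback on the common state.
        self.common.activate(&queues, &queue_evts, &interrupt_cb)
    }

    fn reset(&mut self) -> Option<Arc<dyn VirtioInterrupt>> {
        self.common.reset()
    }
}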

/// Trait providing address translation the same way a physical DMA remapping
/// table would provide translation between an IOVA and a physical address.
/// The goal of this trait is to be used by virtio devices to perform address
/// translation before they try to read from the guest physical address. The
/// implementation itself, on the other hand, should be provided by the code
/// emulating the IOMMU for the guest.
pub trait DmaRemapping: Send + Sync {
    /// Provide a way to translate GVA addresses into GPAs.
    fn translate_gva(&self, id: u32, addr: u64) -> std::result::Result<u64, std::io::Error>;
    /// Provide a way to translate GPA addresses into GVAs.
    fn translate_gpa(&self, id: u32, addr: u64) -> std::result::Result<u64, std::io::Error>;
}
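
// A minimal sketch of a `DmaRemapping` implementor: an identity mapping where
// the IOVA and the GPA are the same. The `IdentityRemapping` name is
// hypothetical and not part of this file; the real implementation is provided
// by the virtio-iommu emulation, as noted above.
struct IdentityRemapping;

impl DmaRemapping for IdentityRemapping {
    fn translate_gva(&self, _id: u32, addr: u64) -> std::result::Result<u64, std::io::Error> {
        // Identity translation: nothing to remap.
        Ok(addr)
    }

    fn translate_gpa(&self, _id: u32, addr: u64) -> std::result::Result<u64, std::io::Error> {
        Ok(addr)
    }
}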

/// Structure to handle device state common to all devices.
#[derive(Default)]
pub struct VirtioCommon {
    pub avail_features: u64,
    pub acked_features: u64,
    pub kill_evt: Option<EventFd>,
    pub interrupt_cb: Option<Arc<dyn VirtioInterrupt>>,
    pub queue_evts: Option<Vec<EventFd>>,
    pub pause_evt: Option<EventFd>,
    pub paused: Arc<AtomicBool>,
    pub paused_sync: Option<Arc<Barrier>>,
    pub epoll_threads: Option<Vec<thread::JoinHandle<()>>>,
    pub queue_sizes: Vec<u16>,
    pub device_type: u32,
    pub min_queues: u16,
    pub access_platform: Option<Arc<dyn AccessPlatform>>,
}

impl VirtioCommon {
    pub fn feature_acked(&self, feature: u64) -> bool {
        self.acked_features & (1 << feature) == (1 << feature)
    }

    pub fn ack_features(&mut self, value: u64) {
        let mut v = value;
        // Check if the guest is ACK'ing a feature that we didn't claim to have.
        let unrequested_features = v & !self.avail_features;
        if unrequested_features != 0 {
            warn!("Received acknowledge request for unknown feature.");

            // Don't count these features as acked.
            v &= !unrequested_features;
        }
        self.acked_features |= v;
    }
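
    // A small worked example of the masking above, with made-up feature bits:
    // if `avail_features` is 0b101 (bits 0 and 2 offered) and the guest acks
    // 0b011 (bits 0 and 1), then `unrequested_features` is 0b010, so only
    // bit 0 is recorded:
    //
    //     let mut common = VirtioCommon {
    //         avail_features: 0b101,
    //         ..Default::default()
    //     };
    //     common.ack_features(0b011);
    //     assert_eq!(common.acked_features, 0b001);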

    pub fn activate(
        &mut self,
        queues: &[Queue<GuestMemoryAtomic<GuestMemoryMmap>>],
        queue_evts: &[EventFd],
        interrupt_cb: &Arc<dyn VirtioInterrupt>,
    ) -> ActivateResult {
        if queues.len() != queue_evts.len() {
            error!(
                "Cannot activate: length mismatch: queue_evts={} queues={}",
                queue_evts.len(),
                queues.len()
            );
            return Err(ActivateError::BadActivate);
        }

        if queues.len() < self.min_queues.into() {
            error!(
                "Number of enabled queues lower than min: {} vs {}",
                queues.len(),
                self.min_queues
            );
            return Err(ActivateError::BadActivate);
        }

        let kill_evt = EventFd::new(EFD_NONBLOCK).map_err(|e| {
            error!("failed creating kill EventFd: {}", e);
            ActivateError::BadActivate
        })?;
        self.kill_evt = Some(kill_evt);

        let pause_evt = EventFd::new(EFD_NONBLOCK).map_err(|e| {
            error!("failed creating pause EventFd: {}", e);
            ActivateError::BadActivate
        })?;
        self.pause_evt = Some(pause_evt);

        // Save the interrupt callback as we need to return it on reset, but
        // clone it to pass into the thread.
        self.interrupt_cb = Some(interrupt_cb.clone());

        let mut tmp_queue_evts: Vec<EventFd> = Vec::new();
        for queue_evt in queue_evts.iter() {
            // Save the queue EventFd as we need to return it on reset, but
            // clone it to pass into the thread.
            tmp_queue_evts.push(queue_evt.try_clone().map_err(|e| {
                error!("failed to clone queue EventFd: {}", e);
                ActivateError::BadActivate
            })?);
        }
        self.queue_evts = Some(tmp_queue_evts);
        Ok(())
    }

    pub fn reset(&mut self) -> Option<Arc<dyn VirtioInterrupt>> {
        // We first must resume the virtio thread if it was paused.
        if self.pause_evt.take().is_some() {
            self.resume().ok()?;
        }

        if let Some(kill_evt) = self.kill_evt.take() {
            // Ignore the result because there is nothing we can do about it.
            let _ = kill_evt.write(1);
        }

        if let Some(mut threads) = self.epoll_threads.take() {
            for t in threads.drain(..) {
                if let Err(e) = t.join() {
                    error!("Error joining thread: {:?}", e);
                }
            }
        }

        // Return the interrupt callback.
        Some(self.interrupt_cb.take().unwrap())
    }

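    /// Duplicates the kill and pause eventfds so a device can hand them to
    /// its epoll thread(s). Panics if called before `activate` has created
    /// them.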
    pub fn dup_eventfds(&self) -> (EventFd, EventFd) {
        (
            self.kill_evt.as_ref().unwrap().try_clone().unwrap(),
            self.pause_evt.as_ref().unwrap().try_clone().unwrap(),
        )
    }

    pub fn set_access_platform(&mut self, access_platform: Arc<dyn AccessPlatform>) {
        self.access_platform = Some(access_platform);
        // Indirect descriptors feature is not supported when the device
        // requires the addresses held by the descriptors to be translated.
        self.avail_features &= !(1 << VIRTIO_F_RING_INDIRECT_DESC);
    }
}

impl Pausable for VirtioCommon {
    fn pause(&mut self) -> std::result::Result<(), MigratableError> {
        info!(
            "Pausing virtio-{}",
            VirtioDeviceType::from(self.device_type)
        );
        self.paused.store(true, Ordering::SeqCst);
        if let Some(pause_evt) = &self.pause_evt {
            pause_evt
                .write(1)
                .map_err(|e| MigratableError::Pause(e.into()))?;

            // Wait for all threads to acknowledge the pause before going
            // any further. This is exclusively performed when the pause_evt
            // eventfd is Some(), as this means the virtio device has been
            // activated. One specific case where the device can be paused
            // while it hasn't yet been activated is snapshot/restore.
            self.paused_sync.as_ref().unwrap().wait();
        }

        Ok(())
    }

    fn resume(&mut self) -> std::result::Result<(), MigratableError> {
        info!(
            "Resuming virtio-{}",
            VirtioDeviceType::from(self.device_type)
        );
        self.paused.store(false, Ordering::SeqCst);
        if let Some(epoll_threads) = &self.epoll_threads {
            for t in epoll_threads.iter() {
                t.thread().unpark();
            }
        }

        Ok(())
    }
}
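
// A sketch of the worker-thread side of the pause protocol implemented above,
// assuming the device's epoll thread watches `pause_evt` alongside its queue
// eventfds. The `handle_pause_event` name and its call site are hypothetical;
// the flow matches the comments in `pause()`: acknowledge via the barrier,
// then park until `resume()` clears the flag and unparks the thread.
fn handle_pause_event(paused: &AtomicBool, paused_sync: &Barrier) {
    // Let `pause()` return by meeting it at the barrier.
    paused_sync.wait();
    // Stay parked for as long as the device is paused. `unpark()` in
    // `resume()` wakes the thread up to re-check the flag, and the loop
    // absorbs spurious wakeups.
    while paused.load(Ordering::SeqCst) {
        thread::park();
    }
}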
381