xref: /cloud-hypervisor/virtio-devices/src/vdpa.rs (revision 7d7bfb2034001d4cb15df2ddc56d2d350c8da30f)
1 // Copyright © 2022 Intel Corporation
2 //
3 // SPDX-License-Identifier: Apache-2.0
4 //
5 
6 use crate::{
7     ActivateError, ActivateResult, GuestMemoryMmap, VirtioCommon, VirtioDevice, VirtioInterrupt,
8     VirtioInterruptType, DEVICE_ACKNOWLEDGE, DEVICE_DRIVER, DEVICE_DRIVER_OK, DEVICE_FEATURES_OK,
9     VIRTIO_F_IOMMU_PLATFORM,
10 };
11 use std::{
12     io, result,
13     sync::{atomic::Ordering, Arc, Mutex},
14 };
15 use thiserror::Error;
16 use vhost::{
17     vdpa::{VhostVdpa, VhostVdpaIovaRange},
18     vhost_kern::vdpa::VhostKernVdpa,
19     vhost_kern::VhostKernFeatures,
20     VhostBackend, VringConfigData,
21 };
22 use virtio_queue::{Descriptor, Queue};
23 use vm_device::dma_mapping::ExternalDmaMapping;
24 use vm_memory::{GuestAddress, GuestAddressSpace, GuestMemory, GuestMemoryAtomic};
25 use vm_virtio::{AccessPlatform, Translatable};
26 use vmm_sys_util::eventfd::EventFd;
27 
28 #[derive(Error, Debug)]
29 pub enum Error {
30     #[error("Failed to create vhost-vdpa: {0}")]
31     CreateVhostVdpa(vhost::Error),
32     #[error("Failed to map DMA range: {0}")]
33     DmaMap(vhost::Error),
34     #[error("Failed to unmap DMA range: {0}")]
35     DmaUnmap(vhost::Error),
36     #[error("Failed to get address range")]
37     GetAddressRange,
38     #[error("Failed to get the available index from the virtio queue: {0}")]
39     GetAvailableIndex(virtio_queue::Error),
40     #[error("Get virtio device identifier: {0}")]
41     GetDeviceId(vhost::Error),
42     #[error("Failed to get backend specific features: {0}")]
43     GetBackendFeatures(vhost::Error),
44     #[error("Failed to get virtio features: {0}")]
45     GetFeatures(vhost::Error),
46     #[error("Failed to get the IOVA range: {0}")]
47     GetIovaRange(vhost::Error),
48     #[error("Failed to get queue size: {0}")]
49     GetVringNum(vhost::Error),
50     #[error("Invalid IOVA range: {0}-{1}")]
51     InvalidIovaRange(u64, u64),
52     #[error("Missing VIRTIO_F_ACCESS_PLATFORM feature")]
53     MissingAccessPlatformVirtioFeature,
54     #[error("Failed to reset owner: {0}")]
55     ResetOwner(vhost::Error),
56     #[error("Failed to set backend specific features: {0}")]
57     SetBackendFeatures(vhost::Error),
58     #[error("Failed to set eventfd notifying about a configuration change: {0}")]
59     SetConfigCall(vhost::Error),
60     #[error("Failed to set virtio features: {0}")]
61     SetFeatures(vhost::Error),
62     #[error("Failed to set memory table: {0}")]
63     SetMemTable(vhost::Error),
64     #[error("Failed to set owner: {0}")]
65     SetOwner(vhost::Error),
66     #[error("Failed to set virtio status: {0}")]
67     SetStatus(vhost::Error),
68     #[error("Failed to set vring address: {0}")]
69     SetVringAddr(vhost::Error),
70     #[error("Failed to set vring base: {0}")]
71     SetVringBase(vhost::Error),
72     #[error("Failed to set vring eventfd when buffer are used: {0}")]
73     SetVringCall(vhost::Error),
74     #[error("Failed to enable/disable vring: {0}")]
75     SetVringEnable(vhost::Error),
76     #[error("Failed to set vring eventfd when new descriptors are available: {0}")]
77     SetVringKick(vhost::Error),
78     #[error("Failed to set vring size: {0}")]
79     SetVringNum(vhost::Error),
80 }
81 
82 pub type Result<T> = std::result::Result<T, Error>;
83 
/// A virtio device backed by the in-kernel vhost-vdpa driver.
pub struct Vdpa {
    /// Generic virtio state shared by all devices (features, queue sizes,
    /// acked features, interrupt callback, access platform, ...).
    common: VirtioCommon,
    /// Device identifier, used for event reporting.
    id: String,
    /// Handle to the kernel vhost-vdpa backend device.
    vhost: VhostKernVdpa<GuestMemoryAtomic<GuestMemoryMmap>>,
    /// IOVA range supported by the device; DMA mappings must fall within it.
    iova_range: VhostVdpaIovaRange,
    /// Number of vrings currently enabled (if any), recorded so they can be
    /// disabled again on reset.
    enabled_num_queues: Option<usize>,
    /// Backend specific features acknowledged at construction time.
    backend_features: u64,
}
92 
93 impl Vdpa {
94     pub fn new(
95         id: String,
96         device_path: &str,
97         mem: GuestMemoryAtomic<GuestMemoryMmap>,
98         num_queues: u16,
99     ) -> Result<Self> {
100         let mut vhost = VhostKernVdpa::new(device_path, mem).map_err(Error::CreateVhostVdpa)?;
101         vhost.set_owner().map_err(Error::SetOwner)?;
102         let device_type = vhost.get_device_id().map_err(Error::GetDeviceId)?;
103         let queue_size = vhost.get_vring_num().map_err(Error::GetVringNum)?;
104         let avail_features = vhost.get_features().map_err(Error::GetFeatures)?;
105         let backend_features = vhost
106             .get_backend_features()
107             .map_err(Error::GetBackendFeatures)?;
108         vhost.set_backend_features_acked(backend_features);
109 
110         let iova_range = vhost.get_iova_range().map_err(Error::GetIovaRange)?;
111 
112         if avail_features & (1u64 << VIRTIO_F_IOMMU_PLATFORM) == 0 {
113             return Err(Error::MissingAccessPlatformVirtioFeature);
114         }
115 
116         Ok(Vdpa {
117             common: VirtioCommon {
118                 device_type,
119                 queue_sizes: vec![queue_size; num_queues as usize],
120                 avail_features,
121                 min_queues: num_queues,
122                 ..Default::default()
123             },
124             id,
125             vhost,
126             iova_range,
127             enabled_num_queues: None,
128             backend_features,
129         })
130     }
131 
132     fn enable_vrings(&mut self, num_queues: usize, enable: bool) -> Result<()> {
133         for queue_index in 0..num_queues {
134             self.vhost
135                 .set_vring_enable(queue_index, enable)
136                 .map_err(Error::SetVringEnable)?;
137         }
138 
139         self.enabled_num_queues = if enable { Some(num_queues) } else { None };
140 
141         Ok(())
142     }
143 
144     fn activate_vdpa(
145         &mut self,
146         _mem: &GuestMemoryMmap,
147         virtio_interrupt: &Arc<dyn VirtioInterrupt>,
148         queues: Vec<Queue<GuestMemoryAtomic<GuestMemoryMmap>>>,
149         queue_evts: Vec<EventFd>,
150     ) -> Result<()> {
151         self.vhost
152             .set_features(self.common.acked_features)
153             .map_err(Error::SetFeatures)?;
154         self.vhost
155             .set_backend_features(self.backend_features)
156             .map_err(Error::SetBackendFeatures)?;
157 
158         for (queue_index, queue) in queues.iter().enumerate() {
159             let queue_max_size = queue.max_size();
160             let queue_size = queue.state.size;
161             self.vhost
162                 .set_vring_num(queue_index, queue_size)
163                 .map_err(Error::SetVringNum)?;
164 
165             let config_data = VringConfigData {
166                 queue_max_size,
167                 queue_size,
168                 flags: 0u32,
169                 desc_table_addr: queue
170                     .state
171                     .desc_table
172                     .translate_gpa(
173                         self.common.access_platform.as_ref(),
174                         queue_size as usize * std::mem::size_of::<Descriptor>(),
175                     )
176                     .0,
177                 used_ring_addr: queue
178                     .state
179                     .used_ring
180                     .translate_gpa(
181                         self.common.access_platform.as_ref(),
182                         4 + queue_size as usize * 8,
183                     )
184                     .0,
185                 avail_ring_addr: queue
186                     .state
187                     .avail_ring
188                     .translate_gpa(
189                         self.common.access_platform.as_ref(),
190                         4 + queue_size as usize * 2,
191                     )
192                     .0,
193                 log_addr: None,
194             };
195 
196             self.vhost
197                 .set_vring_addr(queue_index, &config_data)
198                 .map_err(Error::SetVringAddr)?;
199             self.vhost
200                 .set_vring_base(
201                     queue_index,
202                     queue
203                         .avail_idx(Ordering::Acquire)
204                         .map_err(Error::GetAvailableIndex)?
205                         .0,
206                 )
207                 .map_err(Error::SetVringBase)?;
208 
209             if let Some(eventfd) =
210                 virtio_interrupt.notifier(VirtioInterruptType::Queue(queue_index as u16))
211             {
212                 self.vhost
213                     .set_vring_call(queue_index, &eventfd)
214                     .map_err(Error::SetVringCall)?;
215             }
216 
217             self.vhost
218                 .set_vring_kick(queue_index, &queue_evts[queue_index])
219                 .map_err(Error::SetVringKick)?;
220         }
221 
222         // Setup the config eventfd if there is one
223         if let Some(eventfd) = virtio_interrupt.notifier(VirtioInterruptType::Config) {
224             self.vhost
225                 .set_config_call(&eventfd)
226                 .map_err(Error::SetConfigCall)?;
227         }
228 
229         self.enable_vrings(queues.len(), true)?;
230 
231         self.vhost
232             .set_status(
233                 (DEVICE_ACKNOWLEDGE | DEVICE_DRIVER | DEVICE_DRIVER_OK | DEVICE_FEATURES_OK) as u8,
234             )
235             .map_err(Error::SetStatus)
236     }
237 
238     fn reset_vdpa(&mut self) -> Result<()> {
239         if let Some(num_queues) = self.enabled_num_queues {
240             self.enable_vrings(num_queues, false)?;
241         }
242 
243         self.vhost.set_status(0).map_err(Error::SetStatus)
244     }
245 
246     fn dma_map(&self, iova: u64, size: u64, host_vaddr: *const u8, readonly: bool) -> Result<()> {
247         let iova_last = iova + size - 1;
248         if iova < self.iova_range.first || iova_last > self.iova_range.last {
249             return Err(Error::InvalidIovaRange(iova, iova_last));
250         }
251 
252         self.vhost
253             .dma_map(iova, size, host_vaddr, readonly)
254             .map_err(Error::DmaMap)
255     }
256 
257     fn dma_unmap(&self, iova: u64, size: u64) -> Result<()> {
258         let iova_last = iova + size - 1;
259         if iova < self.iova_range.first || iova_last > self.iova_range.last {
260             return Err(Error::InvalidIovaRange(iova, iova_last));
261         }
262 
263         self.vhost.dma_unmap(iova, size).map_err(Error::DmaUnmap)
264     }
265 }
266 
267 impl VirtioDevice for Vdpa {
268     fn device_type(&self) -> u32 {
269         self.common.device_type
270     }
271 
272     fn queue_max_sizes(&self) -> &[u16] {
273         &self.common.queue_sizes
274     }
275 
276     fn features(&self) -> u64 {
277         self.common.avail_features
278     }
279 
280     fn ack_features(&mut self, value: u64) {
281         self.common.ack_features(value)
282     }
283 
284     fn read_config(&self, offset: u64, data: &mut [u8]) {
285         if let Err(e) = self.vhost.get_config(offset as u32, data) {
286             error!("Failed reading virtio config: {}", e);
287         }
288     }
289 
290     fn write_config(&mut self, offset: u64, data: &[u8]) {
291         if let Err(e) = self.vhost.set_config(offset as u32, data) {
292             error!("Failed writing virtio config: {}", e);
293         }
294     }
295 
296     fn activate(
297         &mut self,
298         mem: GuestMemoryAtomic<GuestMemoryMmap>,
299         virtio_interrupt: Arc<dyn VirtioInterrupt>,
300         queues: Vec<Queue<GuestMemoryAtomic<GuestMemoryMmap>>>,
301         queue_evts: Vec<EventFd>,
302     ) -> ActivateResult {
303         self.activate_vdpa(&mem.memory(), &virtio_interrupt, queues, queue_evts)
304             .map_err(ActivateError::ActivateVdpa)?;
305 
306         // Store the virtio interrupt handler as we need to return it on reset
307         self.common.interrupt_cb = Some(virtio_interrupt);
308 
309         event!("vdpa", "activated", "id", &self.id);
310         Ok(())
311     }
312 
313     fn reset(&mut self) -> Option<Arc<dyn VirtioInterrupt>> {
314         if let Err(e) = self.reset_vdpa() {
315             error!("Failed to reset vhost-vdpa: {:?}", e);
316             return None;
317         }
318 
319         event!("vdpa", "reset", "id", &self.id);
320 
321         // Return the virtio interrupt handler
322         self.common.interrupt_cb.take()
323     }
324 
325     fn set_access_platform(&mut self, access_platform: Arc<dyn AccessPlatform>) {
326         self.common.set_access_platform(access_platform)
327     }
328 }
329 
/// Adapter exposing a shared [`Vdpa`] device as an `ExternalDmaMapping`
/// handler, translating guest physical addresses into host virtual
/// addresses before forwarding DMA map/unmap requests to the device.
pub struct VdpaDmaMapping<M: GuestAddressSpace> {
    /// vDPA device that performs the actual DMA map/unmap operations.
    device: Arc<Mutex<Vdpa>>,
    /// Guest memory used to translate GPAs to host virtual addresses.
    memory: Arc<M>,
}
334 
335 impl<M: GuestAddressSpace> VdpaDmaMapping<M> {
336     pub fn new(device: Arc<Mutex<Vdpa>>, memory: Arc<M>) -> Self {
337         Self { device, memory }
338     }
339 }
340 
341 impl<M: GuestAddressSpace + Sync + Send> ExternalDmaMapping for VdpaDmaMapping<M> {
342     fn map(&self, iova: u64, gpa: u64, size: u64) -> result::Result<(), io::Error> {
343         let mem = self.memory.memory();
344         let guest_addr = GuestAddress(gpa);
345         let user_addr = if mem.check_range(guest_addr, size as usize) {
346             mem.get_host_address(guest_addr).unwrap() as *const u8
347         } else {
348             return Err(io::Error::new(
349                 io::ErrorKind::Other,
350                 format!(
351                     "failed to convert guest address 0x{:x} into \
352                      host user virtual address",
353                     gpa
354                 ),
355             ));
356         };
357 
358         debug!(
359             "DMA map iova 0x{:x}, gpa 0x{:x}, size 0x{:x}, host_addr 0x{:x}",
360             iova, gpa, size, user_addr as u64
361         );
362         self.device
363             .lock()
364             .unwrap()
365             .dma_map(iova, size, user_addr, false)
366             .map_err(|e| {
367                 io::Error::new(
368                     io::ErrorKind::Other,
369                     format!(
370                         "failed to map memory for vDPA device, \
371                          iova 0x{:x}, gpa 0x{:x}, size 0x{:x}: {:?}",
372                         iova, gpa, size, e
373                     ),
374                 )
375             })
376     }
377 
378     fn unmap(&self, iova: u64, size: u64) -> std::result::Result<(), std::io::Error> {
379         debug!("DMA unmap iova 0x{:x} size 0x{:x}", iova, size);
380         self.device
381             .lock()
382             .unwrap()
383             .dma_unmap(iova, size)
384             .map_err(|e| {
385                 io::Error::new(
386                     io::ErrorKind::Other,
387                     format!(
388                         "failed to unmap memory for vDPA device, \
389                      iova 0x{:x}, size 0x{:x}: {:?}",
390                         iova, size, e
391                     ),
392                 )
393             })
394     }
395 }
396