// Copyright © 2022 Intel Corporation
//
// SPDX-License-Identifier: Apache-2.0
//

use crate::{
    ActivateError, ActivateResult, GuestMemoryMmap, VirtioCommon, VirtioDevice, VirtioInterrupt,
    VirtioInterruptType, DEVICE_ACKNOWLEDGE, DEVICE_DRIVER, DEVICE_DRIVER_OK, DEVICE_FEATURES_OK,
    VIRTIO_F_IOMMU_PLATFORM,
};
use std::{
    io, result,
    sync::{atomic::Ordering, Arc, Mutex},
};
use thiserror::Error;
use vhost::{
    vdpa::{VhostVdpa, VhostVdpaIovaRange},
    vhost_kern::vdpa::VhostKernVdpa,
    vhost_kern::VhostKernFeatures,
    VhostBackend, VringConfigData,
};
use virtio_queue::{Descriptor, Queue, QueueT};
use vm_device::dma_mapping::ExternalDmaMapping;
use vm_memory::{GuestAddress, GuestAddressSpace, GuestMemory, GuestMemoryAtomic};
use vm_virtio::{AccessPlatform, Translatable};
use vmm_sys_util::eventfd::EventFd;

#[derive(Error, Debug)]
pub enum Error {
    #[error("Failed to create vhost-vdpa: {0}")]
    CreateVhostVdpa(vhost::Error),
    #[error("Failed to map DMA range: {0}")]
    DmaMap(vhost::Error),
    #[error("Failed to unmap DMA range: {0}")]
    DmaUnmap(vhost::Error),
    #[error("Failed to get address range")]
    GetAddressRange,
    #[error("Failed to get the available index from the virtio queue: {0}")]
    GetAvailableIndex(virtio_queue::Error),
    #[error("Failed to get the virtio device identifier: {0}")]
    GetDeviceId(vhost::Error),
    #[error("Failed to get backend specific features: {0}")]
    GetBackendFeatures(vhost::Error),
    #[error("Failed to get virtio features: {0}")]
    GetFeatures(vhost::Error),
    #[error("Failed to get the IOVA range: {0}")]
    GetIovaRange(vhost::Error),
    #[error("Failed to get queue size: {0}")]
    GetVringNum(vhost::Error),
    #[error("Invalid IOVA range: {0}-{1}")]
    InvalidIovaRange(u64, u64),
    #[error("Missing VIRTIO_F_ACCESS_PLATFORM feature")]
    MissingAccessPlatformVirtioFeature,
    #[error("Failed to reset owner: {0}")]
    ResetOwner(vhost::Error),
    #[error("Failed to set backend specific features: {0}")]
    SetBackendFeatures(vhost::Error),
    #[error("Failed to set eventfd notifying about a configuration change: {0}")]
    SetConfigCall(vhost::Error),
    #[error("Failed to set virtio features: {0}")]
    SetFeatures(vhost::Error),
    #[error("Failed to set memory table: {0}")]
    SetMemTable(vhost::Error),
    #[error("Failed to set owner: {0}")]
    SetOwner(vhost::Error),
    #[error("Failed to set virtio status: {0}")]
    SetStatus(vhost::Error),
    #[error("Failed to set vring address: {0}")]
    SetVringAddr(vhost::Error),
    #[error("Failed to set vring base: {0}")]
    SetVringBase(vhost::Error),
    #[error("Failed to set vring eventfd signaled when buffers are used: {0}")]
    SetVringCall(vhost::Error),
    #[error("Failed to enable/disable vring: {0}")]
    SetVringEnable(vhost::Error),
    #[error("Failed to set vring eventfd signaled when new descriptors are available: {0}")]
    SetVringKick(vhost::Error),
    #[error("Failed to set vring size: {0}")]
    SetVringNum(vhost::Error),
}

pub type Result<T> = std::result::Result<T, Error>;

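/// Virtio device backed by a vhost-vdpa kernel device.
///
/// The datapath lives in the vDPA backend (kernel or hardware); this
/// structure only drives the vhost-vdpa control plane (feature negotiation,
/// vring setup, virtio status and DMA mappings) through the character device
/// opened from `device_path`.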
pub struct Vdpa {
    common: VirtioCommon,
    id: String,
    vhost: VhostKernVdpa<GuestMemoryAtomic<GuestMemoryMmap>>,
    iova_range: VhostVdpaIovaRange,
    enabled_num_queues: Option<usize>,
    backend_features: u64,
}

impl Vdpa {
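    /// Open the vhost-vdpa device and query its static properties.
    ///
    /// This performs the initial vhost handshake: take ownership of the
    /// device, then retrieve the virtio device type, the vring size, the
    /// virtio and backend features, and the usable IOVA range.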
    pub fn new(
        id: String,
        device_path: &str,
        mem: GuestMemoryAtomic<GuestMemoryMmap>,
        num_queues: u16,
    ) -> Result<Self> {
        let mut vhost = VhostKernVdpa::new(device_path, mem).map_err(Error::CreateVhostVdpa)?;
        vhost.set_owner().map_err(Error::SetOwner)?;
        let device_type = vhost.get_device_id().map_err(Error::GetDeviceId)?;
        let queue_size = vhost.get_vring_num().map_err(Error::GetVringNum)?;
        let avail_features = vhost.get_features().map_err(Error::GetFeatures)?;
        let backend_features = vhost
            .get_backend_features()
            .map_err(Error::GetBackendFeatures)?;
        vhost.set_backend_features_acked(backend_features);

        let iova_range = vhost.get_iova_range().map_err(Error::GetIovaRange)?;

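        // vDPA devices access guest memory through IOVA mappings programmed
        // by the VMM, so the device must offer VIRTIO_F_ACCESS_PLATFORM
        // (exposed here as VIRTIO_F_IOMMU_PLATFORM). Refuse to create the
        // device otherwise.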
        if avail_features & (1u64 << VIRTIO_F_IOMMU_PLATFORM) == 0 {
            return Err(Error::MissingAccessPlatformVirtioFeature);
        }

        Ok(Vdpa {
            common: VirtioCommon {
                device_type,
                queue_sizes: vec![queue_size; num_queues as usize],
                avail_features,
                min_queues: num_queues,
                ..Default::default()
            },
            id,
            vhost,
            iova_range,
            enabled_num_queues: None,
            backend_features,
        })
    }

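    // Enable or disable the first `num_queues` vrings and remember how many
    // are currently enabled so they can be disabled again on reset.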
    fn enable_vrings(&mut self, num_queues: usize, enable: bool) -> Result<()> {
        for queue_index in 0..num_queues {
            self.vhost
                .set_vring_enable(queue_index, enable)
                .map_err(Error::SetVringEnable)?;
        }

        self.enabled_num_queues = if enable { Some(num_queues) } else { None };

        Ok(())
    }

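    // Program the vhost-vdpa backend when the guest driver activates the
    // device: acknowledge the negotiated features, configure every vring,
    // wire up the interrupt and kick eventfds, enable the vrings and finally
    // move the backend to DRIVER_OK.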
    fn activate_vdpa(
        &mut self,
        mem: &GuestMemoryMmap,
        virtio_interrupt: &Arc<dyn VirtioInterrupt>,
        queues: Vec<(usize, Queue, EventFd)>,
    ) -> Result<()> {
        self.vhost
            .set_features(self.common.acked_features)
            .map_err(Error::SetFeatures)?;
        self.vhost
            .set_backend_features(self.backend_features)
            .map_err(Error::SetBackendFeatures)?;

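        // Configure each virtqueue: ring size, descriptor/used/available ring
        // addresses (translated through the access platform if one is set),
        // the available index to start from, and the call/kick eventfds.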
        for (queue_index, queue, queue_evt) in queues.iter() {
            let queue_max_size = queue.max_size();
            let queue_size = queue.size();
            self.vhost
                .set_vring_num(*queue_index, queue_size)
                .map_err(Error::SetVringNum)?;

            let config_data = VringConfigData {
                queue_max_size,
                queue_size,
                flags: 0u32,
                desc_table_addr: queue.desc_table().translate_gpa(
                    self.common.access_platform.as_ref(),
                    queue_size as usize * std::mem::size_of::<Descriptor>(),
                ),
                used_ring_addr: queue.used_ring().translate_gpa(
                    self.common.access_platform.as_ref(),
                    4 + queue_size as usize * 8,
                ),
                avail_ring_addr: queue.avail_ring().translate_gpa(
                    self.common.access_platform.as_ref(),
                    4 + queue_size as usize * 2,
                ),
                log_addr: None,
            };

            self.vhost
                .set_vring_addr(*queue_index, &config_data)
                .map_err(Error::SetVringAddr)?;
            self.vhost
                .set_vring_base(
                    *queue_index,
                    queue
                        .avail_idx(mem, Ordering::Acquire)
                        .map_err(Error::GetAvailableIndex)?
                        .0,
                )
                .map_err(Error::SetVringBase)?;

            if let Some(eventfd) =
                virtio_interrupt.notifier(VirtioInterruptType::Queue(*queue_index as u16))
            {
                self.vhost
                    .set_vring_call(*queue_index, &eventfd)
                    .map_err(Error::SetVringCall)?;
            }

            self.vhost
                .set_vring_kick(*queue_index, queue_evt)
                .map_err(Error::SetVringKick)?;
        }

        // Set up the config eventfd if there is one
        if let Some(eventfd) = virtio_interrupt.notifier(VirtioInterruptType::Config) {
            self.vhost
                .set_config_call(&eventfd)
                .map_err(Error::SetConfigCall)?;
        }

        self.enable_vrings(queues.len(), true)?;

        self.vhost
            .set_status(
                (DEVICE_ACKNOWLEDGE | DEVICE_DRIVER | DEVICE_DRIVER_OK | DEVICE_FEATURES_OK) as u8,
            )
            .map_err(Error::SetStatus)
    }

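    // Quiesce the backend: disable any vrings that were enabled during
    // activation and clear the virtio status.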
    fn reset_vdpa(&mut self) -> Result<()> {
        if let Some(num_queues) = self.enabled_num_queues {
            self.enable_vrings(num_queues, false)?;
        }

        self.vhost.set_status(0).map_err(Error::SetStatus)
    }

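    // Map a DMA range into the device, after checking that it fits within
    // the IOVA window advertised by the backend.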
    fn dma_map(&self, iova: u64, size: u64, host_vaddr: *const u8, readonly: bool) -> Result<()> {
        let iova_last = iova + size - 1;
        if iova < self.iova_range.first || iova_last > self.iova_range.last {
            return Err(Error::InvalidIovaRange(iova, iova_last));
        }

        self.vhost
            .dma_map(iova, size, host_vaddr, readonly)
            .map_err(Error::DmaMap)
    }

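    // Remove a previously established DMA mapping, with the same IOVA range
    // check as dma_map().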
    fn dma_unmap(&self, iova: u64, size: u64) -> Result<()> {
        let iova_last = iova + size - 1;
        if iova < self.iova_range.first || iova_last > self.iova_range.last {
            return Err(Error::InvalidIovaRange(iova, iova_last));
        }

        self.vhost.dma_unmap(iova, size).map_err(Error::DmaUnmap)
    }
}

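// The VirtioDevice implementation mostly forwards to the vhost-vdpa backend:
// configuration space reads/writes go straight to the device, while
// activate()/reset() drive the control-plane sequences defined above.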
impl VirtioDevice for Vdpa {
    fn device_type(&self) -> u32 {
        self.common.device_type
    }

    fn queue_max_sizes(&self) -> &[u16] {
        &self.common.queue_sizes
    }

    fn features(&self) -> u64 {
        self.common.avail_features
    }

    fn ack_features(&mut self, value: u64) {
        self.common.ack_features(value)
    }

    fn read_config(&self, offset: u64, data: &mut [u8]) {
        if let Err(e) = self.vhost.get_config(offset as u32, data) {
            error!("Failed reading virtio config: {}", e);
        }
    }

    fn write_config(&mut self, offset: u64, data: &[u8]) {
        if let Err(e) = self.vhost.set_config(offset as u32, data) {
            error!("Failed writing virtio config: {}", e);
        }
    }

    fn activate(
        &mut self,
        mem: GuestMemoryAtomic<GuestMemoryMmap>,
        virtio_interrupt: Arc<dyn VirtioInterrupt>,
        queues: Vec<(usize, Queue, EventFd)>,
    ) -> ActivateResult {
        self.activate_vdpa(&mem.memory(), &virtio_interrupt, queues)
            .map_err(ActivateError::ActivateVdpa)?;

        // Store the virtio interrupt handler as we need to return it on reset
        self.common.interrupt_cb = Some(virtio_interrupt);

        event!("vdpa", "activated", "id", &self.id);
        Ok(())
    }

    fn reset(&mut self) -> Option<Arc<dyn VirtioInterrupt>> {
        if let Err(e) = self.reset_vdpa() {
            error!("Failed to reset vhost-vdpa: {:?}", e);
            return None;
        }

        event!("vdpa", "reset", "id", &self.id);

        // Return the virtio interrupt handler
        self.common.interrupt_cb.take()
    }

    fn set_access_platform(&mut self, access_platform: Arc<dyn AccessPlatform>) {
        self.common.set_access_platform(access_platform)
    }
}

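/// Adapter exposing a `Vdpa` device as an `ExternalDmaMapping` implementor,
/// so that DMA map/unmap requests (typically coming from a virtual IOMMU)
/// can be forwarded to the vhost-vdpa backend.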
pub struct VdpaDmaMapping<M: GuestAddressSpace> {
    device: Arc<Mutex<Vdpa>>,
    memory: Arc<M>,
}

impl<M: GuestAddressSpace> VdpaDmaMapping<M> {
    pub fn new(device: Arc<Mutex<Vdpa>>, memory: Arc<M>) -> Self {
        Self { device, memory }
    }
}

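// map() validates the guest physical range and converts it into a host
// virtual address before handing the mapping to Vdpa::dma_map(); unmap()
// only needs the IOVA and the size.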
impl<M: GuestAddressSpace + Sync + Send> ExternalDmaMapping for VdpaDmaMapping<M> {
    fn map(&self, iova: u64, gpa: u64, size: u64) -> result::Result<(), io::Error> {
        let mem = self.memory.memory();
        let guest_addr = GuestAddress(gpa);
        let user_addr = if mem.check_range(guest_addr, size as usize) {
            mem.get_host_address(guest_addr).unwrap() as *const u8
        } else {
            return Err(io::Error::new(
                io::ErrorKind::Other,
                format!(
                    "failed to convert guest address 0x{:x} into \
                     host user virtual address",
                    gpa
                ),
            ));
        };

        debug!(
            "DMA map iova 0x{:x}, gpa 0x{:x}, size 0x{:x}, host_addr 0x{:x}",
            iova, gpa, size, user_addr as u64
        );
        self.device
            .lock()
            .unwrap()
            .dma_map(iova, size, user_addr, false)
            .map_err(|e| {
                io::Error::new(
                    io::ErrorKind::Other,
                    format!(
                        "failed to map memory for vDPA device, \
                         iova 0x{:x}, gpa 0x{:x}, size 0x{:x}: {:?}",
                        iova, gpa, size, e
                    ),
                )
            })
    }

    fn unmap(&self, iova: u64, size: u64) -> std::result::Result<(), std::io::Error> {
        debug!("DMA unmap iova 0x{:x} size 0x{:x}", iova, size);
        self.device
            .lock()
            .unwrap()
            .dma_unmap(iova, size)
            .map_err(|e| {
                io::Error::new(
                    io::ErrorKind::Other,
                    format!(
                        "failed to unmap memory for vDPA device, \
                         iova 0x{:x}, size 0x{:x}: {:?}",
                        iova, size, e
                    ),
                )
            })
    }
}