xref: /cloud-hypervisor/virtio-devices/src/iommu.rs (revision 5e52729453cb62edbe4fb3a4aa24f8cca31e667e)
1 // Copyright © 2019 Intel Corporation
2 //
3 // SPDX-License-Identifier: Apache-2.0 AND BSD-3-Clause
4 
5 use super::Error as DeviceError;
6 use super::{
7     ActivateResult, EpollHelper, EpollHelperError, EpollHelperHandler, VirtioCommon, VirtioDevice,
8     VirtioDeviceType, EPOLL_HELPER_EVENT_LAST, VIRTIO_F_VERSION_1,
9 };
10 use crate::seccomp_filters::Thread;
11 use crate::thread_helper::spawn_virtio_thread;
12 use crate::GuestMemoryMmap;
13 use crate::{DmaRemapping, VirtioInterrupt, VirtioInterruptType};
14 use anyhow::anyhow;
15 use seccompiler::SeccompAction;
16 use std::collections::BTreeMap;
17 use std::io;
18 use std::mem::size_of;
19 use std::ops::Bound::Included;
20 use std::os::unix::io::AsRawFd;
21 use std::result;
22 use std::sync::atomic::{AtomicBool, Ordering};
23 use std::sync::{Arc, Barrier, Mutex, RwLock};
24 use thiserror::Error;
25 use versionize::{VersionMap, Versionize, VersionizeResult};
26 use versionize_derive::Versionize;
27 use virtio_queue::{DescriptorChain, Queue, QueueT};
28 use vm_device::dma_mapping::ExternalDmaMapping;
29 use vm_memory::{
30     Address, ByteValued, Bytes, GuestAddress, GuestAddressSpace, GuestMemoryAtomic,
31     GuestMemoryError, GuestMemoryLoadGuard,
32 };
33 use vm_migration::VersionMapped;
34 use vm_migration::{Migratable, MigratableError, Pausable, Snapshot, Snapshottable, Transportable};
35 use vm_virtio::AccessPlatform;
36 use vmm_sys_util::eventfd::EventFd;
37 
38 /// Queue sizes
39 const QUEUE_SIZE: u16 = 256;
40 const NUM_QUEUES: usize = 2;
41 const QUEUE_SIZES: &[u16] = &[QUEUE_SIZE; NUM_QUEUES];
42 
43 /// New descriptors are pending on the request queue.
44 /// The "requestq" is used whenever the device needs to perform an action
45 /// on behalf of the guest driver.
46 const REQUEST_Q_EVENT: u16 = EPOLL_HELPER_EVENT_LAST + 1;
47 /// New descriptors are pending on the event queue.
48 /// "eventq" lets the device report any fault or other asynchronous event to
49 /// the guest driver.
50 const _EVENT_Q_EVENT: u16 = EPOLL_HELPER_EVENT_LAST + 2;
51 
52 /// PROBE properties size.
53 /// This is the minimal size to provide at least one RESV_MEM property.
54 /// Because virtio-iommu expects one MSI reserved region, we must provide it,
55 /// otherwise the guest driver falls back to its built-in default region
56 /// between 0x8000000 and 0x80FFFFF, which is only relevant for the ARM
57 /// architecture but conflicts with x86.
58 const PROBE_PROP_SIZE: u32 =
59     (size_of::<VirtioIommuProbeProperty>() + size_of::<VirtioIommuProbeResvMem>()) as u32;
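
// Illustrative sketch (not part of the upstream file): with the packed structs
// defined below, the property header is 4 bytes and the RESV_MEM payload is
// 20 bytes, so the PROBE reply carries 24 bytes of properties. A compile-time
// sanity check along those lines could be:
const _: () = assert!(PROBE_PROP_SIZE == 24);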
60 
61 /// Virtio IOMMU features
62 #[allow(unused)]
63 const VIRTIO_IOMMU_F_INPUT_RANGE: u32 = 0;
64 #[allow(unused)]
65 const VIRTIO_IOMMU_F_DOMAIN_RANGE: u32 = 1;
66 #[allow(unused)]
67 const VIRTIO_IOMMU_F_MAP_UNMAP: u32 = 2;
68 #[allow(unused)]
69 const VIRTIO_IOMMU_F_BYPASS: u32 = 3;
70 const VIRTIO_IOMMU_F_PROBE: u32 = 4;
71 #[allow(unused)]
72 const VIRTIO_IOMMU_F_MMIO: u32 = 5;
73 const VIRTIO_IOMMU_F_BYPASS_CONFIG: u32 = 6;
74 
75 // Support 2MiB and 4KiB page sizes.
76 const VIRTIO_IOMMU_PAGE_SIZE_MASK: u64 = (2 << 20) | (4 << 10);
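
// Illustrative sketch (not part of the upstream file): the mask is a bitmap of
// supported page sizes, here 0x1000 (4KiB) | 0x200000 (2MiB) = 0x201000. MAP
// addresses must at least be aligned to the smallest advertised page size; a
// hypothetical helper checking that could look like this:
#[allow(dead_code)]
fn is_granule_aligned(addr: u64) -> bool {
    // The granule is the lowest set bit of the page size mask.
    let granule = 1u64 << VIRTIO_IOMMU_PAGE_SIZE_MASK.trailing_zeros();
    addr & (granule - 1) == 0
}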
77 
78 #[derive(Copy, Clone, Debug, Default)]
79 #[repr(packed)]
80 #[allow(dead_code)]
81 struct VirtioIommuRange32 {
82     start: u32,
83     end: u32,
84 }
85 
86 #[derive(Copy, Clone, Debug, Default)]
87 #[repr(packed)]
88 #[allow(dead_code)]
89 struct VirtioIommuRange64 {
90     start: u64,
91     end: u64,
92 }
93 
94 #[derive(Copy, Clone, Debug, Default)]
95 #[repr(packed)]
96 #[allow(dead_code)]
97 struct VirtioIommuConfig {
98     page_size_mask: u64,
99     input_range: VirtioIommuRange64,
100     domain_range: VirtioIommuRange32,
101     probe_size: u32,
102     bypass: u8,
103     _reserved: [u8; 7],
104 }
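
// Illustrative sketch (not part of the upstream file): with #[repr(packed)]
// the config space is 8 + 16 + 8 + 4 + 1 + 7 = 44 bytes, and the guest-writable
// "bypass" byte sits at offset 36. A compile-time sanity check could be:
const _: () = assert!(size_of::<VirtioIommuConfig>() == 44);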
105 
106 /// Virtio IOMMU request type
107 const VIRTIO_IOMMU_T_ATTACH: u8 = 1;
108 const VIRTIO_IOMMU_T_DETACH: u8 = 2;
109 const VIRTIO_IOMMU_T_MAP: u8 = 3;
110 const VIRTIO_IOMMU_T_UNMAP: u8 = 4;
111 const VIRTIO_IOMMU_T_PROBE: u8 = 5;
112 
113 #[derive(Copy, Clone, Debug, Default)]
114 #[repr(packed)]
115 struct VirtioIommuReqHead {
116     type_: u8,
117     _reserved: [u8; 3],
118 }
119 
120 /// Virtio IOMMU request status
121 const VIRTIO_IOMMU_S_OK: u8 = 0;
122 #[allow(unused)]
123 const VIRTIO_IOMMU_S_IOERR: u8 = 1;
124 #[allow(unused)]
125 const VIRTIO_IOMMU_S_UNSUPP: u8 = 2;
126 #[allow(unused)]
127 const VIRTIO_IOMMU_S_DEVERR: u8 = 3;
128 #[allow(unused)]
129 const VIRTIO_IOMMU_S_INVAL: u8 = 4;
130 #[allow(unused)]
131 const VIRTIO_IOMMU_S_RANGE: u8 = 5;
132 #[allow(unused)]
133 const VIRTIO_IOMMU_S_NOENT: u8 = 6;
134 #[allow(unused)]
135 const VIRTIO_IOMMU_S_FAULT: u8 = 7;
136 #[allow(unused)]
137 const VIRTIO_IOMMU_S_NOMEM: u8 = 8;
138 
139 #[derive(Copy, Clone, Debug, Default)]
140 #[repr(packed)]
141 #[allow(dead_code)]
142 struct VirtioIommuReqTail {
143     status: u8,
144     _reserved: [u8; 3],
145 }
146 
147 /// ATTACH request
148 #[derive(Copy, Clone, Debug, Default)]
149 #[repr(packed)]
150 struct VirtioIommuReqAttach {
151     domain: u32,
152     endpoint: u32,
153     flags: u32,
154     _reserved: [u8; 4],
155 }
156 
157 const VIRTIO_IOMMU_ATTACH_F_BYPASS: u32 = 1;
158 
159 /// DETACH request
160 #[derive(Copy, Clone, Debug, Default)]
161 #[repr(packed)]
162 struct VirtioIommuReqDetach {
163     domain: u32,
164     endpoint: u32,
165     _reserved: [u8; 8],
166 }
167 
168 /// Virtio IOMMU request MAP flags
169 #[allow(unused)]
170 const VIRTIO_IOMMU_MAP_F_READ: u32 = 1;
171 #[allow(unused)]
172 const VIRTIO_IOMMU_MAP_F_WRITE: u32 = 1 << 1;
173 #[allow(unused)]
174 const VIRTIO_IOMMU_MAP_F_MMIO: u32 = 1 << 2;
175 #[allow(unused)]
176 const VIRTIO_IOMMU_MAP_F_MASK: u32 =
177     VIRTIO_IOMMU_MAP_F_READ | VIRTIO_IOMMU_MAP_F_WRITE | VIRTIO_IOMMU_MAP_F_MMIO;
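
// Illustrative sketch (not part of the upstream file): the device below ignores
// the MAP flags (the request field is named _flags), but a stricter
// implementation could reject requests carrying undefined bits, e.g. with a
// helper like this:
#[allow(dead_code)]
fn map_flags_are_valid(flags: u32) -> bool {
    flags & !VIRTIO_IOMMU_MAP_F_MASK == 0
}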
178 
179 /// MAP request
180 #[derive(Copy, Clone, Debug, Default)]
181 #[repr(packed)]
182 struct VirtioIommuReqMap {
183     domain: u32,
184     virt_start: u64,
185     virt_end: u64,
186     phys_start: u64,
187     _flags: u32,
188 }
189 
190 /// UNMAP request
191 #[derive(Copy, Clone, Debug, Default)]
192 #[repr(packed)]
193 struct VirtioIommuReqUnmap {
194     domain: u32,
195     virt_start: u64,
196     virt_end: u64,
197     _reserved: [u8; 4],
198 }
199 
200 /// Virtio IOMMU request PROBE types
201 #[allow(unused)]
202 const VIRTIO_IOMMU_PROBE_T_NONE: u16 = 0;
203 const VIRTIO_IOMMU_PROBE_T_RESV_MEM: u16 = 1;
204 #[allow(unused)]
205 const VIRTIO_IOMMU_PROBE_T_MASK: u16 = 0xfff;
206 
207 /// PROBE request
208 #[derive(Copy, Clone, Debug, Default)]
209 #[repr(packed)]
210 #[allow(dead_code)]
211 struct VirtioIommuReqProbe {
212     endpoint: u32,
213     _reserved: [u64; 8],
214 }
215 
216 #[derive(Copy, Clone, Debug, Default)]
217 #[repr(packed)]
218 #[allow(dead_code)]
219 struct VirtioIommuProbeProperty {
220     type_: u16,
221     length: u16,
222 }
223 
224 /// Virtio IOMMU request PROBE property RESV_MEM subtypes
225 #[allow(unused)]
226 const VIRTIO_IOMMU_RESV_MEM_T_RESERVED: u8 = 0;
227 const VIRTIO_IOMMU_RESV_MEM_T_MSI: u8 = 1;
228 
229 #[derive(Copy, Clone, Debug, Default)]
230 #[repr(packed)]
231 #[allow(dead_code)]
232 struct VirtioIommuProbeResvMem {
233     subtype: u8,
234     _reserved: [u8; 3],
235     start: u64,
236     end: u64,
237 }
238 
239 /// Virtio IOMMU fault flags
240 #[allow(unused)]
241 const VIRTIO_IOMMU_FAULT_F_READ: u32 = 1;
242 #[allow(unused)]
243 const VIRTIO_IOMMU_FAULT_F_WRITE: u32 = 1 << 1;
244 #[allow(unused)]
245 const VIRTIO_IOMMU_FAULT_F_EXEC: u32 = 1 << 2;
246 #[allow(unused)]
247 const VIRTIO_IOMMU_FAULT_F_ADDRESS: u32 = 1 << 8;
248 
249 /// Virtio IOMMU fault reasons
250 #[allow(unused)]
251 const VIRTIO_IOMMU_FAULT_R_UNKNOWN: u32 = 0;
252 #[allow(unused)]
253 const VIRTIO_IOMMU_FAULT_R_DOMAIN: u32 = 1;
254 #[allow(unused)]
255 const VIRTIO_IOMMU_FAULT_R_MAPPING: u32 = 2;
256 
257 /// Fault reporting through eventq
258 #[allow(unused)]
259 #[derive(Copy, Clone, Debug, Default)]
260 #[repr(packed)]
261 struct VirtioIommuFault {
262     reason: u8,
263     reserved: [u8; 3],
264     flags: u32,
265     endpoint: u32,
266     reserved2: [u8; 4],
267     address: u64,
268 }
269 
270 // SAFETY: the data structure only contains integers and has no implicit padding
271 unsafe impl ByteValued for VirtioIommuRange32 {}
272 // SAFETY: the data structure only contains integers and has no implicit padding
273 unsafe impl ByteValued for VirtioIommuRange64 {}
274 // SAFETY: the data structure only contains integers and has no implicit padding
275 unsafe impl ByteValued for VirtioIommuConfig {}
276 // SAFETY: the data structure only contains integers and has no implicit padding
277 unsafe impl ByteValued for VirtioIommuReqHead {}
278 // SAFETY: the data structure only contains integers and has no implicit padding
279 unsafe impl ByteValued for VirtioIommuReqTail {}
280 // SAFETY: the data structure only contains integers and has no implicit padding
281 unsafe impl ByteValued for VirtioIommuReqAttach {}
282 // SAFETY: the data structure only contains integers and has no implicit padding
283 unsafe impl ByteValued for VirtioIommuReqDetach {}
284 // SAFETY: the data structure only contains integers and has no implicit padding
285 unsafe impl ByteValued for VirtioIommuReqMap {}
286 // SAFETY: the data structure only contains integers and has no implicit padding
287 unsafe impl ByteValued for VirtioIommuReqUnmap {}
288 // SAFETY: the data structure only contains integers and has no implicit padding
289 unsafe impl ByteValued for VirtioIommuReqProbe {}
290 // SAFETY: the data structure only contains integers and has no implicit padding
291 unsafe impl ByteValued for VirtioIommuProbeProperty {}
292 // SAFETY: the data structure only contains integers and has no implicit padding
293 unsafe impl ByteValued for VirtioIommuProbeResvMem {}
294 // SAFETY: the data structure only contains integers and has no implicit padding
295 unsafe impl ByteValued for VirtioIommuFault {}
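
// Illustrative sketch (not part of the upstream file): the ByteValued impls
// above are what let the device copy these packed structs to and from guest
// memory as raw bytes, e.g. turning a reply tail into its wire representation:
#[allow(dead_code)]
fn example_tail_bytes() -> Vec<u8> {
    let tail = VirtioIommuReqTail {
        status: VIRTIO_IOMMU_S_OK,
        ..Default::default()
    };
    tail.as_slice().to_vec()
}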
296 
297 #[derive(Error, Debug)]
298 enum Error {
299     #[error("Guest gave us bad memory addresses: {0}")]
300     GuestMemory(GuestMemoryError),
301     #[error("Guest gave us a write-only descriptor that the protocol says to read from")]
302     UnexpectedWriteOnlyDescriptor,
303     #[error("Guest gave us a read-only descriptor that the protocol says to write to")]
304     UnexpectedReadOnlyDescriptor,
305     #[error("Guest gave us too few descriptors in a descriptor chain")]
306     DescriptorChainTooShort,
307     #[error("Guest gave us a buffer that was too short to use")]
308     BufferLengthTooSmall,
309     #[error("Guest sent us an invalid request")]
310     InvalidRequest,
311     #[error("Guest sent us an invalid ATTACH request")]
312     InvalidAttachRequest,
313     #[error("Guest sent us an invalid DETACH request")]
314     InvalidDetachRequest,
315     #[error("Guest sent us an invalid MAP request")]
316     InvalidMapRequest,
317     #[error("Cannot map because the domain is in bypass mode")]
318     InvalidMapRequestBypassDomain,
319     #[error("Cannot map because the domain is missing")]
320     InvalidMapRequestMissingDomain,
321     #[error("Guest sent us an invalid UNMAP request")]
322     InvalidUnmapRequest,
323     #[error("Cannot unmap because the domain is in bypass mode")]
324     InvalidUnmapRequestBypassDomain,
325     #[error("Cannot unmap because the domain is missing")]
326     InvalidUnmapRequestMissingDomain,
327     #[error("Guest sent us an invalid PROBE request")]
328     InvalidProbeRequest,
329     #[error("Failed to perform external mapping: {0}")]
330     ExternalMapping(io::Error),
331     #[error("Failed to perform external unmapping: {0}")]
332     ExternalUnmapping(io::Error),
333     #[error("Failed adding used index: {0}")]
334     QueueAddUsed(virtio_queue::Error),
335 }
336 
337 struct Request {}
338 
339 impl Request {
340     // Parse the available vring buffer. Based on the hashmap table of external
341     // mappings required from various devices such as VFIO or vhost-user ones,
342     // this function might update the hashmap table of external mappings per
343     // domain.
344     // Basically, the VMM knows about the device_id <=> mapping relationship
345     // before running the VM, but at runtime, a new domain <=> mapping hashmap
346     // is created based on the information provided from the guest driver for
347     // virtio-iommu (giving the link device_id <=> domain).
348     fn parse(
349         desc_chain: &mut DescriptorChain<GuestMemoryLoadGuard<GuestMemoryMmap>>,
350         mapping: &Arc<IommuMapping>,
351         ext_mapping: &BTreeMap<u32, Arc<dyn ExternalDmaMapping>>,
352         msi_iova_space: (u64, u64),
353     ) -> result::Result<usize, Error> {
354         let desc = desc_chain
355             .next()
356             .ok_or(Error::DescriptorChainTooShort)
357             .map_err(|e| {
358                 error!("Missing head descriptor");
359                 e
360             })?;
361 
362         // The descriptor contains the request type which MUST be readable.
363         if desc.is_write_only() {
364             return Err(Error::UnexpectedWriteOnlyDescriptor);
365         }
366 
367         if (desc.len() as usize) < size_of::<VirtioIommuReqHead>() {
368             return Err(Error::InvalidRequest);
369         }
370 
371         let req_head: VirtioIommuReqHead = desc_chain
372             .memory()
373             .read_obj(desc.addr())
374             .map_err(Error::GuestMemory)?;
375         let req_offset = size_of::<VirtioIommuReqHead>();
376         let desc_size_left = (desc.len() as usize) - req_offset;
377         let req_addr = if let Some(addr) = desc.addr().checked_add(req_offset as u64) {
378             addr
379         } else {
380             return Err(Error::InvalidRequest);
381         };
382 
383         let (msi_iova_start, msi_iova_end) = msi_iova_space;
384 
385         // Create the reply
386         let mut reply: Vec<u8> = Vec::new();
387         let mut status = VIRTIO_IOMMU_S_OK;
388         let mut hdr_len = 0;
389 
390         let result = (|| {
391             match req_head.type_ {
392                 VIRTIO_IOMMU_T_ATTACH => {
393                     if desc_size_left != size_of::<VirtioIommuReqAttach>() {
394                         status = VIRTIO_IOMMU_S_INVAL;
395                         return Err(Error::InvalidAttachRequest);
396                     }
397 
398                     let req: VirtioIommuReqAttach = desc_chain
399                         .memory()
400                         .read_obj(req_addr as GuestAddress)
401                         .map_err(Error::GuestMemory)?;
402                     debug!("Attach request {:?}", req);
403 
404                     // Copy the fields to locals so we don't take unaligned references into the packed struct.
405                     let domain_id = req.domain;
406                     let endpoint = req.endpoint;
407                     let bypass =
408                         (req.flags & VIRTIO_IOMMU_ATTACH_F_BYPASS) == VIRTIO_IOMMU_ATTACH_F_BYPASS;
409 
410                     // Add endpoint associated with specific domain
411                     mapping
412                         .endpoints
413                         .write()
414                         .unwrap()
415                         .insert(endpoint, domain_id);
416 
417                     // Add new domain with no mapping if the entry didn't exist yet
418                     let mut domains = mapping.domains.write().unwrap();
419                     let domain = Domain {
420                         mappings: BTreeMap::new(),
421                         bypass,
422                     };
423                     domains.entry(domain_id).or_insert_with(|| domain);
424                 }
425                 VIRTIO_IOMMU_T_DETACH => {
426                     if desc_size_left != size_of::<VirtioIommuReqDetach>() {
427                         status = VIRTIO_IOMMU_S_INVAL;
428                         return Err(Error::InvalidDetachRequest);
429                     }
430 
431                     let req: VirtioIommuReqDetach = desc_chain
432                         .memory()
433                         .read_obj(req_addr as GuestAddress)
434                         .map_err(Error::GuestMemory)?;
435                     debug!("Detach request {:?}", req);
436 
437                     // Copy the fields to locals so we don't take unaligned references into the packed struct.
438                     let domain_id = req.domain;
439                     let endpoint = req.endpoint;
440 
441                     // Remove endpoint associated with specific domain
442                     mapping.endpoints.write().unwrap().remove(&endpoint);
443 
444                     // After all endpoints have been successfully detached from a
445                     // domain, the domain can be removed. This means we must remove
446                     // the mappings associated with this domain.
447                     if mapping
448                         .endpoints
449                         .write()
450                         .unwrap()
451                         .iter()
452                         .filter(|(_, &d)| d == domain_id)
453                         .count()
454                         == 0
455                     {
456                         mapping.domains.write().unwrap().remove(&domain_id);
457                     }
458                 }
459                 VIRTIO_IOMMU_T_MAP => {
460                     if desc_size_left != size_of::<VirtioIommuReqMap>() {
461                         status = VIRTIO_IOMMU_S_INVAL;
462                         return Err(Error::InvalidMapRequest);
463                     }
464 
465                     let req: VirtioIommuReqMap = desc_chain
466                         .memory()
467                         .read_obj(req_addr as GuestAddress)
468                         .map_err(Error::GuestMemory)?;
469                     debug!("Map request {:?}", req);
470 
471                     // Copy the fields to locals so we don't take unaligned references into the packed struct.
472                     let domain_id = req.domain;
473 
474                     if let Some(domain) = mapping.domains.read().unwrap().get(&domain_id) {
475                         if domain.bypass {
476                             status = VIRTIO_IOMMU_S_INVAL;
477                             return Err(Error::InvalidMapRequestBypassDomain);
478                         }
479                     } else {
480                         status = VIRTIO_IOMMU_S_INVAL;
481                         return Err(Error::InvalidMapRequestMissingDomain);
482                     }
483 
484                     // Find the list of endpoints attached to the given domain.
485                     let endpoints: Vec<u32> = mapping
486                         .endpoints
487                         .write()
488                         .unwrap()
489                         .iter()
490                         .filter(|(_, &d)| d == domain_id)
491                         .map(|(&e, _)| e)
492                         .collect();
493 
494                     // Trigger external mapping if necessary.
495                     for endpoint in endpoints {
496                         if let Some(ext_map) = ext_mapping.get(&endpoint) {
497                             let size = req.virt_end - req.virt_start + 1;
498                             ext_map
499                                 .map(req.virt_start, req.phys_start, size)
500                                 .map_err(Error::ExternalMapping)?;
501                         }
502                     }
503 
504                     // Add new mapping associated with the domain
505                     mapping
506                         .domains
507                         .write()
508                         .unwrap()
509                         .get_mut(&domain_id)
510                         .unwrap()
511                         .mappings
512                         .insert(
513                             req.virt_start,
514                             Mapping {
515                                 gpa: req.phys_start,
516                                 size: req.virt_end - req.virt_start + 1,
517                             },
518                         );
519                 }
520                 VIRTIO_IOMMU_T_UNMAP => {
521                     if desc_size_left != size_of::<VirtioIommuReqUnmap>() {
522                         status = VIRTIO_IOMMU_S_INVAL;
523                         return Err(Error::InvalidUnmapRequest);
524                     }
525 
526                     let req: VirtioIommuReqUnmap = desc_chain
527                         .memory()
528                         .read_obj(req_addr as GuestAddress)
529                         .map_err(Error::GuestMemory)?;
530                     debug!("Unmap request {:?}", req);
531 
532                     // Copy the fields to locals so we don't take unaligned references into the packed struct.
533                     let domain_id = req.domain;
534                     let virt_start = req.virt_start;
535 
536                     if let Some(domain) = mapping.domains.read().unwrap().get(&domain_id) {
537                         if domain.bypass {
538                             status = VIRTIO_IOMMU_S_INVAL;
539                             return Err(Error::InvalidUnmapRequestBypassDomain);
540                         }
541                     } else {
542                         status = VIRTIO_IOMMU_S_INVAL;
543                         return Err(Error::InvalidUnmapRequestMissingDomain);
544                     }
545 
546                     // Find the list of endpoints attached to the given domain.
547                     let endpoints: Vec<u32> = mapping
548                         .endpoints
549                         .write()
550                         .unwrap()
551                         .iter()
552                         .filter(|(_, &d)| d == domain_id)
553                         .map(|(&e, _)| e)
554                         .collect();
555 
556                     // Trigger external unmapping if necessary.
557                     for endpoint in endpoints {
558                         if let Some(ext_map) = ext_mapping.get(&endpoint) {
559                             let size = req.virt_end - virt_start + 1;
560                             ext_map
561                                 .unmap(virt_start, size)
562                                 .map_err(Error::ExternalUnmapping)?;
563                         }
564                     }
565 
566                     // Remove mapping associated with the domain
567                     mapping
568                         .domains
569                         .write()
570                         .unwrap()
571                         .get_mut(&domain_id)
572                         .unwrap()
573                         .mappings
574                         .remove(&virt_start);
575                 }
576                 VIRTIO_IOMMU_T_PROBE => {
577                     if desc_size_left != size_of::<VirtioIommuReqProbe>() {
578                         status = VIRTIO_IOMMU_S_INVAL;
579                         return Err(Error::InvalidProbeRequest);
580                     }
581 
582                     let req: VirtioIommuReqProbe = desc_chain
583                         .memory()
584                         .read_obj(req_addr as GuestAddress)
585                         .map_err(Error::GuestMemory)?;
586                     debug!("Probe request {:?}", req);
587 
588                     let probe_prop = VirtioIommuProbeProperty {
589                         type_: VIRTIO_IOMMU_PROBE_T_RESV_MEM,
590                         length: size_of::<VirtioIommuProbeResvMem>() as u16,
591                     };
592                     reply.extend_from_slice(probe_prop.as_slice());
593 
594                     let resv_mem = VirtioIommuProbeResvMem {
595                         subtype: VIRTIO_IOMMU_RESV_MEM_T_MSI,
596                         start: msi_iova_start,
597                         end: msi_iova_end,
598                         ..Default::default()
599                     };
600                     reply.extend_from_slice(resv_mem.as_slice());
601 
602                     hdr_len = PROBE_PROP_SIZE;
603                 }
604                 _ => {
605                     status = VIRTIO_IOMMU_S_INVAL;
606                     return Err(Error::InvalidRequest);
607                 }
608             }
609             Ok(())
610         })();
611 
612         let status_desc = desc_chain.next().ok_or(Error::DescriptorChainTooShort)?;
613 
614         // The status MUST always be writable
615         if !status_desc.is_write_only() {
616             return Err(Error::UnexpectedReadOnlyDescriptor);
617         }
618 
619         if status_desc.len() < hdr_len + size_of::<VirtioIommuReqTail>() as u32 {
620             return Err(Error::BufferLengthTooSmall);
621         }
622 
623         let tail = VirtioIommuReqTail {
624             status,
625             ..Default::default()
626         };
627         reply.extend_from_slice(tail.as_slice());
628 
629         // Make sure we return the result of the request to the guest before
630         // we return a potential error internally.
631         desc_chain
632             .memory()
633             .write_slice(reply.as_slice(), status_desc.addr())
634             .map_err(Error::GuestMemory)?;
635 
636         // Return the error if the result was not Ok().
637         result?;
638 
639         Ok((hdr_len as usize) + size_of::<VirtioIommuReqTail>())
640     }
641 }
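
// Illustrative sketch (not part of the upstream file): the first (readable)
// descriptor handed to Request::parse() carries a VirtioIommuReqHead followed
// immediately by the type-specific body. Building such a payload for an ATTACH
// request could look like this:
#[allow(dead_code)]
fn example_attach_payload(domain: u32, endpoint: u32) -> Vec<u8> {
    let head = VirtioIommuReqHead {
        type_: VIRTIO_IOMMU_T_ATTACH,
        ..Default::default()
    };
    let body = VirtioIommuReqAttach {
        domain,
        endpoint,
        flags: 0,
        ..Default::default()
    };
    let mut payload = head.as_slice().to_vec();
    payload.extend_from_slice(body.as_slice());
    payload
}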
642 
643 struct IommuEpollHandler {
644     mem: GuestMemoryAtomic<GuestMemoryMmap>,
645     request_queue: Queue,
646     _event_queue: Queue,
647     interrupt_cb: Arc<dyn VirtioInterrupt>,
648     request_queue_evt: EventFd,
649     _event_queue_evt: EventFd,
650     kill_evt: EventFd,
651     pause_evt: EventFd,
652     mapping: Arc<IommuMapping>,
653     ext_mapping: Arc<Mutex<BTreeMap<u32, Arc<dyn ExternalDmaMapping>>>>,
654     msi_iova_space: (u64, u64),
655 }
656 
657 impl IommuEpollHandler {
658     fn request_queue(&mut self) -> Result<bool, Error> {
659         let mut used_descs = false;
660         while let Some(mut desc_chain) = self.request_queue.pop_descriptor_chain(self.mem.memory())
661         {
662             let len = Request::parse(
663                 &mut desc_chain,
664                 &self.mapping,
665                 &self.ext_mapping.lock().unwrap(),
666                 self.msi_iova_space,
667             )?;
668 
669             self.request_queue
670                 .add_used(desc_chain.memory(), desc_chain.head_index(), len as u32)
671                 .map_err(Error::QueueAddUsed)?;
672 
673             used_descs = true;
674         }
675 
676         Ok(used_descs)
677     }
678 
679     fn signal_used_queue(&self, queue_index: u16) -> result::Result<(), DeviceError> {
680         self.interrupt_cb
681             .trigger(VirtioInterruptType::Queue(queue_index))
682             .map_err(|e| {
683                 error!("Failed to signal used queue: {:?}", e);
684                 DeviceError::FailedSignalingUsedQueue(e)
685             })
686     }
687 
688     fn run(
689         &mut self,
690         paused: Arc<AtomicBool>,
691         paused_sync: Arc<Barrier>,
692     ) -> result::Result<(), EpollHelperError> {
693         let mut helper = EpollHelper::new(&self.kill_evt, &self.pause_evt)?;
694         helper.add_event(self.request_queue_evt.as_raw_fd(), REQUEST_Q_EVENT)?;
695         helper.run(paused, paused_sync, self)?;
696 
697         Ok(())
698     }
699 }
700 
701 impl EpollHelperHandler for IommuEpollHandler {
702     fn handle_event(
703         &mut self,
704         _helper: &mut EpollHelper,
705         event: &epoll::Event,
706     ) -> result::Result<(), EpollHelperError> {
707         let ev_type = event.data as u16;
708         match ev_type {
709             REQUEST_Q_EVENT => {
710                 self.request_queue_evt.read().map_err(|e| {
711                     EpollHelperError::HandleEvent(anyhow!("Failed to get queue event: {:?}", e))
712                 })?;
713 
714                 let needs_notification = self.request_queue().map_err(|e| {
715                     EpollHelperError::HandleEvent(anyhow!(
716                         "Failed to process request queue: {:?}",
717                         e
718                     ))
719                 })?;
720                 if needs_notification {
721                     self.signal_used_queue(0).map_err(|e| {
722                         EpollHelperError::HandleEvent(anyhow!(
723                             "Failed to signal used queue: {:?}",
724                             e
725                         ))
726                     })?;
727                 }
728             }
729             _ => {
730                 return Err(EpollHelperError::HandleEvent(anyhow!(
731                     "Unexpected event: {}",
732                     ev_type
733                 )));
734             }
735         }
736         Ok(())
737     }
738 }
739 
740 #[derive(Clone, Copy, Debug, Versionize)]
741 struct Mapping {
742     gpa: u64,
743     size: u64,
744 }
745 
746 #[derive(Clone, Debug)]
747 struct Domain {
748     mappings: BTreeMap<u64, Mapping>,
749     bypass: bool,
750 }
751 
752 #[derive(Debug)]
753 pub struct IommuMapping {
754     // Domain related to an endpoint.
755     endpoints: Arc<RwLock<BTreeMap<u32, u32>>>,
756     // Information related to each domain.
757     domains: Arc<RwLock<BTreeMap<u32, Domain>>>,
758     // Global flag indicating if endpoints that are not attached to any domain
759     // are in bypass mode.
760     bypass: AtomicBool,
761 }
762 
763 impl DmaRemapping for IommuMapping {
764     fn translate_gva(&self, id: u32, addr: u64) -> std::result::Result<u64, std::io::Error> {
765         debug!("Translate GVA addr 0x{:x}", addr);
766         if let Some(domain_id) = self.endpoints.read().unwrap().get(&id) {
767             if let Some(domain) = self.domains.read().unwrap().get(domain_id) {
768                 // Directly return identity mapping in case the domain is in
769                 // bypass mode.
770                 if domain.bypass {
771                     return Ok(addr);
772                 }
773 
774                 let range_start = if VIRTIO_IOMMU_PAGE_SIZE_MASK > addr {
775                     0
776                 } else {
777                     addr - VIRTIO_IOMMU_PAGE_SIZE_MASK
778                 };
779                 for (&key, &value) in domain
780                     .mappings
781                     .range((Included(&range_start), Included(&addr)))
782                 {
783                     if addr >= key && addr < key + value.size {
784                         let new_addr = addr - key + value.gpa;
785                         debug!("Into GPA addr 0x{:x}", new_addr);
786                         return Ok(new_addr);
787                     }
788                 }
789             }
790         } else if self.bypass.load(Ordering::Acquire) {
791             return Ok(addr);
792         }
793 
794         Err(io::Error::new(
795             io::ErrorKind::Other,
796             format!("failed to translate GVA addr 0x{addr:x}"),
797         ))
798     }
799 
800     fn translate_gpa(&self, id: u32, addr: u64) -> std::result::Result<u64, std::io::Error> {
801         debug!("Translate GPA addr 0x{:x}", addr);
802         if let Some(domain_id) = self.endpoints.read().unwrap().get(&id) {
803             if let Some(domain) = self.domains.read().unwrap().get(domain_id) {
804                 // Directly return identity mapping in case the domain is in
805                 // bypass mode.
806                 if domain.bypass {
807                     return Ok(addr);
808                 }
809 
810                 for (&key, &value) in domain.mappings.iter() {
811                     if addr >= value.gpa && addr < value.gpa + value.size {
812                         let new_addr = addr - value.gpa + key;
813                         debug!("Into GVA addr 0x{:x}", new_addr);
814                         return Ok(new_addr);
815                     }
816                 }
817             }
818         } else if self.bypass.load(Ordering::Acquire) {
819             return Ok(addr);
820         }
821 
822         Err(io::Error::new(
823             io::ErrorKind::Other,
824             format!("failed to translate GPA addr 0x{addr:x}"),
825         ))
826     }
827 }
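
// Illustrative sketch (not part of the upstream file): with endpoint 3 attached
// to domain 1 and a single 4KiB mapping of IOVA 0x1000 -> GPA 0x8000, the
// lookups above resolve 0x1004 to 0x8004 and reject unmapped addresses.
#[cfg(test)]
mod translate_example {
    use super::*;

    #[test]
    fn gva_to_gpa_lookup() {
        let mapping = IommuMapping {
            endpoints: Arc::new(RwLock::new([(3u32, 1u32)].into_iter().collect())),
            domains: Arc::new(RwLock::new(
                [(
                    1u32,
                    Domain {
                        mappings: [(0x1000u64, Mapping { gpa: 0x8000, size: 0x1000 })]
                            .into_iter()
                            .collect(),
                        bypass: false,
                    },
                )]
                .into_iter()
                .collect(),
            )),
            bypass: AtomicBool::new(false),
        };

        // IOVA inside the mapping translates to the corresponding GPA.
        assert_eq!(mapping.translate_gva(3, 0x1004).unwrap(), 0x8004);
        // IOVA outside any mapping is rejected.
        assert!(mapping.translate_gva(3, 0x9000).is_err());
        // The reverse lookup goes from GPA back to IOVA.
        assert_eq!(mapping.translate_gpa(3, 0x8004).unwrap(), 0x1004);
    }
}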
828 
829 #[derive(Debug)]
830 pub struct AccessPlatformMapping {
831     id: u32,
832     mapping: Arc<IommuMapping>,
833 }
834 
835 impl AccessPlatformMapping {
836     pub fn new(id: u32, mapping: Arc<IommuMapping>) -> Self {
837         AccessPlatformMapping { id, mapping }
838     }
839 }
840 
841 impl AccessPlatform for AccessPlatformMapping {
842     fn translate_gva(&self, base: u64, _size: u64) -> std::result::Result<u64, std::io::Error> {
843         self.mapping.translate_gva(self.id, base)
844     }
845     fn translate_gpa(&self, base: u64, _size: u64) -> std::result::Result<u64, std::io::Error> {
846         self.mapping.translate_gpa(self.id, base)
847     }
848 }
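
// Illustrative sketch (not part of the upstream file): a virtio device placed
// behind the IOMMU gets an AccessPlatformMapping for its endpoint so that its
// descriptor addresses are translated through the shared IommuMapping. The
// endpoint id used here is purely hypothetical.
#[allow(dead_code)]
fn example_access_platform(mapping: Arc<IommuMapping>) -> Arc<dyn AccessPlatform> {
    Arc::new(AccessPlatformMapping::new(0x10, mapping))
}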
849 
850 pub struct Iommu {
851     common: VirtioCommon,
852     id: String,
853     config: VirtioIommuConfig,
854     mapping: Arc<IommuMapping>,
855     ext_mapping: Arc<Mutex<BTreeMap<u32, Arc<dyn ExternalDmaMapping>>>>,
856     seccomp_action: SeccompAction,
857     exit_evt: EventFd,
858     msi_iova_space: (u64, u64),
859 }
860 
861 type EndpointsState = Vec<(u32, u32)>;
862 type DomainsState = Vec<(u32, (Vec<(u64, Mapping)>, bool))>;
863 
864 #[derive(Versionize)]
865 pub struct IommuState {
866     avail_features: u64,
867     acked_features: u64,
868     endpoints: EndpointsState,
869     domains: DomainsState,
870 }
871 
872 impl VersionMapped for IommuState {}
873 
874 impl Iommu {
875     pub fn new(
876         id: String,
877         seccomp_action: SeccompAction,
878         exit_evt: EventFd,
879         msi_iova_space: (u64, u64),
880         state: Option<IommuState>,
881     ) -> io::Result<(Self, Arc<IommuMapping>)> {
882         let (avail_features, acked_features, endpoints, domains, paused) =
883             if let Some(state) = state {
884                 info!("Restoring virtio-iommu {}", id);
885                 (
886                     state.avail_features,
887                     state.acked_features,
888                     state.endpoints.into_iter().collect(),
889                     state
890                         .domains
891                         .into_iter()
892                         .map(|(k, v)| {
893                             (
894                                 k,
895                                 Domain {
896                                     mappings: v.0.into_iter().collect(),
897                                     bypass: v.1,
898                                 },
899                             )
900                         })
901                         .collect(),
902                     true,
903                 )
904             } else {
905                 let avail_features = 1u64 << VIRTIO_F_VERSION_1
906                     | 1u64 << VIRTIO_IOMMU_F_MAP_UNMAP
907                     | 1u64 << VIRTIO_IOMMU_F_PROBE
908                     | 1u64 << VIRTIO_IOMMU_F_BYPASS_CONFIG;
909 
910                 (avail_features, 0, BTreeMap::new(), BTreeMap::new(), false)
911             };
912 
913         let config = VirtioIommuConfig {
914             page_size_mask: VIRTIO_IOMMU_PAGE_SIZE_MASK,
915             probe_size: PROBE_PROP_SIZE,
916             ..Default::default()
917         };
918 
919         let mapping = Arc::new(IommuMapping {
920             endpoints: Arc::new(RwLock::new(endpoints)),
921             domains: Arc::new(RwLock::new(domains)),
922             bypass: AtomicBool::new(true),
923         });
924 
925         Ok((
926             Iommu {
927                 id,
928                 common: VirtioCommon {
929                     device_type: VirtioDeviceType::Iommu as u32,
930                     queue_sizes: QUEUE_SIZES.to_vec(),
931                     avail_features,
932                     acked_features,
933                     paused_sync: Some(Arc::new(Barrier::new(2))),
934                     min_queues: NUM_QUEUES as u16,
935                     paused: Arc::new(AtomicBool::new(paused)),
936                     ..Default::default()
937                 },
938                 config,
939                 mapping: mapping.clone(),
940                 ext_mapping: Arc::new(Mutex::new(BTreeMap::new())),
941                 seccomp_action,
942                 exit_evt,
943                 msi_iova_space,
944             },
945             mapping,
946         ))
947     }
948 
949     fn state(&self) -> IommuState {
950         IommuState {
951             avail_features: self.common.avail_features,
952             acked_features: self.common.acked_features,
953             endpoints: self
954                 .mapping
955                 .endpoints
956                 .read()
957                 .unwrap()
958                 .clone()
959                 .into_iter()
960                 .collect(),
961             domains: self
962                 .mapping
963                 .domains
964                 .read()
965                 .unwrap()
966                 .clone()
967                 .into_iter()
968                 .map(|(k, v)| (k, (v.mappings.into_iter().collect(), v.bypass)))
969                 .collect(),
970         }
971     }
972 
973     fn update_bypass(&mut self) {
974         // Use bypass from config if VIRTIO_IOMMU_F_BYPASS_CONFIG has been negotiated
975         if !self
976             .common
977             .feature_acked(VIRTIO_IOMMU_F_BYPASS_CONFIG.into())
978         {
979             return;
980         }
981 
982         let bypass = self.config.bypass == 1;
983         info!("Updating bypass mode to {}", bypass);
984         self.mapping.bypass.store(bypass, Ordering::Release);
985     }
986 
987     pub fn add_external_mapping(&mut self, device_id: u32, mapping: Arc<dyn ExternalDmaMapping>) {
988         self.ext_mapping.lock().unwrap().insert(device_id, mapping);
989     }
990 
991     #[cfg(fuzzing)]
992     pub fn wait_for_epoll_threads(&mut self) {
993         self.common.wait_for_epoll_threads();
994     }
995 }
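
// Illustrative sketch (not part of the upstream file): the VMM registers the
// external DMA mapping handler of each device (e.g. VFIO or vhost-user) placed
// behind the virtual IOMMU, keyed by that device's endpoint id, so later MAP
// and UNMAP requests can be propagated to it. The names below are hypothetical.
#[allow(dead_code)]
fn example_register_external_mapping(
    iommu: &mut Iommu,
    endpoint_id: u32,
    dma_handler: Arc<dyn ExternalDmaMapping>,
) {
    iommu.add_external_mapping(endpoint_id, dma_handler);
}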
996 
997 impl Drop for Iommu {
998     fn drop(&mut self) {
999         if let Some(kill_evt) = self.common.kill_evt.take() {
1000             // Ignore the result because there is nothing we can do about it.
1001             let _ = kill_evt.write(1);
1002         }
1003     }
1004 }
1005 
1006 impl VirtioDevice for Iommu {
1007     fn device_type(&self) -> u32 {
1008         self.common.device_type
1009     }
1010 
1011     fn queue_max_sizes(&self) -> &[u16] {
1012         &self.common.queue_sizes
1013     }
1014 
1015     fn features(&self) -> u64 {
1016         self.common.avail_features
1017     }
1018 
1019     fn ack_features(&mut self, value: u64) {
1020         self.common.ack_features(value)
1021     }
1022 
1023     fn read_config(&self, offset: u64, data: &mut [u8]) {
1024         self.read_config_from_slice(self.config.as_slice(), offset, data);
1025     }
1026 
1027     fn write_config(&mut self, offset: u64, data: &[u8]) {
1028         // The "bypass" field is the only mutable field
1029         let bypass_offset =
1030             (&self.config.bypass as *const _ as u64) - (&self.config as *const _ as u64);
1031         if offset != bypass_offset || data.len() != std::mem::size_of_val(&self.config.bypass) {
1032             error!(
1033                 "Attempt to write to read-only field: offset {:x} length {}",
1034                 offset,
1035                 data.len()
1036             );
1037             return;
1038         }
1039 
1040         self.config.bypass = data[0];
1041 
1042         self.update_bypass();
1043     }
1044 
1045     fn activate(
1046         &mut self,
1047         mem: GuestMemoryAtomic<GuestMemoryMmap>,
1048         interrupt_cb: Arc<dyn VirtioInterrupt>,
1049         mut queues: Vec<(usize, Queue, EventFd)>,
1050     ) -> ActivateResult {
1051         self.common.activate(&queues, &interrupt_cb)?;
1052         let (kill_evt, pause_evt) = self.common.dup_eventfds();
1053 
1054         let (_, request_queue, request_queue_evt) = queues.remove(0);
1055         let (_, _event_queue, _event_queue_evt) = queues.remove(0);
1056 
1057         let mut handler = IommuEpollHandler {
1058             mem,
1059             request_queue,
1060             _event_queue,
1061             interrupt_cb,
1062             request_queue_evt,
1063             _event_queue_evt,
1064             kill_evt,
1065             pause_evt,
1066             mapping: self.mapping.clone(),
1067             ext_mapping: self.ext_mapping.clone(),
1068             msi_iova_space: self.msi_iova_space,
1069         };
1070 
1071         let paused = self.common.paused.clone();
1072         let paused_sync = self.common.paused_sync.clone();
1073         let mut epoll_threads = Vec::new();
1074         spawn_virtio_thread(
1075             &self.id,
1076             &self.seccomp_action,
1077             Thread::VirtioIommu,
1078             &mut epoll_threads,
1079             &self.exit_evt,
1080             move || handler.run(paused, paused_sync.unwrap()),
1081         )?;
1082 
1083         self.common.epoll_threads = Some(epoll_threads);
1084 
1085         event!("virtio-device", "activated", "id", &self.id);
1086         Ok(())
1087     }
1088 
1089     fn reset(&mut self) -> Option<Arc<dyn VirtioInterrupt>> {
1090         let result = self.common.reset();
1091         event!("virtio-device", "reset", "id", &self.id);
1092         result
1093     }
1094 }
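
// Illustrative sketch (not part of the upstream file): since "bypass" is the
// only guest-writable config field and it sits at byte offset 36 (see the
// VirtioIommuConfig layout above), disabling global bypass through the config
// space could look like this:
#[allow(dead_code)]
fn example_disable_bypass(iommu: &mut Iommu) {
    iommu.write_config(36, &[0u8]);
}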
1095 
1096 impl Pausable for Iommu {
1097     fn pause(&mut self) -> result::Result<(), MigratableError> {
1098         self.common.pause()
1099     }
1100 
1101     fn resume(&mut self) -> result::Result<(), MigratableError> {
1102         self.common.resume()
1103     }
1104 }
1105 
1106 impl Snapshottable for Iommu {
1107     fn id(&self) -> String {
1108         self.id.clone()
1109     }
1110 
1111     fn snapshot(&mut self) -> std::result::Result<Snapshot, MigratableError> {
1112         Snapshot::new_from_versioned_state(&self.state())
1113     }
1114 }
1115 impl Transportable for Iommu {}
1116 impl Migratable for Iommu {}
1117