// xref: /cloud-hypervisor/vm-virtio/src/queue.rs (revision 9af2968a7dc47b89bf07ea9dc5e735084efcfa3a)
// Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Portions Copyright 2017 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE-BSD-3-Clause file.
//
// Copyright © 2019 Intel Corporation
//
// SPDX-License-Identifier: Apache-2.0 AND BSD-3-Clause

use crate::{VirtioIommuRemapping, VIRTIO_MSI_NO_VECTOR};
use std::cmp::min;
use std::convert::TryInto;
use std::fmt::{self, Display};
use std::num::Wrapping;
use std::sync::atomic::{fence, Ordering};
use std::sync::Arc;
use vm_memory::{
    bitmap::AtomicBitmap, Address, ByteValued, Bytes, GuestAddress, GuestMemory, GuestMemoryError,
    GuestUsize,
};

pub const VIRTQ_DESC_F_NEXT: u16 = 0x1;
pub const VIRTQ_DESC_F_WRITE: u16 = 0x2;
pub const VIRTQ_DESC_F_INDIRECT: u16 = 0x4;

type GuestMemoryMmap = vm_memory::GuestMemoryMmap<AtomicBitmap>;

#[derive(Debug)]
pub enum Error {
    GuestMemoryError,
    InvalidIndirectDescriptor,
    InvalidChain,
    InvalidOffset(u64),
    InvalidRingIndexFromMemory(GuestMemoryError),
}

impl Display for Error {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        use self::Error::*;

        match self {
            GuestMemoryError => write!(f, "error accessing guest memory"),
            InvalidChain => write!(f, "invalid descriptor chain"),
            InvalidIndirectDescriptor => write!(f, "invalid indirect descriptor"),
            InvalidOffset(o) => write!(f, "invalid offset {}", o),
            InvalidRingIndexFromMemory(e) => write!(f, "invalid ring index from memory: {}", e),
        }
    }
}

// GuestMemoryMmap::read_obj() will be used to fetch the descriptor, which has
// an explicit constraint that the entire descriptor must not cross a page
// boundary. Otherwise the descriptor may be split across two mmap regions,
// which causes GuestMemoryMmap::read_obj() to fail.
//
// The Virtio Spec 1.0 defines the alignment of a VirtIO descriptor as 16 bytes,
// which fulfills the explicit constraint of GuestMemoryMmap::read_obj().

/// An iterator over a single descriptor chain. Not to be confused with AvailIter,
/// which iterates over the descriptor chain heads in a queue.
pub struct DescIter<'a> {
    next: Option<DescriptorChain<'a>>,
}

impl<'a> DescIter<'a> {
    /// Returns an iterator that only yields the readable descriptors in the chain.
    pub fn readable(self) -> impl Iterator<Item = DescriptorChain<'a>> {
        self.filter(|d| !d.is_write_only())
    }

    /// Returns an iterator that only yields the writable descriptors in the chain.
    pub fn writable(self) -> impl Iterator<Item = DescriptorChain<'a>> {
        self.filter(DescriptorChain::is_write_only)
    }
}

impl<'a> Iterator for DescIter<'a> {
    type Item = DescriptorChain<'a>;

    fn next(&mut self) -> Option<Self::Item> {
        if let Some(current) = self.next.take() {
            self.next = current.next_descriptor();
            Some(current)
        } else {
            None
        }
    }
}
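
// A typical device implementation walks a chain once to separate the part of a
// request it may read from the part it must write back. A minimal sketch,
// assuming `head` is a DescriptorChain obtained from Queue::iter() (the
// variable names are illustrative only):
//
//     let readable = head.clone().into_iter().readable();
//     let writable = head.into_iter().writable();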

/// A virtio descriptor, laid out with a C-compatible representation.
#[repr(C)]
#[derive(Default, Clone, Copy)]
pub struct Descriptor {
    addr: u64,
    len: u32,
    flags: u16,
    next: u16,
}

unsafe impl ByteValued for Descriptor {}
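
// A minimal compile-time sketch of the layout assumption relied on by the
// 16-byte offset arithmetic in this file: with #[repr(C)], `Descriptor` is
// exactly the 16 bytes mandated by the virtio spec (u64 addr + u32 len +
// u16 flags + u16 next).
const _: [u8; 16] = [0u8; std::mem::size_of::<Descriptor>()];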

/// A virtio descriptor head, not tied to a GuestMemoryMmap.
pub struct DescriptorHead {
    desc_table: GuestAddress,
    table_size: u16,
    index: u16,
    iommu_mapping_cb: Option<Arc<VirtioIommuRemapping>>,
}

/// A virtio descriptor chain.
#[derive(Clone)]
pub struct DescriptorChain<'a> {
    desc_table: GuestAddress,
    table_size: u16,
    ttl: u16, // used to prevent infinite chain cycles
    iommu_mapping_cb: Option<Arc<VirtioIommuRemapping>>,

    /// Reference to guest memory
    pub mem: &'a GuestMemoryMmap,

    /// Index into the descriptor table
    pub index: u16,

    /// Guest physical address of device specific data
    pub addr: GuestAddress,

    /// Length of device specific data
    pub len: u32,

    /// Includes next, write, and indirect bits
    pub flags: u16,

    /// Index into the descriptor table of the next descriptor if flags has
    /// the next bit set
    pub next: u16,
}

impl<'a> DescriptorChain<'a> {
    pub fn checked_new(
        mem: &GuestMemoryMmap,
        desc_table: GuestAddress,
        table_size: u16,
        index: u16,
        iommu_mapping_cb: Option<Arc<VirtioIommuRemapping>>,
    ) -> Option<DescriptorChain> {
        if index >= table_size {
            return None;
        }

        let desc_head = match mem.checked_offset(desc_table, (index as usize) * 16) {
            Some(a) => a,
            None => return None,
        };
        mem.checked_offset(desc_head, 16)?;

        // These reads can't fail unless Guest memory is hopelessly broken.
        let desc = match mem.read_obj::<Descriptor>(desc_head) {
            Ok(ret) => ret,
            Err(_) => {
                // TODO log address
                error!("Failed to read from memory");
                return None;
            }
        };

        // Translate address if necessary
        let desc_addr = if let Some(iommu_mapping_cb) = &iommu_mapping_cb {
            (iommu_mapping_cb)(desc.addr).unwrap()
        } else {
            desc.addr
        };

        let chain = DescriptorChain {
            mem,
            desc_table,
            table_size,
            ttl: table_size,
            index,
            addr: GuestAddress(desc_addr),
            len: desc.len,
            flags: desc.flags,
            next: desc.next,
            iommu_mapping_cb,
        };

        if chain.is_valid() {
            Some(chain)
        } else {
            None
        }
    }

    pub fn new_from_indirect(&self) -> Result<DescriptorChain, Error> {
        if !self.is_indirect() {
            return Err(Error::InvalidIndirectDescriptor);
        }

        let desc_head = self.addr;
        self.mem
            .checked_offset(desc_head, 16)
            .ok_or(Error::GuestMemoryError)?;

        // These reads can't fail unless Guest memory is hopelessly broken.
        let desc = match self.mem.read_obj::<Descriptor>(desc_head) {
            Ok(ret) => ret,
            Err(_) => return Err(Error::GuestMemoryError),
        };

        // Translate address if necessary
        let (desc_addr, iommu_mapping_cb) =
            if let Some(iommu_mapping_cb) = self.iommu_mapping_cb.clone() {
                (
                    (iommu_mapping_cb)(desc.addr).unwrap(),
                    Some(iommu_mapping_cb),
                )
            } else {
                (desc.addr, None)
            };

        let chain = DescriptorChain {
            mem: self.mem,
            desc_table: self.addr,
            table_size: (self.len / 16).try_into().unwrap(),
            ttl: (self.len / 16).try_into().unwrap(),
            index: 0,
            addr: GuestAddress(desc_addr),
            len: desc.len,
            flags: desc.flags,
            next: desc.next,
            iommu_mapping_cb,
        };

        if !chain.is_valid() {
            return Err(Error::InvalidChain);
        }

        Ok(chain)
    }

    /// Returns a copy of a descriptor referencing a different GuestMemoryMmap object.
    pub fn new_from_head(
        mem: &'a GuestMemoryMmap,
        head: DescriptorHead,
    ) -> Result<DescriptorChain<'a>, Error> {
        match DescriptorChain::checked_new(
            mem,
            head.desc_table,
            head.table_size,
            head.index,
            head.iommu_mapping_cb,
        ) {
            Some(d) => Ok(d),
            None => Err(Error::InvalidChain),
        }
    }

    /// Returns a DescriptorHead that can be used to build a copy of a descriptor
    /// referencing a different GuestMemoryMmap.
    pub fn get_head(&self) -> DescriptorHead {
        DescriptorHead {
            desc_table: self.desc_table,
            table_size: self.table_size,
            index: self.index,
            iommu_mapping_cb: self.iommu_mapping_cb.clone(),
        }
    }

    fn is_valid(&self) -> bool {
        self.mem.check_range(self.addr, self.len as usize)
            && !(self.has_next() && self.next >= self.table_size)
    }

    /// Returns whether this descriptor has another descriptor linked after it.
    pub fn has_next(&self) -> bool {
        self.flags & VIRTQ_DESC_F_NEXT != 0 && self.ttl > 1
    }

    /// Returns whether the driver designated this as a write-only descriptor.
    ///
    /// If this is false, this descriptor is read-only.
    /// Write-only means that the emulated device can write and the driver can read.
    pub fn is_write_only(&self) -> bool {
        self.flags & VIRTQ_DESC_F_WRITE != 0
    }

    pub fn is_indirect(&self) -> bool {
        self.flags & VIRTQ_DESC_F_INDIRECT != 0
    }

    /// Gets the next descriptor in this descriptor chain, if there is one.
    ///
    /// Note that this is distinct from the next descriptor chain returned by `AvailIter`, which is
    /// the head of the next _available_ descriptor chain.
    pub fn next_descriptor(&self) -> Option<DescriptorChain<'a>> {
        if self.has_next() {
            DescriptorChain::checked_new(
                self.mem,
                self.desc_table,
                self.table_size,
                self.next,
                self.iommu_mapping_cb.clone(),
            )
            .map(|mut c| {
                c.ttl = self.ttl - 1;
                c
            })
        } else {
            None
        }
    }
}

impl<'a> IntoIterator for DescriptorChain<'a> {
    type Item = DescriptorChain<'a>;
    type IntoIter = DescIter<'a>;

    fn into_iter(self) -> Self::IntoIter {
        DescIter { next: Some(self) }
    }
}

/// Consuming iterator over all available descriptor chain heads in the queue.
pub struct AvailIter<'a, 'b> {
    mem: &'a GuestMemoryMmap,
    desc_table: GuestAddress,
    avail_ring: GuestAddress,
    next_index: Wrapping<u16>,
    last_index: Wrapping<u16>,
    queue_size: u16,
    next_avail: &'b mut Wrapping<u16>,
    iommu_mapping_cb: Option<Arc<VirtioIommuRemapping>>,
}

impl<'a, 'b> AvailIter<'a, 'b> {
    pub fn new(mem: &'a GuestMemoryMmap, q_next_avail: &'b mut Wrapping<u16>) -> AvailIter<'a, 'b> {
        AvailIter {
            mem,
            desc_table: GuestAddress(0),
            avail_ring: GuestAddress(0),
            next_index: Wrapping(0),
            last_index: Wrapping(0),
            queue_size: 0,
            next_avail: q_next_avail,
            iommu_mapping_cb: None,
        }
    }
}

impl<'a, 'b> Iterator for AvailIter<'a, 'b> {
    type Item = DescriptorChain<'a>;

    fn next(&mut self) -> Option<Self::Item> {
        if self.next_index == self.last_index {
            return None;
        }

        // Widen to usize before multiplying so that a large queue size cannot
        // overflow the u16 arithmetic.
        let offset = 4 + (self.next_index.0 % self.queue_size) as usize * 2;
        let avail_addr = match self.mem.checked_offset(self.avail_ring, offset) {
            Some(a) => a,
            None => return None,
        };
        // This index is checked below in checked_new
        let desc_index: u16 = match self.mem.read_obj(avail_addr) {
            Ok(ret) => ret,
            Err(_) => {
                // TODO log address
                error!("Failed to read from memory");
                return None;
            }
        };

        self.next_index += Wrapping(1);

        let ret = DescriptorChain::checked_new(
            self.mem,
            self.desc_table,
            self.queue_size,
            desc_index,
            self.iommu_mapping_cb.clone(),
        );
        if ret.is_some() {
            *self.next_avail += Wrapping(1);
        }
        ret
    }
}
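
// Layout of the split-ring available ring that the iterator above reads
// (virtio 1.0):
//     u16 flags                  at avail_ring + 0
//     u16 idx                    at avail_ring + 2
//     u16 ring[i]                at avail_ring + 4 + 2 * i
//     u16 used_event (EVENT_IDX) at avail_ring + 4 + 2 * queue_size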

#[derive(Clone)]
/// A virtio queue's parameters.
pub struct Queue {
    /// The maximal size in elements offered by the device
    pub max_size: u16,

    /// The queue size in elements the driver selected
    pub size: u16,

    /// Indicates if the queue is finished with configuration
    pub ready: bool,

    /// Interrupt vector index of the queue
    pub vector: u16,

    /// Guest physical address of the descriptor table
    pub desc_table: GuestAddress,

    /// Guest physical address of the available ring
    pub avail_ring: GuestAddress,

    /// Guest physical address of the used ring
    pub used_ring: GuestAddress,

    pub next_avail: Wrapping<u16>,
    pub next_used: Wrapping<u16>,

    pub iommu_mapping_cb: Option<Arc<VirtioIommuRemapping>>,

    /// VIRTIO_F_RING_EVENT_IDX negotiated
    event_idx: bool,

    /// The last used value when using EVENT_IDX
    signalled_used: Option<Wrapping<u16>>,
}

impl Queue {
    /// Constructs an empty virtio queue with the given `max_size`.
    pub fn new(max_size: u16) -> Queue {
        Queue {
            max_size,
            size: max_size,
            ready: false,
            vector: VIRTIO_MSI_NO_VECTOR,
            desc_table: GuestAddress(0),
            avail_ring: GuestAddress(0),
            used_ring: GuestAddress(0),
            next_avail: Wrapping(0),
            next_used: Wrapping(0),
            iommu_mapping_cb: None,
            event_idx: false,
            signalled_used: None,
        }
    }

    pub fn get_max_size(&self) -> u16 {
        self.max_size
    }

    pub fn enable(&mut self, set: bool) {
        self.ready = set;

        if set {
            // Translate address of descriptor table and vrings.
            if let Some(iommu_mapping_cb) = &self.iommu_mapping_cb {
                self.desc_table =
                    GuestAddress((iommu_mapping_cb)(self.desc_table.raw_value()).unwrap());
                self.avail_ring =
                    GuestAddress((iommu_mapping_cb)(self.avail_ring.raw_value()).unwrap());
                self.used_ring =
                    GuestAddress((iommu_mapping_cb)(self.used_ring.raw_value()).unwrap());
            }
        } else {
            self.desc_table = GuestAddress(0);
            self.avail_ring = GuestAddress(0);
            self.used_ring = GuestAddress(0);
        }
    }

    /// Return the actual size of the queue, as the driver may not set up a
    /// queue as big as the device allows.
    pub fn actual_size(&self) -> u16 {
        min(self.size, self.max_size)
    }

    /// Reset the queue to a state that is acceptable for a device reset
    pub fn reset(&mut self) {
        self.ready = false;
        self.size = self.max_size;
        self.next_avail = Wrapping(0);
        self.next_used = Wrapping(0);
        self.vector = VIRTIO_MSI_NO_VECTOR;
        self.desc_table = GuestAddress(0);
        self.avail_ring = GuestAddress(0);
        self.used_ring = GuestAddress(0);
        self.event_idx = false;
        self.signalled_used = None;
    }

    pub fn is_valid(&self, mem: &GuestMemoryMmap) -> bool {
        let queue_size = self.actual_size() as usize;
        let desc_table = self.desc_table;
        let desc_table_size = 16 * queue_size;
        let avail_ring = self.avail_ring;
        let avail_ring_size = 6 + 2 * queue_size;
        let used_ring = self.used_ring;
        let used_ring_size = 6 + 8 * queue_size;
        if !self.ready {
            error!("attempt to use virtio queue that is not marked ready");
            false
        } else if self.size > self.max_size || self.size == 0 || (self.size & (self.size - 1)) != 0
        {
            error!("virtio queue with invalid size: {}", self.size);
            false
        } else if desc_table
            .checked_add(desc_table_size as GuestUsize)
            .map_or(true, |v| !mem.address_in_range(v))
        {
            error!(
                "virtio queue descriptor table goes out of bounds: start:0x{:08x} size:0x{:08x}",
                desc_table.raw_value(),
                desc_table_size
            );
            false
        } else if avail_ring
            .checked_add(avail_ring_size as GuestUsize)
            .map_or(true, |v| !mem.address_in_range(v))
        {
            error!(
                "virtio queue available ring goes out of bounds: start:0x{:08x} size:0x{:08x}",
                avail_ring.raw_value(),
                avail_ring_size
            );
            false
        } else if used_ring
            .checked_add(used_ring_size as GuestUsize)
            .map_or(true, |v| !mem.address_in_range(v))
        {
            error!(
                "virtio queue used ring goes out of bounds: start:0x{:08x} size:0x{:08x}",
                used_ring.raw_value(),
                used_ring_size
            );
            false
        } else if desc_table.mask(0xf) != 0 {
            error!("virtio queue descriptor table breaks alignment constraints");
            false
        } else if avail_ring.mask(0x1) != 0 {
            error!("virtio queue available ring breaks alignment constraints");
            false
        } else if used_ring.mask(0x3) != 0 {
            error!("virtio queue used ring breaks alignment constraints");
            false
        } else {
            true
        }
    }
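
    // Worked example of the bounds checked above: for actual_size() == 256 the
    // descriptor table occupies 16 * 256 = 4096 bytes, the available ring
    // 6 + 2 * 256 = 518 bytes and the used ring 6 + 8 * 256 = 2054 bytes,
    // with 16-byte, 2-byte and 4-byte alignment respectively.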

    /// A consuming iterator over all available descriptor chain heads offered by the driver.
    pub fn iter<'a, 'b>(&'b mut self, mem: &'a GuestMemoryMmap) -> AvailIter<'a, 'b> {
        let queue_size = self.actual_size();
        let avail_ring = self.avail_ring;

        let index_addr = match mem.checked_offset(avail_ring, 2) {
            Some(ret) => ret,
            None => {
                // TODO log address
                warn!("Invalid offset");
                return AvailIter::new(mem, &mut self.next_avail);
            }
        };
        // Note that last_index has no invalid values
        let last_index: u16 = match mem.read_obj::<u16>(index_addr) {
            Ok(ret) => ret,
            Err(_) => return AvailIter::new(mem, &mut self.next_avail),
        };

        AvailIter {
            mem,
            desc_table: self.desc_table,
            avail_ring,
            next_index: self.next_avail,
            last_index: Wrapping(last_index),
            queue_size,
            next_avail: &mut self.next_avail,
            iommu_mapping_cb: self.iommu_mapping_cb.clone(),
        }
    }

    /// Update avail_event on the used ring with the last index in the avail ring.
    pub fn update_avail_event(&mut self, mem: &GuestMemoryMmap) {
        let index_addr = match mem.checked_offset(self.avail_ring, 2) {
            Some(ret) => ret,
            None => {
                // TODO log address
                warn!("Invalid offset");
                return;
            }
        };
        // Note that last_index has no invalid values
        let last_index: u16 = match mem.read_obj::<u16>(index_addr) {
            Ok(ret) => ret,
            Err(_) => return,
        };

        match mem.checked_offset(self.used_ring, 4 + self.actual_size() as usize * 8) {
            Some(a) => {
                mem.write_obj(last_index, a).unwrap();
            }
            None => warn!("Can't update avail_event"),
        }

        // This fence ensures both guest and us see the correct value (avail idx and avail event)
        fence(Ordering::SeqCst);
    }

    /// Return the value present in the used_event field of the avail ring.
    #[inline(always)]
    pub fn get_used_event(&self, mem: &GuestMemoryMmap) -> Option<Wrapping<u16>> {
        let avail_ring = self.avail_ring;
        let used_event_addr =
            match mem.checked_offset(avail_ring, 4 + self.actual_size() as usize * 2) {
                Some(a) => a,
                None => {
                    warn!("Invalid offset looking for used_event");
                    return None;
                }
            };

        // This fence ensures we're seeing the latest update from the guest.
        fence(Ordering::SeqCst);
        match mem.read_obj::<u16>(used_event_addr) {
            Ok(ret) => Some(Wrapping(ret)),
            Err(_) => None,
        }
    }

    /// Puts an available descriptor head into the used ring for use by the guest.
    pub fn add_used(&mut self, mem: &GuestMemoryMmap, desc_index: u16, len: u32) -> Option<u16> {
        if desc_index >= self.actual_size() {
            error!(
                "attempted to add out of bounds descriptor to used ring: {}",
                desc_index
            );
            return None;
        }

        let used_ring = self.used_ring;
        let next_used = u64::from(self.next_used.0 % self.actual_size());
        let used_elem = used_ring.unchecked_add(4 + next_used * 8);

        // These writes can't fail as we are guaranteed to be within the used ring.
        mem.write_obj(u32::from(desc_index), used_elem).unwrap();
        mem.write_obj(len as u32, used_elem.unchecked_add(4))
            .unwrap();

        self.next_used += Wrapping(1);

        // This fence ensures all descriptor writes are visible before the index update is.
        fence(Ordering::Release);

        mem.write_obj(self.next_used.0 as u16, used_ring.unchecked_add(2))
            .unwrap();

        Some(self.next_used.0)
    }

    /// Goes back one position in the available descriptor chain offered by the driver.
    /// Rust does not support bidirectional iterators. This is the only way to revert the effect
    /// of an iterator increment on the queue.
    pub fn go_to_previous_position(&mut self) {
        self.next_avail -= Wrapping(1);
    }

    /// Get ring's index from memory.
    fn index_from_memory(&self, ring: GuestAddress, mem: &GuestMemoryMmap) -> Result<u16, Error> {
        mem.read_obj::<u16>(
            mem.checked_offset(ring, 2)
                .ok_or_else(|| Error::InvalidOffset(ring.raw_value() + 2))?,
        )
        .map_err(Error::InvalidRingIndexFromMemory)
    }

    /// Get latest index from available ring.
    pub fn avail_index_from_memory(&self, mem: &GuestMemoryMmap) -> Result<u16, Error> {
        self.index_from_memory(self.avail_ring, mem)
    }

    /// Get latest index from used ring.
    pub fn used_index_from_memory(&self, mem: &GuestMemoryMmap) -> Result<u16, Error> {
        self.index_from_memory(self.used_ring, mem)
    }

    pub fn available_descriptors(&self, mem: &GuestMemoryMmap) -> Result<bool, Error> {
        Ok(self.used_index_from_memory(mem)? < self.avail_index_from_memory(mem)?)
    }

    pub fn set_event_idx(&mut self, enabled: bool) {
        /* Also reset the last signalled event */
        self.signalled_used = None;
        self.event_idx = enabled;
    }

    pub fn needs_notification(&mut self, mem: &GuestMemoryMmap, used_idx: Wrapping<u16>) -> bool {
        if !self.event_idx {
            return true;
        }

        let mut notify = true;

        if let Some(old_idx) = self.signalled_used {
            if let Some(used_event) = self.get_used_event(mem) {
                debug!(
                    "used_event = {:?} used_idx = {:?} old_idx = {:?}",
                    used_event, used_idx, old_idx
                );
                if (used_idx - used_event - Wrapping(1u16)) >= (used_idx - old_idx) {
                    notify = false;
                }
            }
        }

        self.signalled_used = Some(used_idx);
        debug!("Needs notification: {:?}", notify);
        notify
    }
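
    // Worked example of the wrapping comparison above (the spec's
    // vring_need_event() check): with old_idx = 5, used_idx = 7 and
    // used_event = 6, the left-hand side is 7 - 6 - 1 = 0 and the right-hand
    // side is 7 - 5 = 2, so 0 < 2 and the device notifies. With used_event = 7
    // the left-hand side wraps to 0xffff, which is >= 2, so the notification
    // is suppressed until the driver moves used_event forward again.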
}

pub mod testing {
    use super::*;
    use std::marker::PhantomData;
    use std::mem;
    use vm_memory::Bytes;
    use vm_memory::{bitmap::AtomicBitmap, Address, GuestAddress, GuestUsize};

    type GuestMemoryMmap = vm_memory::GuestMemoryMmap<AtomicBitmap>;

    // Represents a location in GuestMemoryMmap which holds a given type.
    pub struct SomeplaceInMemory<'a, T> {
        pub location: GuestAddress,
        mem: &'a GuestMemoryMmap,
        phantom: PhantomData<*const T>,
    }

    // The ByteValued trait is required to use mem.read_obj and write_obj.
    impl<'a, T> SomeplaceInMemory<'a, T>
    where
        T: vm_memory::ByteValued,
    {
        fn new(location: GuestAddress, mem: &'a GuestMemoryMmap) -> Self {
            SomeplaceInMemory {
                location,
                mem,
                phantom: PhantomData,
            }
        }

        // Reads from the actual memory location.
        pub fn get(&self) -> T {
            self.mem.read_obj(self.location).unwrap()
        }

        // Writes to the actual memory location.
        pub fn set(&self, val: T) {
            self.mem.write_obj(val, self.location).unwrap()
        }

        // This function returns a place in memory which holds a value of type U, and starts
        // offset bytes after the current location.
        fn map_offset<U>(&self, offset: GuestUsize) -> SomeplaceInMemory<'a, U> {
            SomeplaceInMemory {
                location: self.location.checked_add(offset).unwrap(),
                mem: self.mem,
                phantom: PhantomData,
            }
        }

        // This function returns a place in memory which holds a value of type U, and starts
        // immediately after the end of self (which is location + sizeof(T)).
        fn next_place<U>(&self) -> SomeplaceInMemory<'a, U> {
            self.map_offset::<U>(mem::size_of::<T>() as u64)
        }

        fn end(&self) -> GuestAddress {
            self.location
                .checked_add(mem::size_of::<T>() as u64)
                .unwrap()
        }
    }

    // Represents a virtio descriptor in guest memory.
    pub struct VirtqDesc<'a> {
        pub addr: SomeplaceInMemory<'a, u64>,
        pub len: SomeplaceInMemory<'a, u32>,
        pub flags: SomeplaceInMemory<'a, u16>,
        pub next: SomeplaceInMemory<'a, u16>,
    }

    impl<'a> VirtqDesc<'a> {
        pub fn new(start: GuestAddress, mem: &'a GuestMemoryMmap) -> Self {
            assert_eq!(start.0 & 0xf, 0);

            let addr = SomeplaceInMemory::new(start, mem);
            let len = addr.next_place();
            let flags = len.next_place();
            let next = flags.next_place();

            VirtqDesc {
                addr,
                len,
                flags,
                next,
            }
        }

        fn start(&self) -> GuestAddress {
            self.addr.location
        }

        fn end(&self) -> GuestAddress {
            self.next.end()
        }

        pub fn set(&self, addr: u64, len: u32, flags: u16, next: u16) {
            self.addr.set(addr);
            self.len.set(len);
            self.flags.set(flags);
            self.next.set(next);
        }
    }

    // Represents a virtio queue ring. The only difference between the used and available rings,
    // is the ring element type.
    pub struct VirtqRing<'a, T> {
        pub flags: SomeplaceInMemory<'a, u16>,
        pub idx: SomeplaceInMemory<'a, u16>,
        pub ring: Vec<SomeplaceInMemory<'a, T>>,
        pub event: SomeplaceInMemory<'a, u16>,
    }

    impl<'a, T> VirtqRing<'a, T>
    where
        T: vm_memory::ByteValued,
    {
        fn new(
            start: GuestAddress,
            mem: &'a GuestMemoryMmap,
            qsize: u16,
            alignment: GuestUsize,
        ) -> Self {
            assert_eq!(start.0 & (alignment - 1), 0);

            let flags = SomeplaceInMemory::new(start, mem);
            let idx = flags.next_place();

            let mut ring = Vec::with_capacity(qsize as usize);

            ring.push(idx.next_place());

            for _ in 1..qsize as usize {
                let x = ring.last().unwrap().next_place();
                ring.push(x)
            }

            let event = ring.last().unwrap().next_place();

            flags.set(0);
            idx.set(0);
            event.set(0);

            VirtqRing {
                flags,
                idx,
                ring,
                event,
            }
        }

        pub fn end(&self) -> GuestAddress {
            self.event.end()
        }
    }

    #[repr(C)]
    #[derive(Clone, Copy, Default)]
    pub struct VirtqUsedElem {
        pub id: u32,
        pub len: u32,
    }

    unsafe impl vm_memory::ByteValued for VirtqUsedElem {}

    pub type VirtqAvail<'a> = VirtqRing<'a, u16>;
    pub type VirtqUsed<'a> = VirtqRing<'a, VirtqUsedElem>;

    pub struct VirtQueue<'a> {
        pub dtable: Vec<VirtqDesc<'a>>,
        pub avail: VirtqAvail<'a>,
        pub used: VirtqUsed<'a>,
    }

    impl<'a> VirtQueue<'a> {
        // We try to make sure things are aligned properly :-s
        pub fn new(start: GuestAddress, mem: &'a GuestMemoryMmap, qsize: u16) -> Self {
            // power of 2?
            assert!(qsize > 0 && qsize & (qsize - 1) == 0);

            let mut dtable = Vec::with_capacity(qsize as usize);

            let mut end = start;

            for _ in 0..qsize {
                let d = VirtqDesc::new(end, mem);
                end = d.end();
                dtable.push(d);
            }

            const AVAIL_ALIGN: u64 = 2;

            let avail = VirtqAvail::new(end, mem, qsize, AVAIL_ALIGN);

            const USED_ALIGN: u64 = 4;

            let mut x = avail.end().0;
            x = (x + USED_ALIGN - 1) & !(USED_ALIGN - 1);

            let used = VirtqUsed::new(GuestAddress(x), mem, qsize, USED_ALIGN);

            VirtQueue {
                dtable,
                avail,
                used,
            }
        }

        fn size(&self) -> u16 {
            self.dtable.len() as u16
        }

        pub fn dtable_start(&self) -> GuestAddress {
            self.dtable.first().unwrap().start()
        }

        pub fn avail_start(&self) -> GuestAddress {
            self.avail.flags.location
        }

        pub fn used_start(&self) -> GuestAddress {
            self.used.flags.location
        }

        // Creates a new Queue, using the underlying memory regions represented by the VirtQueue.
        pub fn create_queue(&self) -> Queue {
            let mut q = Queue::new(self.size());

            q.size = self.size();
            q.ready = true;
            q.desc_table = self.dtable_start();
            q.avail_ring = self.avail_start();
            q.used_ring = self.used_start();

            q
        }

        pub fn start(&self) -> GuestAddress {
            self.dtable_start()
        }

        pub fn end(&self) -> GuestAddress {
            self.used.end()
        }
    }
}

#[cfg(test)]
pub mod tests {
    use super::testing::*;
    pub use super::*;
    use vm_memory::{bitmap::AtomicBitmap, GuestAddress};

    type GuestMemoryMmap = vm_memory::GuestMemoryMmap<AtomicBitmap>;

    #[test]
    fn test_checked_new_descriptor_chain() {
        let m = &GuestMemoryMmap::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
        let vq = VirtQueue::new(GuestAddress(0), m, 16);

        assert!(vq.end().0 < 0x1000);

        // index >= queue_size
        assert!(DescriptorChain::checked_new(m, vq.start(), 16, 16, None).is_none());

        // desc_table address is way off
        assert!(
            DescriptorChain::checked_new(m, GuestAddress(0x00ff_ffff_ffff), 16, 0, None).is_none()
        );

        // the addr field of the descriptor is way off
        vq.dtable[0].addr.set(0x0fff_ffff_ffff);
        assert!(DescriptorChain::checked_new(m, vq.start(), 16, 0, None).is_none());

        // let's create some invalid chains

        {
            // the addr field of the desc is ok now
            vq.dtable[0].addr.set(0x1000);
            // ...but the length is too large
            vq.dtable[0].len.set(0xffff_ffff);
            assert!(DescriptorChain::checked_new(m, vq.start(), 16, 0, None).is_none());
        }

        {
            // the first desc has a normal len now, and the next_descriptor flag is set
            vq.dtable[0].len.set(0x1000);
            vq.dtable[0].flags.set(VIRTQ_DESC_F_NEXT);
            // ...but the index of the next descriptor is too large
            vq.dtable[0].next.set(16);

            assert!(DescriptorChain::checked_new(m, vq.start(), 16, 0, None).is_none());
        }

        // finally, let's test an ok chain

        {
            vq.dtable[0].next.set(1);
            vq.dtable[1].set(0x2000, 0x1000, 0, 0);

            let c = DescriptorChain::checked_new(m, vq.start(), 16, 0, None).unwrap();

            assert_eq!(c.mem as *const GuestMemoryMmap, m as *const GuestMemoryMmap);
            assert_eq!(c.desc_table, vq.start());
            assert_eq!(c.table_size, 16);
            assert_eq!(c.ttl, c.table_size);
            assert_eq!(c.index, 0);
            assert_eq!(c.addr, GuestAddress(0x1000));
            assert_eq!(c.len, 0x1000);
            assert_eq!(c.flags, VIRTQ_DESC_F_NEXT);
            assert_eq!(c.next, 1);

            assert!(c.next_descriptor().unwrap().next_descriptor().is_none());
        }
    }
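
    // A minimal sketch of splitting a chain with DescIter::readable() and
    // DescIter::writable(). The two-descriptor shape below (a driver-readable
    // buffer followed by a device-writable one) is only an illustrative
    // request layout, not one mandated by a particular device type.
    #[test]
    fn test_descriptor_chain_readable_writable() {
        let m = &GuestMemoryMmap::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
        let vq = VirtQueue::new(GuestAddress(0), m, 16);

        // Descriptor 0 is driver-readable and chains to descriptor 1, which is
        // device-writable and terminates the chain.
        vq.dtable[0].set(0x1000, 0x100, VIRTQ_DESC_F_NEXT, 1);
        vq.dtable[1].set(0x2000, 0x100, VIRTQ_DESC_F_WRITE, 0);

        let head = DescriptorChain::checked_new(m, vq.start(), 16, 0, None).unwrap();

        let readable: Vec<GuestAddress> = head
            .clone()
            .into_iter()
            .readable()
            .map(|d| d.addr)
            .collect();
        let writable: Vec<GuestAddress> = head.into_iter().writable().map(|d| d.addr).collect();

        assert_eq!(readable, vec![GuestAddress(0x1000)]);
        assert_eq!(writable, vec![GuestAddress(0x2000)]);
    }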

    #[test]
    fn test_new_from_descriptor_chain() {
        let m = &GuestMemoryMmap::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
        let vq = VirtQueue::new(GuestAddress(0), m, 16);

        // create a chain with a descriptor pointing to an indirect table
        vq.dtable[0].addr.set(0x1000);
        vq.dtable[0].len.set(0x1000);
        vq.dtable[0].next.set(0);
        vq.dtable[0].flags.set(VIRTQ_DESC_F_INDIRECT);

        let c = DescriptorChain::checked_new(m, vq.start(), 16, 0, None).unwrap();
        assert!(c.is_indirect());

        // create an indirect table with 4 chained descriptors
        let mut indirect_table = Vec::with_capacity(4);
        for j in 0..4 {
            let desc = VirtqDesc::new(GuestAddress(0x1000 + (j * 16)), m);
            desc.set(0x1000, 0x1000, VIRTQ_DESC_F_NEXT, (j + 1) as u16);
            indirect_table.push(desc);
        }

        // try to iterate through the indirect table descriptors
        let mut i = c.new_from_indirect().unwrap();
        for j in 0..4 {
            assert_eq!(i.flags, VIRTQ_DESC_F_NEXT);
            assert_eq!(i.next, j + 1);
            i = i.next_descriptor().unwrap();
        }
    }

    #[test]
    fn test_queue_and_iterator() {
        let m = &GuestMemoryMmap::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
        let vq = VirtQueue::new(GuestAddress(0), m, 16);

        let mut q = vq.create_queue();

        // q is currently valid
        assert!(q.is_valid(m));

        // shouldn't be valid when not marked as ready
        q.ready = false;
        assert!(!q.is_valid(m));
        q.ready = true;

        // or when size > max_size
        q.size = q.max_size << 1;
        assert!(!q.is_valid(m));
        q.size = q.max_size;

        // or when size is 0
        q.size = 0;
        assert!(!q.is_valid(m));
        q.size = q.max_size;

        // or when size is not a power of 2
        q.size = 11;
        assert!(!q.is_valid(m));
        q.size = q.max_size;

        // or if the various addresses are off

        q.desc_table = GuestAddress(0xffff_ffff);
        assert!(!q.is_valid(m));
        q.desc_table = GuestAddress(0x1001);
        assert!(!q.is_valid(m));
        q.desc_table = vq.dtable_start();

        q.avail_ring = GuestAddress(0xffff_ffff);
        assert!(!q.is_valid(m));
        q.avail_ring = GuestAddress(0x1001);
        assert!(!q.is_valid(m));
        q.avail_ring = vq.avail_start();

        q.used_ring = GuestAddress(0xffff_ffff);
        assert!(!q.is_valid(m));
        q.used_ring = GuestAddress(0x1001);
        assert!(!q.is_valid(m));
        q.used_ring = vq.used_start();

        {
            // an invalid queue should return an iterator with no next
            q.ready = false;
            let mut i = q.iter(m);
            assert!(i.next().is_none());
        }

        q.ready = true;

        // now let's create two simple descriptor chains

        {
            for j in 0..5 {
                vq.dtable[j].set(
                    0x1000 * (j + 1) as u64,
                    0x1000,
                    VIRTQ_DESC_F_NEXT,
                    (j + 1) as u16,
                );
            }

            // the chains are (0, 1) and (2, 3, 4)
            vq.dtable[1].flags.set(0);
            vq.dtable[4].flags.set(0);
            vq.avail.ring[0].set(0);
            vq.avail.ring[1].set(2);
            vq.avail.idx.set(2);

            let mut i = q.iter(m);

            {
                let mut c = i.next().unwrap();
                c = c.next_descriptor().unwrap();
                assert!(!c.has_next());
            }

            {
                let mut c = i.next().unwrap();
                c = c.next_descriptor().unwrap();
                c = c.next_descriptor().unwrap();
                assert!(!c.has_next());
            }
        }

        // also test go_to_previous_position() works as expected
        {
            assert!(q.iter(m).next().is_none());
            q.go_to_previous_position();
            let mut c = q.iter(m).next().unwrap();
            c = c.next_descriptor().unwrap();
            c = c.next_descriptor().unwrap();
            assert!(!c.has_next());
        }
    }
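
    // A minimal sketch of the ring index helpers: the driver publishes work by
    // bumping the avail ring index and the device catches up by publishing
    // used entries.
    #[test]
    fn test_available_descriptors() {
        let m = &GuestMemoryMmap::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
        let vq = VirtQueue::new(GuestAddress(0), m, 16);
        let mut q = vq.create_queue();

        // Nothing has been offered yet: both ring indexes read back as 0.
        assert_eq!(q.avail_index_from_memory(m).unwrap(), 0);
        assert_eq!(q.used_index_from_memory(m).unwrap(), 0);
        assert!(!q.available_descriptors(m).unwrap());

        // The driver offers one single-descriptor chain.
        vq.dtable[0].set(0x1000, 0x1000, 0, 0);
        vq.avail.ring[0].set(0);
        vq.avail.idx.set(1);
        assert!(q.available_descriptors(m).unwrap());

        // Once the device publishes the matching used entry, the indexes are
        // level again and nothing appears available.
        q.add_used(m, 0, 0x1000);
        assert_eq!(q.used_index_from_memory(m).unwrap(), 1);
        assert!(!q.available_descriptors(m).unwrap());
    }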

    #[test]
    fn test_add_used() {
        let m = &GuestMemoryMmap::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
        let vq = VirtQueue::new(GuestAddress(0), m, 16);

        let mut q = vq.create_queue();
        assert_eq!(vq.used.idx.get(), 0);

        // index too large
        q.add_used(m, 16, 0x1000);
        assert_eq!(vq.used.idx.get(), 0);

        // should be ok
        q.add_used(m, 1, 0x1000);
        assert_eq!(vq.used.idx.get(), 1);
        let x = vq.used.ring[0].get();
        assert_eq!(x.id, 1);
        assert_eq!(x.len, 0x1000);
    }
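
    // A minimal sketch of the VIRTIO_F_RING_EVENT_IDX machinery: the device
    // mirrors the avail index into avail_event on the used ring, reads the
    // driver's used_event from the avail ring, and feeds it into
    // needs_notification() to decide whether an interrupt is required.
    #[test]
    fn test_event_idx_notification() {
        use std::num::Wrapping;

        let m = &GuestMemoryMmap::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
        let vq = VirtQueue::new(GuestAddress(0), m, 16);
        let mut q = vq.create_queue();

        // update_avail_event() copies the avail ring index into the
        // avail_event slot of the used ring.
        vq.avail.idx.set(3);
        q.update_avail_event(m);
        assert_eq!(vq.used.event.get(), 3);

        // get_used_event() reads the used_event slot of the avail ring.
        vq.avail.event.set(1);
        assert_eq!(q.get_used_event(m), Some(Wrapping(1)));

        // Without EVENT_IDX negotiated, every used entry needs a notification.
        assert!(q.needs_notification(m, Wrapping(1)));

        // With EVENT_IDX, the first call always notifies and records used_idx.
        q.set_event_idx(true);
        assert!(q.needs_notification(m, Wrapping(1)));

        // used_event = 1, previously signalled = 1, new used_idx = 2:
        // (2 - 1 - 1) < (2 - 1), so the driver still wants a notification.
        assert!(q.needs_notification(m, Wrapping(2)));

        // used_event = 4 is ahead of the new used_idx, so no notification.
        vq.avail.event.set(4);
        assert!(!q.needs_notification(m, Wrapping(3)));
    }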
}