// Copyright 2019 Intel Corporation. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//
// Portions Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//
// Portions Copyright 2017 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the THIRD-PARTY file.

use std::os::unix::io::AsRawFd;
use std::path::PathBuf;
use std::sync::atomic::AtomicBool;
use std::sync::{Arc, Barrier, RwLock};
use std::{io, result};

use anyhow::anyhow;
use byteorder::{ByteOrder, LittleEndian};
use seccompiler::SeccompAction;
use serde::{Deserialize, Serialize};
use virtio_queue::{Queue, QueueOwnedT, QueueT};
use vm_memory::{GuestAddressSpace, GuestMemoryAtomic};
use vm_migration::{Migratable, MigratableError, Pausable, Snapshot, Snapshottable, Transportable};
use vm_virtio::AccessPlatform;
use vmm_sys_util::eventfd::EventFd;
use super::{VsockBackend, VsockPacket};
use crate::seccomp_filters::Thread;
use crate::thread_helper::spawn_virtio_thread;
use crate::{
    ActivateResult, EpollHelper, EpollHelperError, EpollHelperHandler, Error as DeviceError,
    GuestMemoryMmap, VirtioCommon, VirtioDevice, VirtioDeviceType, VirtioInterrupt,
    VirtioInterruptType, EPOLL_HELPER_EVENT_LAST, VIRTIO_F_IN_ORDER, VIRTIO_F_IOMMU_PLATFORM,
    VIRTIO_F_VERSION_1,
};

// `Vsock` is the `VirtioDevice` implementation for our vsock device. It handles the virtio-level
// device logic: feature negotiation, device configuration, and device activation.
// The run-time device logic (i.e. event-driven data handling) is implemented by
// `VsockEpollHandler`, defined below.
//
// We aim to conform to the VirtIO v1.1 spec:
// <https://docs.oasis-open.org/virtio/virtio/v1.1/virtio-v1.1.html>
//
// The vsock device takes two input parameters: a CID to identify the device, and a `VsockBackend`
// to use for offloading vsock traffic.
//
// Upon activation, the vsock device creates its `VsockEpollHandler`, passes it the file
// descriptors it needs to watch, and registers those descriptors with the VMM `EpollHelper`.
// From then on, the `VsockEpollHandler` is notified whenever an event occurs on the registered
// FDs:
// - the RX queue FD;
// - the TX queue FD;
// - the event queue FD; and
// - the backend FD.
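// The virtio-vsock device uses three virtqueues, in this fixed order:
// 0 = RX (host-to-guest), 1 = TX (guest-to-host), 2 = event. The queue
// indices used throughout the handler below rely on this ordering.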
const QUEUE_SIZE: u16 = 256;
const NUM_QUEUES: usize = 3;
const QUEUE_SIZES: &[u16] = &[QUEUE_SIZE; NUM_QUEUES];

// New descriptors are pending on the rx queue.
pub const RX_QUEUE_EVENT: u16 = EPOLL_HELPER_EVENT_LAST + 1;
// New descriptors are pending on the tx queue.
pub const TX_QUEUE_EVENT: u16 = EPOLL_HELPER_EVENT_LAST + 2;
// New descriptors are pending on the event queue.
pub const EVT_QUEUE_EVENT: u16 = EPOLL_HELPER_EVENT_LAST + 3;
// Notification coming from the backend.
pub const BACKEND_EVENT: u16 = EPOLL_HELPER_EVENT_LAST + 4;
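// These event IDs start above EPOLL_HELPER_EVENT_LAST so they cannot collide with the control
// events (e.g. kill/pause) that `EpollHelper` reserves for itself.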

/// The `VsockEpollHandler` implements the runtime logic of our vsock device:
/// 1. Respond to TX queue events by wrapping virtio buffers into `VsockPacket`s, then sending those
///    packets to the `VsockBackend`;
/// 2. Forward backend FD event notifications to the `VsockBackend`;
/// 3. Fetch incoming packets from the `VsockBackend` and place them into the virtio RX queue;
/// 4. Whenever we have processed some virtio buffers (either TX or RX), let the driver know by
///    raising our assigned IRQ.
///
/// In a nutshell, the `VsockEpollHandler` logic looks like this:
/// - on TX queue event:
///   - fetch all packets from the TX queue and send them to the backend; then
///   - if the backend has queued up any incoming packets, fetch them into any available RX buffers.
/// - on RX queue event:
///   - fetch any incoming packets, queued up by the backend, into newly available RX buffers.
/// - on backend event:
///   - forward the event to the backend; then
///   - again, attempt to fetch any incoming packets queued by the backend into virtio RX buffers.
///
pub struct VsockEpollHandler<B: VsockBackend> {
    pub mem: GuestMemoryAtomic<GuestMemoryMmap>,
    pub queues: Vec<Queue>,
    pub queue_evts: Vec<EventFd>,
    pub kill_evt: EventFd,
    pub pause_evt: EventFd,
    pub interrupt_cb: Arc<dyn VirtioInterrupt>,
    pub backend: Arc<RwLock<B>>,
    pub access_platform: Option<Arc<dyn AccessPlatform>>,
}

impl<B> VsockEpollHandler<B>
where
    B: VsockBackend,
{
    /// Signal the guest driver that we've used some virtio buffers that it had previously made
    /// available.
    ///
    fn signal_used_queue(&self, queue_index: u16) -> result::Result<(), DeviceError> {
        debug!("vsock: raising IRQ");

        self.interrupt_cb
            .trigger(VirtioInterruptType::Queue(queue_index))
            .map_err(|e| {
                error!("Failed to signal used queue: {:?}", e);
                DeviceError::FailedSignalingUsedQueue(e)
            })
    }

    /// Walk the driver-provided RX queue buffers and attempt to fill them up with any data that we
    /// have pending.
    ///
    fn process_rx(&mut self) -> result::Result<(), DeviceError> {
        debug!("vsock: epoll_handler::process_rx()");

        let mut used_descs = false;

        while let Some(mut desc_chain) = self.queues[0].pop_descriptor_chain(self.mem.memory()) {
            let used_len = match VsockPacket::from_rx_virtq_head(
                &mut desc_chain,
                self.access_platform.as_ref(),
            ) {
                Ok(mut pkt) => {
                    if self.backend.write().unwrap().recv_pkt(&mut pkt).is_ok() {
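                        // Report the used length back to the driver: header bytes plus payload
                        // bytes.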
                        pkt.hdr().len() as u32 + pkt.len()
                    } else {
                        // We are using a consuming iterator over the virtio buffers, so if we
                        // can't fill in this buffer, we need to undo the last iterator step.
                        self.queues[0].go_to_previous_position();
                        break;
                    }
                }
                Err(e) => {
                    warn!("vsock: RX queue error: {:?}", e);
                    0
                }
            };

            self.queues[0]
                .add_used(desc_chain.memory(), desc_chain.head_index(), used_len)
                .map_err(DeviceError::QueueAddUsed)?;
            used_descs = true;
        }

        if used_descs {
            self.signal_used_queue(0)
        } else {
            Ok(())
        }
    }

    /// Walk the driver-provided TX queue buffers, package them up as vsock packets, and send them to
    /// the backend for processing.
    ///
    fn process_tx(&mut self) -> result::Result<(), DeviceError> {
        debug!("vsock: epoll_handler::process_tx()");

        let mut used_descs = false;

        while let Some(mut desc_chain) = self.queues[1].pop_descriptor_chain(self.mem.memory()) {
            let pkt = match VsockPacket::from_tx_virtq_head(
                &mut desc_chain,
                self.access_platform.as_ref(),
            ) {
                Ok(pkt) => pkt,
                Err(e) => {
                    error!("vsock: error reading TX packet: {:?}", e);
                    self.queues[1]
                        .add_used(desc_chain.memory(), desc_chain.head_index(), 0)
                        .map_err(DeviceError::QueueAddUsed)?;
                    used_descs = true;
                    continue;
                }
            };

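            // The backend can't take this packet right now; leave the descriptor chain in the
            // queue and retry it when the backend signals that it has freed up resources.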
            if self.backend.write().unwrap().send_pkt(&pkt).is_err() {
                self.queues[1].go_to_previous_position();
                break;
            }

            self.queues[1]
                .add_used(desc_chain.memory(), desc_chain.head_index(), 0)
                .map_err(DeviceError::QueueAddUsed)?;
            used_descs = true;
        }

        if used_descs {
            self.signal_used_queue(1)
        } else {
            Ok(())
        }
    }

    fn run(
        &mut self,
        paused: Arc<AtomicBool>,
        paused_sync: Arc<Barrier>,
    ) -> result::Result<(), EpollHelperError> {
        let mut helper = EpollHelper::new(&self.kill_evt, &self.pause_evt)?;
        helper.add_event(self.queue_evts[0].as_raw_fd(), RX_QUEUE_EVENT)?;
        helper.add_event(self.queue_evts[1].as_raw_fd(), TX_QUEUE_EVENT)?;
        helper.add_event(self.queue_evts[2].as_raw_fd(), EVT_QUEUE_EVENT)?;
        helper.add_event(self.backend.read().unwrap().get_polled_fd(), BACKEND_EVENT)?;
        helper.run(paused, paused_sync, self)?;

        Ok(())
    }
}

impl<B> EpollHelperHandler for VsockEpollHandler<B>
where
    B: VsockBackend,
{
    fn handle_event(
        &mut self,
        _helper: &mut EpollHelper,
        event: &epoll::Event,
    ) -> result::Result<(), EpollHelperError> {
        let evset = match epoll::Events::from_bits(event.events) {
            Some(evset) => evset,
            None => {
                let evbits = event.events;
                warn!("epoll: ignoring unknown event set: 0x{:x}", evbits);
                return Ok(());
            }
        };

        let ev_type = event.data as u16;
        match ev_type {
            RX_QUEUE_EVENT => {
                debug!("vsock: RX queue event");
                self.queue_evts[0].read().map_err(|e| {
                    EpollHelperError::HandleEvent(anyhow!("Failed to get RX queue event: {:?}", e))
                })?;
                if self.backend.read().unwrap().has_pending_rx() {
                    self.process_rx().map_err(|e| {
                        EpollHelperError::HandleEvent(anyhow!(
                            "Failed to process RX queue: {:?}",
                            e
                        ))
                    })?;
                }
            }
            TX_QUEUE_EVENT => {
                debug!("vsock: TX queue event");
                self.queue_evts[1].read().map_err(|e| {
                    EpollHelperError::HandleEvent(anyhow!("Failed to get TX queue event: {:?}", e))
                })?;

                self.process_tx().map_err(|e| {
                    EpollHelperError::HandleEvent(anyhow!("Failed to process TX queue: {:?}", e))
                })?;

                // The backend may have queued up responses to the packets we sent during TX queue
                // processing. If that happened, we need to fetch those responses and place them
                // into RX buffers.
                if self.backend.read().unwrap().has_pending_rx() {
                    self.process_rx().map_err(|e| {
                        EpollHelperError::HandleEvent(anyhow!(
                            "Failed to process RX queue: {:?}",
                            e
                        ))
                    })?;
                }
            }
            EVT_QUEUE_EVENT => {
                debug!("vsock: EVT queue event");
                self.queue_evts[2].read().map_err(|e| {
                    EpollHelperError::HandleEvent(anyhow!("Failed to get EVT queue event: {:?}", e))
                })?;
            }
            BACKEND_EVENT => {
                debug!("vsock: backend event");
                self.backend.write().unwrap().notify(evset);
                // After the backend has been kicked, it might've freed up some resources, so we
                // can attempt to send it more data to process.
                // In particular, if `self.backend.send_pkt()` halted the TX queue processing (by
                // returning an error) at some point in the past, now is the time to try walking the
                // TX queue again.
                self.process_tx().map_err(|e| {
                    EpollHelperError::HandleEvent(anyhow!("Failed to process TX queue: {:?}", e))
                })?;
                if self.backend.read().unwrap().has_pending_rx() {
                    self.process_rx().map_err(|e| {
                        EpollHelperError::HandleEvent(anyhow!(
                            "Failed to process RX queue: {:?}",
                            e
                        ))
                    })?;
                }
            }
            _ => {
                return Err(EpollHelperError::HandleEvent(anyhow!(
                    "Unknown event for virtio-vsock"
                )));
            }
        }

        Ok(())
    }
}

/// Virtio device exposing a virtual socket to the guest.
pub struct Vsock<B: VsockBackend> {
    common: VirtioCommon,
    id: String,
    cid: u64,
    backend: Arc<RwLock<B>>,
    path: PathBuf,
    seccomp_action: SeccompAction,
    exit_evt: EventFd,
}

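/// Migratable state of the vsock device: the feature bits the device offered and the bits the
/// guest driver acknowledged.
///
/// A minimal round-trip sketch (illustrative only: it assumes a `serde_json` dependency for the
/// encoding, while the actual transport format is chosen by the `vm-migration` crate):
///
/// ```ignore
/// // VIRTIO_F_VERSION_1 is feature bit 32.
/// let state = VsockState { avail_features: 1 << 32, acked_features: 1 << 32 };
/// let encoded = serde_json::to_string(&state).unwrap();
/// let decoded: VsockState = serde_json::from_str(&encoded).unwrap();
/// assert_eq!(decoded.acked_features, state.acked_features);
/// ```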
#[derive(Serialize, Deserialize)]
pub struct VsockState {
    pub avail_features: u64,
    pub acked_features: u64,
}

impl<B> Vsock<B>
where
    B: VsockBackend + Sync,
{
    /// Create a new virtio-vsock device with the given VM CID and vsock
    /// backend.
    #[allow(clippy::too_many_arguments)]
    pub fn new(
        id: String,
        cid: u32,
        path: PathBuf,
        backend: B,
        iommu: bool,
        seccomp_action: SeccompAction,
        exit_evt: EventFd,
        state: Option<VsockState>,
    ) -> io::Result<Vsock<B>> {
        let (avail_features, acked_features, paused) = if let Some(state) = state {
            info!("Restoring virtio-vsock {}", id);
            (state.avail_features, state.acked_features, true)
        } else {
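            // Fresh device: offer VIRTIO_F_VERSION_1 (modern virtio) and VIRTIO_F_IN_ORDER
            // (the device uses descriptors in the same order the driver made them available).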
            let mut avail_features = (1u64 << VIRTIO_F_VERSION_1) | (1u64 << VIRTIO_F_IN_ORDER);

            if iommu {
                avail_features |= 1u64 << VIRTIO_F_IOMMU_PLATFORM;
            }
            (avail_features, 0, false)
        };

        Ok(Vsock {
            common: VirtioCommon {
                device_type: VirtioDeviceType::Vsock as u32,
                avail_features,
                acked_features,
                paused_sync: Some(Arc::new(Barrier::new(2))),
                queue_sizes: QUEUE_SIZES.to_vec(),
                min_queues: NUM_QUEUES as u16,
                paused: Arc::new(AtomicBool::new(paused)),
                ..Default::default()
            },
            id,
            cid: cid.into(),
            backend: Arc::new(RwLock::new(backend)),
            path,
            seccomp_action,
            exit_evt,
        })
    }

    fn state(&self) -> VsockState {
        VsockState {
            avail_features: self.common.avail_features,
            acked_features: self.common.acked_features,
        }
    }

    #[cfg(fuzzing)]
    pub fn wait_for_epoll_threads(&mut self) {
        self.common.wait_for_epoll_threads();
    }
}

impl<B> Drop for Vsock<B>
where
    B: VsockBackend,
{
    fn drop(&mut self) {
        if let Some(kill_evt) = self.common.kill_evt.take() {
            // Ignore the result because there is nothing we can do about it.
            let _ = kill_evt.write(1);
        }
        self.common.wait_for_epoll_threads();
    }
}

impl<B> VirtioDevice for Vsock<B>
where
    B: VsockBackend + Sync + 'static,
{
    fn device_type(&self) -> u32 {
        self.common.device_type
    }

    fn queue_max_sizes(&self) -> &[u16] {
        &self.common.queue_sizes
    }

    fn features(&self) -> u64 {
        self.common.avail_features
    }

    fn ack_features(&mut self, value: u64) {
        self.common.ack_features(value)
    }

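    // The virtio-vsock config space is a single little-endian 64-bit guest CID (virtio v1.1,
    // section 5.10.4). Drivers may read it as one 64-bit access or as two 32-bit halves. A
    // worked illustration, with a hypothetical CID:
    //
    //     let cid: u64 = 0x0000_0001_0000_0003;
    //     // 4-byte read at offset 0 -> low word:  0x0000_0003
    //     // 4-byte read at offset 4 -> high word: 0x0000_0001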
    fn read_config(&self, offset: u64, data: &mut [u8]) {
        match offset {
            0 if data.len() == 8 => LittleEndian::write_u64(data, self.cid),
            0 if data.len() == 4 => LittleEndian::write_u32(data, (self.cid & 0xffff_ffff) as u32),
            4 if data.len() == 4 => {
                LittleEndian::write_u32(data, ((self.cid >> 32) & 0xffff_ffff) as u32)
            }
            _ => warn!(
                "vsock: virtio-vsock received invalid read request of {} bytes at offset {}",
                data.len(),
                offset
            ),
        }
    }

    fn activate(
        &mut self,
        mem: GuestMemoryAtomic<GuestMemoryMmap>,
        interrupt_cb: Arc<dyn VirtioInterrupt>,
        queues: Vec<(usize, Queue, EventFd)>,
    ) -> ActivateResult {
        self.common.activate(&queues, &interrupt_cb)?;
        let (kill_evt, pause_evt) = self.common.dup_eventfds();

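        // Queues arrive as (index, queue, eventfd) triples, ordered by index
        // (0 = RX, 1 = TX, 2 = event); the handler relies on that ordering.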
        let mut virtqueues = Vec::new();
        let mut queue_evts = Vec::new();
        for (_, queue, queue_evt) in queues {
            virtqueues.push(queue);
            queue_evts.push(queue_evt);
        }

        let mut handler = VsockEpollHandler {
            mem,
            queues: virtqueues,
            queue_evts,
            kill_evt,
            pause_evt,
            interrupt_cb,
            backend: self.backend.clone(),
            access_platform: self.common.access_platform.clone(),
        };

        let paused = self.common.paused.clone();
        let paused_sync = self.common.paused_sync.clone();
        let mut epoll_threads = Vec::new();

        spawn_virtio_thread(
            &self.id,
            &self.seccomp_action,
            Thread::VirtioVsock,
            &mut epoll_threads,
            &self.exit_evt,
            move || handler.run(paused, paused_sync.unwrap()),
        )?;

        self.common.epoll_threads = Some(epoll_threads);

        event!("virtio-device", "activated", "id", &self.id);
        Ok(())
    }

    fn reset(&mut self) -> Option<Arc<dyn VirtioInterrupt>> {
        let result = self.common.reset();
        event!("virtio-device", "reset", "id", &self.id);
        result
    }

    fn shutdown(&mut self) {
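        // Remove the backend's UNIX socket from the host filesystem, so a later VM start can
        // bind it again.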
        std::fs::remove_file(&self.path).ok();
    }

    fn set_access_platform(&mut self, access_platform: Arc<dyn AccessPlatform>) {
        self.common.set_access_platform(access_platform)
    }
}

impl<B> Pausable for Vsock<B>
where
    B: VsockBackend + Sync + 'static,
{
    fn pause(&mut self) -> result::Result<(), MigratableError> {
        self.common.pause()
    }

    fn resume(&mut self) -> result::Result<(), MigratableError> {
        self.common.resume()
    }
}

impl<B> Snapshottable for Vsock<B>
where
    B: VsockBackend + Sync + 'static,
{
    fn id(&self) -> String {
        self.id.clone()
    }

    fn snapshot(&mut self) -> std::result::Result<Snapshot, MigratableError> {
        Snapshot::new_from_state(&self.state())
    }
}
impl<B> Transportable for Vsock<B> where B: VsockBackend + Sync + 'static {}
impl<B> Migratable for Vsock<B> where B: VsockBackend + Sync + 'static {}

#[cfg(test)]
mod tests {
    use libc::EFD_NONBLOCK;

    use super::super::tests::{NoopVirtioInterrupt, TestContext};
    use super::super::*;
    use super::*;
    use crate::ActivateError;

    #[test]
    fn test_virtio_device() {
        let mut ctx = TestContext::new();
        let avail_features = (1u64 << VIRTIO_F_VERSION_1) | (1u64 << VIRTIO_F_IN_ORDER);
        let device_features = avail_features;
        let driver_features: u64 = avail_features | 1 | (1 << 32);
        let device_pages = [
            (device_features & 0xffff_ffff) as u32,
            (device_features >> 32) as u32,
        ];
        let driver_pages = [
            (driver_features & 0xffff_ffff) as u32,
            (driver_features >> 32) as u32,
        ];
        assert_eq!(ctx.device.device_type(), VirtioDeviceType::Vsock as u32);
        assert_eq!(ctx.device.queue_max_sizes(), QUEUE_SIZES);
        assert_eq!(ctx.device.features() as u32, device_pages[0]);
        assert_eq!((ctx.device.features() >> 32) as u32, device_pages[1]);

        // Ack device features, page 0.
        ctx.device.ack_features(u64::from(driver_pages[0]));
        // Ack device features, page 1.
        ctx.device.ack_features(u64::from(driver_pages[1]) << 32);
        // Check that no side effects are present, and that the acked features are exactly the same
        // as the device features.
        assert_eq!(
            ctx.device.common.acked_features,
            device_features & driver_features
        );

        // Test reading 32-bit chunks.
        let mut data = [0u8; 8];
        ctx.device.read_config(0, &mut data[..4]);
        assert_eq!(
            u64::from(LittleEndian::read_u32(&data)),
            ctx.cid & 0xffff_ffff
        );
        ctx.device.read_config(4, &mut data[4..]);
        assert_eq!(
            u64::from(LittleEndian::read_u32(&data[4..])),
            (ctx.cid >> 32) & 0xffff_ffff
        );

        // Test reading 64-bit.
        let mut data = [0u8; 8];
        ctx.device.read_config(0, &mut data);
        assert_eq!(LittleEndian::read_u64(&data), ctx.cid);

        // Check that out-of-bounds reading doesn't mutate the destination buffer.
        let mut data = [0u8, 1, 2, 3, 4, 5, 6, 7];
        ctx.device.read_config(2, &mut data);
        assert_eq!(data, [0u8, 1, 2, 3, 4, 5, 6, 7]);

        // Just covering lines here, since the vsock device has no writable config.
        // A warning is logged, however, if the guest driver attempts to write any config data.
        ctx.device.write_config(0, &data[..4]);

        let memory = GuestMemoryAtomic::new(ctx.mem.clone());

        // Test a bad activation.
        let bad_activate =
            ctx.device
                .activate(memory.clone(), Arc::new(NoopVirtioInterrupt {}), Vec::new());
        match bad_activate {
            Err(ActivateError::BadActivate) => (),
            other => panic!("{other:?}"),
        }

        // Test a correct activation.
        ctx.device
            .activate(
                memory,
                Arc::new(NoopVirtioInterrupt {}),
                vec![
                    (
                        0,
                        Queue::new(256).unwrap(),
                        EventFd::new(EFD_NONBLOCK).unwrap(),
                    ),
                    (
                        1,
                        Queue::new(256).unwrap(),
                        EventFd::new(EFD_NONBLOCK).unwrap(),
                    ),
                    (
                        2,
                        Queue::new(256).unwrap(),
                        EventFd::new(EFD_NONBLOCK).unwrap(),
                    ),
                ],
            )
            .unwrap();
    }

    #[test]
    fn test_irq() {
        // Test case: successful IRQ signaling.
        {
            let test_ctx = TestContext::new();
            let ctx = test_ctx.create_epoll_handler_context();

            let _queue: Queue = Queue::new(256).unwrap();
            ctx.handler.signal_used_queue(0).unwrap();
        }
    }

    #[test]
    fn test_txq_event() {
        // Test case:
        // - the driver has something to send (there's data in the TX queue); and
        // - the backend has no pending RX data.
        {
            let test_ctx = TestContext::new();
            let mut ctx = test_ctx.create_epoll_handler_context();

            ctx.handler.backend.write().unwrap().set_pending_rx(false);
            ctx.signal_txq_event();

            // The available TX descriptor should have been used.
            assert_eq!(ctx.guest_txvq.used.idx.get(), 1);
            // The available RX descriptor should be untouched.
            assert_eq!(ctx.guest_rxvq.used.idx.get(), 0);
        }

        // Test case:
        // - the driver has something to send (there's data in the TX queue); and
        // - the backend also has some pending RX data.
        {
            let test_ctx = TestContext::new();
            let mut ctx = test_ctx.create_epoll_handler_context();

            ctx.handler.backend.write().unwrap().set_pending_rx(true);
            ctx.signal_txq_event();

            // Both available RX and TX descriptors should have been used.
            assert_eq!(ctx.guest_txvq.used.idx.get(), 1);
            assert_eq!(ctx.guest_rxvq.used.idx.get(), 1);
        }

        // Test case:
        // - the driver has something to send (there's data in the TX queue); and
        // - the backend errors out and cannot process the TX queue.
        {
            let test_ctx = TestContext::new();
            let mut ctx = test_ctx.create_epoll_handler_context();

            ctx.handler.backend.write().unwrap().set_pending_rx(false);
            ctx.handler
                .backend
                .write()
                .unwrap()
                .set_tx_err(Some(VsockError::NoData));
            ctx.signal_txq_event();

            // Both RX and TX queues should be untouched.
            assert_eq!(ctx.guest_txvq.used.idx.get(), 0);
            assert_eq!(ctx.guest_rxvq.used.idx.get(), 0);
        }

        // Test case:
        // - the driver supplied a malformed TX buffer.
        {
            let test_ctx = TestContext::new();
            let mut ctx = test_ctx.create_epoll_handler_context();

            // Invalidate the packet header descriptor by setting its length to 0.
            ctx.guest_txvq.dtable[0].len.set(0);
            ctx.signal_txq_event();

            // The available descriptor should have been consumed, but no packet should have
            // reached the backend.
            assert_eq!(ctx.guest_txvq.used.idx.get(), 1);
            assert_eq!(ctx.handler.backend.read().unwrap().tx_ok_cnt, 0);
        }

        // Test case: spurious TXQ_EVENT.
        {
            let test_ctx = TestContext::new();
            let mut ctx = test_ctx.create_epoll_handler_context();

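            // The TX queue eventfd was never written, so reading it fails and handle_event()
            // propagates the error.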
            let events = epoll::Events::EPOLLIN;
            let event = epoll::Event::new(events, TX_QUEUE_EVENT as u64);
            let mut epoll_helper =
                EpollHelper::new(&ctx.handler.kill_evt, &ctx.handler.pause_evt).unwrap();

            ctx.handler
                .handle_event(&mut epoll_helper, &event)
                .expect_err("handle_event() should have failed");
        }
    }

    #[test]
    fn test_rxq_event() {
        // Test case:
        // - there is pending RX data in the backend; and
        // - the driver makes RX buffers available; and
        // - the backend errors out when attempting to receive data.
        {
            let test_ctx = TestContext::new();
            let mut ctx = test_ctx.create_epoll_handler_context();

            ctx.handler.backend.write().unwrap().set_pending_rx(true);
            ctx.handler
                .backend
                .write()
                .unwrap()
                .set_rx_err(Some(VsockError::NoData));
            ctx.signal_rxq_event();

            // The available RX buffer should've been left untouched.
            assert_eq!(ctx.guest_rxvq.used.idx.get(), 0);
        }

        // Test case:
        // - there is pending RX data in the backend; and
        // - the driver makes RX buffers available; and
        // - the backend successfully places its RX data into the queue.
        {
            let test_ctx = TestContext::new();
            let mut ctx = test_ctx.create_epoll_handler_context();

            ctx.handler.backend.write().unwrap().set_pending_rx(true);
            ctx.signal_rxq_event();

            // The available RX buffer should have been used.
            assert_eq!(ctx.guest_rxvq.used.idx.get(), 1);
        }

        // Test case: the driver provided a malformed RX descriptor chain.
        {
            let test_ctx = TestContext::new();
            let mut ctx = test_ctx.create_epoll_handler_context();

            // Invalidate the packet header descriptor by setting its length to 0.
            ctx.guest_rxvq.dtable[0].len.set(0);

            // The chain should've been processed, without employing the backend.
            ctx.handler.process_rx().unwrap();
            assert_eq!(ctx.guest_rxvq.used.idx.get(), 1);
            assert_eq!(ctx.handler.backend.read().unwrap().rx_ok_cnt, 0);
        }

        // Test case: spurious RXQ_EVENT.
        {
            let test_ctx = TestContext::new();
            let mut ctx = test_ctx.create_epoll_handler_context();
            ctx.handler.backend.write().unwrap().set_pending_rx(false);

            let events = epoll::Events::EPOLLIN;
            let event = epoll::Event::new(events, RX_QUEUE_EVENT as u64);
            let mut epoll_helper =
                EpollHelper::new(&ctx.handler.kill_evt, &ctx.handler.pause_evt).unwrap();

            assert_eq!(ctx.guest_rxvq.used.idx.get(), 0);
            ctx.handler
                .handle_event(&mut epoll_helper, &event)
                .expect_err("handle_event() should have failed");
        }
    }

    #[test]
    fn test_evq_event() {
        // Test case: spurious EVQ_EVENT.
        {
            let test_ctx = TestContext::new();
            let mut ctx = test_ctx.create_epoll_handler_context();
            ctx.handler.backend.write().unwrap().set_pending_rx(false);

            let events = epoll::Events::EPOLLIN;
            let event = epoll::Event::new(events, EVT_QUEUE_EVENT as u64);
            let mut epoll_helper =
                EpollHelper::new(&ctx.handler.kill_evt, &ctx.handler.pause_evt).unwrap();

            assert_eq!(ctx.guest_evvq.used.idx.get(), 0);

            ctx.handler
                .handle_event(&mut epoll_helper, &event)
                .expect_err("handle_event() should have failed");
        }
    }

    #[test]
    fn test_backend_event() {
        // Test case:
        // - a backend event is received; and
        // - the backend has pending RX data.
        {
            let test_ctx = TestContext::new();
            let mut ctx = test_ctx.create_epoll_handler_context();

            ctx.handler.backend.write().unwrap().set_pending_rx(true);

            let events = epoll::Events::EPOLLIN;
            let event = epoll::Event::new(events, BACKEND_EVENT as u64);
            let mut epoll_helper =
                EpollHelper::new(&ctx.handler.kill_evt, &ctx.handler.pause_evt).unwrap();
            ctx.handler.handle_event(&mut epoll_helper, &event).unwrap();

            // The backend should've received this event.
            assert_eq!(
                ctx.handler.backend.read().unwrap().evset,
                Some(epoll::Events::EPOLLIN)
            );
            // TX queue processing should've been triggered.
            assert_eq!(ctx.guest_txvq.used.idx.get(), 1);
            // RX queue processing should've been triggered.
            assert_eq!(ctx.guest_rxvq.used.idx.get(), 1);
        }

        // Test case:
        // - a backend event is received; and
        // - the backend doesn't have any pending RX data.
        {
            let test_ctx = TestContext::new();
            let mut ctx = test_ctx.create_epoll_handler_context();

            ctx.handler.backend.write().unwrap().set_pending_rx(false);

            let events = epoll::Events::EPOLLIN;
            let event = epoll::Event::new(events, BACKEND_EVENT as u64);
            let mut epoll_helper =
                EpollHelper::new(&ctx.handler.kill_evt, &ctx.handler.pause_evt).unwrap();
            ctx.handler.handle_event(&mut epoll_helper, &event).unwrap();

            // The backend should've received this event.
            assert_eq!(
                ctx.handler.backend.read().unwrap().evset,
                Some(epoll::Events::EPOLLIN)
            );
            // TX queue processing should've been triggered.
            assert_eq!(ctx.guest_txvq.used.idx.get(), 1);
            // The RX queue should've been left untouched.
            assert_eq!(ctx.guest_rxvq.used.idx.get(), 0);
        }
    }

    #[test]
    fn test_unknown_event() {
        let test_ctx = TestContext::new();
        let mut ctx = test_ctx.create_epoll_handler_context();

        let events = epoll::Events::EPOLLIN;
        let event = epoll::Event::new(events, 0xff);
        let mut epoll_helper =
            EpollHelper::new(&ctx.handler.kill_evt, &ctx.handler.pause_evt).unwrap();

        ctx.handler
            .handle_event(&mut epoll_helper, &event)
            .expect_err("handle_event() should have failed");
    }
}