xref: /cloud-hypervisor/virtio-devices/src/vsock/device.rs (revision 88a9f799449c04180c6b9a21d3b9c0c4b57e2bd6)
// Copyright 2019 Intel Corporation. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//
// Portions Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//
// Portions Copyright 2017 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the THIRD-PARTY file.

use std::io;
use std::os::unix::io::AsRawFd;
use std::path::PathBuf;
use std::result;
use std::sync::atomic::AtomicBool;
use std::sync::{Arc, Barrier, RwLock};

use anyhow::anyhow;
use byteorder::{ByteOrder, LittleEndian};
use seccompiler::SeccompAction;
use serde::{Deserialize, Serialize};
use virtio_queue::Queue;
use virtio_queue::QueueOwnedT;
use virtio_queue::QueueT;
use vm_memory::GuestAddressSpace;
use vm_memory::GuestMemoryAtomic;
use vm_migration::{Migratable, MigratableError, Pausable, Snapshot, Snapshottable, Transportable};
use vm_virtio::AccessPlatform;
use vmm_sys_util::eventfd::EventFd;
/// This is the `VirtioDevice` implementation for our vsock device. It handles the virtio-level
/// device logic: feature negotiation, device configuration, and device activation.
/// The run-time device logic (i.e. event-driven data handling) is implemented by
/// `VsockEpollHandler`, defined below.
///
/// We aim to conform to the VirtIO v1.1 spec:
/// <https://docs.oasis-open.org/virtio/virtio/v1.1/virtio-v1.1.html>
///
/// The vsock device has two input parameters: a CID to identify the device, and a `VsockBackend`
/// to use for offloading vsock traffic.
///
/// Upon activation, the vsock device creates its `VsockEpollHandler`, passes it the
/// event-interested file descriptors, and registers these descriptors with an `EpollHelper`.
/// Going forward, the `VsockEpollHandler` will get notified whenever an event occurs on the
/// just-registered FDs:
/// - an RX queue FD;
/// - a TX queue FD;
/// - an event queue FD; and
/// - a backend FD.
///
use super::{VsockBackend, VsockPacket};
use crate::seccomp_filters::Thread;
use crate::Error as DeviceError;
use crate::GuestMemoryMmap;
use crate::VirtioInterrupt;
use crate::{
    thread_helper::spawn_virtio_thread, ActivateResult, EpollHelper, EpollHelperError,
    EpollHelperHandler, VirtioCommon, VirtioDevice, VirtioDeviceType, VirtioInterruptType,
    EPOLL_HELPER_EVENT_LAST, VIRTIO_F_IN_ORDER, VIRTIO_F_IOMMU_PLATFORM, VIRTIO_F_VERSION_1,
};

const QUEUE_SIZE: u16 = 256;
const NUM_QUEUES: usize = 3;
const QUEUE_SIZES: &[u16] = &[QUEUE_SIZE; NUM_QUEUES];

// New descriptors are pending on the rx queue.
pub const RX_QUEUE_EVENT: u16 = EPOLL_HELPER_EVENT_LAST + 1;
// New descriptors are pending on the tx queue.
pub const TX_QUEUE_EVENT: u16 = EPOLL_HELPER_EVENT_LAST + 2;
// New descriptors are pending on the event queue.
pub const EVT_QUEUE_EVENT: u16 = EPOLL_HELPER_EVENT_LAST + 3;
// Notification coming from the backend.
pub const BACKEND_EVENT: u16 = EPOLL_HELPER_EVENT_LAST + 4;

/// The `VsockEpollHandler` implements the runtime logic of our vsock device:
/// 1. Respond to TX queue events by wrapping virtio buffers into `VsockPacket`s, then sending those
///    packets to the `VsockBackend`;
/// 2. Forward backend FD event notifications to the `VsockBackend`;
/// 3. Fetch incoming packets from the `VsockBackend` and place them into the virtio RX queue;
/// 4. Whenever we have processed some virtio buffers (either TX or RX), let the driver know by
///    raising our assigned IRQ.
///
/// In a nutshell, the `VsockEpollHandler` logic looks like this:
/// - on TX queue event:
///   - fetch all packets from the TX queue and send them to the backend; then
///   - if the backend has queued up any incoming packets, fetch them into any available RX buffers.
/// - on RX queue event:
///   - fetch any incoming packets, queued up by the backend, into newly available RX buffers.
/// - on backend event:
///   - forward the event to the backend; then
///   - again, attempt to fetch any incoming packets queued by the backend into virtio RX buffers.
///
pub struct VsockEpollHandler<B: VsockBackend> {
    pub mem: GuestMemoryAtomic<GuestMemoryMmap>,
    pub queues: Vec<Queue>,
    pub queue_evts: Vec<EventFd>,
    pub kill_evt: EventFd,
    pub pause_evt: EventFd,
    pub interrupt_cb: Arc<dyn VirtioInterrupt>,
    pub backend: Arc<RwLock<B>>,
    pub access_platform: Option<Arc<dyn AccessPlatform>>,
}

impl<B> VsockEpollHandler<B>
where
    B: VsockBackend,
{
    /// Signal the guest driver that we've used some virtio buffers that it had previously made
    /// available.
    ///
    fn signal_used_queue(&self, queue_index: u16) -> result::Result<(), DeviceError> {
        debug!("vsock: raising IRQ");

        self.interrupt_cb
            .trigger(VirtioInterruptType::Queue(queue_index))
            .map_err(|e| {
                error!("Failed to signal used queue: {:?}", e);
                DeviceError::FailedSignalingUsedQueue(e)
            })
    }

    /// Walk the driver-provided RX queue buffers and attempt to fill them up with any data that we
    /// have pending.
    ///
    fn process_rx(&mut self) -> result::Result<(), DeviceError> {
        debug!("vsock: epoll_handler::process_rx()");

        let mut used_descs = false;

        while let Some(mut desc_chain) = self.queues[0].pop_descriptor_chain(self.mem.memory()) {
            let used_len = match VsockPacket::from_rx_virtq_head(
                &mut desc_chain,
                self.access_platform.as_ref(),
            ) {
                Ok(mut pkt) => {
                    if self.backend.write().unwrap().recv_pkt(&mut pkt).is_ok() {
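                        // Report to the driver how many bytes we wrote into its
                        // buffer: the vsock packet header plus the payload length.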
                        pkt.hdr().len() as u32 + pkt.len()
                    } else {
                        // We are using a consuming iterator over the virtio buffers, so, if we can't
                        // fill in this buffer, we'll need to undo the last iterator step.
                        self.queues[0].go_to_previous_position();
                        break;
                    }
                }
                Err(e) => {
                    warn!("vsock: RX queue error: {:?}", e);
                    0
                }
            };

            self.queues[0]
                .add_used(desc_chain.memory(), desc_chain.head_index(), used_len)
                .map_err(DeviceError::QueueAddUsed)?;
            used_descs = true;
        }

        if used_descs {
            self.signal_used_queue(0)
        } else {
            Ok(())
        }
    }

    /// Walk the driver-provided TX queue buffers, package them up as vsock packets, and send them to
    /// the backend for processing.
    ///
    fn process_tx(&mut self) -> result::Result<(), DeviceError> {
        debug!("vsock: epoll_handler::process_tx()");

        let mut used_descs = false;

        while let Some(mut desc_chain) = self.queues[1].pop_descriptor_chain(self.mem.memory()) {
            let pkt = match VsockPacket::from_tx_virtq_head(
                &mut desc_chain,
                self.access_platform.as_ref(),
            ) {
                Ok(pkt) => pkt,
                Err(e) => {
                    error!("vsock: error reading TX packet: {:?}", e);
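                    // Consume the malformed chain anyway, reporting 0 bytes used,
                    // so the descriptors are returned to the driver.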
                    self.queues[1]
                        .add_used(desc_chain.memory(), desc_chain.head_index(), 0)
                        .map_err(DeviceError::QueueAddUsed)?;
                    used_descs = true;
                    continue;
                }
            };

            if self.backend.write().unwrap().send_pkt(&pkt).is_err() {
                self.queues[1].go_to_previous_position();
                break;
            }

            self.queues[1]
                .add_used(desc_chain.memory(), desc_chain.head_index(), 0)
                .map_err(DeviceError::QueueAddUsed)?;
            used_descs = true;
        }

        if used_descs {
            self.signal_used_queue(1)
        } else {
            Ok(())
        }
    }

    fn run(
        &mut self,
        paused: Arc<AtomicBool>,
        paused_sync: Arc<Barrier>,
    ) -> result::Result<(), EpollHelperError> {
        let mut helper = EpollHelper::new(&self.kill_evt, &self.pause_evt)?;
        helper.add_event(self.queue_evts[0].as_raw_fd(), RX_QUEUE_EVENT)?;
        helper.add_event(self.queue_evts[1].as_raw_fd(), TX_QUEUE_EVENT)?;
        helper.add_event(self.queue_evts[2].as_raw_fd(), EVT_QUEUE_EVENT)?;
        helper.add_event(self.backend.read().unwrap().get_polled_fd(), BACKEND_EVENT)?;
        helper.run(paused, paused_sync, self)?;

        Ok(())
    }
}

impl<B> EpollHelperHandler for VsockEpollHandler<B>
where
    B: VsockBackend,
{
    fn handle_event(
        &mut self,
        _helper: &mut EpollHelper,
        event: &epoll::Event,
    ) -> result::Result<(), EpollHelperError> {
        let evset = match epoll::Events::from_bits(event.events) {
            Some(evset) => evset,
            None => {
                let evbits = event.events;
                warn!("epoll: ignoring unknown event set: 0x{:x}", evbits);
                return Ok(());
            }
        };

        let ev_type = event.data as u16;
        match ev_type {
            RX_QUEUE_EVENT => {
                debug!("vsock: RX queue event");
                self.queue_evts[0].read().map_err(|e| {
                    EpollHelperError::HandleEvent(anyhow!("Failed to get RX queue event: {:?}", e))
                })?;
                if self.backend.read().unwrap().has_pending_rx() {
                    self.process_rx().map_err(|e| {
                        EpollHelperError::HandleEvent(anyhow!(
                            "Failed to process RX queue: {:?}",
                            e
                        ))
                    })?;
                }
            }
            TX_QUEUE_EVENT => {
                debug!("vsock: TX queue event");
                self.queue_evts[1].read().map_err(|e| {
                    EpollHelperError::HandleEvent(anyhow!("Failed to get TX queue event: {:?}", e))
                })?;

                self.process_tx().map_err(|e| {
                    EpollHelperError::HandleEvent(anyhow!("Failed to process TX queue: {:?}", e))
                })?;

                // The backend may have queued up responses to the packets we sent during TX queue
                // processing. If that happened, we need to fetch those responses and place them
                // into RX buffers.
                if self.backend.read().unwrap().has_pending_rx() {
                    self.process_rx().map_err(|e| {
                        EpollHelperError::HandleEvent(anyhow!(
                            "Failed to process RX queue: {:?}",
                            e
                        ))
                    })?;
                }
            }
            EVT_QUEUE_EVENT => {
                debug!("vsock: EVT queue event");
                self.queue_evts[2].read().map_err(|e| {
                    EpollHelperError::HandleEvent(anyhow!("Failed to get EVT queue event: {:?}", e))
                })?;
            }
            BACKEND_EVENT => {
                debug!("vsock: backend event");
                self.backend.write().unwrap().notify(evset);
                // After the backend has been kicked, it might've freed up some resources, so we
                // can attempt to send it more data to process.
                // In particular, if `self.backend.send_pkt()` halted the TX queue processing (by
                // returning an error) at some point in the past, now is the time to try walking the
                // TX queue again.
                self.process_tx().map_err(|e| {
                    EpollHelperError::HandleEvent(anyhow!("Failed to process TX queue: {:?}", e))
                })?;
                if self.backend.read().unwrap().has_pending_rx() {
                    self.process_rx().map_err(|e| {
                        EpollHelperError::HandleEvent(anyhow!(
                            "Failed to process RX queue: {:?}",
                            e
                        ))
                    })?;
                }
            }
            _ => {
                return Err(EpollHelperError::HandleEvent(anyhow!(
                    "Unknown event for virtio-vsock"
                )));
            }
        }

        Ok(())
    }
}

/// Virtio device exposing virtual socket to the guest.
pub struct Vsock<B: VsockBackend> {
    common: VirtioCommon,
    id: String,
    cid: u64,
    backend: Arc<RwLock<B>>,
    path: PathBuf,
    seccomp_action: SeccompAction,
    exit_evt: EventFd,
}

#[derive(Serialize, Deserialize)]
pub struct VsockState {
    pub avail_features: u64,
    pub acked_features: u64,
}

impl<B> Vsock<B>
where
    B: VsockBackend + Sync,
{
    /// Create a new virtio-vsock device with the given VM CID and vsock
    /// backend.
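    ///
    /// Illustrative construction sketch (not compiled as a doctest; `backend`
    /// stands in for any value implementing `VsockBackend`):
    ///
    /// ```ignore
    /// let vsock = Vsock::new(
    ///     "_vsock0".to_string(),                     // device id
    ///     3,                                         // guest CID
    ///     "/tmp/ch.vsock".into(),                    // host-side Unix socket path
    ///     backend,                                   // some impl of `VsockBackend`
    ///     false,                                     // device not behind a vIOMMU
    ///     SeccompAction::Trap,
    ///     EventFd::new(libc::EFD_NONBLOCK).unwrap(), // exit event
    ///     None,                                      // fresh device, no restored state
    /// )?;
    /// ```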
    #[allow(clippy::too_many_arguments)]
    pub fn new(
        id: String,
        cid: u32,
        path: PathBuf,
        backend: B,
        iommu: bool,
        seccomp_action: SeccompAction,
        exit_evt: EventFd,
        state: Option<VsockState>,
    ) -> io::Result<Vsock<B>> {
        let (avail_features, acked_features, paused) = if let Some(state) = state {
            info!("Restoring virtio-vsock {}", id);
            (state.avail_features, state.acked_features, true)
        } else {
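            // Fresh device: advertise VIRTIO_F_VERSION_1 (non-legacy virtio 1.0+
            // operation) and VIRTIO_F_IN_ORDER (descriptors are used in the order
            // the driver made them available). VIRTIO_F_IOMMU_PLATFORM is added
            // only when the device sits behind a vIOMMU.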
            let mut avail_features = 1u64 << VIRTIO_F_VERSION_1 | 1u64 << VIRTIO_F_IN_ORDER;

            if iommu {
                avail_features |= 1u64 << VIRTIO_F_IOMMU_PLATFORM;
            }
            (avail_features, 0, false)
        };

        Ok(Vsock {
            common: VirtioCommon {
                device_type: VirtioDeviceType::Vsock as u32,
                avail_features,
                acked_features,
                paused_sync: Some(Arc::new(Barrier::new(2))),
                queue_sizes: QUEUE_SIZES.to_vec(),
                min_queues: NUM_QUEUES as u16,
                paused: Arc::new(AtomicBool::new(paused)),
                ..Default::default()
            },
            id,
            cid: cid.into(),
            backend: Arc::new(RwLock::new(backend)),
            path,
            seccomp_action,
            exit_evt,
        })
    }

    fn state(&self) -> VsockState {
        VsockState {
            avail_features: self.common.avail_features,
            acked_features: self.common.acked_features,
        }
    }
}

impl<B> Drop for Vsock<B>
where
    B: VsockBackend,
{
    fn drop(&mut self) {
        if let Some(kill_evt) = self.common.kill_evt.take() {
            // Ignore the result because there is nothing we can do about it.
            let _ = kill_evt.write(1);
        }
        self.common.wait_for_epoll_threads();
    }
}

impl<B> VirtioDevice for Vsock<B>
where
    B: VsockBackend + Sync + 'static,
{
    fn device_type(&self) -> u32 {
        self.common.device_type
    }

    fn queue_max_sizes(&self) -> &[u16] {
        &self.common.queue_sizes
    }

    fn features(&self) -> u64 {
        self.common.avail_features
    }

    fn ack_features(&mut self, value: u64) {
        self.common.ack_features(value)
    }

    fn read_config(&self, offset: u64, data: &mut [u8]) {
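        // The virtio-vsock config space holds a single field: the guest CID, a
        // 64-bit little-endian value at offset 0. Drivers may read it as one
        // 64-bit access or as two 32-bit halves (at offsets 0 and 4).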
        match offset {
            0 if data.len() == 8 => LittleEndian::write_u64(data, self.cid),
            0 if data.len() == 4 => LittleEndian::write_u32(data, (self.cid & 0xffff_ffff) as u32),
            4 if data.len() == 4 => {
                LittleEndian::write_u32(data, ((self.cid >> 32) & 0xffff_ffff) as u32)
            }
            _ => warn!(
                "vsock: virtio-vsock received invalid read request of {} bytes at offset {}",
                data.len(),
                offset
            ),
        }
    }

    fn activate(
        &mut self,
        mem: GuestMemoryAtomic<GuestMemoryMmap>,
        interrupt_cb: Arc<dyn VirtioInterrupt>,
        queues: Vec<(usize, Queue, EventFd)>,
    ) -> ActivateResult {
        self.common.activate(&queues, &interrupt_cb)?;
        let (kill_evt, pause_evt) = self.common.dup_eventfds();

        let mut virtqueues = Vec::new();
        let mut queue_evts = Vec::new();
        for (_, queue, queue_evt) in queues {
            virtqueues.push(queue);
            queue_evts.push(queue_evt);
        }

        let mut handler = VsockEpollHandler {
            mem,
            queues: virtqueues,
            queue_evts,
            kill_evt,
            pause_evt,
            interrupt_cb,
            backend: self.backend.clone(),
            access_platform: self.common.access_platform.clone(),
        };

        let paused = self.common.paused.clone();
        let paused_sync = self.common.paused_sync.clone();
        let mut epoll_threads = Vec::new();

        spawn_virtio_thread(
            &self.id,
            &self.seccomp_action,
            Thread::VirtioVsock,
            &mut epoll_threads,
            &self.exit_evt,
            move || handler.run(paused, paused_sync.unwrap()),
        )?;

        self.common.epoll_threads = Some(epoll_threads);

        event!("virtio-device", "activated", "id", &self.id);
        Ok(())
    }

    fn reset(&mut self) -> Option<Arc<dyn VirtioInterrupt>> {
        let result = self.common.reset();
        event!("virtio-device", "reset", "id", &self.id);
        result
    }

    fn shutdown(&mut self) {
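        // Best-effort removal of the host-side Unix socket backing this device;
        // the result is ignored since the file may already be gone.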
        std::fs::remove_file(&self.path).ok();
    }

    fn set_access_platform(&mut self, access_platform: Arc<dyn AccessPlatform>) {
        self.common.set_access_platform(access_platform)
    }
}

impl<B> Pausable for Vsock<B>
where
    B: VsockBackend + Sync + 'static,
{
    fn pause(&mut self) -> result::Result<(), MigratableError> {
        self.common.pause()
    }

    fn resume(&mut self) -> result::Result<(), MigratableError> {
        self.common.resume()
    }
}

impl<B> Snapshottable for Vsock<B>
where
    B: VsockBackend + Sync + 'static,
{
    fn id(&self) -> String {
        self.id.clone()
    }

    fn snapshot(&mut self) -> std::result::Result<Snapshot, MigratableError> {
        Snapshot::new_from_state(&self.state())
    }
}
impl<B> Transportable for Vsock<B> where B: VsockBackend + Sync + 'static {}
impl<B> Migratable for Vsock<B> where B: VsockBackend + Sync + 'static {}

#[cfg(test)]
mod tests {
    use libc::EFD_NONBLOCK;

    use super::super::tests::{NoopVirtioInterrupt, TestContext};
    use super::super::*;
    use super::*;
    use crate::ActivateError;

    #[test]
    fn test_virtio_device() {
        let mut ctx = TestContext::new();
        let avail_features = 1u64 << VIRTIO_F_VERSION_1 | 1u64 << VIRTIO_F_IN_ORDER;
        let device_features = avail_features;
        let driver_features: u64 = avail_features | 1 | (1 << 32);
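        // The driver acks two bits the device never offered (bit 0 and bit 32);
        // the device is expected to mask them out of the acked feature set.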
        let device_pages = [
            (device_features & 0xffff_ffff) as u32,
            (device_features >> 32) as u32,
        ];
        let driver_pages = [
            (driver_features & 0xffff_ffff) as u32,
            (driver_features >> 32) as u32,
        ];
        assert_eq!(ctx.device.device_type(), VirtioDeviceType::Vsock as u32);
        assert_eq!(ctx.device.queue_max_sizes(), QUEUE_SIZES);
        assert_eq!(ctx.device.features() as u32, device_pages[0]);
        assert_eq!((ctx.device.features() >> 32) as u32, device_pages[1]);

        // Ack device features, page 0.
        ctx.device.ack_features(u64::from(driver_pages[0]));
        // Ack device features, page 1.
        ctx.device.ack_features(u64::from(driver_pages[1]) << 32);
        // Check that no side effects are present, and that the acked features are exactly the
        // same as the device features.
        assert_eq!(
            ctx.device.common.acked_features,
            device_features & driver_features
        );

        // Test reading 32-bit chunks.
        let mut data = [0u8; 8];
        ctx.device.read_config(0, &mut data[..4]);
        assert_eq!(
            u64::from(LittleEndian::read_u32(&data)),
            ctx.cid & 0xffff_ffff
        );
        ctx.device.read_config(4, &mut data[4..]);
        assert_eq!(
            u64::from(LittleEndian::read_u32(&data[4..])),
            (ctx.cid >> 32) & 0xffff_ffff
        );

        // Test reading 64-bit.
        let mut data = [0u8; 8];
        ctx.device.read_config(0, &mut data);
        assert_eq!(LittleEndian::read_u64(&data), ctx.cid);

        // Check that out-of-bounds reading doesn't mutate the destination buffer.
        let mut data = [0u8, 1, 2, 3, 4, 5, 6, 7];
        ctx.device.read_config(2, &mut data);
        assert_eq!(data, [0u8, 1, 2, 3, 4, 5, 6, 7]);
        // Just covering lines here, since the vsock device has no writable config.
        // A warning is, however, logged if the guest driver attempts to write any config data.
        ctx.device.write_config(0, &data[..4]);

        let memory = GuestMemoryAtomic::new(ctx.mem.clone());

        // Test a bad activation.
        let bad_activate =
            ctx.device
                .activate(memory.clone(), Arc::new(NoopVirtioInterrupt {}), Vec::new());
        match bad_activate {
            Err(ActivateError::BadActivate) => (),
            other => panic!("{other:?}"),
        }

        // Test a correct activation.
        ctx.device
            .activate(
                memory,
                Arc::new(NoopVirtioInterrupt {}),
                vec![
                    (
                        0,
                        Queue::new(256).unwrap(),
                        EventFd::new(EFD_NONBLOCK).unwrap(),
                    ),
                    (
                        1,
                        Queue::new(256).unwrap(),
                        EventFd::new(EFD_NONBLOCK).unwrap(),
                    ),
                    (
                        2,
                        Queue::new(256).unwrap(),
                        EventFd::new(EFD_NONBLOCK).unwrap(),
                    ),
                ],
            )
            .unwrap();
    }

    #[test]
    fn test_irq() {
        // Test case: successful IRQ signaling.
        {
            let test_ctx = TestContext::new();
            let ctx = test_ctx.create_epoll_handler_context();

            let _queue: Queue = Queue::new(256).unwrap();
            assert!(ctx.handler.signal_used_queue(0).is_ok());
        }
    }

    #[test]
    fn test_txq_event() {
        // Test case:
        // - the driver has something to send (there's data in the TX queue); and
        // - the backend has no pending RX data.
        {
            let test_ctx = TestContext::new();
            let mut ctx = test_ctx.create_epoll_handler_context();

            ctx.handler.backend.write().unwrap().set_pending_rx(false);
            ctx.signal_txq_event();

            // The available TX descriptor should have been used.
            assert_eq!(ctx.guest_txvq.used.idx.get(), 1);
            // The available RX descriptor should be untouched.
            assert_eq!(ctx.guest_rxvq.used.idx.get(), 0);
        }

        // Test case:
        // - the driver has something to send (there's data in the TX queue); and
        // - the backend also has some pending RX data.
        {
            let test_ctx = TestContext::new();
            let mut ctx = test_ctx.create_epoll_handler_context();

            ctx.handler.backend.write().unwrap().set_pending_rx(true);
            ctx.signal_txq_event();

            // Both available RX and TX descriptors should have been used.
            assert_eq!(ctx.guest_txvq.used.idx.get(), 1);
            assert_eq!(ctx.guest_rxvq.used.idx.get(), 1);
        }

        // Test case:
        // - the driver has something to send (there's data in the TX queue); and
        // - the backend errors out and cannot process the TX queue.
        {
            let test_ctx = TestContext::new();
            let mut ctx = test_ctx.create_epoll_handler_context();

            ctx.handler.backend.write().unwrap().set_pending_rx(false);
            ctx.handler
                .backend
                .write()
                .unwrap()
                .set_tx_err(Some(VsockError::NoData));
            ctx.signal_txq_event();

            // Both RX and TX queues should be untouched.
            assert_eq!(ctx.guest_txvq.used.idx.get(), 0);
            assert_eq!(ctx.guest_rxvq.used.idx.get(), 0);
        }

        // Test case:
        // - the driver supplied a malformed TX buffer.
        {
            let test_ctx = TestContext::new();
            let mut ctx = test_ctx.create_epoll_handler_context();

            // Invalidate the packet header descriptor by setting its length to 0.
            ctx.guest_txvq.dtable[0].len.set(0);
            ctx.signal_txq_event();

            // The available descriptor should have been consumed, but no packet should have
            // reached the backend.
            assert_eq!(ctx.guest_txvq.used.idx.get(), 1);
            assert_eq!(ctx.handler.backend.read().unwrap().tx_ok_cnt, 0);
        }

        // Test case: spurious TXQ_EVENT.
        {
            let test_ctx = TestContext::new();
            let mut ctx = test_ctx.create_epoll_handler_context();

            let events = epoll::Events::EPOLLIN;
            let event = epoll::Event::new(events, TX_QUEUE_EVENT as u64);
            let mut epoll_helper =
                EpollHelper::new(&ctx.handler.kill_evt, &ctx.handler.pause_evt).unwrap();

            assert!(
                ctx.handler.handle_event(&mut epoll_helper, &event).is_err(),
                "handle_event() should have failed"
            );
        }
    }

    #[test]
    fn test_rxq_event() {
        // Test case:
        // - there is pending RX data in the backend; and
        // - the driver makes RX buffers available; and
        // - the backend errors out when attempting to receive data.
        {
            let test_ctx = TestContext::new();
            let mut ctx = test_ctx.create_epoll_handler_context();

            ctx.handler.backend.write().unwrap().set_pending_rx(true);
            ctx.handler
                .backend
                .write()
                .unwrap()
                .set_rx_err(Some(VsockError::NoData));
            ctx.signal_rxq_event();

            // The available RX buffer should've been left untouched.
            assert_eq!(ctx.guest_rxvq.used.idx.get(), 0);
        }

        // Test case:
        // - there is pending RX data in the backend; and
        // - the driver makes RX buffers available; and
        // - the backend successfully places its RX data into the queue.
        {
            let test_ctx = TestContext::new();
            let mut ctx = test_ctx.create_epoll_handler_context();

            ctx.handler.backend.write().unwrap().set_pending_rx(true);
            ctx.signal_rxq_event();

            // The available RX buffer should have been used.
            assert_eq!(ctx.guest_rxvq.used.idx.get(), 1);
        }

        // Test case: the driver provided a malformed RX descriptor chain.
        {
            let test_ctx = TestContext::new();
            let mut ctx = test_ctx.create_epoll_handler_context();

            // Invalidate the packet header descriptor by setting its length to 0.
            ctx.guest_rxvq.dtable[0].len.set(0);

            // The chain should have been processed without involving the backend.
            assert!(ctx.handler.process_rx().is_ok());
            assert_eq!(ctx.guest_rxvq.used.idx.get(), 1);
            assert_eq!(ctx.handler.backend.read().unwrap().rx_ok_cnt, 0);
        }

        // Test case: spurious RXQ_EVENT.
        {
            let test_ctx = TestContext::new();
            let mut ctx = test_ctx.create_epoll_handler_context();
            ctx.handler.backend.write().unwrap().set_pending_rx(false);

            let events = epoll::Events::EPOLLIN;
            let event = epoll::Event::new(events, RX_QUEUE_EVENT as u64);
            let mut epoll_helper =
                EpollHelper::new(&ctx.handler.kill_evt, &ctx.handler.pause_evt).unwrap();

            assert_eq!(ctx.guest_rxvq.used.idx.get(), 0);
            assert!(
                ctx.handler.handle_event(&mut epoll_helper, &event).is_err(),
                "handle_event() should have failed"
            );
        }
    }

    #[test]
    fn test_evq_event() {
        // Test case: spurious EVQ_EVENT.
        {
            let test_ctx = TestContext::new();
            let mut ctx = test_ctx.create_epoll_handler_context();
            ctx.handler.backend.write().unwrap().set_pending_rx(false);

            let events = epoll::Events::EPOLLIN;
            let event = epoll::Event::new(events, EVT_QUEUE_EVENT as u64);
            let mut epoll_helper =
                EpollHelper::new(&ctx.handler.kill_evt, &ctx.handler.pause_evt).unwrap();

            assert_eq!(ctx.guest_evvq.used.idx.get(), 0);
            assert!(
                ctx.handler.handle_event(&mut epoll_helper, &event).is_err(),
                "handle_event() should have failed"
            );
        }
    }

    #[test]
    fn test_backend_event() {
        // Test case:
        // - a backend event is received; and
        // - the backend has pending RX data.
        {
            let test_ctx = TestContext::new();
            let mut ctx = test_ctx.create_epoll_handler_context();

            ctx.handler.backend.write().unwrap().set_pending_rx(true);

            let events = epoll::Events::EPOLLIN;
            let event = epoll::Event::new(events, BACKEND_EVENT as u64);
            let mut epoll_helper =
                EpollHelper::new(&ctx.handler.kill_evt, &ctx.handler.pause_evt).unwrap();
            assert!(ctx.handler.handle_event(&mut epoll_helper, &event).is_ok());

            // The backend should've received this event.
            assert_eq!(
                ctx.handler.backend.read().unwrap().evset,
                Some(epoll::Events::EPOLLIN)
            );
            // TX queue processing should've been triggered.
            assert_eq!(ctx.guest_txvq.used.idx.get(), 1);
            // RX queue processing should've been triggered.
            assert_eq!(ctx.guest_rxvq.used.idx.get(), 1);
        }

        // Test case:
        // - a backend event is received; and
        // - the backend doesn't have any pending RX data.
        {
            let test_ctx = TestContext::new();
            let mut ctx = test_ctx.create_epoll_handler_context();

            ctx.handler.backend.write().unwrap().set_pending_rx(false);

            let events = epoll::Events::EPOLLIN;
            let event = epoll::Event::new(events, BACKEND_EVENT as u64);
            let mut epoll_helper =
                EpollHelper::new(&ctx.handler.kill_evt, &ctx.handler.pause_evt).unwrap();
            assert!(ctx.handler.handle_event(&mut epoll_helper, &event).is_ok());

            // The backend should've received this event.
            assert_eq!(
                ctx.handler.backend.read().unwrap().evset,
                Some(epoll::Events::EPOLLIN)
            );
            // TX queue processing should've been triggered.
            assert_eq!(ctx.guest_txvq.used.idx.get(), 1);
            // The RX queue should've been left untouched.
            assert_eq!(ctx.guest_rxvq.used.idx.get(), 0);
        }
    }

    #[test]
    fn test_unknown_event() {
        let test_ctx = TestContext::new();
        let mut ctx = test_ctx.create_epoll_handler_context();

        let events = epoll::Events::EPOLLIN;
        let event = epoll::Event::new(events, 0xff);
        let mut epoll_helper =
            EpollHelper::new(&ctx.handler.kill_evt, &ctx.handler.pause_evt).unwrap();

        assert!(
            ctx.handler.handle_event(&mut epoll_helper, &event).is_err(),
            "handle_event() should have failed"
        );
    }
}