xref: /cloud-hypervisor/virtio-devices/src/vsock/device.rs (revision d10f20eb718023742143fa847a37f3d6114ead52)
// Copyright 2019 Intel Corporation. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//
// Portions Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//
// Portions Copyright 2017 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the THIRD-PARTY file.

//! This is the `VirtioDevice` implementation for our vsock device. It handles the virtio-level
//! device logic: feature negotiation, device configuration, and device activation.
//! The run-time device logic (i.e. event-driven data handling) is implemented by
//! `VsockEpollHandler`, defined below.
//!
//! We aim to conform to the VirtIO v1.1 spec:
//! <https://docs.oasis-open.org/virtio/virtio/v1.1/virtio-v1.1.html>
//!
//! The vsock device takes two input parameters: a CID to identify the device, and a
//! `VsockBackend` to use for offloading vsock traffic.
//!
//! Upon activation, the vsock device creates its `VsockEpollHandler`, hands it the file
//! descriptors it needs to monitor, and registers those descriptors with an `EpollHelper`.
//! From then on, the `VsockEpollHandler` is notified whenever an event occurs on one of the
//! registered FDs:
//! - the RX queue FD;
//! - the TX queue FD;
//! - the event queue FD; and
//! - the backend FD.

use super::{VsockBackend, VsockPacket};
use crate::seccomp_filters::Thread;
use crate::Error as DeviceError;
use crate::GuestMemoryMmap;
use crate::VirtioInterrupt;
use crate::{
    thread_helper::spawn_virtio_thread, ActivateResult, EpollHelper, EpollHelperError,
    EpollHelperHandler, VirtioCommon, VirtioDevice, VirtioDeviceType, VirtioInterruptType,
    EPOLL_HELPER_EVENT_LAST, VIRTIO_F_IN_ORDER, VIRTIO_F_IOMMU_PLATFORM, VIRTIO_F_VERSION_1,
};
use anyhow::anyhow;
use byteorder::{ByteOrder, LittleEndian};
use seccompiler::SeccompAction;
use serde::{Deserialize, Serialize};
use std::io;
use std::os::unix::io::AsRawFd;
use std::path::PathBuf;
use std::result;
use std::sync::atomic::AtomicBool;
use std::sync::{Arc, Barrier, RwLock};
use virtio_queue::Queue;
use virtio_queue::QueueOwnedT;
use virtio_queue::QueueT;
use vm_memory::GuestAddressSpace;
use vm_memory::GuestMemoryAtomic;
use vm_migration::{Migratable, MigratableError, Pausable, Snapshot, Snapshottable, Transportable};
use vm_virtio::AccessPlatform;
use vmm_sys_util::eventfd::EventFd;

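// Per the virtio spec, the vsock device uses three virtqueues: RX (0), TX (1)
// and event (2), each QUEUE_SIZE descriptors deep.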
const QUEUE_SIZE: u16 = 256;
const NUM_QUEUES: usize = 3;
const QUEUE_SIZES: &[u16] = &[QUEUE_SIZE; NUM_QUEUES];

/// New descriptors are pending on the rx queue.
pub const RX_QUEUE_EVENT: u16 = EPOLL_HELPER_EVENT_LAST + 1;
/// New descriptors are pending on the tx queue.
pub const TX_QUEUE_EVENT: u16 = EPOLL_HELPER_EVENT_LAST + 2;
/// New descriptors are pending on the event queue.
pub const EVT_QUEUE_EVENT: u16 = EPOLL_HELPER_EVENT_LAST + 3;
/// Notification coming from the backend.
pub const BACKEND_EVENT: u16 = EPOLL_HELPER_EVENT_LAST + 4;

/// The `VsockEpollHandler` implements the runtime logic of our vsock device:
/// 1. Respond to TX queue events by wrapping virtio buffers into `VsockPacket`s, then sending those
///    packets to the `VsockBackend`;
/// 2. Forward backend FD event notifications to the `VsockBackend`;
/// 3. Fetch incoming packets from the `VsockBackend` and place them into the virtio RX queue;
/// 4. Whenever we have processed some virtio buffers (either TX or RX), let the driver know by
///    raising our assigned IRQ.
///
/// In a nutshell, the `VsockEpollHandler` logic looks like this:
/// - on TX queue event:
///   - fetch all packets from the TX queue and send them to the backend; then
///   - if the backend has queued up any incoming packets, fetch them into any available RX buffers.
/// - on RX queue event:
///   - fetch any incoming packets, queued up by the backend, into newly available RX buffers.
/// - on backend event:
///   - forward the event to the backend; then
///   - again, attempt to fetch any incoming packets queued by the backend into virtio RX buffers.
pub struct VsockEpollHandler<B: VsockBackend> {
    pub mem: GuestMemoryAtomic<GuestMemoryMmap>,
    pub queues: Vec<Queue>,
    pub queue_evts: Vec<EventFd>,
    pub kill_evt: EventFd,
    pub pause_evt: EventFd,
    pub interrupt_cb: Arc<dyn VirtioInterrupt>,
    pub backend: Arc<RwLock<B>>,
    pub access_platform: Option<Arc<dyn AccessPlatform>>,
}

impl<B> VsockEpollHandler<B>
where
    B: VsockBackend,
{
    /// Signal the guest driver that we've used some virtio buffers that it had previously made
    /// available.
    fn signal_used_queue(&self, queue_index: u16) -> result::Result<(), DeviceError> {
        debug!("vsock: raising IRQ");

        self.interrupt_cb
            .trigger(VirtioInterruptType::Queue(queue_index))
            .map_err(|e| {
                error!("Failed to signal used queue: {:?}", e);
                DeviceError::FailedSignalingUsedQueue(e)
            })
    }

    /// Walk the driver-provided RX queue buffers and attempt to fill them up with any data that we
    /// have pending.
    fn process_rx(&mut self) -> result::Result<(), DeviceError> {
        debug!("vsock: epoll_handler::process_rx()");

        let mut used_descs = false;

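        // Pop descriptor chains off the RX queue and let the backend fill each one in.
        // The length reported back to the driver is the number of bytes written into
        // the chain: packet header plus payload.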
        while let Some(mut desc_chain) = self.queues[0].pop_descriptor_chain(self.mem.memory()) {
            let used_len = match VsockPacket::from_rx_virtq_head(
                &mut desc_chain,
                self.access_platform.as_ref(),
            ) {
                Ok(mut pkt) => {
                    if self.backend.write().unwrap().recv_pkt(&mut pkt).is_ok() {
                        pkt.hdr().len() as u32 + pkt.len()
                    } else {
                        // We're using a consuming iterator over the virtio buffers, so if we
                        // can't fill this buffer we need to undo the last iterator step.
                        self.queues[0].go_to_previous_position();
                        break;
                    }
                }
                Err(e) => {
                    warn!("vsock: RX queue error: {:?}", e);
                    0
                }
            };

            self.queues[0]
                .add_used(desc_chain.memory(), desc_chain.head_index(), used_len)
                .map_err(DeviceError::QueueAddUsed)?;
            used_descs = true;
        }

        if used_descs {
            self.signal_used_queue(0)
        } else {
            Ok(())
        }
    }

    /// Walk the driver-provided TX queue buffers, package them up as vsock packets, and send them
    /// to the backend for processing.
    fn process_tx(&mut self) -> result::Result<(), DeviceError> {
        debug!("vsock: epoll_handler::process_tx()");

        let mut used_descs = false;

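        // Drain the TX queue. TX buffers are device-read-only, so we always report a
        // used length of 0 when returning descriptors to the driver.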
        while let Some(mut desc_chain) = self.queues[1].pop_descriptor_chain(self.mem.memory()) {
            let pkt = match VsockPacket::from_tx_virtq_head(
                &mut desc_chain,
                self.access_platform.as_ref(),
            ) {
                Ok(pkt) => pkt,
                Err(e) => {
                    error!("vsock: error reading TX packet: {:?}", e);
                    self.queues[1]
                        .add_used(desc_chain.memory(), desc_chain.head_index(), 0)
                        .map_err(DeviceError::QueueAddUsed)?;
                    used_descs = true;
                    continue;
                }
            };

            if self.backend.write().unwrap().send_pkt(&pkt).is_err() {
                self.queues[1].go_to_previous_position();
                break;
            }

            self.queues[1]
                .add_used(desc_chain.memory(), desc_chain.head_index(), 0)
                .map_err(DeviceError::QueueAddUsed)?;
            used_descs = true;
        }

        if used_descs {
            self.signal_used_queue(1)
        } else {
            Ok(())
        }
    }

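    /// Register the queue event FDs and the backend FD with an `EpollHelper`, then
    /// drive the device event loop until the kill or pause event fires.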
    fn run(
        &mut self,
        paused: Arc<AtomicBool>,
        paused_sync: Arc<Barrier>,
    ) -> result::Result<(), EpollHelperError> {
        let mut helper = EpollHelper::new(&self.kill_evt, &self.pause_evt)?;
        helper.add_event(self.queue_evts[0].as_raw_fd(), RX_QUEUE_EVENT)?;
        helper.add_event(self.queue_evts[1].as_raw_fd(), TX_QUEUE_EVENT)?;
        helper.add_event(self.queue_evts[2].as_raw_fd(), EVT_QUEUE_EVENT)?;
        helper.add_event(self.backend.read().unwrap().get_polled_fd(), BACKEND_EVENT)?;
        helper.run(paused, paused_sync, self)?;

        Ok(())
    }
}

impl<B> EpollHelperHandler for VsockEpollHandler<B>
where
    B: VsockBackend,
{
    fn handle_event(
        &mut self,
        _helper: &mut EpollHelper,
        event: &epoll::Event,
    ) -> result::Result<(), EpollHelperError> {
        let evset = match epoll::Events::from_bits(event.events) {
            Some(evset) => evset,
            None => {
                let evbits = event.events;
                warn!("epoll: ignoring unknown event set: 0x{:x}", evbits);
                return Ok(());
            }
        };

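        // `event.data` carries the event id we registered in `run()`.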
        let ev_type = event.data as u16;
        match ev_type {
            RX_QUEUE_EVENT => {
                debug!("vsock: RX queue event");
                self.queue_evts[0].read().map_err(|e| {
                    EpollHelperError::HandleEvent(anyhow!("Failed to get RX queue event: {:?}", e))
                })?;
                if self.backend.read().unwrap().has_pending_rx() {
                    self.process_rx().map_err(|e| {
                        EpollHelperError::HandleEvent(anyhow!(
                            "Failed to process RX queue: {:?}",
                            e
                        ))
                    })?;
                }
            }
            TX_QUEUE_EVENT => {
                debug!("vsock: TX queue event");
                self.queue_evts[1].read().map_err(|e| {
                    EpollHelperError::HandleEvent(anyhow!("Failed to get TX queue event: {:?}", e))
                })?;

                self.process_tx().map_err(|e| {
                    EpollHelperError::HandleEvent(anyhow!("Failed to process TX queue: {:?}", e))
                })?;

                // The backend may have queued up responses to the packets we sent during TX queue
                // processing. If that happened, we need to fetch those responses and place them
                // into RX buffers.
                if self.backend.read().unwrap().has_pending_rx() {
                    self.process_rx().map_err(|e| {
                        EpollHelperError::HandleEvent(anyhow!(
                            "Failed to process RX queue: {:?}",
                            e
                        ))
                    })?;
                }
            }
            EVT_QUEUE_EVENT => {
                debug!("vsock: EVT queue event");
                self.queue_evts[2].read().map_err(|e| {
                    EpollHelperError::HandleEvent(anyhow!("Failed to get EVT queue event: {:?}", e))
                })?;
            }
            BACKEND_EVENT => {
                debug!("vsock: backend event");
                self.backend.write().unwrap().notify(evset);
                // After the backend has been kicked, it might've freed up some resources, so we
                // can attempt to send it more data to process.
                // In particular, if `self.backend.send_pkt()` halted the TX queue processing (by
                // returning an error) at some point in the past, now is the time to try walking the
                // TX queue again.
                self.process_tx().map_err(|e| {
                    EpollHelperError::HandleEvent(anyhow!("Failed to process TX queue: {:?}", e))
                })?;
                if self.backend.read().unwrap().has_pending_rx() {
                    self.process_rx().map_err(|e| {
                        EpollHelperError::HandleEvent(anyhow!(
                            "Failed to process RX queue: {:?}",
                            e
                        ))
                    })?;
                }
            }
            _ => {
                return Err(EpollHelperError::HandleEvent(anyhow!(
                    "Unknown event for virtio-vsock"
                )));
            }
        }

        Ok(())
    }
}

/// Virtio device exposing a virtual socket to the guest.
pub struct Vsock<B: VsockBackend> {
    common: VirtioCommon,
    id: String,
    cid: u64,
    backend: Arc<RwLock<B>>,
    path: PathBuf,
    seccomp_action: SeccompAction,
    exit_evt: EventFd,
}

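/// Migratable state of the virtio-vsock device: the results of feature negotiation.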
#[derive(Serialize, Deserialize)]
pub struct VsockState {
    pub avail_features: u64,
    pub acked_features: u64,
}

impl<B> Vsock<B>
where
    B: VsockBackend + Sync,
{
    /// Create a new virtio-vsock device with the given VM CID and vsock
    /// backend.
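    ///
    /// A minimal construction sketch (not compiled here); it assumes some
    /// `my_backend` value implementing `VsockBackend` and elides error handling:
    ///
    /// ```ignore
    /// let vsock = Vsock::new(
    ///     "_vsock0".to_string(),                     // device id
    ///     3,                                         // guest CID (0..=2 are reserved)
    ///     PathBuf::from("/tmp/ch.vsock"),            // hypothetical backend socket path
    ///     my_backend,                                // any VsockBackend implementation
    ///     false,                                     // no vIOMMU in front of the device
    ///     SeccompAction::Trap,
    ///     EventFd::new(libc::EFD_NONBLOCK).unwrap(),
    ///     None,                                      // fresh start, no restored state
    /// )
    /// .unwrap();
    /// ```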
    #[allow(clippy::too_many_arguments)]
    pub fn new(
        id: String,
        cid: u32,
        path: PathBuf,
        backend: B,
        iommu: bool,
        seccomp_action: SeccompAction,
        exit_evt: EventFd,
        state: Option<VsockState>,
    ) -> io::Result<Vsock<B>> {
        let (avail_features, acked_features, paused) = if let Some(state) = state {
            info!("Restoring virtio-vsock {}", id);
            (state.avail_features, state.acked_features, true)
        } else {
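            // Fresh start: advertise VIRTIO_F_VERSION_1 (modern virtio) and
            // VIRTIO_F_IN_ORDER (buffers are used in the order the driver made
            // them available).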
            let mut avail_features = 1u64 << VIRTIO_F_VERSION_1 | 1u64 << VIRTIO_F_IN_ORDER;

            if iommu {
                avail_features |= 1u64 << VIRTIO_F_IOMMU_PLATFORM;
            }
            (avail_features, 0, false)
        };

        Ok(Vsock {
            common: VirtioCommon {
                device_type: VirtioDeviceType::Vsock as u32,
                avail_features,
                acked_features,
                paused_sync: Some(Arc::new(Barrier::new(2))),
                queue_sizes: QUEUE_SIZES.to_vec(),
                min_queues: NUM_QUEUES as u16,
                paused: Arc::new(AtomicBool::new(paused)),
                ..Default::default()
            },
            id,
            cid: cid.into(),
            backend: Arc::new(RwLock::new(backend)),
            path,
            seccomp_action,
            exit_evt,
        })
    }

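    /// Capture the negotiated feature state for snapshotting.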
    fn state(&self) -> VsockState {
        VsockState {
            avail_features: self.common.avail_features,
            acked_features: self.common.acked_features,
        }
    }
}

impl<B> Drop for Vsock<B>
where
    B: VsockBackend,
{
    fn drop(&mut self) {
        if let Some(kill_evt) = self.common.kill_evt.take() {
            // Ignore the result because there is nothing we can do about it.
            let _ = kill_evt.write(1);
        }
        self.common.wait_for_epoll_threads();
    }
}

impl<B> VirtioDevice for Vsock<B>
where
    B: VsockBackend + Sync + 'static,
{
    fn device_type(&self) -> u32 {
        self.common.device_type
    }

    fn queue_max_sizes(&self) -> &[u16] {
        &self.common.queue_sizes
    }

    fn features(&self) -> u64 {
        self.common.avail_features
    }

    fn ack_features(&mut self, value: u64) {
        self.common.ack_features(value)
    }

    fn read_config(&self, offset: u64, data: &mut [u8]) {
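        // The virtio-vsock config space is a single 64-bit guest CID (little
        // endian), which the driver may read as one 8-byte access or as two
        // 4-byte accesses at offsets 0 and 4.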
        match offset {
            0 if data.len() == 8 => LittleEndian::write_u64(data, self.cid),
            0 if data.len() == 4 => LittleEndian::write_u32(data, (self.cid & 0xffff_ffff) as u32),
            4 if data.len() == 4 => {
                LittleEndian::write_u32(data, ((self.cid >> 32) & 0xffff_ffff) as u32)
            }
            _ => warn!(
                "vsock: virtio-vsock received invalid read request of {} bytes at offset {}",
                data.len(),
                offset
            ),
        }
    }

    fn activate(
        &mut self,
        mem: GuestMemoryAtomic<GuestMemoryMmap>,
        interrupt_cb: Arc<dyn VirtioInterrupt>,
        queues: Vec<(usize, Queue, EventFd)>,
    ) -> ActivateResult {
        self.common.activate(&queues, &interrupt_cb)?;
        let (kill_evt, pause_evt) = self.common.dup_eventfds();

        let mut virtqueues = Vec::new();
        let mut queue_evts = Vec::new();
        for (_, queue, queue_evt) in queues {
            virtqueues.push(queue);
            queue_evts.push(queue_evt);
        }

        let mut handler = VsockEpollHandler {
            mem,
            queues: virtqueues,
            queue_evts,
            kill_evt,
            pause_evt,
            interrupt_cb,
            backend: self.backend.clone(),
            access_platform: self.common.access_platform.clone(),
        };

        let paused = self.common.paused.clone();
        let paused_sync = self.common.paused_sync.clone();
        let mut epoll_threads = Vec::new();

        spawn_virtio_thread(
            &self.id,
            &self.seccomp_action,
            Thread::VirtioVsock,
            &mut epoll_threads,
            &self.exit_evt,
            move || handler.run(paused, paused_sync.unwrap()),
        )?;

        self.common.epoll_threads = Some(epoll_threads);

        event!("virtio-device", "activated", "id", &self.id);
        Ok(())
    }

    fn reset(&mut self) -> Option<Arc<dyn VirtioInterrupt>> {
        let result = self.common.reset();
        event!("virtio-device", "reset", "id", &self.id);
        result
    }

    fn shutdown(&mut self) {
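        // Best effort: the socket file may already have been removed.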
        std::fs::remove_file(&self.path).ok();
    }

    fn set_access_platform(&mut self, access_platform: Arc<dyn AccessPlatform>) {
        self.common.set_access_platform(access_platform)
    }
}

impl<B> Pausable for Vsock<B>
where
    B: VsockBackend + Sync + 'static,
{
    fn pause(&mut self) -> result::Result<(), MigratableError> {
        self.common.pause()
    }

    fn resume(&mut self) -> result::Result<(), MigratableError> {
        self.common.resume()
    }
}

impl<B> Snapshottable for Vsock<B>
where
    B: VsockBackend + Sync + 'static,
{
    fn id(&self) -> String {
        self.id.clone()
    }

    fn snapshot(&mut self) -> std::result::Result<Snapshot, MigratableError> {
        Snapshot::new_from_state(&self.state())
    }
}

impl<B> Transportable for Vsock<B> where B: VsockBackend + Sync + 'static {}
impl<B> Migratable for Vsock<B> where B: VsockBackend + Sync + 'static {}

#[cfg(test)]
mod tests {
    use super::super::tests::{NoopVirtioInterrupt, TestContext};
    use super::super::*;
    use super::*;
    use crate::ActivateError;
    use libc::EFD_NONBLOCK;

    #[test]
    fn test_virtio_device() {
        let mut ctx = TestContext::new();
        let avail_features = 1u64 << VIRTIO_F_VERSION_1 | 1u64 << VIRTIO_F_IN_ORDER;
        let device_features = avail_features;
        let driver_features: u64 = avail_features | 1 | (1 << 32);
        let device_pages = [
            (device_features & 0xffff_ffff) as u32,
            (device_features >> 32) as u32,
        ];
        let driver_pages = [
            (driver_features & 0xffff_ffff) as u32,
            (driver_features >> 32) as u32,
        ];
        assert_eq!(ctx.device.device_type(), VirtioDeviceType::Vsock as u32);
        assert_eq!(ctx.device.queue_max_sizes(), QUEUE_SIZES);
        assert_eq!(ctx.device.features() as u32, device_pages[0]);
        assert_eq!((ctx.device.features() >> 32) as u32, device_pages[1]);

        // Ack device features, page 0.
        ctx.device.ack_features(u64::from(driver_pages[0]));
        // Ack device features, page 1.
        ctx.device.ack_features(u64::from(driver_pages[1]) << 32);
        // Check that no side effects are present, and that the acked features are exactly the same
        // as the device features.
        assert_eq!(
            ctx.device.common.acked_features,
            device_features & driver_features
        );

        // Test reading 32-bit chunks.
        let mut data = [0u8; 8];
        ctx.device.read_config(0, &mut data[..4]);
        assert_eq!(
            u64::from(LittleEndian::read_u32(&data)),
            ctx.cid & 0xffff_ffff
        );
        ctx.device.read_config(4, &mut data[4..]);
        assert_eq!(
            u64::from(LittleEndian::read_u32(&data[4..])),
            (ctx.cid >> 32) & 0xffff_ffff
        );

        // Test reading 64-bit.
        let mut data = [0u8; 8];
        ctx.device.read_config(0, &mut data);
        assert_eq!(LittleEndian::read_u64(&data), ctx.cid);

        // Check that out-of-bounds reading doesn't mutate the destination buffer.
        let mut data = [0u8, 1, 2, 3, 4, 5, 6, 7];
        ctx.device.read_config(2, &mut data);
        assert_eq!(data, [0u8, 1, 2, 3, 4, 5, 6, 7]);

        // Just covering lines here, since the vsock device has no writable config.
        // A warning is, however, logged if the guest driver attempts to write any config data.
        ctx.device.write_config(0, &data[..4]);

        let memory = GuestMemoryAtomic::new(ctx.mem.clone());

        // Test a bad activation.
        let bad_activate =
            ctx.device
                .activate(memory.clone(), Arc::new(NoopVirtioInterrupt {}), Vec::new());
        match bad_activate {
            Err(ActivateError::BadActivate) => (),
            other => panic!("{other:?}"),
        }

        // Test a correct activation.
        ctx.device
            .activate(
                memory,
                Arc::new(NoopVirtioInterrupt {}),
                vec![
                    (
                        0,
                        Queue::new(256).unwrap(),
                        EventFd::new(EFD_NONBLOCK).unwrap(),
                    ),
                    (
                        1,
                        Queue::new(256).unwrap(),
                        EventFd::new(EFD_NONBLOCK).unwrap(),
                    ),
                    (
                        2,
                        Queue::new(256).unwrap(),
                        EventFd::new(EFD_NONBLOCK).unwrap(),
                    ),
                ],
            )
            .unwrap();
    }

    #[test]
    fn test_irq() {
        // Test case: successful IRQ signaling.
        {
            let test_ctx = TestContext::new();
            let ctx = test_ctx.create_epoll_handler_context();

            let _queue: Queue = Queue::new(256).unwrap();
            assert!(ctx.handler.signal_used_queue(0).is_ok());
        }
    }

    #[test]
    fn test_txq_event() {
        // Test case:
        // - the driver has something to send (there's data in the TX queue); and
        // - the backend has no pending RX data.
        {
            let test_ctx = TestContext::new();
            let mut ctx = test_ctx.create_epoll_handler_context();

            ctx.handler.backend.write().unwrap().set_pending_rx(false);
            ctx.signal_txq_event();

            // The available TX descriptor should have been used.
            assert_eq!(ctx.guest_txvq.used.idx.get(), 1);
            // The available RX descriptor should be untouched.
            assert_eq!(ctx.guest_rxvq.used.idx.get(), 0);
        }

        // Test case:
        // - the driver has something to send (there's data in the TX queue); and
        // - the backend also has some pending RX data.
        {
            let test_ctx = TestContext::new();
            let mut ctx = test_ctx.create_epoll_handler_context();

            ctx.handler.backend.write().unwrap().set_pending_rx(true);
            ctx.signal_txq_event();

            // Both available RX and TX descriptors should have been used.
            assert_eq!(ctx.guest_txvq.used.idx.get(), 1);
            assert_eq!(ctx.guest_rxvq.used.idx.get(), 1);
        }

        // Test case:
        // - the driver has something to send (there's data in the TX queue); and
        // - the backend errors out and cannot process the TX queue.
        {
            let test_ctx = TestContext::new();
            let mut ctx = test_ctx.create_epoll_handler_context();

            ctx.handler.backend.write().unwrap().set_pending_rx(false);
            ctx.handler
                .backend
                .write()
                .unwrap()
                .set_tx_err(Some(VsockError::NoData));
            ctx.signal_txq_event();

            // Both RX and TX queues should be untouched.
            assert_eq!(ctx.guest_txvq.used.idx.get(), 0);
            assert_eq!(ctx.guest_rxvq.used.idx.get(), 0);
        }

        // Test case:
        // - the driver supplied a malformed TX buffer.
        {
            let test_ctx = TestContext::new();
            let mut ctx = test_ctx.create_epoll_handler_context();

            // Invalidate the packet header descriptor by setting its length to 0.
            ctx.guest_txvq.dtable[0].len.set(0);
            ctx.signal_txq_event();

            // The available descriptor should have been consumed, but no packet should have
            // reached the backend.
            assert_eq!(ctx.guest_txvq.used.idx.get(), 1);
            assert_eq!(ctx.handler.backend.read().unwrap().tx_ok_cnt, 0);
        }

        // Test case: spurious TXQ_EVENT.
        {
            let test_ctx = TestContext::new();
            let mut ctx = test_ctx.create_epoll_handler_context();

            let events = epoll::Events::EPOLLIN;
            let event = epoll::Event::new(events, TX_QUEUE_EVENT as u64);
            let mut epoll_helper =
                EpollHelper::new(&ctx.handler.kill_evt, &ctx.handler.pause_evt).unwrap();

            assert!(
                ctx.handler.handle_event(&mut epoll_helper, &event).is_err(),
                "handle_event() should have failed"
            );
        }
    }

    #[test]
    fn test_rxq_event() {
        // Test case:
        // - there is pending RX data in the backend; and
        // - the driver makes RX buffers available; and
        // - the backend errors out when attempting to receive data.
        {
            let test_ctx = TestContext::new();
            let mut ctx = test_ctx.create_epoll_handler_context();

            ctx.handler.backend.write().unwrap().set_pending_rx(true);
            ctx.handler
                .backend
                .write()
                .unwrap()
                .set_rx_err(Some(VsockError::NoData));
            ctx.signal_rxq_event();

            // The available RX buffer should've been left untouched.
            assert_eq!(ctx.guest_rxvq.used.idx.get(), 0);
        }

        // Test case:
        // - there is pending RX data in the backend; and
        // - the driver makes RX buffers available; and
        // - the backend successfully places its RX data into the queue.
        {
            let test_ctx = TestContext::new();
            let mut ctx = test_ctx.create_epoll_handler_context();

            ctx.handler.backend.write().unwrap().set_pending_rx(true);
            ctx.signal_rxq_event();

            // The available RX buffer should have been used.
            assert_eq!(ctx.guest_rxvq.used.idx.get(), 1);
        }

        // Test case: the driver provided a malformed RX descriptor chain.
        {
            let test_ctx = TestContext::new();
            let mut ctx = test_ctx.create_epoll_handler_context();

            // Invalidate the packet header descriptor by setting its length to 0.
            ctx.guest_rxvq.dtable[0].len.set(0);

            // The chain should have been processed without involving the backend.
            assert!(ctx.handler.process_rx().is_ok());
            assert_eq!(ctx.guest_rxvq.used.idx.get(), 1);
            assert_eq!(ctx.handler.backend.read().unwrap().rx_ok_cnt, 0);
        }

        // Test case: spurious RXQ_EVENT.
        {
            let test_ctx = TestContext::new();
            let mut ctx = test_ctx.create_epoll_handler_context();
            ctx.handler.backend.write().unwrap().set_pending_rx(false);

            let events = epoll::Events::EPOLLIN;
            let event = epoll::Event::new(events, RX_QUEUE_EVENT as u64);
            let mut epoll_helper =
                EpollHelper::new(&ctx.handler.kill_evt, &ctx.handler.pause_evt).unwrap();

            assert_eq!(ctx.guest_rxvq.used.idx.get(), 0);
            assert!(
                ctx.handler.handle_event(&mut epoll_helper, &event).is_err(),
                "handle_event() should have failed"
            );
        }
    }

    #[test]
    fn test_evq_event() {
        // Test case: spurious EVQ_EVENT.
        {
            let test_ctx = TestContext::new();
            let mut ctx = test_ctx.create_epoll_handler_context();
            ctx.handler.backend.write().unwrap().set_pending_rx(false);

            let events = epoll::Events::EPOLLIN;
            let event = epoll::Event::new(events, EVT_QUEUE_EVENT as u64);
            let mut epoll_helper =
                EpollHelper::new(&ctx.handler.kill_evt, &ctx.handler.pause_evt).unwrap();

            assert_eq!(ctx.guest_evvq.used.idx.get(), 0);
            assert!(
                ctx.handler.handle_event(&mut epoll_helper, &event).is_err(),
                "handle_event() should have failed"
            );
        }
    }

    #[test]
    fn test_backend_event() {
        // Test case:
        // - a backend event is received; and
        // - the backend has pending RX data.
        {
            let test_ctx = TestContext::new();
            let mut ctx = test_ctx.create_epoll_handler_context();

            ctx.handler.backend.write().unwrap().set_pending_rx(true);

            let events = epoll::Events::EPOLLIN;
            let event = epoll::Event::new(events, BACKEND_EVENT as u64);
            let mut epoll_helper =
                EpollHelper::new(&ctx.handler.kill_evt, &ctx.handler.pause_evt).unwrap();
            assert!(ctx.handler.handle_event(&mut epoll_helper, &event).is_ok());

            // The backend should've received this event.
            assert_eq!(
                ctx.handler.backend.read().unwrap().evset,
                Some(epoll::Events::EPOLLIN)
            );
            // TX queue processing should've been triggered.
            assert_eq!(ctx.guest_txvq.used.idx.get(), 1);
            // RX queue processing should've been triggered.
            assert_eq!(ctx.guest_rxvq.used.idx.get(), 1);
        }

        // Test case:
        // - a backend event is received; and
        // - the backend doesn't have any pending RX data.
        {
            let test_ctx = TestContext::new();
            let mut ctx = test_ctx.create_epoll_handler_context();

            ctx.handler.backend.write().unwrap().set_pending_rx(false);

            let events = epoll::Events::EPOLLIN;
            let event = epoll::Event::new(events, BACKEND_EVENT as u64);
            let mut epoll_helper =
                EpollHelper::new(&ctx.handler.kill_evt, &ctx.handler.pause_evt).unwrap();
            assert!(ctx.handler.handle_event(&mut epoll_helper, &event).is_ok());

            // The backend should've received this event.
            assert_eq!(
                ctx.handler.backend.read().unwrap().evset,
                Some(epoll::Events::EPOLLIN)
            );
            // TX queue processing should've been triggered.
            assert_eq!(ctx.guest_txvq.used.idx.get(), 1);
            // The RX queue should've been left untouched.
            assert_eq!(ctx.guest_rxvq.used.idx.get(), 0);
        }
    }

    #[test]
    fn test_unknown_event() {
        let test_ctx = TestContext::new();
        let mut ctx = test_ctx.create_epoll_handler_context();

        let events = epoll::Events::EPOLLIN;
        let event = epoll::Event::new(events, 0xff);
        let mut epoll_helper =
            EpollHelper::new(&ctx.handler.kill_evt, &ctx.handler.pause_evt).unwrap();

        assert!(
            ctx.handler.handle_event(&mut epoll_helper, &event).is_err(),
            "handle_event() should have failed"
        );
    }
}