// xref: /cloud-hypervisor/virtio-devices/src/vsock/device.rs (revision 7d7bfb2034001d4cb15df2ddc56d2d350c8da30f)
// Copyright 2019 Intel Corporation. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//
// Portions Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//
// Portions Copyright 2017 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the THIRD-PARTY file.

//! This is the `VirtioDevice` implementation for our vsock device. It handles the virtio-level
//! device logic: feature negotiation, device configuration, and device activation.
//! The run-time device logic (i.e. event-driven data handling) is implemented by
//! `VsockEpollHandler` below.
//!
//! We aim to conform to the VirtIO v1.1 spec:
//! <https://docs.oasis-open.org/virtio/virtio/v1.1/virtio-v1.1.html>
//!
//! The vsock device takes two input parameters: a CID that identifies the device, and a
//! `VsockBackend` to use for offloading vsock traffic (see the example sketch below).
//!
//! Upon activation, the vsock device creates its `VsockEpollHandler`, hands it the file
//! descriptors it needs to watch, and registers those descriptors with an `EpollHelper`.
//! From then on, the handler is notified whenever an event occurs on one of the registered FDs:
//! - an RX queue FD;
//! - a TX queue FD;
//! - an event queue FD; and
//! - a backend FD.
//!
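//! # Example
//!
//! A minimal construction sketch. The `backend` value is a stand-in for any type implementing
//! `VsockBackend`, and the id, CID and socket path are illustrative, not prescribed by this file:
//!
//! ```ignore
//! let vsock = Vsock::new(
//!     "_vsock0".to_string(),          // device id
//!     3,                              // guest CID (CIDs 0-2 are reserved)
//!     PathBuf::from("/tmp/ch.vsock"), // host-side UNIX socket path
//!     backend,                        // any type implementing `VsockBackend`
//!     false,                          // no virtio-iommu in front of the device
//!     SeccompAction::Trap,
//!     EventFd::new(EFD_NONBLOCK).unwrap(),
//! )?;
//! // The VMM calls `activate()` once the guest driver is ready; activation
//! // spawns the epoll worker thread that services the RX, TX and event queues.
//! ```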
use super::{VsockBackend, VsockPacket};
use crate::seccomp_filters::Thread;
use crate::Error as DeviceError;
use crate::GuestMemoryMmap;
use crate::VirtioInterrupt;
use crate::{
    thread_helper::spawn_virtio_thread, ActivateResult, EpollHelper, EpollHelperError,
    EpollHelperHandler, VirtioCommon, VirtioDevice, VirtioDeviceType, VirtioInterruptType,
    EPOLL_HELPER_EVENT_LAST, VIRTIO_F_IN_ORDER, VIRTIO_F_IOMMU_PLATFORM, VIRTIO_F_VERSION_1,
};
use byteorder::{ByteOrder, LittleEndian};
use seccompiler::SeccompAction;
use std::io;
use std::os::unix::io::AsRawFd;
use std::path::PathBuf;
use std::result;
use std::sync::atomic::AtomicBool;
use std::sync::{Arc, Barrier, RwLock};
use versionize::{VersionMap, Versionize, VersionizeResult};
use versionize_derive::Versionize;
use virtio_queue::Queue;
use vm_memory::GuestMemoryAtomic;
use vm_migration::{
    Migratable, MigratableError, Pausable, Snapshot, Snapshottable, Transportable, VersionMapped,
};
use vm_virtio::AccessPlatform;
use vmm_sys_util::eventfd::EventFd;

const QUEUE_SIZE: u16 = 256;
const NUM_QUEUES: usize = 3;
const QUEUE_SIZES: &[u16] = &[QUEUE_SIZE; NUM_QUEUES];

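// Device-specific epoll event ids. These are offset from `EPOLL_HELPER_EVENT_LAST`
// so they cannot collide with the ids the `EpollHelper` reserves for its own
// internal (e.g. kill/pause) events.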
/// New descriptors are pending on the rx queue.
pub const RX_QUEUE_EVENT: u16 = EPOLL_HELPER_EVENT_LAST + 1;
/// New descriptors are pending on the tx queue.
pub const TX_QUEUE_EVENT: u16 = EPOLL_HELPER_EVENT_LAST + 2;
/// New descriptors are pending on the event queue.
pub const EVT_QUEUE_EVENT: u16 = EPOLL_HELPER_EVENT_LAST + 3;
/// Notification coming from the backend.
pub const BACKEND_EVENT: u16 = EPOLL_HELPER_EVENT_LAST + 4;

/// The `VsockEpollHandler` implements the runtime logic of our vsock device:
/// 1. Respond to TX queue events by wrapping virtio buffers into `VsockPacket`s, then sending those
///    packets to the `VsockBackend`;
/// 2. Forward backend FD event notifications to the `VsockBackend`;
/// 3. Fetch incoming packets from the `VsockBackend` and place them into the virtio RX queue;
/// 4. Whenever we have processed some virtio buffers (either TX or RX), let the driver know by
///    raising our assigned IRQ.
///
/// In a nutshell, the `VsockEpollHandler` logic looks like this:
/// - on TX queue event:
///   - fetch all packets from the TX queue and send them to the backend; then
///   - if the backend has queued up any incoming packets, fetch them into any available RX buffers.
/// - on RX queue event:
///   - fetch any incoming packets, queued up by the backend, into newly available RX buffers.
/// - on backend event:
///   - forward the event to the backend; then
///   - again, attempt to fetch any incoming packets queued by the backend into virtio RX buffers.
///
pub struct VsockEpollHandler<B: VsockBackend> {
    pub mem: GuestMemoryAtomic<GuestMemoryMmap>,
    pub queues: Vec<Queue<GuestMemoryAtomic<GuestMemoryMmap>>>,
    pub queue_evts: Vec<EventFd>,
    pub kill_evt: EventFd,
    pub pause_evt: EventFd,
    pub interrupt_cb: Arc<dyn VirtioInterrupt>,
    pub backend: Arc<RwLock<B>>,
    pub access_platform: Option<Arc<dyn AccessPlatform>>,
}

impl<B> VsockEpollHandler<B>
where
    B: VsockBackend,
{
    /// Signal the guest driver that we've used some virtio buffers that it had previously made
    /// available.
    ///
    fn signal_used_queue(&self, queue_index: u16) -> result::Result<(), DeviceError> {
        debug!("vsock: raising IRQ");

        self.interrupt_cb
            .trigger(VirtioInterruptType::Queue(queue_index))
            .map_err(|e| {
                error!("Failed to signal used queue: {:?}", e);
                DeviceError::FailedSignalingUsedQueue(e)
            })
    }

    /// Walk the driver-provided RX queue buffers and attempt to fill them up with any data that we
    /// have pending.
    ///
    fn process_rx(&mut self) -> result::Result<(), DeviceError> {
        debug!("vsock: epoll_handler::process_rx()");

        let mut used_desc_heads = [(0, 0); QUEUE_SIZE as usize];
        let mut used_count = 0;

        let mut avail_iter = self.queues[0].iter().map_err(DeviceError::QueueIterator)?;
        for mut desc_chain in &mut avail_iter {
            let used_len = match VsockPacket::from_rx_virtq_head(
                &mut desc_chain,
                self.access_platform.as_ref(),
            ) {
                Ok(mut pkt) => {
                    if self.backend.write().unwrap().recv_pkt(&mut pkt).is_ok() {
                        pkt.hdr().len() as u32 + pkt.len()
                    } else {
                        // We are using a consuming iterator over the virtio buffers, so, if we can't
                        // fill in this buffer, we'll need to undo the last iterator step.
                        avail_iter.go_to_previous_position();
                        break;
                    }
                }
                Err(e) => {
                    warn!("vsock: RX queue error: {:?}", e);
                    0
                }
            };

            used_desc_heads[used_count] = (desc_chain.head_index(), used_len);
            used_count += 1;
        }

        for &(desc_index, len) in &used_desc_heads[..used_count] {
            self.queues[0]
                .add_used(desc_index, len)
                .map_err(DeviceError::QueueAddUsed)?;
        }

        if used_count > 0 {
            self.signal_used_queue(0)
        } else {
            Ok(())
        }
    }

    /// Walk the driver-provided TX queue buffers, package them up as vsock packets, and send them to
    /// the backend for processing.
    ///
    fn process_tx(&mut self) -> result::Result<(), DeviceError> {
        debug!("vsock: epoll_handler::process_tx()");

        let mut used_desc_heads = [(0, 0); QUEUE_SIZE as usize];
        let mut used_count = 0;

        let mut avail_iter = self.queues[1].iter().map_err(DeviceError::QueueIterator)?;
        for mut desc_chain in &mut avail_iter {
            let pkt = match VsockPacket::from_tx_virtq_head(
                &mut desc_chain,
                self.access_platform.as_ref(),
            ) {
                Ok(pkt) => pkt,
                Err(e) => {
                    error!("vsock: error reading TX packet: {:?}", e);
                    used_desc_heads[used_count] = (desc_chain.head_index(), 0);
                    used_count += 1;
                    continue;
                }
            };

            if self.backend.write().unwrap().send_pkt(&pkt).is_err() {
                avail_iter.go_to_previous_position();
                break;
            }

            used_desc_heads[used_count] = (desc_chain.head_index(), 0);
            used_count += 1;
        }

        for &(desc_index, len) in &used_desc_heads[..used_count] {
            self.queues[1]
                .add_used(desc_index, len)
                .map_err(DeviceError::QueueAddUsed)?;
        }

        if used_count > 0 {
            self.signal_used_queue(1)
        } else {
            Ok(())
        }
    }

    fn run(
        &mut self,
        paused: Arc<AtomicBool>,
        paused_sync: Arc<Barrier>,
    ) -> result::Result<(), EpollHelperError> {
        let mut helper = EpollHelper::new(&self.kill_evt, &self.pause_evt)?;
        helper.add_event(self.queue_evts[0].as_raw_fd(), RX_QUEUE_EVENT)?;
        helper.add_event(self.queue_evts[1].as_raw_fd(), TX_QUEUE_EVENT)?;
        helper.add_event(self.queue_evts[2].as_raw_fd(), EVT_QUEUE_EVENT)?;
        helper.add_event(self.backend.read().unwrap().get_polled_fd(), BACKEND_EVENT)?;
        helper.run(paused, paused_sync, self)?;

        Ok(())
    }
}

impl<B> EpollHelperHandler for VsockEpollHandler<B>
where
    B: VsockBackend,
{
    fn handle_event(&mut self, _helper: &mut EpollHelper, event: &epoll::Event) -> bool {
        let evset = match epoll::Events::from_bits(event.events) {
            Some(evset) => evset,
            None => {
                let evbits = event.events;
                warn!("epoll: ignoring unknown event set: 0x{:x}", evbits);
                return false;
            }
        };

        let ev_type = event.data as u16;
        match ev_type {
            RX_QUEUE_EVENT => {
                debug!("vsock: RX queue event");
                if let Err(e) = self.queue_evts[0].read() {
                    error!("Failed to get RX queue event: {:?}", e);
                    return true;
                } else if self.backend.read().unwrap().has_pending_rx() {
                    if let Err(e) = self.process_rx() {
                        error!("Failed to process RX queue: {:?}", e);
                        return true;
                    }
                }
            }
            TX_QUEUE_EVENT => {
                debug!("vsock: TX queue event");
                if let Err(e) = self.queue_evts[1].read() {
                    error!("Failed to get TX queue event: {:?}", e);
                    return true;
                } else {
                    if let Err(e) = self.process_tx() {
                        error!("Failed to process TX queue: {:?}", e);
                        return true;
                    }
                    // The backend may have queued up responses to the packets we sent during TX queue
                    // processing. If that happened, we need to fetch those responses and place them
                    // into RX buffers.
                    if self.backend.read().unwrap().has_pending_rx() {
                        if let Err(e) = self.process_rx() {
                            error!("Failed to process RX queue: {:?}", e);
                            return true;
                        }
                    }
                }
            }
            EVT_QUEUE_EVENT => {
                debug!("vsock: EVT queue event");
                if let Err(e) = self.queue_evts[2].read() {
                    error!("Failed to get EVT queue event: {:?}", e);
                    return true;
                }
            }
            BACKEND_EVENT => {
                debug!("vsock: backend event");
                self.backend.write().unwrap().notify(evset);
                // After the backend has been kicked, it might've freed up some resources, so we
                // can attempt to send it more data to process.
                // In particular, if `self.backend.send_pkt()` halted the TX queue processing (by
                // returning an error) at some point in the past, now is the time to try walking the
                // TX queue again.
                if let Err(e) = self.process_tx() {
                    error!("Failed to process TX queue: {:?}", e);
                    return true;
                }
                if self.backend.read().unwrap().has_pending_rx() {
                    if let Err(e) = self.process_rx() {
                        error!("Failed to process RX queue: {:?}", e);
                        return true;
                    }
                }
            }
            _ => {
                error!("Unknown event for virtio-vsock");
                return true;
            }
        }

        false
    }
}

/// Virtio device exposing virtual socket to the guest.
pub struct Vsock<B: VsockBackend> {
    common: VirtioCommon,
    id: String,
    cid: u64,
    backend: Arc<RwLock<B>>,
    path: PathBuf,
    seccomp_action: SeccompAction,
    exit_evt: EventFd,
}

#[derive(Versionize)]
pub struct VsockState {
    pub avail_features: u64,
    pub acked_features: u64,
}

impl VersionMapped for VsockState {}

impl<B> Vsock<B>
where
    B: VsockBackend,
{
    /// Create a new virtio-vsock device with the given VM CID and vsock
    /// backend.
    pub fn new(
        id: String,
        cid: u64,
        path: PathBuf,
        backend: B,
        iommu: bool,
        seccomp_action: SeccompAction,
        exit_evt: EventFd,
    ) -> io::Result<Vsock<B>> {
        let mut avail_features = 1u64 << VIRTIO_F_VERSION_1 | 1u64 << VIRTIO_F_IN_ORDER;

        if iommu {
            avail_features |= 1u64 << VIRTIO_F_IOMMU_PLATFORM;
        }

        Ok(Vsock {
            common: VirtioCommon {
                device_type: VirtioDeviceType::Vsock as u32,
                avail_features,
                paused_sync: Some(Arc::new(Barrier::new(2))),
                queue_sizes: QUEUE_SIZES.to_vec(),
                min_queues: NUM_QUEUES as u16,
                ..Default::default()
            },
            id,
            cid,
            backend: Arc::new(RwLock::new(backend)),
            path,
            seccomp_action,
            exit_evt,
        })
    }

    fn state(&self) -> VsockState {
        VsockState {
            avail_features: self.common.avail_features,
            acked_features: self.common.acked_features,
        }
    }

    fn set_state(&mut self, state: &VsockState) {
        self.common.avail_features = state.avail_features;
        self.common.acked_features = state.acked_features;
    }
}

impl<B> Drop for Vsock<B>
where
    B: VsockBackend,
{
    fn drop(&mut self) {
        if let Some(kill_evt) = self.common.kill_evt.take() {
            // Ignore the result because there is nothing we can do about it.
            let _ = kill_evt.write(1);
        }
    }
}

impl<B> VirtioDevice for Vsock<B>
where
    B: VsockBackend + Sync + 'static,
{
    fn device_type(&self) -> u32 {
        self.common.device_type
    }

    fn queue_max_sizes(&self) -> &[u16] {
        &self.common.queue_sizes
    }

    fn features(&self) -> u64 {
        self.common.avail_features
    }

    fn ack_features(&mut self, value: u64) {
        self.common.ack_features(value)
    }

    fn read_config(&self, offset: u64, data: &mut [u8]) {
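        // The virtio-vsock config space is a single little-endian 64-bit
        // `guest_cid` field per the VirtIO spec. The driver may read it as one
        // 64-bit access or as two 32-bit halves, hence the three arms below.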
        match offset {
            0 if data.len() == 8 => LittleEndian::write_u64(data, self.cid),
            0 if data.len() == 4 => LittleEndian::write_u32(data, (self.cid & 0xffff_ffff) as u32),
            4 if data.len() == 4 => {
                LittleEndian::write_u32(data, ((self.cid >> 32) & 0xffff_ffff) as u32)
            }
            _ => warn!(
                "vsock: virtio-vsock received invalid read request of {} bytes at offset {}",
                data.len(),
                offset
            ),
        }
    }

    fn activate(
        &mut self,
        mem: GuestMemoryAtomic<GuestMemoryMmap>,
        interrupt_cb: Arc<dyn VirtioInterrupt>,
        queues: Vec<Queue<GuestMemoryAtomic<GuestMemoryMmap>>>,
        queue_evts: Vec<EventFd>,
    ) -> ActivateResult {
        self.common.activate(&queues, &queue_evts, &interrupt_cb)?;
        let (kill_evt, pause_evt) = self.common.dup_eventfds();

        let mut handler = VsockEpollHandler {
            mem,
            queues,
            queue_evts,
            kill_evt,
            pause_evt,
            interrupt_cb,
            backend: self.backend.clone(),
            access_platform: self.common.access_platform.clone(),
        };

        let paused = self.common.paused.clone();
        let paused_sync = self.common.paused_sync.clone();
        let mut epoll_threads = Vec::new();

        spawn_virtio_thread(
            &self.id,
            &self.seccomp_action,
            Thread::VirtioVsock,
            &mut epoll_threads,
            &self.exit_evt,
            move || {
                if let Err(e) = handler.run(paused, paused_sync.unwrap()) {
                    error!("Error running worker: {:?}", e);
                }
            },
        )?;

        self.common.epoll_threads = Some(epoll_threads);

        event!("virtio-device", "activated", "id", &self.id);
        Ok(())
    }

    fn reset(&mut self) -> Option<Arc<dyn VirtioInterrupt>> {
        let result = self.common.reset();
        event!("virtio-device", "reset", "id", &self.id);
        result
    }

    fn shutdown(&mut self) {
        std::fs::remove_file(&self.path).ok();
    }

    fn set_access_platform(&mut self, access_platform: Arc<dyn AccessPlatform>) {
        self.common.set_access_platform(access_platform)
    }
}

impl<B> Pausable for Vsock<B>
where
    B: VsockBackend + Sync + 'static,
{
    fn pause(&mut self) -> result::Result<(), MigratableError> {
        self.common.pause()
    }

    fn resume(&mut self) -> result::Result<(), MigratableError> {
        self.common.resume()
    }
}

impl<B> Snapshottable for Vsock<B>
where
    B: VsockBackend + Sync + 'static,
{
    fn id(&self) -> String {
        self.id.clone()
    }

    fn snapshot(&mut self) -> std::result::Result<Snapshot, MigratableError> {
        Snapshot::new_from_versioned_state(&self.id, &self.state())
    }

    fn restore(&mut self, snapshot: Snapshot) -> std::result::Result<(), MigratableError> {
        self.set_state(&snapshot.to_versioned_state(&self.id)?);
        Ok(())
    }
}

impl<B> Transportable for Vsock<B> where B: VsockBackend + Sync + 'static {}
impl<B> Migratable for Vsock<B> where B: VsockBackend + Sync + 'static {}

#[cfg(test)]
mod tests {
    use super::super::tests::{NoopVirtioInterrupt, TestContext};
    use super::super::*;
    use super::*;
    use crate::vsock::device::{BACKEND_EVENT, EVT_QUEUE_EVENT, RX_QUEUE_EVENT, TX_QUEUE_EVENT};
    use crate::ActivateError;
    use libc::EFD_NONBLOCK;

    #[test]
    fn test_virtio_device() {
        let mut ctx = TestContext::new();
        let avail_features = 1u64 << VIRTIO_F_VERSION_1 | 1u64 << VIRTIO_F_IN_ORDER;
        let device_features = avail_features;
        let driver_features: u64 = avail_features | 1 | (1 << 32);
        let device_pages = [
            (device_features & 0xffff_ffff) as u32,
            (device_features >> 32) as u32,
        ];
        let driver_pages = [
            (driver_features & 0xffff_ffff) as u32,
            (driver_features >> 32) as u32,
        ];
        assert_eq!(ctx.device.device_type(), VirtioDeviceType::Vsock as u32);
        assert_eq!(ctx.device.queue_max_sizes(), QUEUE_SIZES);
        assert_eq!(ctx.device.features() as u32, device_pages[0]);
        assert_eq!((ctx.device.features() >> 32) as u32, device_pages[1]);

        // Ack device features, page 0.
        ctx.device.ack_features(u64::from(driver_pages[0]));
        // Ack device features, page 1.
        ctx.device.ack_features(u64::from(driver_pages[1]) << 32);
        // Check that no side effects are present, and that the acked features are exactly the
        // same as the device features.
        assert_eq!(
            ctx.device.common.acked_features,
            device_features & driver_features
        );

        // Test reading 32-bit chunks.
        let mut data = [0u8; 8];
        ctx.device.read_config(0, &mut data[..4]);
        assert_eq!(
            u64::from(LittleEndian::read_u32(&data)),
            ctx.cid & 0xffff_ffff
        );
        ctx.device.read_config(4, &mut data[4..]);
        assert_eq!(
            u64::from(LittleEndian::read_u32(&data[4..])),
            (ctx.cid >> 32) & 0xffff_ffff
        );

        // Test reading 64-bit.
        let mut data = [0u8; 8];
        ctx.device.read_config(0, &mut data);
        assert_eq!(LittleEndian::read_u64(&data), ctx.cid);

        // Check that out-of-bounds reading doesn't mutate the destination buffer.
        let mut data = [0u8, 1, 2, 3, 4, 5, 6, 7];
        ctx.device.read_config(2, &mut data);
        assert_eq!(data, [0u8, 1, 2, 3, 4, 5, 6, 7]);

        // Just covering lines here, since the vsock device has no writable config.
        // A warning is logged, however, if the guest driver attempts to write any config data.
        ctx.device.write_config(0, &data[..4]);

        // Test a bad activation.
        let bad_activate = ctx.device.activate(
            GuestMemoryAtomic::new(ctx.mem.clone()),
            Arc::new(NoopVirtioInterrupt {}),
            Vec::new(),
            Vec::new(),
        );
        match bad_activate {
            Err(ActivateError::BadActivate) => (),
            other => panic!("{:?}", other),
        }

        let memory = GuestMemoryAtomic::new(ctx.mem.clone());

        // Test a correct activation.
        ctx.device
            .activate(
                memory.clone(),
                Arc::new(NoopVirtioInterrupt {}),
                vec![
                    Queue::new(memory.clone(), 256),
                    Queue::new(memory.clone(), 256),
                    Queue::new(memory, 256),
                ],
                vec![
                    EventFd::new(EFD_NONBLOCK).unwrap(),
                    EventFd::new(EFD_NONBLOCK).unwrap(),
                    EventFd::new(EFD_NONBLOCK).unwrap(),
                ],
            )
            .unwrap();
    }

    #[test]
    fn test_irq() {
        // Test case: successful IRQ signaling.
        {
            let test_ctx = TestContext::new();
            let ctx = test_ctx.create_epoll_handler_context();
            let memory = GuestMemoryAtomic::new(test_ctx.mem.clone());

            let _queue: Queue<GuestMemoryAtomic<GuestMemoryMmap>> = Queue::new(memory, 256);
            assert!(ctx.handler.signal_used_queue(0).is_ok());
        }
    }

    #[test]
    fn test_txq_event() {
        // Test case:
        // - the driver has something to send (there's data in the TX queue); and
        // - the backend has no pending RX data.
        {
            let test_ctx = TestContext::new();
            let mut ctx = test_ctx.create_epoll_handler_context();

            ctx.handler.backend.write().unwrap().set_pending_rx(false);
            ctx.signal_txq_event();

            // The available TX descriptor should have been used.
            assert_eq!(ctx.guest_txvq.used.idx.get(), 1);
            // The available RX descriptor should be untouched.
            assert_eq!(ctx.guest_rxvq.used.idx.get(), 0);
        }

        // Test case:
        // - the driver has something to send (there's data in the TX queue); and
        // - the backend also has some pending RX data.
        {
            let test_ctx = TestContext::new();
            let mut ctx = test_ctx.create_epoll_handler_context();

            ctx.handler.backend.write().unwrap().set_pending_rx(true);
            ctx.signal_txq_event();

            // Both available RX and TX descriptors should have been used.
            assert_eq!(ctx.guest_txvq.used.idx.get(), 1);
            assert_eq!(ctx.guest_rxvq.used.idx.get(), 1);
        }

        // Test case:
        // - the driver has something to send (there's data in the TX queue); and
        // - the backend errors out and cannot process the TX queue.
        {
            let test_ctx = TestContext::new();
            let mut ctx = test_ctx.create_epoll_handler_context();

            ctx.handler.backend.write().unwrap().set_pending_rx(false);
            ctx.handler
                .backend
                .write()
                .unwrap()
                .set_tx_err(Some(VsockError::NoData));
            ctx.signal_txq_event();

            // Both RX and TX queues should be untouched.
            assert_eq!(ctx.guest_txvq.used.idx.get(), 0);
            assert_eq!(ctx.guest_rxvq.used.idx.get(), 0);
        }

        // Test case:
        // - the driver supplied a malformed TX buffer.
        {
            let test_ctx = TestContext::new();
            let mut ctx = test_ctx.create_epoll_handler_context();

            // Invalidate the packet header descriptor by setting its length to 0.
            ctx.guest_txvq.dtable[0].len.set(0);
            ctx.signal_txq_event();

            // The available descriptor should have been consumed, but no packet should have
            // reached the backend.
            assert_eq!(ctx.guest_txvq.used.idx.get(), 1);
            assert_eq!(ctx.handler.backend.read().unwrap().tx_ok_cnt, 0);
        }

        // Test case: spurious TXQ_EVENT.
        {
            let test_ctx = TestContext::new();
            let mut ctx = test_ctx.create_epoll_handler_context();

            let events = epoll::Events::EPOLLIN;
            let event = epoll::Event::new(events, TX_QUEUE_EVENT as u64);
            let mut epoll_helper =
                EpollHelper::new(&ctx.handler.kill_evt, &ctx.handler.pause_evt).unwrap();

            assert!(
                ctx.handler.handle_event(&mut epoll_helper, &event),
                "handle_event() should have failed"
            );
        }
    }

    #[test]
    fn test_rxq_event() {
        // Test case:
        // - there is pending RX data in the backend; and
        // - the driver makes RX buffers available; and
        // - the backend errors out when attempting to receive data.
        {
            let test_ctx = TestContext::new();
            let mut ctx = test_ctx.create_epoll_handler_context();

            ctx.handler.backend.write().unwrap().set_pending_rx(true);
            ctx.handler
                .backend
                .write()
                .unwrap()
                .set_rx_err(Some(VsockError::NoData));
            ctx.signal_rxq_event();

            // The available RX buffer should've been left untouched.
            assert_eq!(ctx.guest_rxvq.used.idx.get(), 0);
        }

        // Test case:
        // - there is pending RX data in the backend; and
        // - the driver makes RX buffers available; and
        // - the backend successfully places its RX data into the queue.
        {
            let test_ctx = TestContext::new();
            let mut ctx = test_ctx.create_epoll_handler_context();

            ctx.handler.backend.write().unwrap().set_pending_rx(true);
            ctx.signal_rxq_event();

            // The available RX buffer should have been used.
            assert_eq!(ctx.guest_rxvq.used.idx.get(), 1);
        }

        // Test case: the driver provided a malformed RX descriptor chain.
        {
            let test_ctx = TestContext::new();
            let mut ctx = test_ctx.create_epoll_handler_context();

            // Invalidate the packet header descriptor by setting its length to 0.
            ctx.guest_rxvq.dtable[0].len.set(0);

            // The chain should've been processed without involving the backend.
            assert!(ctx.handler.process_rx().is_ok());
            assert_eq!(ctx.guest_rxvq.used.idx.get(), 1);
            assert_eq!(ctx.handler.backend.read().unwrap().rx_ok_cnt, 0);
        }

        // Test case: spurious RXQ_EVENT.
        {
            let test_ctx = TestContext::new();
            let mut ctx = test_ctx.create_epoll_handler_context();
            ctx.handler.backend.write().unwrap().set_pending_rx(false);

            let events = epoll::Events::EPOLLIN;
            let event = epoll::Event::new(events, RX_QUEUE_EVENT as u64);
            let mut epoll_helper =
                EpollHelper::new(&ctx.handler.kill_evt, &ctx.handler.pause_evt).unwrap();

            assert!(
                ctx.handler.handle_event(&mut epoll_helper, &event),
                "handle_event() should have failed"
            );
        }
    }

    #[test]
    fn test_evq_event() {
        // Test case: spurious EVQ_EVENT.
        {
            let test_ctx = TestContext::new();
            let mut ctx = test_ctx.create_epoll_handler_context();
            ctx.handler.backend.write().unwrap().set_pending_rx(false);

            let events = epoll::Events::EPOLLIN;
            let event = epoll::Event::new(events, EVT_QUEUE_EVENT as u64);
            let mut epoll_helper =
                EpollHelper::new(&ctx.handler.kill_evt, &ctx.handler.pause_evt).unwrap();

            assert!(
                ctx.handler.handle_event(&mut epoll_helper, &event),
                "handle_event() should have failed"
            );
        }
    }

    #[test]
    fn test_backend_event() {
        // Test case:
        // - a backend event is received; and
        // - the backend has pending RX data.
        {
            let test_ctx = TestContext::new();
            let mut ctx = test_ctx.create_epoll_handler_context();

            ctx.handler.backend.write().unwrap().set_pending_rx(true);

            let events = epoll::Events::EPOLLIN;
            let event = epoll::Event::new(events, BACKEND_EVENT as u64);
            let mut epoll_helper =
                EpollHelper::new(&ctx.handler.kill_evt, &ctx.handler.pause_evt).unwrap();
            ctx.handler.handle_event(&mut epoll_helper, &event);

            // The backend should've received this event.
            assert_eq!(
                ctx.handler.backend.read().unwrap().evset,
                Some(epoll::Events::EPOLLIN)
            );
            // TX queue processing should've been triggered.
            assert_eq!(ctx.guest_txvq.used.idx.get(), 1);
            // RX queue processing should've been triggered.
            assert_eq!(ctx.guest_rxvq.used.idx.get(), 1);
        }

        // Test case:
        // - a backend event is received; and
        // - the backend doesn't have any pending RX data.
        {
            let test_ctx = TestContext::new();
            let mut ctx = test_ctx.create_epoll_handler_context();

            ctx.handler.backend.write().unwrap().set_pending_rx(false);

            let events = epoll::Events::EPOLLIN;
            let event = epoll::Event::new(events, BACKEND_EVENT as u64);
            let mut epoll_helper =
                EpollHelper::new(&ctx.handler.kill_evt, &ctx.handler.pause_evt).unwrap();
            ctx.handler.handle_event(&mut epoll_helper, &event);

            // The backend should've received this event.
            assert_eq!(
                ctx.handler.backend.read().unwrap().evset,
                Some(epoll::Events::EPOLLIN)
            );
            // TX queue processing should've been triggered.
            assert_eq!(ctx.guest_txvq.used.idx.get(), 1);
            // The RX queue should've been left untouched.
            assert_eq!(ctx.guest_rxvq.used.idx.get(), 0);
        }
    }
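
    // Illustrative sketch of the recovery path described in the BACKEND_EVENT arm
    // of handle_event(): a backend error first halts TX queue processing, and a
    // later backend event retries the stalled TX queue. Assumes `set_tx_err(None)`
    // clears the previously injected error.
    #[test]
    fn test_backend_event_retries_stalled_tx() {
        let test_ctx = TestContext::new();
        let mut ctx = test_ctx.create_epoll_handler_context();

        // Halt TX processing by making the backend error out on send_pkt().
        ctx.handler.backend.write().unwrap().set_pending_rx(false);
        ctx.handler
            .backend
            .write()
            .unwrap()
            .set_tx_err(Some(VsockError::NoData));
        ctx.signal_txq_event();

        // The TX descriptor is still pending: the avail iterator was rewound.
        assert_eq!(ctx.guest_txvq.used.idx.get(), 0);

        // Clear the error, then deliver a backend event. handle_event() should
        // walk the TX queue again and consume the stalled descriptor.
        ctx.handler.backend.write().unwrap().set_tx_err(None);
        let event = epoll::Event::new(epoll::Events::EPOLLIN, BACKEND_EVENT as u64);
        let mut epoll_helper =
            EpollHelper::new(&ctx.handler.kill_evt, &ctx.handler.pause_evt).unwrap();
        ctx.handler.handle_event(&mut epoll_helper, &event);
        assert_eq!(ctx.guest_txvq.used.idx.get(), 1);
    }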

    #[test]
    fn test_unknown_event() {
        let test_ctx = TestContext::new();
        let mut ctx = test_ctx.create_epoll_handler_context();

        let events = epoll::Events::EPOLLIN;
        let event = epoll::Event::new(events, 0xff);
        let mut epoll_helper =
            EpollHelper::new(&ctx.handler.kill_evt, &ctx.handler.pause_evt).unwrap();

        assert!(
            ctx.handler.handle_event(&mut epoll_helper, &event),
            "handle_event() should have failed"
        );
    }
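
    // Illustrative sketch: check that the state produced by state() survives a
    // set_state() round trip, since these two feature words are all the vsock
    // device persists across snapshot/restore.
    #[test]
    fn test_state_roundtrip() {
        let mut ctx = TestContext::new();

        // Ack a feature so the acked set is non-trivial.
        ctx.device.ack_features(1u64 << VIRTIO_F_VERSION_1);
        let state = ctx.device.state();

        // Wipe the acked features, then restore them from the saved state.
        ctx.device.common.acked_features = 0;
        ctx.device.set_state(&state);

        assert_eq!(ctx.device.common.avail_features, state.avail_features);
        assert_eq!(ctx.device.common.acked_features, state.acked_features);
    }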
}