// Copyright 2019 Intel Corporation. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//
// Portions Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//
// Portions Copyright 2017 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the THIRD-PARTY file.

/// This is the `VirtioDevice` implementation for our vsock device. It handles the virtio-level
/// device logic: feature negotiation, device configuration, and device activation.
/// The run-time device logic (i.e. event-driven data handling) is implemented by
/// `VsockEpollHandler`, defined below.
///
/// We aim to conform to the VirtIO v1.1 spec:
/// https://docs.oasis-open.org/virtio/virtio/v1.1/virtio-v1.1.html
///
/// The vsock device has two input parameters: a CID to identify the device, and a `VsockBackend`
/// to use for offloading vsock traffic.
///
/// Upon its activation, the vsock device creates its `VsockEpollHandler`, passes it the
/// event-interested file descriptors, and registers these descriptors with an `EpollHelper`.
/// Going forward, the `VsockEpollHandler` will get notified whenever an event occurs on the
/// just-registered FDs:
/// - an RX queue FD;
/// - a TX queue FD;
/// - an event queue FD; and
/// - a backend FD.
///
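/// The device uses the fixed virtqueue layout mandated by the virtio-vsock spec, and the code
/// below relies on these indices (e.g. `queues[0]` in `process_rx()` and `queues[1]` in
/// `process_tx()`):
///
/// ```text
/// queue 0: rx    (device-to-driver packets)
/// queue 1: tx    (driver-to-device packets)
/// queue 2: event (device events, e.g. transport resets)
/// ```
///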
use super::{VsockBackend, VsockPacket};
use crate::seccomp_filters::Thread;
use crate::Error as DeviceError;
use crate::GuestMemoryMmap;
use crate::VirtioInterrupt;
use crate::{
    thread_helper::spawn_virtio_thread, ActivateResult, EpollHelper, EpollHelperError,
    EpollHelperHandler, Queue, VirtioCommon, VirtioDevice, VirtioDeviceType, VirtioInterruptType,
    EPOLL_HELPER_EVENT_LAST, VIRTIO_F_IN_ORDER, VIRTIO_F_IOMMU_PLATFORM, VIRTIO_F_VERSION_1,
};
use byteorder::{ByteOrder, LittleEndian};
use seccompiler::SeccompAction;
use std::io;
use std::os::unix::io::AsRawFd;
use std::path::PathBuf;
use std::result;
use std::sync::atomic::AtomicBool;
use std::sync::{Arc, Barrier, RwLock};
use versionize::{VersionMap, Versionize, VersionizeResult};
use versionize_derive::Versionize;
use vm_memory::{GuestAddressSpace, GuestMemoryAtomic};
use vm_migration::{
    Migratable, MigratableError, Pausable, Snapshot, Snapshottable, Transportable, VersionMapped,
};
use vmm_sys_util::eventfd::EventFd;

const QUEUE_SIZE: u16 = 256;
const NUM_QUEUES: usize = 3;
const QUEUE_SIZES: &[u16] = &[QUEUE_SIZE; NUM_QUEUES];

// New descriptors are pending on the rx queue.
pub const RX_QUEUE_EVENT: u16 = EPOLL_HELPER_EVENT_LAST + 1;
// New descriptors are pending on the tx queue.
pub const TX_QUEUE_EVENT: u16 = EPOLL_HELPER_EVENT_LAST + 2;
// New descriptors are pending on the event queue.
pub const EVT_QUEUE_EVENT: u16 = EPOLL_HELPER_EVENT_LAST + 3;
// Notification coming from the backend.
pub const BACKEND_EVENT: u16 = EPOLL_HELPER_EVENT_LAST + 4;

/// The `VsockEpollHandler` implements the runtime logic of our vsock device:
/// 1. Respond to TX queue events by wrapping virtio buffers into `VsockPacket`s, then sending those
///    packets to the `VsockBackend`;
/// 2. Forward backend FD event notifications to the `VsockBackend`;
/// 3. Fetch incoming packets from the `VsockBackend` and place them into the virtio RX queue;
/// 4. Whenever we have processed some virtio buffers (either TX or RX), let the driver know by
///    raising our assigned IRQ.
///
/// In a nutshell, the `VsockEpollHandler` logic looks like this:
/// - on TX queue event:
///   - fetch all packets from the TX queue and send them to the backend; then
///   - if the backend has queued up any incoming packets, fetch them into any available RX buffers.
/// - on RX queue event:
///   - fetch any incoming packets, queued up by the backend, into newly available RX buffers.
/// - on backend event:
///   - forward the event to the backend; then
///   - again, attempt to fetch any incoming packets queued by the backend into virtio RX buffers.
///
pub struct VsockEpollHandler<B: VsockBackend> {
    pub mem: GuestMemoryAtomic<GuestMemoryMmap>,
    pub queues: Vec<Queue>,
    pub queue_evts: Vec<EventFd>,
    pub kill_evt: EventFd,
    pub pause_evt: EventFd,
    pub interrupt_cb: Arc<dyn VirtioInterrupt>,
    pub backend: Arc<RwLock<B>>,
}

impl<B> VsockEpollHandler<B>
where
    B: VsockBackend,
{
    /// Signal the guest driver that we've used some virtio buffers that it had previously made
    /// available.
    ///
    fn signal_used_queue(&self, queue: &Queue) -> result::Result<(), DeviceError> {
        debug!("vsock: raising IRQ");

        self.interrupt_cb
            .trigger(&VirtioInterruptType::Queue, Some(queue))
            .map_err(|e| {
                error!("Failed to signal used queue: {:?}", e);
                DeviceError::FailedSignalingUsedQueue(e)
            })
    }

    /// Walk the driver-provided RX queue buffers and attempt to fill them up with any data that we
    /// have pending.
    ///
    fn process_rx(&mut self) -> result::Result<(), DeviceError> {
        debug!("vsock: epoll_handler::process_rx()");

        let mut used_desc_heads = [(0, 0); QUEUE_SIZE as usize];
        let mut used_count = 0;
        let mem = self.mem.memory();
        for avail_desc in self.queues[0].iter(&mem) {
            let used_len = match VsockPacket::from_rx_virtq_head(&avail_desc) {
                Ok(mut pkt) => {
                    if self.backend.write().unwrap().recv_pkt(&mut pkt).is_ok() {
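                        // Report how much of the buffer the device filled in: the
                        // used length covers both the packet header and the payload.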
                        pkt.hdr().len() as u32 + pkt.len()
                    } else {
                        // We are using a consuming iterator over the virtio buffers, so, if we can't
                        // fill in this buffer, we'll need to undo the last iterator step.
                        self.queues[0].go_to_previous_position();
                        break;
                    }
                }
                Err(e) => {
                    warn!("vsock: RX queue error: {:?}", e);
                    0
                }
            };

            used_desc_heads[used_count] = (avail_desc.index, used_len);
            used_count += 1;
        }

        for &(desc_index, len) in &used_desc_heads[..used_count] {
            self.queues[0].add_used(&mem, desc_index, len);
        }

        if used_count > 0 {
            self.signal_used_queue(&self.queues[0])
        } else {
            Ok(())
        }
    }

    /// Walk the driver-provided TX queue buffers, package them up as vsock packets, and send them to
    /// the backend for processing.
    ///
    fn process_tx(&mut self) -> result::Result<(), DeviceError> {
        debug!("vsock: epoll_handler::process_tx()");

        let mut used_desc_heads = [(0, 0); QUEUE_SIZE as usize];
        let mut used_count = 0;
        let mem = self.mem.memory();
        for avail_desc in self.queues[1].iter(&mem) {
            let pkt = match VsockPacket::from_tx_virtq_head(&avail_desc) {
                Ok(pkt) => pkt,
                Err(e) => {
                    error!("vsock: error reading TX packet: {:?}", e);
                    used_desc_heads[used_count] = (avail_desc.index, 0);
                    used_count += 1;
                    continue;
                }
            };

            if self.backend.write().unwrap().send_pkt(&pkt).is_err() {
                self.queues[1].go_to_previous_position();
                break;
            }

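            // TX buffers are read-only for the device, so they are always returned
            // to the driver with a used length of 0.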
            used_desc_heads[used_count] = (avail_desc.index, 0);
            used_count += 1;
        }

        for &(desc_index, len) in &used_desc_heads[..used_count] {
            self.queues[1].add_used(&mem, desc_index, len);
        }

        if used_count > 0 {
            self.signal_used_queue(&self.queues[1])
        } else {
            Ok(())
        }
    }

    fn run(
        &mut self,
        paused: Arc<AtomicBool>,
        paused_sync: Arc<Barrier>,
    ) -> result::Result<(), EpollHelperError> {
        let mut helper = EpollHelper::new(&self.kill_evt, &self.pause_evt)?;
        helper.add_event(self.queue_evts[0].as_raw_fd(), RX_QUEUE_EVENT)?;
        helper.add_event(self.queue_evts[1].as_raw_fd(), TX_QUEUE_EVENT)?;
        helper.add_event(self.queue_evts[2].as_raw_fd(), EVT_QUEUE_EVENT)?;
        helper.add_event(self.backend.read().unwrap().get_polled_fd(), BACKEND_EVENT)?;
        helper.run(paused, paused_sync, self)?;

        Ok(())
    }
}

impl<B> EpollHelperHandler for VsockEpollHandler<B>
where
    B: VsockBackend,
{
    fn handle_event(&mut self, _helper: &mut EpollHelper, event: &epoll::Event) -> bool {
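        // A `true` return value tells the `EpollHelper` loop to treat this as a fatal
        // error and stop; `false` keeps the loop running. The spurious-event tests
        // below rely on this convention.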
        let evset = match epoll::Events::from_bits(event.events) {
            Some(evset) => evset,
            None => {
                let evbits = event.events;
                warn!("epoll: ignoring unknown event set: 0x{:x}", evbits);
                return false;
            }
        };

        let ev_type = event.data as u16;
        match ev_type {
            RX_QUEUE_EVENT => {
                debug!("vsock: RX queue event");
                if let Err(e) = self.queue_evts[0].read() {
                    error!("Failed to get RX queue event: {:?}", e);
                    return true;
                } else if self.backend.read().unwrap().has_pending_rx() {
                    if let Err(e) = self.process_rx() {
                        error!("Failed to process RX queue: {:?}", e);
                        return true;
                    }
                }
            }
            TX_QUEUE_EVENT => {
                debug!("vsock: TX queue event");
                if let Err(e) = self.queue_evts[1].read() {
                    error!("Failed to get TX queue event: {:?}", e);
                    return true;
                } else {
                    if let Err(e) = self.process_tx() {
                        error!("Failed to process TX queue: {:?}", e);
                        return true;
                    }
                    // The backend may have queued up responses to the packets we sent during TX queue
                    // processing. If that happened, we need to fetch those responses and place them
                    // into RX buffers.
                    if self.backend.read().unwrap().has_pending_rx() {
                        if let Err(e) = self.process_rx() {
                            error!("Failed to process RX queue: {:?}", e);
                            return true;
                        }
                    }
                }
            }
            EVT_QUEUE_EVENT => {
                debug!("vsock: EVT queue event");
                if let Err(e) = self.queue_evts[2].read() {
                    error!("Failed to get EVT queue event: {:?}", e);
                    return true;
                }
            }
            BACKEND_EVENT => {
                debug!("vsock: backend event");
                self.backend.write().unwrap().notify(evset);
                // After the backend has been kicked, it might've freed up some resources, so we
                // can attempt to send it more data to process.
                // In particular, if `self.backend.send_pkt()` halted the TX queue processing (by
                // returning an error) at some point in the past, now is the time to try walking the
                // TX queue again.
                if let Err(e) = self.process_tx() {
                    error!("Failed to process TX queue: {:?}", e);
                    return true;
                }
                if self.backend.read().unwrap().has_pending_rx() {
                    if let Err(e) = self.process_rx() {
                        error!("Failed to process RX queue: {:?}", e);
                        return true;
                    }
                }
            }
            _ => {
                error!("Unknown event for virtio-vsock");
                return true;
            }
        }

        false
    }
}

/// Virtio device exposing a virtual socket to the guest.
pub struct Vsock<B: VsockBackend> {
    common: VirtioCommon,
    id: String,
    cid: u64,
    backend: Arc<RwLock<B>>,
    path: PathBuf,
    seccomp_action: SeccompAction,
    exit_evt: EventFd,
}

#[derive(Versionize)]
pub struct VsockState {
    pub avail_features: u64,
    pub acked_features: u64,
}

impl VersionMapped for VsockState {}

impl<B> Vsock<B>
where
    B: VsockBackend,
{
    /// Create a new virtio-vsock device with the given VM CID and vsock
    /// backend.
    pub fn new(
        id: String,
        cid: u64,
        path: PathBuf,
        backend: B,
        iommu: bool,
        seccomp_action: SeccompAction,
        exit_evt: EventFd,
    ) -> io::Result<Vsock<B>> {
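        // VIRTIO_F_VERSION_1 advertises a modern (virtio 1.0+) device, while
        // VIRTIO_F_IN_ORDER indicates that buffers are used by the device in the
        // same order in which they were made available by the driver.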
        let mut avail_features = 1u64 << VIRTIO_F_VERSION_1 | 1u64 << VIRTIO_F_IN_ORDER;

        if iommu {
            avail_features |= 1u64 << VIRTIO_F_IOMMU_PLATFORM;
        }

        Ok(Vsock {
            common: VirtioCommon {
                device_type: VirtioDeviceType::Vsock as u32,
                avail_features,
                paused_sync: Some(Arc::new(Barrier::new(2))),
                queue_sizes: QUEUE_SIZES.to_vec(),
                min_queues: NUM_QUEUES as u16,
                ..Default::default()
            },
            id,
            cid,
            backend: Arc::new(RwLock::new(backend)),
            path,
            seccomp_action,
            exit_evt,
        })
    }

    fn state(&self) -> VsockState {
        VsockState {
            avail_features: self.common.avail_features,
            acked_features: self.common.acked_features,
        }
    }

    fn set_state(&mut self, state: &VsockState) {
        self.common.avail_features = state.avail_features;
        self.common.acked_features = state.acked_features;
    }
}

impl<B> Drop for Vsock<B>
where
    B: VsockBackend,
{
    fn drop(&mut self) {
        if let Some(kill_evt) = self.common.kill_evt.take() {
            // Ignore the result because there is nothing we can do about it.
            let _ = kill_evt.write(1);
        }
    }
}

impl<B> VirtioDevice for Vsock<B>
where
    B: VsockBackend + Sync + 'static,
{
    fn device_type(&self) -> u32 {
        self.common.device_type
    }

    fn queue_max_sizes(&self) -> &[u16] {
        &self.common.queue_sizes
    }

    fn features(&self) -> u64 {
        self.common.avail_features
    }

    fn ack_features(&mut self, value: u64) {
        self.common.ack_features(value)
    }

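    // The virtio-vsock config space is just the 64-bit guest CID
    // (`struct virtio_vsock_config { le64 guest_cid; }` in the spec); the driver
    // may read it as a single 64-bit access or as two 32-bit halves.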
    fn read_config(&self, offset: u64, data: &mut [u8]) {
        match offset {
            0 if data.len() == 8 => LittleEndian::write_u64(data, self.cid),
            0 if data.len() == 4 => LittleEndian::write_u32(data, (self.cid & 0xffff_ffff) as u32),
            4 if data.len() == 4 => {
                LittleEndian::write_u32(data, ((self.cid >> 32) & 0xffff_ffff) as u32)
            }
            _ => warn!(
                "vsock: virtio-vsock received invalid read request of {} bytes at offset {}",
                data.len(),
                offset
            ),
        }
    }

    fn activate(
        &mut self,
        mem: GuestMemoryAtomic<GuestMemoryMmap>,
        interrupt_cb: Arc<dyn VirtioInterrupt>,
        queues: Vec<Queue>,
        queue_evts: Vec<EventFd>,
    ) -> ActivateResult {
        self.common.activate(&queues, &queue_evts, &interrupt_cb)?;
        let (kill_evt, pause_evt) = self.common.dup_eventfds();

        let mut handler = VsockEpollHandler {
            mem,
            queues,
            queue_evts,
            kill_evt,
            pause_evt,
            interrupt_cb,
            backend: self.backend.clone(),
        };

        let paused = self.common.paused.clone();
        let paused_sync = self.common.paused_sync.clone();
        let mut epoll_threads = Vec::new();

        spawn_virtio_thread(
            &self.id,
            &self.seccomp_action,
            Thread::VirtioVsock,
            &mut epoll_threads,
            &self.exit_evt,
            move || {
                if let Err(e) = handler.run(paused, paused_sync.unwrap()) {
                    error!("Error running worker: {:?}", e);
                }
            },
        )?;

        self.common.epoll_threads = Some(epoll_threads);

        event!("virtio-device", "activated", "id", &self.id);
        Ok(())
    }

    fn reset(&mut self) -> Option<Arc<dyn VirtioInterrupt>> {
        let result = self.common.reset();
        event!("virtio-device", "reset", "id", &self.id);
        result
    }

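    // Best-effort removal of the host-side UNIX socket (the `path` handed to
    // `Vsock::new()`); any error is deliberately discarded.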
    fn shutdown(&mut self) {
        std::fs::remove_file(&self.path).ok();
    }
}

impl<B> Pausable for Vsock<B>
where
    B: VsockBackend + Sync + 'static,
{
    fn pause(&mut self) -> result::Result<(), MigratableError> {
        self.common.pause()
    }

    fn resume(&mut self) -> result::Result<(), MigratableError> {
        self.common.resume()
    }
}

impl<B> Snapshottable for Vsock<B>
where
    B: VsockBackend + Sync + 'static,
{
    fn id(&self) -> String {
        self.id.clone()
    }

    fn snapshot(&mut self) -> std::result::Result<Snapshot, MigratableError> {
        Snapshot::new_from_versioned_state(&self.id, &self.state())
    }

    fn restore(&mut self, snapshot: Snapshot) -> std::result::Result<(), MigratableError> {
        self.set_state(&snapshot.to_versioned_state(&self.id)?);
        Ok(())
    }
}

impl<B> Transportable for Vsock<B> where B: VsockBackend + Sync + 'static {}
impl<B> Migratable for Vsock<B> where B: VsockBackend + Sync + 'static {}

#[cfg(test)]
mod tests {
    use super::super::tests::{NoopVirtioInterrupt, TestContext};
    use super::super::*;
    use super::*;
    use crate::vsock::device::{BACKEND_EVENT, EVT_QUEUE_EVENT, RX_QUEUE_EVENT, TX_QUEUE_EVENT};
    use crate::ActivateError;
    use libc::EFD_NONBLOCK;

    #[test]
    fn test_virtio_device() {
        let mut ctx = TestContext::new();
        let avail_features = 1u64 << VIRTIO_F_VERSION_1 | 1u64 << VIRTIO_F_IN_ORDER;
        let device_features = avail_features;
        let driver_features: u64 = avail_features | 1 | (1 << 32);
        let device_pages = [
            (device_features & 0xffff_ffff) as u32,
            (device_features >> 32) as u32,
        ];
        let driver_pages = [
            (driver_features & 0xffff_ffff) as u32,
            (driver_features >> 32) as u32,
        ];
        assert_eq!(ctx.device.device_type(), VirtioDeviceType::Vsock as u32);
        assert_eq!(ctx.device.queue_max_sizes(), QUEUE_SIZES);
        assert_eq!(ctx.device.features() as u32, device_pages[0]);
        assert_eq!((ctx.device.features() >> 32) as u32, device_pages[1]);

        // Ack device features, page 0.
        ctx.device.ack_features(u64::from(driver_pages[0]));
        // Ack device features, page 1.
        ctx.device.ack_features(u64::from(driver_pages[1]) << 32);
        // Check that no side effects are present, and that the acked features are exactly the
        // same as the device features.
        assert_eq!(
            ctx.device.common.acked_features,
            device_features & driver_features
        );

        // Test reading 32-bit chunks.
        let mut data = [0u8; 8];
        ctx.device.read_config(0, &mut data[..4]);
        assert_eq!(
            u64::from(LittleEndian::read_u32(&data)),
            ctx.cid & 0xffff_ffff
        );
        ctx.device.read_config(4, &mut data[4..]);
        assert_eq!(
            u64::from(LittleEndian::read_u32(&data[4..])),
            (ctx.cid >> 32) & 0xffff_ffff
        );

        // Test reading 64-bit.
        let mut data = [0u8; 8];
        ctx.device.read_config(0, &mut data);
        assert_eq!(LittleEndian::read_u64(&data), ctx.cid);

        // Check that out-of-bounds reading doesn't mutate the destination buffer.
        let mut data = [0u8, 1, 2, 3, 4, 5, 6, 7];
        ctx.device.read_config(2, &mut data);
        assert_eq!(data, [0u8, 1, 2, 3, 4, 5, 6, 7]);

        // Just covering lines here, since the vsock device has no writable config.
        // A warning is logged, however, if the guest driver attempts to write any config data.
        ctx.device.write_config(0, &data[..4]);

        // Test a bad activation.
        let bad_activate = ctx.device.activate(
            GuestMemoryAtomic::new(ctx.mem.clone()),
            Arc::new(NoopVirtioInterrupt {}),
            Vec::new(),
            Vec::new(),
        );
        match bad_activate {
            Err(ActivateError::BadActivate) => (),
            other => panic!("{:?}", other),
        }

        // Test a correct activation.
        ctx.device
            .activate(
                GuestMemoryAtomic::new(ctx.mem.clone()),
                Arc::new(NoopVirtioInterrupt {}),
                vec![Queue::new(256), Queue::new(256), Queue::new(256)],
                vec![
                    EventFd::new(EFD_NONBLOCK).unwrap(),
                    EventFd::new(EFD_NONBLOCK).unwrap(),
                    EventFd::new(EFD_NONBLOCK).unwrap(),
                ],
            )
            .unwrap();
    }

    #[test]
    fn test_irq() {
        // Test case: successful IRQ signaling.
        {
            let test_ctx = TestContext::new();
            let ctx = test_ctx.create_epoll_handler_context();

            let queue = Queue::new(256);
            assert!(ctx.handler.signal_used_queue(&queue).is_ok());
        }
    }

    #[test]
    fn test_txq_event() {
        // Test case:
        // - the driver has something to send (there's data in the TX queue); and
        // - the backend has no pending RX data.
        {
            let test_ctx = TestContext::new();
            let mut ctx = test_ctx.create_epoll_handler_context();

            ctx.handler.backend.write().unwrap().set_pending_rx(false);
            ctx.signal_txq_event();

            // The available TX descriptor should have been used.
            assert_eq!(ctx.guest_txvq.used.idx.get(), 1);
            // The available RX descriptor should be untouched.
            assert_eq!(ctx.guest_rxvq.used.idx.get(), 0);
        }

        // Test case:
        // - the driver has something to send (there's data in the TX queue); and
        // - the backend also has some pending RX data.
        {
            let test_ctx = TestContext::new();
            let mut ctx = test_ctx.create_epoll_handler_context();

            ctx.handler.backend.write().unwrap().set_pending_rx(true);
            ctx.signal_txq_event();

            // Both available RX and TX descriptors should have been used.
            assert_eq!(ctx.guest_txvq.used.idx.get(), 1);
            assert_eq!(ctx.guest_rxvq.used.idx.get(), 1);
        }

        // Test case:
        // - the driver has something to send (there's data in the TX queue); and
        // - the backend errors out and cannot process the TX queue.
        {
            let test_ctx = TestContext::new();
            let mut ctx = test_ctx.create_epoll_handler_context();

            ctx.handler.backend.write().unwrap().set_pending_rx(false);
            ctx.handler
                .backend
                .write()
                .unwrap()
                .set_tx_err(Some(VsockError::NoData));
            ctx.signal_txq_event();

            // Both RX and TX queues should be untouched.
            assert_eq!(ctx.guest_txvq.used.idx.get(), 0);
            assert_eq!(ctx.guest_rxvq.used.idx.get(), 0);
        }

        // Test case:
        // - the driver supplied a malformed TX buffer.
        {
            let test_ctx = TestContext::new();
            let mut ctx = test_ctx.create_epoll_handler_context();

            // Invalidate the packet header descriptor, by setting its length to 0.
            ctx.guest_txvq.dtable[0].len.set(0);
            ctx.signal_txq_event();

            // The available descriptor should have been consumed, but no packet should have
            // reached the backend.
            assert_eq!(ctx.guest_txvq.used.idx.get(), 1);
            assert_eq!(ctx.handler.backend.read().unwrap().tx_ok_cnt, 0);
        }

        // Test case: spurious TXQ_EVENT.
        {
            let test_ctx = TestContext::new();
            let mut ctx = test_ctx.create_epoll_handler_context();

            let events = epoll::Events::EPOLLIN;
            let event = epoll::Event::new(events, TX_QUEUE_EVENT as u64);
            let mut epoll_helper =
                EpollHelper::new(&ctx.handler.kill_evt, &ctx.handler.pause_evt).unwrap();

            if !ctx.handler.handle_event(&mut epoll_helper, &event) {
                panic!("handle_event() should have failed");
            }
        }
    }

    #[test]
    fn test_rxq_event() {
        // Test case:
        // - there is pending RX data in the backend; and
        // - the driver makes RX buffers available; and
        // - the backend errors out, when attempting to receive data.
        {
            let test_ctx = TestContext::new();
            let mut ctx = test_ctx.create_epoll_handler_context();

            ctx.handler.backend.write().unwrap().set_pending_rx(true);
            ctx.handler
                .backend
                .write()
                .unwrap()
                .set_rx_err(Some(VsockError::NoData));
            ctx.signal_rxq_event();

            // The available RX buffer should've been left untouched.
            assert_eq!(ctx.guest_rxvq.used.idx.get(), 0);
        }

        // Test case:
        // - there is pending RX data in the backend; and
        // - the driver makes RX buffers available; and
        // - the backend successfully places its RX data into the queue.
        {
            let test_ctx = TestContext::new();
            let mut ctx = test_ctx.create_epoll_handler_context();

            ctx.handler.backend.write().unwrap().set_pending_rx(true);
            ctx.signal_rxq_event();

            // The available RX buffer should have been used.
            assert_eq!(ctx.guest_rxvq.used.idx.get(), 1);
        }

        // Test case: the driver provided a malformed RX descriptor chain.
        {
            let test_ctx = TestContext::new();
            let mut ctx = test_ctx.create_epoll_handler_context();

            // Invalidate the packet header descriptor, by setting its length to 0.
            ctx.guest_rxvq.dtable[0].len.set(0);

            // The chain should've been processed, without employing the backend.
            assert!(ctx.handler.process_rx().is_ok());
            assert_eq!(ctx.guest_rxvq.used.idx.get(), 1);
            assert_eq!(ctx.handler.backend.read().unwrap().rx_ok_cnt, 0);
        }

        // Test case: spurious RXQ_EVENT.
        {
            let test_ctx = TestContext::new();
            let mut ctx = test_ctx.create_epoll_handler_context();
            ctx.handler.backend.write().unwrap().set_pending_rx(false);

            let events = epoll::Events::EPOLLIN;
            let event = epoll::Event::new(events, RX_QUEUE_EVENT as u64);
            let mut epoll_helper =
                EpollHelper::new(&ctx.handler.kill_evt, &ctx.handler.pause_evt).unwrap();

            if !ctx.handler.handle_event(&mut epoll_helper, &event) {
                panic!("handle_event() should have failed");
            }
        }
    }

    #[test]
    fn test_evq_event() {
        // Test case: spurious EVQ_EVENT.
        {
            let test_ctx = TestContext::new();
            let mut ctx = test_ctx.create_epoll_handler_context();
            ctx.handler.backend.write().unwrap().set_pending_rx(false);

            let events = epoll::Events::EPOLLIN;
            let event = epoll::Event::new(events, EVT_QUEUE_EVENT as u64);
            let mut epoll_helper =
                EpollHelper::new(&ctx.handler.kill_evt, &ctx.handler.pause_evt).unwrap();

            if !ctx.handler.handle_event(&mut epoll_helper, &event) {
                panic!("handle_event() should have failed");
            }
        }
    }

    #[test]
    fn test_backend_event() {
        // Test case:
        // - a backend event is received; and
        // - the backend has pending RX data.
        {
            let test_ctx = TestContext::new();
            let mut ctx = test_ctx.create_epoll_handler_context();

            ctx.handler.backend.write().unwrap().set_pending_rx(true);

            let events = epoll::Events::EPOLLIN;
            let event = epoll::Event::new(events, BACKEND_EVENT as u64);
            let mut epoll_helper =
                EpollHelper::new(&ctx.handler.kill_evt, &ctx.handler.pause_evt).unwrap();
            ctx.handler.handle_event(&mut epoll_helper, &event);

            // The backend should've received this event.
            assert_eq!(
                ctx.handler.backend.read().unwrap().evset,
                Some(epoll::Events::EPOLLIN)
            );
            // TX queue processing should've been triggered.
            assert_eq!(ctx.guest_txvq.used.idx.get(), 1);
            // RX queue processing should've been triggered.
            assert_eq!(ctx.guest_rxvq.used.idx.get(), 1);
        }

        // Test case:
        // - a backend event is received; and
        // - the backend doesn't have any pending RX data.
        {
            let test_ctx = TestContext::new();
            let mut ctx = test_ctx.create_epoll_handler_context();

            ctx.handler.backend.write().unwrap().set_pending_rx(false);

            let events = epoll::Events::EPOLLIN;
            let event = epoll::Event::new(events, BACKEND_EVENT as u64);
            let mut epoll_helper =
                EpollHelper::new(&ctx.handler.kill_evt, &ctx.handler.pause_evt).unwrap();
            ctx.handler.handle_event(&mut epoll_helper, &event);

            // The backend should've received this event.
            assert_eq!(
                ctx.handler.backend.read().unwrap().evset,
                Some(epoll::Events::EPOLLIN)
            );
            // TX queue processing should've been triggered.
            assert_eq!(ctx.guest_txvq.used.idx.get(), 1);
            // The RX queue should've been left untouched.
            assert_eq!(ctx.guest_rxvq.used.idx.get(), 0);
        }
    }

    #[test]
    fn test_unknown_event() {
        let test_ctx = TestContext::new();
        let mut ctx = test_ctx.create_epoll_handler_context();

        let events = epoll::Events::EPOLLIN;
        let event = epoll::Event::new(events, 0xff);
        let mut epoll_helper =
            EpollHelper::new(&ctx.handler.kill_evt, &ctx.handler.pause_evt).unwrap();

        if !ctx.handler.handle_event(&mut epoll_helper, &event) {
            panic!("handle_event() should have failed");
        }
    }
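
    // A minimal sketch of a snapshot-state round-trip, assuming `TestContext::new()`
    // builds the device with the same default feature set exercised above. It checks
    // that `state()` captures the feature bits and that `set_state()` restores them
    // unchanged; both are private, but reachable from this child module.
    #[test]
    fn test_vsock_state() {
        let mut ctx = TestContext::new();

        let state = ctx.device.state();
        assert_eq!(state.avail_features, ctx.device.common.avail_features);
        assert_eq!(state.acked_features, ctx.device.common.acked_features);

        // Feeding the captured state back in should be a no-op.
        ctx.device.set_state(&state);
        assert_eq!(ctx.device.common.avail_features, state.avail_features);
        assert_eq!(ctx.device.common.acked_features, state.acked_features);
    }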
}