xref: /cloud-hypervisor/virtio-devices/src/vsock/mod.rs (revision b686a5bb24f949e3b201308d69b01e85c14f1ad6)
// Copyright 2019 Intel Corporation. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//
// Portions Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//
// Portions Copyright 2017 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the THIRD-PARTY file.

mod csm;
mod device;
mod packet;
mod unix;

use std::os::unix::io::RawFd;

use packet::VsockPacket;

pub use self::device::Vsock;
pub use self::unix::{VsockUnixBackend, VsockUnixError};

mod defs {

    /// Max vsock packet data/buffer size.
    pub const MAX_PKT_BUF_SIZE: usize = 64 * 1024;

    pub mod uapi {

        /// Vsock packet operation IDs.
        /// Defined in `/include/uapi/linux/virtio_vsock.h`.
        ///
        /// Connection request.
        pub const VSOCK_OP_REQUEST: u16 = 1;
        /// Connection response.
        pub const VSOCK_OP_RESPONSE: u16 = 2;
        /// Connection reset.
        pub const VSOCK_OP_RST: u16 = 3;
        /// Connection clean shutdown.
        pub const VSOCK_OP_SHUTDOWN: u16 = 4;
        /// Connection data (read/write).
        pub const VSOCK_OP_RW: u16 = 5;
        /// Flow control credit update.
        pub const VSOCK_OP_CREDIT_UPDATE: u16 = 6;
        /// Flow control credit update request.
        pub const VSOCK_OP_CREDIT_REQUEST: u16 = 7;

        /// Vsock packet flags.
        /// Defined in `/include/uapi/linux/virtio_vsock.h`.
        ///
        /// Valid with a VSOCK_OP_SHUTDOWN packet: the packet sender will receive no more data.
        pub const VSOCK_FLAGS_SHUTDOWN_RCV: u32 = 1;
        /// Valid with a VSOCK_OP_SHUTDOWN packet: the packet sender will send no more data.
        pub const VSOCK_FLAGS_SHUTDOWN_SEND: u32 = 2;
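
        // Illustrative addition, not in the original source: per the virtio-vsock
        // spec, a clean full shutdown is a VSOCK_OP_SHUTDOWN packet carrying both
        // flags. `VSOCK_FLAGS_SHUTDOWN_ALL` is a hypothetical name for that mask.
        #[allow(dead_code)]
        pub const VSOCK_FLAGS_SHUTDOWN_ALL: u32 =
            VSOCK_FLAGS_SHUTDOWN_RCV | VSOCK_FLAGS_SHUTDOWN_SEND;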

        /// Vsock packet type.
        /// Defined in `/include/uapi/linux/virtio_vsock.h`.
        ///
        /// Stream / connection-oriented packet (the only currently valid type).
        pub const VSOCK_TYPE_STREAM: u16 = 1;

        /// The well-known CID of the host (`VMADDR_CID_HOST` in `/include/uapi/linux/vm_sockets.h`).
        pub const VSOCK_HOST_CID: u64 = 2;
    }
}

#[derive(Debug)]
pub enum VsockError {
    /// The vsock data/buffer virtio descriptor length is smaller than expected.
    BufDescTooSmall,
    /// The vsock data/buffer virtio descriptor is expected, but missing.
    BufDescMissing,
    /// Chained GuestMemory error.
    GuestMemory,
    /// Bounds check failed on guest memory pointer.
    GuestMemoryBounds,
    /// The vsock header descriptor length is too small.
    HdrDescTooSmall(u32),
    /// The vsock header descriptor is expected, but missing.
    HdrDescMissing,
    /// The vsock header `len` field holds an invalid value.
    InvalidPktLen(u32),
    /// A data fetch was attempted when no data was available.
    NoData,
    /// A data buffer was expected for the provided packet, but it is missing.
    PktBufMissing,
    /// Encountered an unexpected write-only virtio descriptor.
    UnreadableDescriptor,
    /// Encountered an unexpected read-only virtio descriptor.
    UnwritableDescriptor,
}
type Result<T> = std::result::Result<T, VsockError>;

#[derive(Debug)]
pub enum VsockEpollHandlerError {
    /// The vsock data/buffer virtio descriptor length is smaller than expected.
    BufDescTooSmall,
    /// The vsock data/buffer virtio descriptor is expected, but missing.
    BufDescMissing,
    /// Chained GuestMemory error.
    GuestMemory,
    /// Bounds check failed on guest memory pointer.
    GuestMemoryBounds,
    /// The vsock header descriptor length is too small.
    HdrDescTooSmall(u32),
    /// The vsock header `len` field holds an invalid value.
    InvalidPktLen(u32),
    /// A data fetch was attempted when no data was available.
    NoData,
    /// A data buffer was expected for the provided packet, but it is missing.
    PktBufMissing,
    /// Encountered an unexpected write-only virtio descriptor.
    UnreadableDescriptor,
    /// Encountered an unexpected read-only virtio descriptor.
    UnwritableDescriptor,
}

/// A passive, event-driven object that needs to be notified whenever an epoll-able event occurs.
///
/// An event-polling control loop will use `get_polled_fd()` and `get_polled_evset()` to query
/// the listener for the file descriptor and the set of events it's interested in. When such an
/// event occurs, the control loop will route the event to the listener via `notify()`.
///
pub trait VsockEpollListener {
    /// Get the file descriptor the listener needs polled.
    fn get_polled_fd(&self) -> RawFd;

    /// Get the set of events for which the listener wants to be notified.
    fn get_polled_evset(&self) -> epoll::Events;

    /// Notify the listener that one or more events have occurred.
    fn notify(&mut self, evset: epoll::Events);
}
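
// Illustrative sketch, not part of the original source: how an epoll-based control
// loop could register a `VsockEpollListener` and route a ready event back to it.
// `poll_listener_once` and `epoll_fd` are hypothetical names; `epoll_fd` is assumed
// to be an epoll instance created with `epoll::create()`. The real device uses
// `EpollHelper` for this; error handling and the actual loop are omitted here.
#[allow(dead_code)]
fn poll_listener_once<L: VsockEpollListener>(
    listener: &mut L,
    epoll_fd: RawFd,
) -> std::io::Result<()> {
    // Register the listener's fd for the event set it asked for.
    epoll::ctl(
        epoll_fd,
        epoll::ControlOptions::EPOLL_CTL_ADD,
        listener.get_polled_fd(),
        epoll::Event::new(listener.get_polled_evset(), 0),
    )?;
    // Wait for at most one event, then hand it to the listener via `notify()`.
    let mut events = vec![epoll::Event::new(epoll::Events::empty(), 0); 1];
    if epoll::wait(epoll_fd, -1, &mut events[..])? > 0 {
        if let Some(evset) = epoll::Events::from_bits(events[0].events) {
            listener.notify(evset);
        }
    }
    Ok(())
}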

/// Trait describing any channel that handles vsock packet traffic (sending and receiving packets).
///
/// Since we're implementing the device model here, our responsibility is to always process the
/// sending of packets (i.e. the TX queue). So, any locally generated data addressed to the driver
/// (e.g. a connection response or RST) will have to be queued until we get to processing the RX
/// queue.
///
/// Note: `recv_pkt()` and `send_pkt()` are named analogously to `Read::read()` and
///       `Write::write()`, respectively. I.e.
///       - `recv_pkt(&mut pkt)` will read data from the channel and place it into `pkt`; and
///       - `send_pkt(&pkt)` will fetch data from `pkt` and place it into the channel.
pub trait VsockChannel {
    /// Read/receive an incoming packet from the channel.
    fn recv_pkt(&mut self, pkt: &mut VsockPacket) -> Result<()>;

    /// Write/send a packet through the channel.
    fn send_pkt(&mut self, pkt: &VsockPacket) -> Result<()>;

    /// Checks whether there is pending incoming data inside the channel, meaning that a subsequent
    /// call to `recv_pkt()` won't fail.
    fn has_pending_rx(&self) -> bool;
}
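
// Illustrative sketch, not part of the original source: the calling convention the
// device model is expected to follow when draining a channel's RX side. `drain_rx`
// is a hypothetical helper, and `pkt` is assumed to already wrap a guest RX
// descriptor chain; real code maps a fresh `VsockPacket` per descriptor chain.
#[allow(dead_code)]
fn drain_rx<C: VsockChannel>(channel: &mut C, pkt: &mut VsockPacket) -> Result<usize> {
    let mut delivered = 0;
    // `has_pending_rx()` means a subsequent `recv_pkt()` call won't fail for lack of data.
    while channel.has_pending_rx() {
        channel.recv_pkt(pkt)?;
        delivered += 1;
    }
    Ok(delivered)
}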

/// The vsock backend, which is basically an epoll-event-driven vsock channel.
///
/// It needs to be sendable through an mpsc channel (the latter due to how `vmm::EpollContext`
/// works). Currently, the only implementation we have is `crate::virtio::unix::muxer::VsockMuxer`,
/// which translates guest-side vsock connections to host-side Unix domain socket connections.
pub trait VsockBackend: VsockChannel + VsockEpollListener + Send {}

#[cfg(any(test, fuzzing))]
pub mod tests {
    use std::os::unix::io::AsRawFd;
    use std::path::PathBuf;
    use std::sync::{Arc, RwLock};

    use libc::EFD_NONBLOCK;
    use virtio_bindings::virtio_ring::{VRING_DESC_F_NEXT, VRING_DESC_F_WRITE};
    use vm_memory::{GuestAddress, GuestMemoryAtomic};
    use vm_virtio::queue::testing::VirtQueue as GuestQ;
    use vmm_sys_util::eventfd::EventFd;

    use super::device::{VsockEpollHandler, RX_QUEUE_EVENT, TX_QUEUE_EVENT};
    use super::packet::VSOCK_PKT_HDR_SIZE;
    use super::*;
    use crate::device::{VirtioInterrupt, VirtioInterruptType};
    use crate::epoll_helper::EpollHelperHandler;
    use crate::{EpollHelper, GuestMemoryMmap};

    pub struct NoopVirtioInterrupt {}

    impl VirtioInterrupt for NoopVirtioInterrupt {
        fn trigger(
            &self,
            _int_type: VirtioInterruptType,
        ) -> std::result::Result<(), std::io::Error> {
            Ok(())
        }
    }

    pub struct TestBackend {
        pub evfd: EventFd,
        pub rx_err: Option<VsockError>,
        pub tx_err: Option<VsockError>,
        pub pending_rx: bool,
        pub rx_ok_cnt: usize,
        pub tx_ok_cnt: usize,
        pub evset: Option<epoll::Events>,
    }
    impl TestBackend {
        #[allow(clippy::new_without_default)]
        pub fn new() -> Self {
            Self {
                evfd: EventFd::new(EFD_NONBLOCK).unwrap(),
                rx_err: None,
                tx_err: None,
                pending_rx: false,
                rx_ok_cnt: 0,
                tx_ok_cnt: 0,
                evset: None,
            }
        }
        pub fn set_rx_err(&mut self, err: Option<VsockError>) {
            self.rx_err = err;
        }
        pub fn set_tx_err(&mut self, err: Option<VsockError>) {
            self.tx_err = err;
        }
        pub fn set_pending_rx(&mut self, prx: bool) {
            self.pending_rx = prx;
        }
    }
    impl VsockChannel for TestBackend {
        fn recv_pkt(&mut self, _pkt: &mut VsockPacket) -> Result<()> {
            match self.rx_err.take() {
                None => {
                    self.rx_ok_cnt += 1;
                    Ok(())
                }
                Some(e) => Err(e),
            }
        }
        fn send_pkt(&mut self, _pkt: &VsockPacket) -> Result<()> {
            match self.tx_err.take() {
                None => {
                    self.tx_ok_cnt += 1;
                    Ok(())
                }
                Some(e) => Err(e),
            }
        }
        fn has_pending_rx(&self) -> bool {
            self.pending_rx
        }
    }
    impl VsockEpollListener for TestBackend {
        fn get_polled_fd(&self) -> RawFd {
            self.evfd.as_raw_fd()
        }
        fn get_polled_evset(&self) -> epoll::Events {
            epoll::Events::EPOLLIN
        }
        fn notify(&mut self, evset: epoll::Events) {
            self.evset = Some(evset);
        }
    }
    impl VsockBackend for TestBackend {}

    pub struct TestContext {
        pub cid: u64,
        pub mem: GuestMemoryMmap,
        pub device: Vsock<TestBackend>,
    }

    impl TestContext {
        #[allow(clippy::new_without_default)]
        pub fn new() -> Self {
            const CID: u32 = 52;
            const MEM_SIZE: usize = 1024 * 1024 * 128;
            Self {
                cid: CID as u64,
                mem: GuestMemoryMmap::from_ranges(&[(GuestAddress(0), MEM_SIZE)]).unwrap(),
                device: Vsock::new(
                    String::from("vsock"),
                    CID,
                    PathBuf::from("/test/sock"),
                    TestBackend::new(),
                    false,
                    seccompiler::SeccompAction::Trap,
                    EventFd::new(EFD_NONBLOCK).unwrap(),
                    None,
                )
                .unwrap(),
            }
        }

        pub fn create_epoll_handler_context(&self) -> EpollHandlerContext {
            const QSIZE: u16 = 2;

            let guest_rxvq = GuestQ::new(GuestAddress(0x0010_0000), &self.mem, QSIZE);
            let guest_txvq = GuestQ::new(GuestAddress(0x0020_0000), &self.mem, QSIZE);
            let guest_evvq = GuestQ::new(GuestAddress(0x0030_0000), &self.mem, QSIZE);
            let rxvq = guest_rxvq.create_queue();
            let txvq = guest_txvq.create_queue();
            let evvq = guest_evvq.create_queue();

            // Set up one available descriptor in the RX queue.
            guest_rxvq.dtable[0].set(
                0x0040_0000,
                VSOCK_PKT_HDR_SIZE as u32,
                (VRING_DESC_F_WRITE | VRING_DESC_F_NEXT).try_into().unwrap(),
                1,
            );
            guest_rxvq.dtable[1].set(0x0040_1000, 4096, VRING_DESC_F_WRITE.try_into().unwrap(), 0);
            guest_rxvq.avail.ring[0].set(0);
            guest_rxvq.avail.idx.set(1);

            // Set up one available descriptor in the TX queue.
            guest_txvq.dtable[0].set(
                0x0050_0000,
                VSOCK_PKT_HDR_SIZE as u32,
                VRING_DESC_F_NEXT.try_into().unwrap(),
                1,
            );
            guest_txvq.dtable[1].set(0x0050_1000, 4096, 0, 0);
            guest_txvq.avail.ring[0].set(0);
            guest_txvq.avail.idx.set(1);

            let queues = vec![rxvq, txvq, evvq];
            let queue_evts = vec![
                EventFd::new(EFD_NONBLOCK).unwrap(),
                EventFd::new(EFD_NONBLOCK).unwrap(),
                EventFd::new(EFD_NONBLOCK).unwrap(),
            ];
            let interrupt_cb = Arc::new(NoopVirtioInterrupt {});

            EpollHandlerContext {
                guest_rxvq,
                guest_txvq,
                guest_evvq,
                handler: VsockEpollHandler {
                    mem: GuestMemoryAtomic::new(self.mem.clone()),
                    queues,
                    queue_evts,
                    kill_evt: EventFd::new(EFD_NONBLOCK).unwrap(),
                    pause_evt: EventFd::new(EFD_NONBLOCK).unwrap(),
                    interrupt_cb,
                    backend: Arc::new(RwLock::new(TestBackend::new())),
                    access_platform: None,
                },
            }
        }
    }

    pub struct EpollHandlerContext<'a> {
        pub handler: VsockEpollHandler<TestBackend>,
        pub guest_rxvq: GuestQ<'a>,
        pub guest_txvq: GuestQ<'a>,
        pub guest_evvq: GuestQ<'a>,
    }

    impl EpollHandlerContext<'_> {
        pub fn signal_txq_event(&mut self) {
            self.handler.queue_evts[1].write(1).unwrap();
            let events = epoll::Events::EPOLLIN;
            let event = epoll::Event::new(events, TX_QUEUE_EVENT as u64);
            let mut epoll_helper =
                EpollHelper::new(&self.handler.kill_evt, &self.handler.pause_evt).unwrap();
            self.handler.handle_event(&mut epoll_helper, &event).ok();
        }
        pub fn signal_rxq_event(&mut self) {
            self.handler.queue_evts[0].write(1).unwrap();
            let events = epoll::Events::EPOLLIN;
            let event = epoll::Event::new(events, RX_QUEUE_EVENT as u64);
            let mut epoll_helper =
                EpollHelper::new(&self.handler.kill_evt, &self.handler.pause_evt).unwrap();
            self.handler.handle_event(&mut epoll_helper, &event).ok();
        }
    }
}