// Copyright 2019 Intel Corporation. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//
// Portions Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//
// Portions Copyright 2017 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the THIRD-PARTY file.

mod csm;
mod device;
mod packet;
mod unix;

use std::os::unix::io::RawFd;

use packet::VsockPacket;

pub use self::device::Vsock;
pub use self::unix::VsockUnixBackend;
pub use self::unix::VsockUnixError;

/// Module-local constants, including the Linux UAPI values this device model
/// must agree on with the guest driver.
mod defs {

    /// Max vsock packet data/buffer size.
    pub const MAX_PKT_BUF_SIZE: usize = 64 * 1024;

    /// Values defined by the Linux UAPI (`/include/uapi/linux/virtio_vsock.h`)
    /// and shared with the guest-side virtio-vsock driver.
    pub mod uapi {

        /// Vsock packet operation IDs.
        /// Defined in `/include/uapi/linux/virtio_vsock.h`.
        ///
        /// Connection request.
        pub const VSOCK_OP_REQUEST: u16 = 1;
        /// Connection response.
        pub const VSOCK_OP_RESPONSE: u16 = 2;
        /// Connection reset.
        pub const VSOCK_OP_RST: u16 = 3;
        /// Connection clean shutdown.
        pub const VSOCK_OP_SHUTDOWN: u16 = 4;
        /// Connection data (read/write).
        pub const VSOCK_OP_RW: u16 = 5;
        /// Flow control credit update.
        pub const VSOCK_OP_CREDIT_UPDATE: u16 = 6;
        /// Flow control credit update request.
        pub const VSOCK_OP_CREDIT_REQUEST: u16 = 7;

        /// Vsock packet flags.
        /// Defined in `/include/uapi/linux/virtio_vsock.h`.
        ///
        /// Valid with a VSOCK_OP_SHUTDOWN packet: the packet sender will receive no more data.
        pub const VSOCK_FLAGS_SHUTDOWN_RCV: u32 = 1;
        /// Valid with a VSOCK_OP_SHUTDOWN packet: the packet sender will send no more data.
        pub const VSOCK_FLAGS_SHUTDOWN_SEND: u32 = 2;

        /// Vsock packet type.
        /// Defined in `/include/uapi/linux/virtio_vsock.h`.
        ///
        /// Stream / connection-oriented packet (the only currently valid type).
        pub const VSOCK_TYPE_STREAM: u16 = 1;

        /// Well-known context ID of the host (`VMADDR_CID_HOST` in the Linux UAPI).
        pub const VSOCK_HOST_CID: u64 = 2;
    }
}

/// Errors that can occur while parsing or building vsock packets from/into
/// virtio descriptor chains, or while moving data through a vsock channel.
#[derive(Debug)]
pub enum VsockError {
    /// The vsock data/buffer virtio descriptor length is smaller than expected.
    BufDescTooSmall,
    /// The vsock data/buffer virtio descriptor is expected, but missing.
    BufDescMissing,
    /// Chained GuestMemory error.
    GuestMemory,
    /// Bounds check failed on guest memory pointer.
    GuestMemoryBounds,
    /// The vsock header descriptor length is too small.
    HdrDescTooSmall(u32),
    /// The vsock header descriptor is expected, but missing.
    HdrDescMissing,
    /// The vsock header `len` field holds an invalid value.
    InvalidPktLen(u32),
    /// A data fetch was attempted when no data was available.
    NoData,
    /// A data buffer was expected for the provided packet, but it is missing.
    PktBufMissing,
    /// Encountered an unexpected write-only virtio descriptor.
    UnreadableDescriptor,
    /// Encountered an unexpected read-only virtio descriptor.
    UnwritableDescriptor,
}

/// Module-local result shorthand, with [`VsockError`] as the error type.
type Result<T> = std::result::Result<T, VsockError>;

/// Errors reported by the device epoll handler.
///
/// NOTE(review): this mirrors [`VsockError`] variant-for-variant, except that
/// it has no `HdrDescMissing` — presumably the epoll handler can never observe
/// that condition; confirm before consolidating the two enums.
#[derive(Debug)]
pub enum VsockEpollHandlerError {
    /// The vsock data/buffer virtio descriptor length is smaller than expected.
    BufDescTooSmall,
    /// The vsock data/buffer virtio descriptor is expected, but missing.
    BufDescMissing,
    /// Chained GuestMemory error.
    GuestMemory,
    /// Bounds check failed on guest memory pointer.
    GuestMemoryBounds,
    /// The vsock header descriptor length is too small.
    HdrDescTooSmall(u32),
    /// The vsock header `len` field holds an invalid value.
    InvalidPktLen(u32),
    /// A data fetch was attempted when no data was available.
    NoData,
    /// A data buffer was expected for the provided packet, but it is missing.
    PktBufMissing,
    /// Encountered an unexpected write-only virtio descriptor.
    UnreadableDescriptor,
    /// Encountered an unexpected read-only virtio descriptor.
    UnwritableDescriptor,
}

/// A passive, event-driven object, that needs to be notified whenever an epoll-able event occurs.
///
/// An event-polling control loop will use `get_polled_fd()` and `get_polled_evset()` to query
/// the listener for the file descriptor and the set of events it's interested in. When such an
/// event occurs, the control loop will route the event to the listener via `notify()`.
///
pub trait VsockEpollListener {
    /// Get the file descriptor the listener needs polled.
    fn get_polled_fd(&self) -> RawFd;

    /// Get the set of events for which the listener wants to be notified.
    fn get_polled_evset(&self) -> epoll::Events;

    /// Notify the listener that one or more events have occurred.
    fn notify(&mut self, evset: epoll::Events);
}

/// Trait to describe any channel that handles vsock packet traffic (sending and receiving packets).
///
/// Since we're implementing the device model here, our responsibility is to always process the sending of
/// packets (i.e. the TX queue). So, any locally generated data, addressed to the driver (e.g.
/// a connection response or RST), will have to be queued, until we get to processing the RX queue.
///
/// Note: `recv_pkt()` and `send_pkt()` are named analogous to `Read::read()` and `Write::write()`,
/// respectively. I.e.
/// - `recv_pkt(&mut pkt)` will read data from the channel, and place it into `pkt`; and
/// - `send_pkt(&pkt)` will fetch data from `pkt`, and place it into the channel.
pub trait VsockChannel {
    /// Read/receive an incoming packet from the channel.
    fn recv_pkt(&mut self, pkt: &mut VsockPacket) -> Result<()>;

    /// Write/send a packet through the channel.
    fn send_pkt(&mut self, pkt: &VsockPacket) -> Result<()>;

    /// Checks whether there is pending incoming data inside the channel, meaning that a subsequent
    /// call to `recv_pkt()` won't fail.
    fn has_pending_rx(&self) -> bool;
}

/// The vsock backend, which is basically an epoll-event-driven vsock channel.
///
/// It needs to be sendable through an mpsc channel (the latter due to how `vmm::EpollContext` works).
/// Currently, the only implementation we have is `crate::virtio::unix::muxer::VsockMuxer`, which
/// translates guest-side vsock connections to host-side Unix domain socket connections.
pub trait VsockBackend: VsockChannel + VsockEpollListener + Send {}

#[cfg(test)]
mod tests {
    use std::os::unix::io::AsRawFd;
    use std::path::PathBuf;
    use std::sync::{Arc, RwLock};

    use libc::EFD_NONBLOCK;
    use virtio_bindings::virtio_ring::{VRING_DESC_F_NEXT, VRING_DESC_F_WRITE};
    use vm_memory::{GuestAddress, GuestMemoryAtomic};
    use vm_virtio::queue::testing::VirtQueue as GuestQ;
    use vmm_sys_util::eventfd::EventFd;

    use super::device::{VsockEpollHandler, RX_QUEUE_EVENT, TX_QUEUE_EVENT};
    use super::packet::VSOCK_PKT_HDR_SIZE;
    use super::*;
    use crate::device::{VirtioInterrupt, VirtioInterruptType};
    use crate::epoll_helper::EpollHelperHandler;
    use crate::EpollHelper;
    use crate::GuestMemoryMmap;

    /// A `VirtioInterrupt` implementation that silently discards every trigger.
    pub struct NoopVirtioInterrupt {}

    impl VirtioInterrupt for NoopVirtioInterrupt {
        fn trigger(
            &self,
            _int_type: VirtioInterruptType,
        ) -> std::result::Result<(), std::io::Error> {
            Ok(())
        }
    }

    /// A scriptable `VsockBackend` test double: errors can be injected for the
    /// next RX/TX operation, and successful operations / received epoll event
    /// sets are recorded for later inspection by tests.
    pub struct TestBackend {
        // Event fd handed to the epoll loop via `get_polled_fd()`.
        pub evfd: EventFd,
        // One-shot error to return from the next `recv_pkt()` (taken on use).
        pub rx_err: Option<VsockError>,
        // One-shot error to return from the next `send_pkt()` (taken on use).
        pub tx_err: Option<VsockError>,
        // Value returned by `has_pending_rx()`.
        pub pending_rx: bool,
        // Count of successful `recv_pkt()` calls.
        pub rx_ok_cnt: usize,
        // Count of successful `send_pkt()` calls.
        pub tx_ok_cnt: usize,
        // Last event set delivered through `notify()`, if any.
        pub evset: Option<epoll::Events>,
    }
    impl TestBackend {
        pub fn new() -> Self {
            Self {
                evfd: EventFd::new(EFD_NONBLOCK).unwrap(),
                rx_err: None,
                tx_err: None,
                pending_rx: false,
                rx_ok_cnt: 0,
                tx_ok_cnt: 0,
                evset: None,
            }
        }
        // Arm a one-shot error for the next `recv_pkt()` call.
        pub fn set_rx_err(&mut self, err: Option<VsockError>) {
            self.rx_err = err;
        }
        // Arm a one-shot error for the next `send_pkt()` call.
        pub fn set_tx_err(&mut self, err: Option<VsockError>) {
            self.tx_err = err;
        }
        pub fn set_pending_rx(&mut self, prx: bool) {
            self.pending_rx = prx;
        }
    }
    impl VsockChannel for TestBackend {
        // Succeed (and count) unless an error was armed via `set_rx_err()`.
        fn recv_pkt(&mut self, _pkt: &mut VsockPacket) -> Result<()> {
            match self.rx_err.take() {
                None => {
                    self.rx_ok_cnt += 1;
                    Ok(())
                }
                Some(e) => Err(e),
            }
        }
        // Succeed (and count) unless an error was armed via `set_tx_err()`.
        fn send_pkt(&mut self, _pkt: &VsockPacket) -> Result<()> {
            match self.tx_err.take() {
                None => {
                    self.tx_ok_cnt += 1;
                    Ok(())
                }
                Some(e) => Err(e),
            }
        }
        fn has_pending_rx(&self) -> bool {
            self.pending_rx
        }
    }
    impl VsockEpollListener for TestBackend {
        fn get_polled_fd(&self) -> RawFd {
            self.evfd.as_raw_fd()
        }
        fn get_polled_evset(&self) -> epoll::Events {
            epoll::Events::EPOLLIN
        }
        // Record the delivered event set so tests can assert on it.
        fn notify(&mut self, evset: epoll::Events) {
            self.evset = Some(evset);
        }
    }
    impl VsockBackend for TestBackend {}

    /// Common fixture: a guest memory map and a `Vsock` device wired to a
    /// `TestBackend`.
    pub struct TestContext {
        pub cid: u64,
        pub mem: GuestMemoryMmap,
        pub device: Vsock<TestBackend>,
    }

    impl TestContext {
        pub fn new() -> Self {
            const CID: u32 = 52;
            const MEM_SIZE: usize = 1024 * 1024 * 128;
            Self {
                cid: CID as u64,
                mem: GuestMemoryMmap::from_ranges(&[(GuestAddress(0), MEM_SIZE)]).unwrap(),
                device: Vsock::new(
                    String::from("vsock"),
                    CID,
                    PathBuf::from("/test/sock"),
                    TestBackend::new(),
                    false,
                    seccompiler::SeccompAction::Trap,
                    EventFd::new(EFD_NONBLOCK).unwrap(),
                    None,
                )
                .unwrap(),
            }
        }

        /// Build an epoll-handler fixture with three 2-entry guest queues
        /// (RX, TX, event), each pre-loaded with one available descriptor
        /// chain: a packet header descriptor chained to a 4 KiB data buffer.
        pub fn create_epoll_handler_context(&self) -> EpollHandlerContext {
            const QSIZE: u16 = 2;

            let guest_rxvq = GuestQ::new(GuestAddress(0x0010_0000), &self.mem, QSIZE);
            let guest_txvq = GuestQ::new(GuestAddress(0x0020_0000), &self.mem, QSIZE);
            let guest_evvq = GuestQ::new(GuestAddress(0x0030_0000), &self.mem, QSIZE);
            let rxvq = guest_rxvq.create_queue();
            let txvq = guest_txvq.create_queue();
            let evvq = guest_evvq.create_queue();

            // Set up one available descriptor in the RX queue.
            // RX buffers are device-written, hence VRING_DESC_F_WRITE on both
            // the header and the data descriptor.
            guest_rxvq.dtable[0].set(
                0x0040_0000,
                VSOCK_PKT_HDR_SIZE as u32,
                (VRING_DESC_F_WRITE | VRING_DESC_F_NEXT).try_into().unwrap(),
                1,
            );
            guest_rxvq.dtable[1].set(0x0040_1000, 4096, VRING_DESC_F_WRITE.try_into().unwrap(), 0);
            guest_rxvq.avail.ring[0].set(0);
            guest_rxvq.avail.idx.set(1);

            // Set up one available descriptor in the TX queue.
            // TX buffers are driver-written (device read-only), so no WRITE flag.
            guest_txvq.dtable[0].set(
                0x0050_0000,
                VSOCK_PKT_HDR_SIZE as u32,
                VRING_DESC_F_NEXT.try_into().unwrap(),
                1,
            );
            guest_txvq.dtable[1].set(0x0050_1000, 4096, 0, 0);
            guest_txvq.avail.ring[0].set(0);
            guest_txvq.avail.idx.set(1);

            let queues = vec![rxvq, txvq, evvq];
            let queue_evts = vec![
                EventFd::new(EFD_NONBLOCK).unwrap(),
                EventFd::new(EFD_NONBLOCK).unwrap(),
                EventFd::new(EFD_NONBLOCK).unwrap(),
            ];
            let interrupt_cb = Arc::new(NoopVirtioInterrupt {});

            EpollHandlerContext {
                guest_rxvq,
                guest_txvq,
                guest_evvq,
                handler: VsockEpollHandler {
                    mem: GuestMemoryAtomic::new(self.mem.clone()),
                    queues,
                    queue_evts,
                    kill_evt: EventFd::new(EFD_NONBLOCK).unwrap(),
                    pause_evt: EventFd::new(EFD_NONBLOCK).unwrap(),
                    interrupt_cb,
                    backend: Arc::new(RwLock::new(TestBackend::new())),
                    access_platform: None,
                },
            }
        }
    }

    /// Fixture bundling a `VsockEpollHandler` with the guest-side views of its
    /// three virtqueues, so tests can drive both ends.
    pub struct EpollHandlerContext<'a> {
        pub handler: VsockEpollHandler<TestBackend>,
        pub guest_rxvq: GuestQ<'a>,
        pub guest_txvq: GuestQ<'a>,
        pub guest_evvq: GuestQ<'a>,
    }

    impl<'a> EpollHandlerContext<'a> {
        /// Simulate the guest kicking the TX queue (queue_evts[1]) and route
        /// the resulting epoll event into the handler.
        pub fn signal_txq_event(&mut self) {
            self.handler.queue_evts[1].write(1).unwrap();
            let events = epoll::Events::EPOLLIN;
            let event = epoll::Event::new(events, TX_QUEUE_EVENT as u64);
            let mut epoll_helper =
                EpollHelper::new(&self.handler.kill_evt, &self.handler.pause_evt).unwrap();
            self.handler.handle_event(&mut epoll_helper, &event).ok();
        }
        /// Simulate the guest kicking the RX queue (queue_evts[0]) and route
        /// the resulting epoll event into the handler.
        pub fn signal_rxq_event(&mut self) {
            self.handler.queue_evts[0].write(1).unwrap();
            let events = epoll::Events::EPOLLIN;
            let event = epoll::Event::new(events, RX_QUEUE_EVENT as u64);
            let mut epoll_helper =
                EpollHelper::new(&self.handler.kill_evt, &self.handler.pause_evt).unwrap();
            self.handler.handle_event(&mut epoll_helper, &event).ok();
        }
    }
}