xref: /cloud-hypervisor/virtio-devices/src/vsock/unix/muxer.rs (revision 7d7bfb2034001d4cb15df2ddc56d2d350c8da30f)
1 // Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
2 // SPDX-License-Identifier: Apache-2.0
3 //
4 
5 /// `VsockMuxer` is the device-facing component of the Unix domain sockets vsock backend. I.e.
6 /// by implementing the `VsockBackend` trait, it abstracts away the gory details of translating
7 /// between AF_VSOCK and AF_UNIX, and presents a clean interface to the rest of the vsock
8 /// device model.
9 ///
10 /// The vsock muxer has two main roles:
11 /// 1. Vsock connection multiplexer:
12 ///    It's the muxer's job to create, manage, and terminate `VsockConnection` objects. The
13 ///    muxer also routes packets to their owning connections. It does so via a connection
14 ///    `HashMap`, keyed by what is basically a (host_port, guest_port) tuple.
15 ///    Vsock packet traffic needs to be inspected, in order to detect connection request
16 ///    packets (leading to the creation of a new connection), and connection reset packets
17 ///    (leading to the termination of an existing connection). All other packets, though, must
18 ///    belong to an existing connection and, as such, the muxer simply forwards them.
19 /// 2. Event dispatcher
///    There are three event categories that the vsock backend is interested in:
21 ///    1. A new host-initiated connection is ready to be accepted from the listening host Unix
22 ///       socket;
23 ///    2. Data is available for reading from a newly-accepted host-initiated connection (i.e.
24 ///       the host is ready to issue a vsock connection request, informing us of the
25 ///       destination port to which it wants to connect);
26 ///    3. Some event was triggered for a connected Unix socket, that belongs to a
27 ///       `VsockConnection`.
28 ///    The muxer gets notified about all of these events, because, as a `VsockEpollListener`
29 ///    implementor, it gets to register a nested epoll FD into the main VMM epolling loop. All
30 ///    other pollable FDs are then registered under this nested epoll FD.
31 ///    To route all these events to their handlers, the muxer uses another `HashMap` object,
32 ///    mapping `RawFd`s to `EpollListener`s.
33 ///
34 use std::collections::{HashMap, HashSet};
35 use std::fs::File;
36 use std::io::{self, Read};
37 use std::os::unix::io::{AsRawFd, FromRawFd, RawFd};
38 use std::os::unix::net::{UnixListener, UnixStream};
39 
40 use super::super::csm::ConnState;
41 use super::super::defs::uapi;
42 use super::super::packet::VsockPacket;
43 use super::super::{
44     Result as VsockResult, VsockBackend, VsockChannel, VsockEpollListener, VsockError,
45 };
46 use super::defs;
47 use super::muxer_killq::MuxerKillQ;
48 use super::muxer_rxq::MuxerRxQ;
49 use super::MuxerConnection;
50 use super::{Error, Result};
51 
/// A unique identifier of a `MuxerConnection` object. Connections are stored in a hash map,
/// keyed by a `ConnMapKey` object.
///
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
pub struct ConnMapKey {
    // Host-side vsock port of the connection.
    local_port: u32,
    // Guest-side vsock port of the connection.
    peer_port: u32,
}
60 
/// A muxer RX queue item.
///
/// Queue items are lightweight instructions on how to produce the next RX packet, rather
/// than the packets themselves.
///
#[derive(Clone, Copy, Debug)]
pub enum MuxerRx {
    /// The packet must be fetched from the connection identified by `ConnMapKey`.
    ConnRx(ConnMapKey),
    /// The muxer must produce an RST packet, sent from `local_port` to `peer_port`.
    RstPkt { local_port: u32, peer_port: u32 },
}
70 
/// An epoll listener, registered under the muxer's nested epoll FD.
///
enum EpollListener {
    /// The listener is a `MuxerConnection`, identified by `key`, and interested in the events
    /// in `evset`. Since `MuxerConnection` implements `VsockEpollListener`, notifications will
    /// be forwarded to the listener via `VsockEpollListener::notify()`.
    Connection {
        key: ConnMapKey,
        evset: epoll::Events,
    },
    /// A listener interested in new host-initiated connections.
    HostSock,
    /// A listener interested in reading host "connect <port>" commands from a freshly
    /// connected host socket.
    LocalStream(UnixStream),
}
87 
/// The vsock connection multiplexer.
///
pub struct VsockMuxer {
    /// Guest CID.
    cid: u64,
    /// A hash map used to store the active connections.
    conn_map: HashMap<ConnMapKey, MuxerConnection>,
    /// A hash map used to store epoll event listeners / handlers, keyed by the raw FD each
    /// listener was registered with.
    listener_map: HashMap<RawFd, EpollListener>,
    /// The RX queue. Items in this queue are consumed by `VsockMuxer::recv_pkt()`, and
    /// produced
    /// - by `VsockMuxer::send_pkt()` (e.g. RST in response to a connection request packet);
    ///   and
    /// - in response to EPOLLIN events (e.g. data available to be read from an AF_UNIX
    ///   socket).
    rxq: MuxerRxQ,
    /// A queue used for terminating connections that are taking too long to shut down.
    killq: MuxerKillQ,
    /// The Unix socket, through which host-initiated connections are accepted.
    host_sock: UnixListener,
    /// The file system path of the host-side Unix socket. This is used to figure out the path
    /// to Unix sockets listening on specific ports. I.e. "<this path>_<port number>".
    host_sock_path: String,
    /// The nested epoll File, used to register epoll listeners. Wrapping the raw epoll FD in
    /// a `File` guarantees it gets closed when the muxer is dropped.
    epoll_file: File,
    /// A hash set used to keep track of used host-side (local) ports, in order to assign local
    /// ports to host-initiated connections.
    local_port_set: HashSet<u32>,
    /// The last used host-side port.
    local_port_last: u32,
}
119 
impl VsockChannel for VsockMuxer {
    /// Deliver a vsock packet to the guest vsock driver.
    ///
    /// Returns:
    /// - `Ok(())`: `pkt` has been successfully filled in; or
    /// - `Err(VsockError::NoData)`: there was no available data with which to fill in the
    ///   packet.
    ///
    fn recv_pkt(&mut self, pkt: &mut VsockPacket) -> VsockResult<()> {
        // We'll look for instructions on how to build the RX packet in the RX queue. If the
        // queue is empty, that doesn't necessarily mean we don't have any pending RX, since
        // the queue might be out-of-sync. If that's the case, we'll attempt to sync it first,
        // and then try to pop something out again.
        if self.rxq.is_empty() && !self.rxq.is_synced() {
            self.rxq = MuxerRxQ::from_conn_map(&self.conn_map);
        }

        // Keep scanning queue items until one of them yields a packet; an item that yields
        // nothing (e.g. a connection that turned out to have no data) is skipped over.
        while let Some(rx) = self.rxq.peek() {
            let res = match rx {
                // We need to build an RST packet, going from `local_port` to `peer_port`.
                MuxerRx::RstPkt {
                    local_port,
                    peer_port,
                } => {
                    pkt.set_op(uapi::VSOCK_OP_RST)
                        .set_src_cid(uapi::VSOCK_HOST_CID)
                        .set_dst_cid(self.cid)
                        .set_src_port(local_port)
                        .set_dst_port(peer_port)
                        .set_len(0)
                        .set_type(uapi::VSOCK_TYPE_STREAM)
                        .set_flags(0)
                        .set_buf_alloc(0)
                        .set_fwd_cnt(0);
                    self.rxq.pop().unwrap();
                    return Ok(());
                }

                // We'll defer building the packet to this connection, since it has something
                // to say.
                MuxerRx::ConnRx(key) => {
                    let mut conn_res = Err(VsockError::NoData);
                    let mut do_pop = true;
                    self.apply_conn_mutation(key, |conn| {
                        conn_res = conn.recv_pkt(pkt);
                        do_pop = !conn.has_pending_rx();
                    });
                    // Only pop the queue item once the connection has drained its pending RX;
                    // otherwise leave it in place so the next `recv_pkt()` call revisits it.
                    if do_pop {
                        self.rxq.pop().unwrap();
                    }
                    conn_res
                }
            };

            if res.is_ok() {
                // Inspect traffic, looking for RST packets, since that means we have to
                // terminate and remove this connection from the active connection pool.
                //
                if pkt.op() == uapi::VSOCK_OP_RST {
                    self.remove_connection(ConnMapKey {
                        local_port: pkt.src_port(),
                        peer_port: pkt.dst_port(),
                    });
                }

                debug!("vsock muxer: RX pkt: {:?}", pkt.hdr());
                return Ok(());
            }
        }

        // Queue exhausted without producing a packet.
        Err(VsockError::NoData)
    }

    /// Deliver a guest-generated packet to its destination in the vsock backend.
    ///
    /// This absorbs unexpected packets, handles RSTs (by dropping connections), and forwards
    /// all the rest to their owning `MuxerConnection`.
    ///
    /// Returns:
    /// always `Ok(())` - the packet has been consumed, and its virtio TX buffers can be
    /// returned to the guest vsock driver.
    ///
    fn send_pkt(&mut self, pkt: &VsockPacket) -> VsockResult<()> {
        // Note the port swap: the guest's source port is our peer port, and its destination
        // port is our local port.
        let conn_key = ConnMapKey {
            local_port: pkt.dst_port(),
            peer_port: pkt.src_port(),
        };

        debug!(
            "vsock: muxer.send[rxq.len={}]: {:?}",
            self.rxq.len(),
            pkt.hdr()
        );

        // If this packet has an unsupported type (!=stream), we must send back an RST.
        //
        if pkt.type_() != uapi::VSOCK_TYPE_STREAM {
            self.enq_rst(pkt.dst_port(), pkt.src_port());
            return Ok(());
        }

        // We don't know how to handle packets addressed to other CIDs. We only handle the host
        // part of the guest - host communication here.
        if pkt.dst_cid() != uapi::VSOCK_HOST_CID {
            info!(
                "vsock: dropping guest packet for unknown CID: {:?}",
                pkt.hdr()
            );
            return Ok(());
        }

        if !self.conn_map.contains_key(&conn_key) {
            // This packet can't be routed to any active connection (based on its src and dst
            // ports).  The only orphan / unroutable packets we know how to handle are
            // connection requests.
            if pkt.op() == uapi::VSOCK_OP_REQUEST {
                // Oh, this is a connection request!
                self.handle_peer_request_pkt(pkt);
            } else {
                // Send back an RST, to let the driver know we weren't expecting this packet.
                self.enq_rst(pkt.dst_port(), pkt.src_port());
            }
            return Ok(());
        }

        // Right, we know where to send this packet, then (to `conn_key`).
        // However, if this is an RST, we have to forcefully terminate the connection, so
        // there's no point in forwarding the packet to it.
        if pkt.op() == uapi::VSOCK_OP_RST {
            self.remove_connection(conn_key);
            return Ok(());
        }

        // Alright, everything looks in order - forward this packet to its owning connection.
        let mut res: VsockResult<()> = Ok(());
        self.apply_conn_mutation(conn_key, |conn| {
            res = conn.send_pkt(pkt);
        });

        res
    }

    /// Check if the muxer has any pending RX data, with which to fill a guest-provided RX
    /// buffer.
    ///
    /// An out-of-sync RX queue also counts as pending RX, since re-syncing it (done lazily by
    /// `recv_pkt()`) may surface connections that have data to yield.
    ///
    fn has_pending_rx(&self) -> bool {
        !self.rxq.is_empty() || !self.rxq.is_synced()
    }
}
269 
270 impl VsockEpollListener for VsockMuxer {
271     /// Get the FD to be registered for polling upstream (in the main VMM epoll loop, in this
272     /// case).
273     ///
274     /// This will be the muxer's nested epoll FD.
275     ///
276     fn get_polled_fd(&self) -> RawFd {
277         self.epoll_file.as_raw_fd()
278     }
279 
280     /// Get the epoll events to be polled upstream.
281     ///
282     /// Since the polled FD is a nested epoll FD, we're only interested in EPOLLIN events (i.e.
283     /// some event occurred on one of the FDs registered under our epoll FD).
284     ///
285     fn get_polled_evset(&self) -> epoll::Events {
286         epoll::Events::EPOLLIN
287     }
288 
289     /// Notify the muxer about a pending event having occurred under its nested epoll FD.
290     ///
291     fn notify(&mut self, _: epoll::Events) {
292         debug!("vsock: muxer received kick");
293 
294         let mut epoll_events = vec![epoll::Event::new(epoll::Events::empty(), 0); 32];
295         'epoll: loop {
296             match epoll::wait(self.epoll_file.as_raw_fd(), 0, epoll_events.as_mut_slice()) {
297                 Ok(ev_cnt) => {
298                     for evt in epoll_events.iter().take(ev_cnt) {
299                         self.handle_event(
300                             evt.data as RawFd,
301                             // It's ok to unwrap here, since the `evt.events` is filled
302                             // in by `epoll::wait()`, and therefore contains only valid epoll
303                             // flags.
304                             epoll::Events::from_bits(evt.events).unwrap(),
305                         );
306                     }
307                 }
308                 Err(e) => {
309                     if e.kind() == io::ErrorKind::Interrupted {
310                         // It's well defined from the epoll_wait() syscall
311                         // documentation that the epoll loop can be interrupted
312                         // before any of the requested events occurred or the
313                         // timeout expired. In both those cases, epoll_wait()
314                         // returns an error of type EINTR, but this should not
315                         // be considered as a regular error. Instead it is more
316                         // appropriate to retry, by calling into epoll_wait().
317                         continue;
318                     }
319                     warn!("vsock: failed to consume muxer epoll event: {}", e);
320                 }
321             }
322             break 'epoll;
323         }
324     }
325 }
326 
// The `VsockChannel` and `VsockEpollListener` impls above cover everything `VsockBackend`
// requires from the muxer, so this impl body is intentionally empty.
impl VsockBackend for VsockMuxer {}
328 
329 impl VsockMuxer {
    /// Muxer constructor.
    ///
    /// `cid` is the guest's context ID; `host_sock_path` is the file system path at which the
    /// host-facing Unix socket is bound and listened on.
    ///
    /// Returns an error if the nested epoll FD cannot be created, if the host Unix socket
    /// cannot be bound / set non-blocking, or if it cannot be registered for epoll events.
    ///
    pub fn new(cid: u64, host_sock_path: String) -> Result<Self> {
        // Create the nested epoll FD. This FD will be added to the VMM `EpollContext`, at
        // device activation time.
        let epoll_fd = epoll::create(true).map_err(Error::EpollFdCreate)?;
        // SAFETY: `epoll_fd` was just created above and is owned by nobody else, so wrapping
        // it in a `File` (to enforce closing on drop) is sound.
        let epoll_file = unsafe { File::from_raw_fd(epoll_fd) };

        // Open/bind/listen on the host Unix socket, so we can accept host-initiated
        // connections.
        let host_sock = UnixListener::bind(&host_sock_path)
            .and_then(|sock| sock.set_nonblocking(true).map(|_| sock))
            .map_err(Error::UnixBind)?;

        let mut muxer = Self {
            cid,
            host_sock,
            host_sock_path,
            epoll_file,
            rxq: MuxerRxQ::new(),
            conn_map: HashMap::with_capacity(defs::MAX_CONNECTIONS),
            listener_map: HashMap::with_capacity(defs::MAX_CONNECTIONS + 1),
            killq: MuxerKillQ::new(),
            // With this seed, the first port handed out by `allocate_local_port()` is
            // `1 << 30`.
            local_port_last: (1u32 << 30) - 1,
            local_port_set: HashSet::with_capacity(defs::MAX_CONNECTIONS),
        };

        muxer.add_listener(muxer.host_sock.as_raw_fd(), EpollListener::HostSock)?;
        Ok(muxer)
    }
361 
    /// Handle/dispatch an epoll event to its listener.
    ///
    /// `fd` is the FD under which the listener was registered in `listener_map`, and
    /// `event_set` carries the epoll flags reported for it.
    ///
    fn handle_event(&mut self, fd: RawFd, event_set: epoll::Events) {
        debug!(
            "vsock: muxer processing event: fd={}, event_set={:?}",
            fd, event_set
        );

        match self.listener_map.get_mut(&fd) {
            // This event needs to be forwarded to a `MuxerConnection` that is listening for
            // it.
            //
            Some(EpollListener::Connection { key, evset: _ }) => {
                let key_copy = *key;
                // The handling of this event will most probably mutate the state of the
                // receiving connection. We'll need to check for new pending RX, event set
                // mutation, and all that, so we're wrapping the event delivery inside those
                // checks.
                self.apply_conn_mutation(key_copy, |conn| {
                    conn.notify(event_set);
                });
            }

            // A new host-initiated connection is ready to be accepted.
            //
            Some(EpollListener::HostSock) => {
                if self.conn_map.len() == defs::MAX_CONNECTIONS {
                    // If we're already maxed-out on connections, we'll just accept and
                    // immediately discard this potentially new one.
                    warn!("vsock: connection limit reached; refusing new host connection");
                    // The accept result is deliberately ignored; the stream (if any) is
                    // dropped, i.e. closed, right away.
                    self.host_sock.accept().map(|_| 0).unwrap_or(0);
                    return;
                }
                self.host_sock
                    .accept()
                    .map_err(Error::UnixAccept)
                    .and_then(|(stream, _)| {
                        stream
                            .set_nonblocking(true)
                            .map(|_| stream)
                            .map_err(Error::UnixAccept)
                    })
                    .and_then(|stream| {
                        // Before forwarding this connection to a listening AF_VSOCK socket on
                        // the guest side, we need to know the destination port. We'll read
                        // that port from a "connect" command received on this socket, so the
                        // next step is to ask to be notified the moment we can read from it.
                        self.add_listener(stream.as_raw_fd(), EpollListener::LocalStream(stream))
                    })
                    .unwrap_or_else(|err| {
                        warn!("vsock: unable to accept local connection: {:?}", err);
                    });
            }

            // Data is ready to be read from a host-initiated connection. That would be the
            // "connect" command that we're expecting.
            Some(EpollListener::LocalStream(_)) => {
                // The stream is removed from the listener map first, taking ownership of it;
                // from here on it either becomes part of a new connection, or gets dropped.
                if let Some(EpollListener::LocalStream(mut stream)) = self.remove_listener(fd) {
                    Self::read_local_stream_port(&mut stream)
                        .map(|peer_port| (self.allocate_local_port(), peer_port))
                        .and_then(|(local_port, peer_port)| {
                            self.add_connection(
                                ConnMapKey {
                                    local_port,
                                    peer_port,
                                },
                                MuxerConnection::new_local_init(
                                    stream,
                                    uapi::VSOCK_HOST_CID,
                                    self.cid,
                                    local_port,
                                    peer_port,
                                ),
                            )
                        })
                        .unwrap_or_else(|err| {
                            info!("vsock: error adding local-init connection: {:?}", err);
                        })
                }
            }

            _ => {
                info!(
                    "vsock: unexpected event: fd={:?}, event_set={:?}",
                    fd, event_set
                );
            }
        }
    }
451 
452     /// Parse a host "connect" command, and extract the destination vsock port.
453     ///
454     fn read_local_stream_port(stream: &mut UnixStream) -> Result<u32> {
455         let mut buf = [0u8; 32];
456 
457         // This is the minimum number of bytes that we should be able to read, when parsing a
458         // valid connection request. I.e. `b"connect 0\n".len()`.
459         const MIN_READ_LEN: usize = 10;
460 
461         // Bring in the minimum number of bytes that we should be able to read.
462         stream
463             .read_exact(&mut buf[..MIN_READ_LEN])
464             .map_err(Error::UnixRead)?;
465 
466         // Now, finish reading the destination port number, by bringing in one byte at a time,
467         // until we reach an EOL terminator (or our buffer space runs out).  Yeah, not
468         // particularly proud of this approach, but it will have to do for now.
469         let mut blen = MIN_READ_LEN;
470         while buf[blen - 1] != b'\n' && blen < buf.len() {
471             stream
472                 .read_exact(&mut buf[blen..=blen])
473                 .map_err(Error::UnixRead)?;
474             blen += 1;
475         }
476 
477         let mut word_iter = std::str::from_utf8(&buf[..blen])
478             .map_err(Error::ConvertFromUtf8)?
479             .split_whitespace();
480 
481         word_iter
482             .next()
483             .ok_or(Error::InvalidPortRequest)
484             .and_then(|word| {
485                 if word.to_lowercase() == "connect" {
486                     Ok(())
487                 } else {
488                     Err(Error::InvalidPortRequest)
489                 }
490             })
491             .and_then(|_| word_iter.next().ok_or(Error::InvalidPortRequest))
492             .and_then(|word| word.parse::<u32>().map_err(Error::ParseInteger))
493             .map_err(|e| Error::ReadStreamPort(Box::new(e)))
494     }
495 
    /// Add a new connection to the active connection pool.
    ///
    /// The connection's FD gets registered for epoll events and, if the connection already
    /// has pending RX data, it is scheduled in the muxer RX queue.
    ///
    /// Returns an error if the connection limit has been reached, or if the epoll
    /// registration fails.
    ///
    fn add_connection(&mut self, key: ConnMapKey, conn: MuxerConnection) -> Result<()> {
        // We might need to make room for this new connection, so let's sweep the kill queue
        // first.  It's fine to do this here because:
        // - unless the kill queue is out of sync, this is a pretty inexpensive operation; and
        // - we are under no pressure to respect any accurate timing for connection
        //   termination.
        self.sweep_killq();

        if self.conn_map.len() >= defs::MAX_CONNECTIONS {
            info!(
                "vsock: muxer connection limit reached ({})",
                defs::MAX_CONNECTIONS
            );
            return Err(Error::TooManyConnections);
        }

        // Only insert the connection into the pool if its epoll registration succeeded.
        self.add_listener(
            conn.get_polled_fd(),
            EpollListener::Connection {
                key,
                evset: conn.get_polled_evset(),
            },
        )
        .map(|_| {
            if conn.has_pending_rx() {
                // We can safely ignore any error in adding a connection RX indication. Worst
                // case scenario, the RX queue will get desynchronized, but we'll handle that
                // the next time we need to yield an RX packet.
                self.rxq.push(MuxerRx::ConnRx(key));
            }
            self.conn_map.insert(key, conn);
        })
    }
531 
532     /// Remove a connection from the active connection poll.
533     ///
534     fn remove_connection(&mut self, key: ConnMapKey) {
535         if let Some(conn) = self.conn_map.remove(&key) {
536             self.remove_listener(conn.get_polled_fd());
537         }
538         self.free_local_port(key.local_port);
539     }
540 
541     /// Schedule a connection for immediate termination.
542     /// I.e. as soon as we can also let our peer know we're dropping the connection, by sending
543     /// it an RST packet.
544     ///
545     fn kill_connection(&mut self, key: ConnMapKey) {
546         let mut had_rx = false;
547         self.conn_map.entry(key).and_modify(|conn| {
548             had_rx = conn.has_pending_rx();
549             conn.kill();
550         });
551         // This connection will now have an RST packet to yield, so we need to add it to the RX
552         // queue.  However, there's no point in doing that if it was already in the queue.
553         if !had_rx {
554             // We can safely ignore any error in adding a connection RX indication. Worst case
555             // scenario, the RX queue will get desynchronized, but we'll handle that the next
556             // time we need to yield an RX packet.
557             self.rxq.push(MuxerRx::ConnRx(key));
558         }
559     }
560 
561     /// Register a new epoll listener under the muxer's nested epoll FD.
562     ///
563     fn add_listener(&mut self, fd: RawFd, listener: EpollListener) -> Result<()> {
564         let evset = match listener {
565             EpollListener::Connection { evset, .. } => evset,
566             EpollListener::LocalStream(_) => epoll::Events::EPOLLIN,
567             EpollListener::HostSock => epoll::Events::EPOLLIN,
568         };
569 
570         epoll::ctl(
571             self.epoll_file.as_raw_fd(),
572             epoll::ControlOptions::EPOLL_CTL_ADD,
573             fd,
574             epoll::Event::new(evset, fd as u64),
575         )
576         .map(|_| {
577             self.listener_map.insert(fd, listener);
578         })
579         .map_err(Error::EpollAdd)?;
580 
581         Ok(())
582     }
583 
584     /// Remove (and return) a previously registered epoll listener.
585     ///
586     fn remove_listener(&mut self, fd: RawFd) -> Option<EpollListener> {
587         let maybe_listener = self.listener_map.remove(&fd);
588 
589         if maybe_listener.is_some() {
590             epoll::ctl(
591                 self.epoll_file.as_raw_fd(),
592                 epoll::ControlOptions::EPOLL_CTL_DEL,
593                 fd,
594                 epoll::Event::new(epoll::Events::empty(), 0),
595             )
596             .unwrap_or_else(|err| {
597                 warn!(
598                     "vosck muxer: error removing epoll listener for fd {:?}: {:?}",
599                     fd, err
600                 );
601             });
602         }
603 
604         maybe_listener
605     }
606 
607     /// Allocate a host-side port to be assigned to a new host-initiated connection.
608     ///
609     ///
610     fn allocate_local_port(&mut self) -> u32 {
611         // TODO: this doesn't seem very space-efficient.
612         // Maybe rewrite this to limit port range and use a bitmap?
613         //
614 
615         loop {
616             self.local_port_last = (self.local_port_last + 1) & !(1 << 31) | (1 << 30);
617             if self.local_port_set.insert(self.local_port_last) {
618                 break;
619             }
620         }
621         self.local_port_last
622     }
623 
    /// Mark a previously used host-side port as free.
    ///
    /// This makes the port available again to `allocate_local_port()`.
    ///
    fn free_local_port(&mut self, port: u32) {
        self.local_port_set.remove(&port);
    }
629 
    /// Handle a new connection request coming from our peer (the guest vsock driver).
    ///
    /// This will attempt to connect to a host-side Unix socket, expected to be listening at
    /// the file system path corresponding to the destination port. If successful, a new
    /// connection object will be created and added to the connection pool. On failure, a new
    /// RST packet will be scheduled for delivery to the guest.
    ///
    fn handle_peer_request_pkt(&mut self, pkt: &VsockPacket) {
        // Port-specific host sockets live at "<host_sock_path>_<port>".
        let port_path = format!("{}_{}", self.host_sock_path, pkt.dst_port());

        UnixStream::connect(port_path)
            .and_then(|stream| stream.set_nonblocking(true).map(|_| stream))
            .map_err(Error::UnixConnect)
            .and_then(|stream| {
                self.add_connection(
                    ConnMapKey {
                        local_port: pkt.dst_port(),
                        peer_port: pkt.src_port(),
                    },
                    MuxerConnection::new_peer_init(
                        stream,
                        uapi::VSOCK_HOST_CID,
                        self.cid,
                        pkt.dst_port(),
                        pkt.src_port(),
                        pkt.buf_alloc(),
                    ),
                )
            })
            // Any failure along the way (connect, non-blocking setup, pool insertion) gets
            // answered with an RST to the guest.
            .unwrap_or_else(|_| self.enq_rst(pkt.dst_port(), pkt.src_port()));
    }
661 
662     /// Perform an action that might mutate a connection's state.
663     ///
664     /// This is used as shorthand for repetitive tasks that need to be performed after a
665     /// connection object mutates. E.g.
666     /// - update the connection's epoll listener;
667     /// - schedule the connection to be queried for RX data;
668     /// - kill the connection if an unrecoverable error occurs.
669     ///
670     fn apply_conn_mutation<F>(&mut self, key: ConnMapKey, mut_fn: F)
671     where
672         F: FnOnce(&mut MuxerConnection),
673     {
674         if let Some(conn) = self.conn_map.get_mut(&key) {
675             let had_rx = conn.has_pending_rx();
676             let was_expiring = conn.will_expire();
677             let prev_state = conn.state();
678 
679             mut_fn(conn);
680 
681             // If this is a host-initiated connection that has just become established, we'll have
682             // to send an ack message to the host end.
683             if prev_state == ConnState::LocalInit && conn.state() == ConnState::Established {
684                 let msg = format!("OK {}\n", key.local_port);
685                 match conn.send_bytes_raw(msg.as_bytes()) {
686                     Ok(written) if written == msg.len() => (),
687                     Ok(_) => {
688                         // If we can't write a dozen bytes to a pristine connection something
689                         // must be really wrong. Killing it.
690                         conn.kill();
691                         warn!("vsock: unable to fully write connection ack msg.");
692                     }
693                     Err(err) => {
694                         conn.kill();
695                         warn!("vsock: unable to ack host connection: {:?}", err);
696                     }
697                 };
698             }
699 
700             // If the connection wasn't previously scheduled for RX, add it to our RX queue.
701             if !had_rx && conn.has_pending_rx() {
702                 self.rxq.push(MuxerRx::ConnRx(key));
703             }
704 
705             // If the connection wasn't previously scheduled for termination, add it to the
706             // kill queue.
707             if !was_expiring && conn.will_expire() {
708                 // It's safe to unwrap here, since `conn.will_expire()` already guaranteed that
709                 // an `conn.expiry` is available.
710                 self.killq.push(key, conn.expiry().unwrap());
711             }
712 
713             let fd = conn.get_polled_fd();
714             let new_evset = conn.get_polled_evset();
715             if new_evset.is_empty() {
716                 // If the connection no longer needs epoll notifications, remove its listener
717                 // from our list.
718                 self.remove_listener(fd);
719                 return;
720             }
721             if let Some(EpollListener::Connection { evset, .. }) = self.listener_map.get_mut(&fd) {
722                 if *evset != new_evset {
723                     // If the set of events that the connection is interested in has changed,
724                     // we need to update its epoll listener.
725                     debug!(
726                         "vsock: updating listener for (lp={}, pp={}): old={:?}, new={:?}",
727                         key.local_port, key.peer_port, *evset, new_evset
728                     );
729 
730                     *evset = new_evset;
731                     epoll::ctl(
732                         self.epoll_file.as_raw_fd(),
733                         epoll::ControlOptions::EPOLL_CTL_MOD,
734                         fd,
735                         epoll::Event::new(new_evset, fd as u64),
736                     )
737                     .unwrap_or_else(|err| {
738                         // This really shouldn't happen, like, ever. However, "famous last
739                         // words" and all that, so let's just kill it with fire, and walk away.
740                         self.kill_connection(key);
741                         error!(
742                             "vsock: error updating epoll listener for (lp={}, pp={}): {:?}",
743                             key.local_port, key.peer_port, err
744                         );
745                     });
746                 }
747             } else {
748                 // The connection had previously asked to be removed from the listener map (by
749                 // returning an empty event set via `get_polled_fd()`), but now wants back in.
750                 self.add_listener(
751                     fd,
752                     EpollListener::Connection {
753                         key,
754                         evset: new_evset,
755                     },
756                 )
757                 .unwrap_or_else(|err| {
758                     self.kill_connection(key);
759                     error!(
760                         "vsock: error updating epoll listener for (lp={}, pp={}): {:?}",
761                         key.local_port, key.peer_port, err
762                     );
763                 });
764             }
765         }
766     }
767 
768     /// Check if any connections have timed out, and if so, schedule them for immediate
769     /// termination.
770     ///
771     fn sweep_killq(&mut self) {
772         while let Some(key) = self.killq.pop() {
773             // Connections don't get removed from the kill queue when their kill timer is
774             // disarmed, since that would be a costly operation. This means we must check if
775             // the connection has indeed expired, prior to killing it.
776             let mut kill = false;
777             self.conn_map
778                 .entry(key)
779                 .and_modify(|conn| kill = conn.has_expired());
780             if kill {
781                 self.kill_connection(key);
782             }
783         }
784 
785         if self.killq.is_empty() && !self.killq.is_synced() {
786             self.killq = MuxerKillQ::from_conn_map(&self.conn_map);
787             // If we've just re-created the kill queue, we can sweep it again; maybe there's
788             // more to kill.
789             self.sweep_killq();
790         }
791     }
792 
793     /// Enqueue an RST packet into `self.rxq`.
794     ///
795     /// Enqueue errors aren't propagated up the call chain, since there is nothing we can do to
796     /// handle them. We do, however, log a warning, since not being able to enqueue an RST
797     /// packet means we have to drop it, which is not normal operation.
798     ///
799     fn enq_rst(&mut self, local_port: u32, peer_port: u32) {
800         let pushed = self.rxq.push(MuxerRx::RstPkt {
801             local_port,
802             peer_port,
803         });
804         if !pushed {
805             warn!(
806                 "vsock: muxer.rxq full; dropping RST packet for lp={}, pp={}",
807                 local_port, peer_port
808             );
809         }
810     }
811 }
812 
813 #[cfg(test)]
814 mod tests {
815     use std::io::{Read, Write};
816     use std::ops::Drop;
817     use std::os::unix::net::{UnixListener, UnixStream};
818     use std::path::{Path, PathBuf};
819 
820     use super::super::super::csm::defs as csm_defs;
821     use super::super::super::tests::TestContext as VsockTestContext;
822     use super::*;
823 
    /// Guest (peer) CID used for all test muxer instances and test packets.
    const PEER_CID: u64 = 3;
    /// Peer buffer allocation advertised in every test packet header (see `init_pkt`).
    const PEER_BUF_ALLOC: u32 = 64 * 1024;
826 
    /// Everything needed to exercise the muxer in isolation: a vsock device test context
    /// (presumably held only to keep the memory backing `pkt` alive — note the leading
    /// underscore), a reusable packet, and the muxer under test.
    struct MuxerTestContext {
        _vsock_test_ctx: VsockTestContext,
        pkt: VsockPacket,
        muxer: VsockMuxer,
    }
832 
    impl Drop for MuxerTestContext {
        fn drop(&mut self) {
            // Clean up the Unix socket file that the muxer created on the filesystem.
            std::fs::remove_file(self.muxer.host_sock_path.as_str()).unwrap();
        }
    }
838 
839     impl MuxerTestContext {
        /// Build a test context named `name` (used to derive a unique Unix socket path).
        ///
        /// Sets up a vsock device test context, carves a packet out of the head of the
        /// first RX virtq, and creates the muxer under test.
        fn new(name: &str) -> Self {
            let vsock_test_ctx = VsockTestContext::new();
            let mut handler_ctx = vsock_test_ctx.create_epoll_handler_context();
            let pkt = VsockPacket::from_rx_virtq_head(
                &mut handler_ctx.handler.queues[0]
                    .iter()
                    .unwrap()
                    .next()
                    .unwrap(),
                None,
            )
            .unwrap();
            let uds_path = format!("test_vsock_{}.sock", name);
            let muxer = VsockMuxer::new(PEER_CID, uds_path).unwrap();

            Self {
                _vsock_test_ctx: vsock_test_ctx,
                pkt,
                muxer,
            }
        }
861 
        /// Zero the packet header, then initialize it as a stream-type packet travelling
        /// from the peer (guest) to the host, with the given ports and operation.
        fn init_pkt(&mut self, local_port: u32, peer_port: u32, op: u16) -> &mut VsockPacket {
            for b in self.pkt.hdr_mut() {
                *b = 0;
            }
            self.pkt
                .set_type(uapi::VSOCK_TYPE_STREAM)
                .set_src_cid(PEER_CID)
                .set_dst_cid(uapi::VSOCK_HOST_CID)
                .set_src_port(peer_port)
                .set_dst_port(local_port)
                .set_op(op)
                .set_buf_alloc(PEER_BUF_ALLOC)
        }
875 
876         fn init_data_pkt(
877             &mut self,
878             local_port: u32,
879             peer_port: u32,
880             data: &[u8],
881         ) -> &mut VsockPacket {
882             assert!(data.len() <= self.pkt.buf().unwrap().len() as usize);
883             self.init_pkt(local_port, peer_port, uapi::VSOCK_OP_RW)
884                 .set_len(data.len() as u32);
885             self.pkt.buf_mut().unwrap()[..data.len()].copy_from_slice(data);
886             &mut self.pkt
887         }
888 
        /// Deliver `self.pkt` to the muxer, as if the guest had sent it.
        fn send(&mut self) {
            self.muxer.send_pkt(&self.pkt).unwrap();
        }

        /// Have the muxer fill `self.pkt` with its next pending RX packet.
        fn recv(&mut self) {
            self.muxer.recv_pkt(&mut self.pkt).unwrap();
        }

        /// Fake an EPOLLIN notification, as the VMM epoll loop would deliver it.
        fn notify_muxer(&mut self) {
            self.muxer.notify(epoll::Events::EPOLLIN);
        }
900 
901         fn count_epoll_listeners(&self) -> (usize, usize) {
902             let mut local_lsn_count = 0usize;
903             let mut conn_lsn_count = 0usize;
904             for key in self.muxer.listener_map.values() {
905                 match key {
906                     EpollListener::LocalStream(_) => local_lsn_count += 1,
907                     EpollListener::Connection { .. } => conn_lsn_count += 1,
908                     _ => (),
909                 };
910             }
911             (local_lsn_count, conn_lsn_count)
912         }
913 
        /// Create a host-side Unix listener for vsock port `port`, bound at the path the
        /// muxer uses for that port: `<host_sock_path>_<port>`.
        fn create_local_listener(&self, port: u32) -> LocalListener {
            LocalListener::new(format!("{}_{}", self.muxer.host_sock_path, port))
        }
917 
        /// Drive a complete host-initiated ("local") connection handshake towards guest
        /// port `peer_port`, asserting every intermediate muxer state along the way.
        ///
        /// Returns the connected host-side Unix stream and the local port the muxer
        /// allocated for the new connection.
        fn local_connect(&mut self, peer_port: u32) -> (UnixStream, u32) {
            let (init_local_lsn_count, init_conn_lsn_count) = self.count_epoll_listeners();

            let mut stream = UnixStream::connect(self.muxer.host_sock_path.clone()).unwrap();
            stream.set_nonblocking(true).unwrap();
            // The muxer would now get notified of a new connection having arrived at its Unix
            // socket, so it can accept it.
            self.notify_muxer();

            // Just after having accepted a new local connection, the muxer should've added a new
            // `LocalStream` listener to its `listener_map`.
            let (local_lsn_count, _) = self.count_epoll_listeners();
            assert_eq!(local_lsn_count, init_local_lsn_count + 1);

            let buf = format!("CONNECT {}\n", peer_port);
            stream.write_all(buf.as_bytes()).unwrap();
            // The muxer would now get notified that data is available for reading from the locally
            // initiated connection.
            self.notify_muxer();

            // Successfully reading and parsing the connection request should have removed the
            // LocalStream epoll listener and added a Connection epoll listener.
            let (local_lsn_count, conn_lsn_count) = self.count_epoll_listeners();
            assert_eq!(local_lsn_count, init_local_lsn_count);
            assert_eq!(conn_lsn_count, init_conn_lsn_count + 1);

            // A LocalInit connection should've been added to the muxer connection map.  A new
            // local port should also have been allocated for the new LocalInit connection.
            let local_port = self.muxer.local_port_last;
            let key = ConnMapKey {
                local_port,
                peer_port,
            };
            assert!(self.muxer.conn_map.contains_key(&key));
            assert!(self.muxer.local_port_set.contains(&local_port));

            // A connection request for the peer should now be available from the muxer.
            assert!(self.muxer.has_pending_rx());
            self.recv();
            assert_eq!(self.pkt.op(), uapi::VSOCK_OP_REQUEST);
            assert_eq!(self.pkt.dst_port(), peer_port);
            assert_eq!(self.pkt.src_port(), local_port);

            self.init_pkt(local_port, peer_port, uapi::VSOCK_OP_RESPONSE);
            self.send();

            // The guest's response completes the handshake; the host end should now be
            // acked with an "OK <local_port>" message.
            let mut buf = vec![0u8; 32];
            let len = stream.read(&mut buf[..]).unwrap();
            assert_eq!(&buf[..len], format!("OK {}\n", local_port).as_bytes());

            (stream, local_port)
        }
970     }
971 
    /// A non-blocking Unix listener; the socket file at `path` is removed on drop.
    struct LocalListener {
        path: PathBuf,
        sock: UnixListener,
    }
976     impl LocalListener {
977         fn new<P: AsRef<Path> + Clone>(path: P) -> Self {
978             let path_buf = path.as_ref().to_path_buf();
979             let sock = UnixListener::bind(path).unwrap();
980             sock.set_nonblocking(true).unwrap();
981             Self {
982                 path: path_buf,
983                 sock,
984             }
985         }
986         fn accept(&mut self) -> UnixStream {
987             let (stream, _) = self.sock.accept().unwrap();
988             stream.set_nonblocking(true).unwrap();
989             stream
990         }
991     }
    impl Drop for LocalListener {
        fn drop(&mut self) {
            // Remove the socket file created by `new`.
            std::fs::remove_file(&self.path).unwrap();
        }
    }
997 
998     #[test]
999     fn test_muxer_epoll_listener() {
1000         let ctx = MuxerTestContext::new("muxer_epoll_listener");
1001         assert_eq!(ctx.muxer.get_polled_fd(), ctx.muxer.epoll_file.as_raw_fd());
1002         assert_eq!(ctx.muxer.get_polled_evset(), epoll::Events::EPOLLIN);
1003     }
1004 
    // Malformed / orphan guest packets: non-stream packet types and connection-less
    // packets get an RST reply; packets addressed to a foreign CID are silently dropped.
    #[test]
    fn test_bad_peer_pkt() {
        const LOCAL_PORT: u32 = 1026;
        const PEER_PORT: u32 = 1025;
        const SOCK_DGRAM: u16 = 2;

        let mut ctx = MuxerTestContext::new("bad_peer_pkt");
        ctx.init_pkt(LOCAL_PORT, PEER_PORT, uapi::VSOCK_OP_REQUEST)
            .set_type(SOCK_DGRAM);
        ctx.send();

        // The guest sent a SOCK_DGRAM packet. Per the vsock spec, we need to reply with an RST
        // packet, since vsock only supports stream sockets.
        assert!(ctx.muxer.has_pending_rx());
        ctx.recv();
        assert_eq!(ctx.pkt.op(), uapi::VSOCK_OP_RST);
        assert_eq!(ctx.pkt.src_cid(), uapi::VSOCK_HOST_CID);
        assert_eq!(ctx.pkt.dst_cid(), PEER_CID);
        assert_eq!(ctx.pkt.src_port(), LOCAL_PORT);
        assert_eq!(ctx.pkt.dst_port(), PEER_PORT);

        // Any orphan (i.e. without a connection), non-RST packet, should be replied to with an
        // RST.
        let bad_ops = [
            uapi::VSOCK_OP_RESPONSE,
            uapi::VSOCK_OP_CREDIT_REQUEST,
            uapi::VSOCK_OP_CREDIT_UPDATE,
            uapi::VSOCK_OP_SHUTDOWN,
            uapi::VSOCK_OP_RW,
        ];
        for op in bad_ops.iter() {
            ctx.init_pkt(LOCAL_PORT, PEER_PORT, *op);
            ctx.send();
            assert!(ctx.muxer.has_pending_rx());
            ctx.recv();
            assert_eq!(ctx.pkt.op(), uapi::VSOCK_OP_RST);
            assert_eq!(ctx.pkt.src_port(), LOCAL_PORT);
            assert_eq!(ctx.pkt.dst_port(), PEER_PORT);
        }

        // Any packet addressed to anything other than VSOCK_VHOST_CID should get dropped.
        assert!(!ctx.muxer.has_pending_rx());
        ctx.init_pkt(LOCAL_PORT, PEER_PORT, uapi::VSOCK_OP_REQUEST)
            .set_dst_cid(uapi::VSOCK_HOST_CID + 1);
        ctx.send();
        assert!(!ctx.muxer.has_pending_rx());
    }
1052 
    // Guest-initiated connection lifecycle: refusal when no host listener exists,
    // acceptance when one does, then data flow in both directions.
    #[test]
    fn test_peer_connection() {
        const LOCAL_PORT: u32 = 1026;
        const PEER_PORT: u32 = 1025;

        let mut ctx = MuxerTestContext::new("peer_connection");

        // Test peer connection refused.
        ctx.init_pkt(LOCAL_PORT, PEER_PORT, uapi::VSOCK_OP_REQUEST);
        ctx.send();
        ctx.recv();
        assert_eq!(ctx.pkt.op(), uapi::VSOCK_OP_RST);
        assert_eq!(ctx.pkt.len(), 0);
        assert_eq!(ctx.pkt.src_cid(), uapi::VSOCK_HOST_CID);
        assert_eq!(ctx.pkt.dst_cid(), PEER_CID);
        assert_eq!(ctx.pkt.src_port(), LOCAL_PORT);
        assert_eq!(ctx.pkt.dst_port(), PEER_PORT);

        // Test peer connection accepted.
        let mut listener = ctx.create_local_listener(LOCAL_PORT);
        ctx.init_pkt(LOCAL_PORT, PEER_PORT, uapi::VSOCK_OP_REQUEST);
        ctx.send();
        assert_eq!(ctx.muxer.conn_map.len(), 1);
        let mut stream = listener.accept();
        ctx.recv();
        assert_eq!(ctx.pkt.op(), uapi::VSOCK_OP_RESPONSE);
        assert_eq!(ctx.pkt.len(), 0);
        assert_eq!(ctx.pkt.src_cid(), uapi::VSOCK_HOST_CID);
        assert_eq!(ctx.pkt.dst_cid(), PEER_CID);
        assert_eq!(ctx.pkt.src_port(), LOCAL_PORT);
        assert_eq!(ctx.pkt.dst_port(), PEER_PORT);
        let key = ConnMapKey {
            local_port: LOCAL_PORT,
            peer_port: PEER_PORT,
        };
        assert!(ctx.muxer.conn_map.contains_key(&key));

        // Test guest -> host data flow.
        let data = [1, 2, 3, 4];
        ctx.init_data_pkt(LOCAL_PORT, PEER_PORT, &data);
        ctx.send();
        let mut buf = vec![0; data.len()];
        stream.read_exact(buf.as_mut_slice()).unwrap();
        assert_eq!(buf.as_slice(), data);

        // Test host -> guest data flow.
        let data = [5u8, 6, 7, 8];
        stream.write_all(&data).unwrap();

        // When data is available on the local stream, an EPOLLIN event would normally be delivered
        // to the muxer's nested epoll FD. For testing only, we can fake that event notification
        // here.
        ctx.notify_muxer();
        // After being notified, the muxer should've figured out that RX data was available for one
        // of its connections, so it should now be reporting that it can fill in an RX packet.
        assert!(ctx.muxer.has_pending_rx());
        ctx.recv();
        assert_eq!(ctx.pkt.op(), uapi::VSOCK_OP_RW);
        assert_eq!(ctx.pkt.buf().unwrap()[..data.len()], data);
        assert_eq!(ctx.pkt.src_port(), LOCAL_PORT);
        assert_eq!(ctx.pkt.dst_port(), PEER_PORT);

        assert!(!ctx.muxer.has_pending_rx());
    }
1117 
    // Host-initiated connection: full handshake via `local_connect`, then data flow in
    // both directions.
    #[test]
    fn test_local_connection() {
        let mut ctx = MuxerTestContext::new("local_connection");
        let peer_port = 1025;
        let (mut stream, local_port) = ctx.local_connect(peer_port);

        // Test guest -> host data flow.
        let data = [1, 2, 3, 4];
        ctx.init_data_pkt(local_port, peer_port, &data);
        ctx.send();

        let mut buf = vec![0u8; data.len()];
        stream.read_exact(buf.as_mut_slice()).unwrap();
        assert_eq!(buf.as_slice(), &data);

        // Test host -> guest data flow.
        let data = [5, 6, 7, 8];
        stream.write_all(&data).unwrap();
        ctx.notify_muxer();

        assert!(ctx.muxer.has_pending_rx());
        ctx.recv();
        assert_eq!(ctx.pkt.op(), uapi::VSOCK_OP_RW);
        assert_eq!(ctx.pkt.src_port(), local_port);
        assert_eq!(ctx.pkt.dst_port(), peer_port);
        assert_eq!(ctx.pkt.buf().unwrap()[..data.len()], data);
    }
1145 
    // Closing the host end of a local connection should trigger a full VSOCK_OP_SHUTDOWN;
    // the peer's subsequent RST should remove the connection and free its local port.
    #[test]
    fn test_local_close() {
        let peer_port = 1025;
        let mut ctx = MuxerTestContext::new("local_close");
        let local_port;
        {
            let (_stream, local_port_) = ctx.local_connect(peer_port);
            local_port = local_port_;
        }
        // Local var `_stream` was now dropped, thus closing the local stream. After the muxer gets
        // notified via EPOLLIN, it should attempt to gracefully shutdown the connection, issuing a
        // VSOCK_OP_SHUTDOWN with both no-more-send and no-more-recv indications set.
        ctx.notify_muxer();
        assert!(ctx.muxer.has_pending_rx());
        ctx.recv();
        assert_eq!(ctx.pkt.op(), uapi::VSOCK_OP_SHUTDOWN);
        assert_ne!(ctx.pkt.flags() & uapi::VSOCK_FLAGS_SHUTDOWN_SEND, 0);
        assert_ne!(ctx.pkt.flags() & uapi::VSOCK_FLAGS_SHUTDOWN_RCV, 0);
        assert_eq!(ctx.pkt.src_port(), local_port);
        assert_eq!(ctx.pkt.dst_port(), peer_port);

        // The connection should get removed (and its local port freed), after the peer replies
        // with an RST.
        ctx.init_pkt(local_port, peer_port, uapi::VSOCK_OP_RST);
        ctx.send();
        let key = ConnMapKey {
            local_port,
            peer_port,
        };
        assert!(!ctx.muxer.conn_map.contains_key(&key));
        assert!(!ctx.muxer.local_port_set.contains(&local_port));
    }
1178 
    // A full shutdown from the guest should make the muxer drop the connection, reply
    // with an RST, and close the host-side Unix socket.
    #[test]
    fn test_peer_close() {
        let peer_port = 1025;
        let local_port = 1026;
        let mut ctx = MuxerTestContext::new("peer_close");

        let mut sock = ctx.create_local_listener(local_port);
        ctx.init_pkt(local_port, peer_port, uapi::VSOCK_OP_REQUEST);
        ctx.send();
        let mut stream = sock.accept();

        assert!(ctx.muxer.has_pending_rx());
        ctx.recv();
        assert_eq!(ctx.pkt.op(), uapi::VSOCK_OP_RESPONSE);
        assert_eq!(ctx.pkt.src_port(), local_port);
        assert_eq!(ctx.pkt.dst_port(), peer_port);
        let key = ConnMapKey {
            local_port,
            peer_port,
        };
        assert!(ctx.muxer.conn_map.contains_key(&key));

        // Emulate a full shutdown from the peer (no-more-send + no-more-recv).
        ctx.init_pkt(local_port, peer_port, uapi::VSOCK_OP_SHUTDOWN)
            .set_flag(uapi::VSOCK_FLAGS_SHUTDOWN_SEND)
            .set_flag(uapi::VSOCK_FLAGS_SHUTDOWN_RCV);
        ctx.send();

        // Now, the muxer should remove the connection from its map, and reply with an RST.
        assert!(ctx.muxer.has_pending_rx());
        ctx.recv();
        assert_eq!(ctx.pkt.op(), uapi::VSOCK_OP_RST);
        assert_eq!(ctx.pkt.src_port(), local_port);
        assert_eq!(ctx.pkt.dst_port(), peer_port);
        let key = ConnMapKey {
            local_port,
            peer_port,
        };
        assert!(!ctx.muxer.conn_map.contains_key(&key));

        // The muxer should also drop / close the local Unix socket for this connection.
        // (A read of 0 bytes means EOF, i.e. the other end was closed.)
        let mut buf = vec![0u8; 16];
        assert_eq!(stream.read(buf.as_mut_slice()).unwrap(), 0);
    }
1223 
    // RX queue overflow behavior: the queue desyncs when overfilled, an RST evicts a
    // queued non-RST packet, and the queue resyncs on the next recv after draining.
    #[test]
    fn test_muxer_rxq() {
        let mut ctx = MuxerTestContext::new("muxer_rxq");
        let local_port = 1026;
        let peer_port_first = 1025;
        let mut listener = ctx.create_local_listener(local_port);
        let mut streams: Vec<UnixStream> = Vec::new();

        for peer_port in peer_port_first..peer_port_first + defs::MUXER_RXQ_SIZE {
            ctx.init_pkt(local_port, peer_port as u32, uapi::VSOCK_OP_REQUEST);
            ctx.send();
            streams.push(listener.accept());
        }

        // The muxer RX queue should now be full (with connection responses), but still
        // synchronized.
        assert!(ctx.muxer.rxq.is_synced());

        // One more queued reply should desync the RX queue.
        ctx.init_pkt(
            local_port,
            (peer_port_first + defs::MUXER_RXQ_SIZE) as u32,
            uapi::VSOCK_OP_REQUEST,
        );
        ctx.send();
        assert!(!ctx.muxer.rxq.is_synced());

        // With an out-of-sync queue, an RST should evict any non-RST packet from the queue, and
        // take its place. We'll check that by making sure that the last packet popped from the
        // queue is an RST.
        ctx.init_pkt(
            local_port + 1,
            peer_port_first as u32,
            uapi::VSOCK_OP_REQUEST,
        );
        ctx.send();

        for peer_port in peer_port_first..peer_port_first + defs::MUXER_RXQ_SIZE - 1 {
            ctx.recv();
            assert_eq!(ctx.pkt.op(), uapi::VSOCK_OP_RESPONSE);
            // The response order should hold. The evicted response should have been the last
            // enqueued.
            assert_eq!(ctx.pkt.dst_port(), peer_port as u32);
        }
        // There should be one more packet in the queue: the RST.
        assert_eq!(ctx.muxer.rxq.len(), 1);
        ctx.recv();
        assert_eq!(ctx.pkt.op(), uapi::VSOCK_OP_RST);

        // The queue should now be empty, but out-of-sync, so the muxer should report it has some
        // pending RX.
        assert!(ctx.muxer.rxq.is_empty());
        assert!(!ctx.muxer.rxq.is_synced());
        assert!(ctx.muxer.has_pending_rx());

        // The next recv should sync the queue back up. It should also yield one of the two
        // responses that are still left:
        // - the one that desynchronized the queue; and
        // - the one that got evicted by the RST.
        ctx.recv();
        assert!(ctx.muxer.rxq.is_synced());
        assert_eq!(ctx.pkt.op(), uapi::VSOCK_OP_RESPONSE);

        assert!(ctx.muxer.has_pending_rx());
        ctx.recv();
        assert_eq!(ctx.pkt.op(), uapi::VSOCK_OP_RESPONSE);
    }
1291 
    // Kill queue overflow and sweeping: scheduling one connection past capacity desyncs
    // the queue; after the shutdown timers expire, the next send triggers a sweep that
    // kills the dying connections (one RST each) and resyncs the queue.
    #[test]
    fn test_muxer_killq() {
        let mut ctx = MuxerTestContext::new("muxer_killq");
        let local_port = 1026;
        let peer_port_first = 1025;
        let peer_port_last = peer_port_first + defs::MUXER_KILLQ_SIZE;
        let mut listener = ctx.create_local_listener(local_port);

        for peer_port in peer_port_first..=peer_port_last {
            ctx.init_pkt(local_port, peer_port as u32, uapi::VSOCK_OP_REQUEST);
            ctx.send();
            ctx.notify_muxer();
            ctx.recv();
            assert_eq!(ctx.pkt.op(), uapi::VSOCK_OP_RESPONSE);
            assert_eq!(ctx.pkt.src_port(), local_port);
            assert_eq!(ctx.pkt.dst_port(), peer_port as u32);
            {
                let _stream = listener.accept();
            }
            ctx.notify_muxer();
            ctx.recv();
            assert_eq!(ctx.pkt.op(), uapi::VSOCK_OP_SHUTDOWN);
            assert_eq!(ctx.pkt.src_port(), local_port);
            assert_eq!(ctx.pkt.dst_port(), peer_port as u32);
            // The kill queue should be synchronized, up until the `defs::MUXER_KILLQ_SIZE`th
            // connection we schedule for termination.
            assert_eq!(
                ctx.muxer.killq.is_synced(),
                peer_port < peer_port_first + defs::MUXER_KILLQ_SIZE
            );
        }

        assert!(!ctx.muxer.killq.is_synced());
        assert!(!ctx.muxer.has_pending_rx());

        // Wait for the kill timers to expire.
        std::thread::sleep(std::time::Duration::from_millis(
            csm_defs::CONN_SHUTDOWN_TIMEOUT_MS,
        ));

        // Trigger a kill queue sweep, by requesting a new connection.
        ctx.init_pkt(
            local_port,
            peer_port_last as u32 + 1,
            uapi::VSOCK_OP_REQUEST,
        );
        ctx.send();

        // After sweeping the kill queue, it should now be synced (assuming the RX queue is larger
        // than the kill queue, since an RST packet will be queued for each killed connection).
        assert!(ctx.muxer.killq.is_synced());
        assert!(ctx.muxer.has_pending_rx());
        // There should be `defs::MUXER_KILLQ_SIZE` RSTs in the RX queue, from terminating the
        // dying connections in the recent killq sweep.
        for _p in peer_port_first..peer_port_last {
            ctx.recv();
            assert_eq!(ctx.pkt.op(), uapi::VSOCK_OP_RST);
            assert_eq!(ctx.pkt.src_port(), local_port);
        }

        // There should be one more packet in the RX queue: the connection response our request
        // that triggered the kill queue sweep.
        ctx.recv();
        assert_eq!(ctx.pkt.op(), uapi::VSOCK_OP_RESPONSE);
        assert_eq!(ctx.pkt.dst_port(), peer_port_last as u32 + 1);

        assert!(!ctx.muxer.has_pending_rx());
    }
1360 
    #[test]
    fn test_regression_handshake() {
        // Address one of the issues found while fixing the following issue:
        // https://github.com/firecracker-microvm/firecracker/issues/1751
        // This test checks that the handshake message is not accounted for
        // in the connection's forwarded-byte counter.
        let mut ctx = MuxerTestContext::new("regression_handshake");
        let peer_port = 1025;

        // Create a local connection.
        let (_, local_port) = ctx.local_connect(peer_port);

        // Get the connection from the connection map.
        let key = ConnMapKey {
            local_port,
            peer_port,
        };
        let conn = ctx.muxer.conn_map.get_mut(&key).unwrap();

        // Check that fwd_cnt is 0 - "OK ..." was not accounted for.
        assert_eq!(conn.fwd_cnt().0, 0);
    }
1382 
    #[test]
    fn test_regression_rxq_pop() {
        // Address one of the issues found while fixing the following issue:
        // https://github.com/firecracker-microvm/firecracker/issues/1751
        // This test checks that a connection is not popped out of the muxer
        // rxq when multiple flags are set
        let mut ctx = MuxerTestContext::new("regression_rxq_pop");
        let peer_port = 1025;
        let (mut stream, local_port) = ctx.local_connect(peer_port);

        // Send some data.
        let data = [5u8, 6, 7, 8];
        stream.write_all(&data).unwrap();
        ctx.notify_muxer();

        // Get the connection from the connection map.
        let key = ConnMapKey {
            local_port,
            peer_port,
        };
        let conn = ctx.muxer.conn_map.get_mut(&key).unwrap();

        // Forcefully insert another flag, so the connection has two distinct reasons to
        // stay in the RX queue.
        conn.insert_credit_update();

        // Call recv twice in order to check that the connection is still
        // in the rxq.
        assert!(ctx.muxer.has_pending_rx());
        ctx.recv();
        assert!(ctx.muxer.has_pending_rx());
        ctx.recv();

        // Since initially the connection had two flags set, now there should
        // not be any pending RX in the muxer.
        assert!(!ctx.muxer.has_pending_rx());
    }
1419 }
1420