xref: /cloud-hypervisor/virtio-devices/src/vsock/packet.rs (revision 7d7bfb2034001d4cb15df2ddc56d2d350c8da30f)
1 // Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
2 // SPDX-License-Identifier: Apache-2.0
3 //
4 
5 /// `VsockPacket` provides a thin wrapper over the buffers exchanged via virtio queues.
6 /// There are two components to a vsock packet, each using its own descriptor in a
7 /// virtio queue:
8 /// - the packet header; and
9 /// - the packet data/buffer.
10 /// There is a 1:1 relationship between descriptor chains and packets: the first descriptor (the
11 /// chain head) holds the header, and an optional second descriptor holds the data. The second
12 /// descriptor is only present for data packets (VSOCK_OP_RW).
13 ///
14 /// `VsockPacket` wraps these two buffers and provides direct access to the data stored
15 /// in guest memory. This is done to avoid unnecessarily copying data from guest memory
16 /// to temporary buffers, before passing it on to the vsock backend.
17 ///
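/// For orientation, a rough sketch of a data packet's descriptor chain (illustrative only; the
/// actual contents come from the guest driver):
///
/// ```text
/// desc[0] (chain head): packet header (virtio_vsock_hdr), 44 bytes
/// desc[1] (optional):   packet data, `hdr.len` bytes (VSOCK_OP_RW packets only)
/// ```
///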
18 use byteorder::{ByteOrder, LittleEndian};
19 use std::sync::Arc;
20 
21 use super::defs;
22 use super::{Result, VsockError};
23 use crate::{get_host_address_range, GuestMemoryMmap};
24 use virtio_queue::DescriptorChain;
25 use vm_memory::GuestMemoryLoadGuard;
26 use vm_virtio::{AccessPlatform, Translatable};
27 
28 // The vsock packet header is defined by the C struct:
29 //
30 // ```C
31 // struct virtio_vsock_hdr {
32 //     le64 src_cid;
33 //     le64 dst_cid;
34 //     le32 src_port;
35 //     le32 dst_port;
36 //     le32 len;
37 //     le16 type;
38 //     le16 op;
39 //     le32 flags;
40 //     le32 buf_alloc;
41 //     le32 fwd_cnt;
42 // };
43 // ```
44 //
45 // This struct occupies the buffer pointed to by the head descriptor. We access it as a byte
46 // slice. To that end, we define below the offset of each struct field, as well as the packed
47 // struct size, as a bunch of `usize` consts.
48 // Note that these offsets are only used privately by the `VsockPacket` struct; the public
49 // interface consists of getter and setter methods for each struct field, which also handle the
50 // correct endianness.
51 
52 /// The vsock packet header struct size (when packed).
53 pub const VSOCK_PKT_HDR_SIZE: usize = 44;
54 
55 // Source CID.
56 const HDROFF_SRC_CID: usize = 0;
57 
58 // Destination CID.
59 const HDROFF_DST_CID: usize = 8;
60 
61 // Source port.
62 const HDROFF_SRC_PORT: usize = 16;
63 
64 // Destination port.
65 const HDROFF_DST_PORT: usize = 20;
66 
67 // Data length (in bytes) - may be 0, if there is no data buffer.
68 const HDROFF_LEN: usize = 24;
69 
70 // Socket type. Currently, only connection-oriented streams are defined by the vsock protocol.
71 const HDROFF_TYPE: usize = 28;
72 
73 // Operation ID - one of the VSOCK_OP_* values; e.g.
74 // - VSOCK_OP_RW: a data packet;
75 // - VSOCK_OP_REQUEST: connection request;
76 // - VSOCK_OP_RST: forceful connection termination;
77 // etc (see `super::defs::uapi` for the full list).
78 const HDROFF_OP: usize = 30;
79 
80 // Additional options (flags) associated with the current operation (`op`).
81 // Currently, only used with shutdown requests (VSOCK_OP_SHUTDOWN).
82 const HDROFF_FLAGS: usize = 32;
83 
84 // Size (in bytes) of the packet sender's receive buffer (for the connection to which this
85 // packet belongs).
86 const HDROFF_BUF_ALLOC: usize = 36;
87 
88 // Number of bytes the sender has received and consumed (for the connection to which this packet
89 // belongs). For instance, for our Unix backend, this counter would be the total number of bytes
90 // we have successfully written to a backing Unix socket.
91 const HDROFF_FWD_CNT: usize = 40;
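// As an illustrative sanity check (a sketch, not code present elsewhere in this module): the last
// field starts at offset 40 and is a 4-byte le32, which lines up with the packed header size
// defined above.
//
// ```ignore
// assert_eq!(HDROFF_FWD_CNT + std::mem::size_of::<u32>(), VSOCK_PKT_HDR_SIZE);
// ```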
92 
93 /// The vsock packet, implemented as a wrapper over a virtq descriptor chain:
94 /// - the chain head, holding the packet header; and
95 /// - an optional data/buffer descriptor, present only for data packets (VSOCK_OP_RW).
96 ///
97 pub struct VsockPacket {
98     hdr: *mut u8,
99     buf: Option<*mut u8>,
100     buf_size: usize,
101 }
102 
103 impl VsockPacket {
104     /// Create the packet wrapper from a TX virtq chain head.
105     ///
106     /// The chain head is expected to hold valid packet header data. A following packet buffer
107     /// descriptor can optionally end the chain. Bounds and pointer checks are performed when
108     /// creating the wrapper.
109     ///
110     pub fn from_tx_virtq_head(
111         desc_chain: &mut DescriptorChain<GuestMemoryLoadGuard<GuestMemoryMmap>>,
112         access_platform: Option<&Arc<dyn AccessPlatform>>,
113     ) -> Result<Self> {
114         let head = desc_chain.next().ok_or(VsockError::HdrDescMissing)?;
115 
116         // All buffers in the TX queue must be readable.
117         //
118         if head.is_write_only() {
119             return Err(VsockError::UnreadableDescriptor);
120         }
121 
122         // The packet header should fit inside the head descriptor.
123         if head.len() < VSOCK_PKT_HDR_SIZE as u32 {
124             return Err(VsockError::HdrDescTooSmall(head.len()));
125         }
126 
127         let mut pkt = Self {
128             hdr: get_host_address_range(
129                 desc_chain.memory(),
130                 head.addr()
131                     .translate_gva(access_platform, head.len() as usize),
132                 VSOCK_PKT_HDR_SIZE,
133             )
134             .ok_or(VsockError::GuestMemory)? as *mut u8,
135             buf: None,
136             buf_size: 0,
137         };
138 
139         // No point looking for a data/buffer descriptor if the packet is zero-length.
140         if pkt.is_empty() {
141             return Ok(pkt);
142         }
143 
144         // Reject packets whose advertised data length exceeds the maximum we support.
145         //
146         if pkt.len() > defs::MAX_PKT_BUF_SIZE as u32 {
147             return Err(VsockError::InvalidPktLen(pkt.len()));
148         }
149 
150         // If the packet header showed a non-zero length, there should be a data descriptor here.
151         let buf_desc = desc_chain.next().ok_or(VsockError::BufDescMissing)?;
152 
153         // TX data should be read-only.
154         if buf_desc.is_write_only() {
155             return Err(VsockError::UnreadableDescriptor);
156         }
157 
158         // The data buffer should be large enough to fit the size of the data, as described by
159         // the header descriptor.
160         if buf_desc.len() < pkt.len() {
161             return Err(VsockError::BufDescTooSmall);
162         }
163 
164         pkt.buf_size = buf_desc.len() as usize;
165         pkt.buf = Some(
166             get_host_address_range(
167                 desc_chain.memory(),
168                 buf_desc
169                     .addr()
170                     .translate_gva(access_platform, buf_desc.len() as usize),
171                 pkt.buf_size,
172             )
173             .ok_or(VsockError::GuestMemory)? as *mut u8,
174         );
175 
176         Ok(pkt)
177     }
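    // A minimal usage sketch, mirroring the unit tests at the bottom of this file (the queue
    // index and variable names are assumptions of that test setup, not requirements of this
    // method):
    //
    // ```ignore
    // let mut chain = handler.queues[1].iter().unwrap().next().unwrap();
    // let pkt = VsockPacket::from_tx_virtq_head(&mut chain, None)?;
    // // For VSOCK_OP_RW packets, the first `pkt.len()` bytes of `pkt.buf()` hold the payload.
    // ```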
178 
179     /// Create the packet wrapper from an RX virtq chain head.
180     ///
181     /// There must be two descriptors in the chain, both writable: a header descriptor and a data
182     /// descriptor. Bounds and pointer checks are performed when creating the wrapper.
183     ///
184     pub fn from_rx_virtq_head(
185         desc_chain: &mut DescriptorChain<GuestMemoryLoadGuard<GuestMemoryMmap>>,
186         access_platform: Option<&Arc<dyn AccessPlatform>>,
187     ) -> Result<Self> {
188         let head = desc_chain.next().ok_or(VsockError::HdrDescMissing)?;
189 
190         // All RX buffers must be writable.
191         //
192         if !head.is_write_only() {
193             return Err(VsockError::UnwritableDescriptor);
194         }
195 
196         // The packet header should fit inside the head descriptor.
197         if head.len() < VSOCK_PKT_HDR_SIZE as u32 {
198             return Err(VsockError::HdrDescTooSmall(head.len()));
199         }
200 
201         // All RX descriptor chains should have a header and a data descriptor.
202         if !head.has_next() {
203             return Err(VsockError::BufDescMissing);
204         }
205         let buf_desc = desc_chain.next().ok_or(VsockError::BufDescMissing)?;
206         let buf_size = buf_desc.len() as usize;
207 
208         Ok(Self {
209             hdr: get_host_address_range(
210                 desc_chain.memory(),
211                 head.addr()
212                     .translate_gva(access_platform, head.len() as usize),
213                 VSOCK_PKT_HDR_SIZE,
214             )
215             .ok_or(VsockError::GuestMemory)? as *mut u8,
216             buf: Some(
217                 get_host_address_range(
218                     desc_chain.memory(),
219                     buf_desc
220                         .addr()
221                         .translate_gva(access_platform, buf_desc.len() as usize),
222                     buf_size,
223                 )
224                 .ok_or(VsockError::GuestMemory)? as *mut u8,
225             ),
226             buf_size,
227         })
228     }
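    // A minimal RX-side sketch (illustrative only; `chain`, `data` and the chosen header fields
    // are placeholders): once the wrapper is built, the device side typically fills in the header
    // through the setters below and copies the payload into the guest-provided buffer.
    //
    // ```ignore
    // let mut pkt = VsockPacket::from_rx_virtq_head(&mut chain, None)?;
    // pkt.set_op(defs::uapi::VSOCK_OP_RW).set_len(data.len() as u32);
    // pkt.buf_mut().unwrap()[..data.len()].copy_from_slice(&data);
    // ```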
229 
230     /// Provides in-place, byte-slice access to the vsock packet header.
231     ///
232     pub fn hdr(&self) -> &[u8] {
233         // This is safe since bound checks have already been performed when creating the packet
234         // from the virtq descriptor.
235         unsafe { std::slice::from_raw_parts(self.hdr as *const u8, VSOCK_PKT_HDR_SIZE) }
236     }
237 
238     /// Provides in-place, byte-slice, mutable access to the vsock packet header.
239     ///
240     pub fn hdr_mut(&mut self) -> &mut [u8] {
241         // This is safe since bound checks have already been performed when creating the packet
242         // from the virtq descriptor.
243         unsafe { std::slice::from_raw_parts_mut(self.hdr, VSOCK_PKT_HDR_SIZE) }
244     }
245 
246     /// Provides in-place, byte-slice access to the vsock packet data buffer.
247     ///
248     /// Note: control packets (e.g. connection request or reset) have no data buffer associated.
249     ///       For those packets, this method will return `None`.
250     /// Also note: calling `len()` on the returned slice will yield the buffer size, which may be
251     ///            (and often is) larger than the length of the packet data. The packet data length
252     ///            is stored in the packet header, and accessible via `VsockPacket::len()`.
253     pub fn buf(&self) -> Option<&[u8]> {
254         self.buf.map(|ptr| {
255             // This is safe since bound checks have already been performed when creating the packet
256             // from the virtq descriptor.
257             unsafe { std::slice::from_raw_parts(ptr as *const u8, self.buf_size) }
258         })
259     }
260 
261     /// Provides in-place, byte-slice, mutable access to the vsock packet data buffer.
262     ///
263     /// Note: control packets (e.g. connection request or reset) have no data buffer associated.
264     ///       For those packets, this method will return `None`.
265     /// Also note: calling `len()` on the returned slice will yield the buffer size, which may be
266     ///            (and often is) larger than the length of the packet data. The packet data length
267     ///            is stored in the packet header, and accessible via `VsockPacket::len()`.
268     pub fn buf_mut(&mut self) -> Option<&mut [u8]> {
269         self.buf.map(|ptr| {
270             // This is safe since bound checks have already been performed when creating the packet
271             // from the virtq descriptor.
272             unsafe { std::slice::from_raw_parts_mut(ptr, self.buf_size) }
273         })
274     }
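    // Usage note (a sketch; `pkt` is a placeholder): the slices returned above span the entire
    // guest buffer, so consumers interested only in the payload would typically trim them to the
    // header's `len` field:
    //
    // ```ignore
    // let payload = &pkt.buf().unwrap()[..pkt.len() as usize];
    // ```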
275 
276     pub fn src_cid(&self) -> u64 {
277         LittleEndian::read_u64(&self.hdr()[HDROFF_SRC_CID..])
278     }
279 
280     pub fn set_src_cid(&mut self, cid: u64) -> &mut Self {
281         LittleEndian::write_u64(&mut self.hdr_mut()[HDROFF_SRC_CID..], cid);
282         self
283     }
284 
285     pub fn dst_cid(&self) -> u64 {
286         LittleEndian::read_u64(&self.hdr()[HDROFF_DST_CID..])
287     }
288 
289     pub fn set_dst_cid(&mut self, cid: u64) -> &mut Self {
290         LittleEndian::write_u64(&mut self.hdr_mut()[HDROFF_DST_CID..], cid);
291         self
292     }
293 
294     pub fn src_port(&self) -> u32 {
295         LittleEndian::read_u32(&self.hdr()[HDROFF_SRC_PORT..])
296     }
297 
298     pub fn set_src_port(&mut self, port: u32) -> &mut Self {
299         LittleEndian::write_u32(&mut self.hdr_mut()[HDROFF_SRC_PORT..], port);
300         self
301     }
302 
303     pub fn dst_port(&self) -> u32 {
304         LittleEndian::read_u32(&self.hdr()[HDROFF_DST_PORT..])
305     }
306 
307     pub fn set_dst_port(&mut self, port: u32) -> &mut Self {
308         LittleEndian::write_u32(&mut self.hdr_mut()[HDROFF_DST_PORT..], port);
309         self
310     }
311 
312     pub fn len(&self) -> u32 {
313         LittleEndian::read_u32(&self.hdr()[HDROFF_LEN..])
314     }
315 
316     pub fn is_empty(&self) -> bool {
317         self.len() == 0
318     }
319 
320     pub fn set_len(&mut self, len: u32) -> &mut Self {
321         LittleEndian::write_u32(&mut self.hdr_mut()[HDROFF_LEN..], len);
322         self
323     }
324 
325     pub fn type_(&self) -> u16 {
326         LittleEndian::read_u16(&self.hdr()[HDROFF_TYPE..])
327     }
328 
329     pub fn set_type(&mut self, type_: u16) -> &mut Self {
330         LittleEndian::write_u16(&mut self.hdr_mut()[HDROFF_TYPE..], type_);
331         self
332     }
333 
334     pub fn op(&self) -> u16 {
335         LittleEndian::read_u16(&self.hdr()[HDROFF_OP..])
336     }
337 
338     pub fn set_op(&mut self, op: u16) -> &mut Self {
339         LittleEndian::write_u16(&mut self.hdr_mut()[HDROFF_OP..], op);
340         self
341     }
342 
343     pub fn flags(&self) -> u32 {
344         LittleEndian::read_u32(&self.hdr()[HDROFF_FLAGS..])
345     }
346 
347     pub fn set_flags(&mut self, flags: u32) -> &mut Self {
348         LittleEndian::write_u32(&mut self.hdr_mut()[HDROFF_FLAGS..], flags);
349         self
350     }
351 
352     pub fn set_flag(&mut self, flag: u32) -> &mut Self {
353         self.set_flags(self.flags() | flag);
354         self
355     }
356 
357     pub fn buf_alloc(&self) -> u32 {
358         LittleEndian::read_u32(&self.hdr()[HDROFF_BUF_ALLOC..])
359     }
360 
361     pub fn set_buf_alloc(&mut self, buf_alloc: u32) -> &mut Self {
362         LittleEndian::write_u32(&mut self.hdr_mut()[HDROFF_BUF_ALLOC..], buf_alloc);
363         self
364     }
365 
366     pub fn fwd_cnt(&self) -> u32 {
367         LittleEndian::read_u32(&self.hdr()[HDROFF_FWD_CNT..])
368     }
369 
370     pub fn set_fwd_cnt(&mut self, fwd_cnt: u32) -> &mut Self {
371         LittleEndian::write_u32(&mut self.hdr_mut()[HDROFF_FWD_CNT..], fwd_cnt);
372         self
373     }
374 }
375 
376 #[cfg(test)]
377 mod tests {
378     use super::super::tests::TestContext;
379     use super::*;
380     use crate::vsock::defs::MAX_PKT_BUF_SIZE;
381     use crate::GuestMemoryMmap;
382     use virtio_queue::defs::VIRTQ_DESC_F_WRITE;
383     use vm_memory::GuestAddress;
384     use vm_virtio::queue::testing::VirtqDesc as GuestQDesc;
385 
386     macro_rules! create_context {
387         ($test_ctx:ident, $handler_ctx:ident) => {
388             let $test_ctx = TestContext::new();
389             let mut $handler_ctx = $test_ctx.create_epoll_handler_context();
390             // For TX packets, hdr.len should be set to a valid value.
391             set_pkt_len(1024, &$handler_ctx.guest_txvq.dtable[0], &$test_ctx.mem);
392         };
393     }
394 
395     macro_rules! expect_asm_error {
396         (tx, $test_ctx:expr, $handler_ctx:expr, $err:pat) => {
397             expect_asm_error!($test_ctx, $handler_ctx, $err, from_tx_virtq_head, 1);
398         };
399         (rx, $test_ctx:expr, $handler_ctx:expr, $err:pat) => {
400             expect_asm_error!($test_ctx, $handler_ctx, $err, from_rx_virtq_head, 0);
401         };
402         ($test_ctx:expr, $handler_ctx:expr, $err:pat, $ctor:ident, $vq:expr) => {
403             match VsockPacket::$ctor(
404                 &mut $handler_ctx.handler.queues[$vq]
405                     .iter()
406                     .unwrap()
407                     .next()
408                     .unwrap(),
409                 None,
410             ) {
411                 Err($err) => (),
412                 Ok(_) => panic!("Packet assembly should've failed!"),
413                 Err(other) => panic!("Packet assembly failed with: {:?}", other),
414             }
415         };
416     }
417 
418     fn set_pkt_len(len: u32, guest_desc: &GuestQDesc, mem: &GuestMemoryMmap) {
419         let hdr_gpa = guest_desc.addr.get();
420         let hdr_ptr = get_host_address_range(mem, GuestAddress(hdr_gpa), VSOCK_PKT_HDR_SIZE)
421             .unwrap() as *mut u8;
422         let len_ptr = unsafe { hdr_ptr.add(HDROFF_LEN) };
423 
424         LittleEndian::write_u32(unsafe { std::slice::from_raw_parts_mut(len_ptr, 4) }, len);
425     }
426 
427     #[test]
428     #[allow(clippy::cognitive_complexity)]
429     fn test_tx_packet_assembly() {
430         // Test case: successful TX packet assembly.
431         {
432             create_context!(test_ctx, handler_ctx);
433 
434             let pkt = VsockPacket::from_tx_virtq_head(
435                 &mut handler_ctx.handler.queues[1]
436                     .iter()
437                     .unwrap()
438                     .next()
439                     .unwrap(),
440                 None,
441             )
442             .unwrap();
443             assert_eq!(pkt.hdr().len(), VSOCK_PKT_HDR_SIZE);
444             assert_eq!(
445                 pkt.buf().unwrap().len(),
446                 handler_ctx.guest_txvq.dtable[1].len.get() as usize
447             );
448         }
449 
450         // Test case: error on write-only hdr descriptor.
451         {
452             create_context!(test_ctx, handler_ctx);
453             handler_ctx.guest_txvq.dtable[0]
454                 .flags
455                 .set(VIRTQ_DESC_F_WRITE);
456             expect_asm_error!(tx, test_ctx, handler_ctx, VsockError::UnreadableDescriptor);
457         }
458 
459         // Test case: header descriptor has insufficient space to hold the packet header.
460         {
461             create_context!(test_ctx, handler_ctx);
462             handler_ctx.guest_txvq.dtable[0]
463                 .len
464                 .set(VSOCK_PKT_HDR_SIZE as u32 - 1);
465             expect_asm_error!(tx, test_ctx, handler_ctx, VsockError::HdrDescTooSmall(_));
466         }
467 
468         // Test case: zero-length TX packet.
469         {
470             create_context!(test_ctx, handler_ctx);
471             set_pkt_len(0, &handler_ctx.guest_txvq.dtable[0], &test_ctx.mem);
472             let mut pkt = VsockPacket::from_tx_virtq_head(
473                 &mut handler_ctx.handler.queues[1]
474                     .iter()
475                     .unwrap()
476                     .next()
477                     .unwrap(),
478                 None,
479             )
480             .unwrap();
481             assert!(pkt.buf().is_none());
482             assert!(pkt.buf_mut().is_none());
483         }
484 
485         // Test case: TX packet has more data than we can handle.
486         {
487             create_context!(test_ctx, handler_ctx);
488             set_pkt_len(
489                 MAX_PKT_BUF_SIZE as u32 + 1,
490                 &handler_ctx.guest_txvq.dtable[0],
491                 &test_ctx.mem,
492             );
493             expect_asm_error!(tx, test_ctx, handler_ctx, VsockError::InvalidPktLen(_));
494         }
495 
496         // Test case:
497         // - packet header advertises some data length; and
498         // - the data descriptor is missing.
499         {
500             create_context!(test_ctx, handler_ctx);
501             set_pkt_len(1024, &handler_ctx.guest_txvq.dtable[0], &test_ctx.mem);
502             handler_ctx.guest_txvq.dtable[0].flags.set(0);
503             expect_asm_error!(tx, test_ctx, handler_ctx, VsockError::BufDescMissing);
504         }
505 
506         // Test case: error on write-only buf descriptor.
507         {
508             create_context!(test_ctx, handler_ctx);
509             handler_ctx.guest_txvq.dtable[1]
510                 .flags
511                 .set(VIRTQ_DESC_F_WRITE);
512             expect_asm_error!(tx, test_ctx, handler_ctx, VsockError::UnreadableDescriptor);
513         }
514 
515         // Test case: the buffer descriptor cannot fit all the data advertised by the
516         // packet header `len` field.
517         {
518             create_context!(test_ctx, handler_ctx);
519             set_pkt_len(8 * 1024, &handler_ctx.guest_txvq.dtable[0], &test_ctx.mem);
520             handler_ctx.guest_txvq.dtable[1].len.set(4 * 1024);
521             expect_asm_error!(tx, test_ctx, handler_ctx, VsockError::BufDescTooSmall);
522         }
523     }
524 
525     #[test]
526     fn test_rx_packet_assembly() {
527         // Test case: successful RX packet assembly.
528         {
529             create_context!(test_ctx, handler_ctx);
530             let pkt = VsockPacket::from_rx_virtq_head(
531                 &mut handler_ctx.handler.queues[0]
532                     .iter()
533                     .unwrap()
534                     .next()
535                     .unwrap(),
536                 None,
537             )
538             .unwrap();
539             assert_eq!(pkt.hdr().len(), VSOCK_PKT_HDR_SIZE);
540             assert_eq!(
541                 pkt.buf().unwrap().len(),
542                 handler_ctx.guest_rxvq.dtable[1].len.get() as usize
543             );
544         }
545 
546         // Test case: read-only RX packet header.
547         {
548             create_context!(test_ctx, handler_ctx);
549             handler_ctx.guest_rxvq.dtable[0].flags.set(0);
550             expect_asm_error!(rx, test_ctx, handler_ctx, VsockError::UnwritableDescriptor);
551         }
552 
553         // Test case: RX descriptor head cannot fit the entire packet header.
554         {
555             create_context!(test_ctx, handler_ctx);
556             handler_ctx.guest_rxvq.dtable[0]
557                 .len
558                 .set(VSOCK_PKT_HDR_SIZE as u32 - 1);
559             expect_asm_error!(rx, test_ctx, handler_ctx, VsockError::HdrDescTooSmall(_));
560         }
561 
562         // Test case: RX descriptor chain is missing the packet buffer descriptor.
563         {
564             create_context!(test_ctx, handler_ctx);
565             handler_ctx.guest_rxvq.dtable[0]
566                 .flags
567                 .set(VIRTQ_DESC_F_WRITE);
568             expect_asm_error!(rx, test_ctx, handler_ctx, VsockError::BufDescMissing);
569         }
570     }
571 
572     #[test]
573     #[allow(clippy::cognitive_complexity)]
574     fn test_packet_hdr_accessors() {
575         const SRC_CID: u64 = 1;
576         const DST_CID: u64 = 2;
577         const SRC_PORT: u32 = 3;
578         const DST_PORT: u32 = 4;
579         const LEN: u32 = 5;
580         const TYPE: u16 = 6;
581         const OP: u16 = 7;
582         const FLAGS: u32 = 8;
583         const BUF_ALLOC: u32 = 9;
584         const FWD_CNT: u32 = 10;
585 
586         create_context!(test_ctx, handler_ctx);
587         let mut pkt = VsockPacket::from_rx_virtq_head(
588             &mut handler_ctx.handler.queues[0]
589                 .iter()
590                 .unwrap()
591                 .next()
592                 .unwrap(),
593             None,
594         )
595         .unwrap();
596 
597         // Test field accessors.
598         pkt.set_src_cid(SRC_CID)
599             .set_dst_cid(DST_CID)
600             .set_src_port(SRC_PORT)
601             .set_dst_port(DST_PORT)
602             .set_len(LEN)
603             .set_type(TYPE)
604             .set_op(OP)
605             .set_flags(FLAGS)
606             .set_buf_alloc(BUF_ALLOC)
607             .set_fwd_cnt(FWD_CNT);
608 
609         assert_eq!(pkt.src_cid(), SRC_CID);
610         assert_eq!(pkt.dst_cid(), DST_CID);
611         assert_eq!(pkt.src_port(), SRC_PORT);
612         assert_eq!(pkt.dst_port(), DST_PORT);
613         assert_eq!(pkt.len(), LEN);
614         assert_eq!(pkt.type_(), TYPE);
615         assert_eq!(pkt.op(), OP);
616         assert_eq!(pkt.flags(), FLAGS);
617         assert_eq!(pkt.buf_alloc(), BUF_ALLOC);
618         assert_eq!(pkt.fwd_cnt(), FWD_CNT);
619 
620         // Test individual flag setting.
621         let flags = pkt.flags() | 0b1000;
622         pkt.set_flag(0b1000);
623         assert_eq!(pkt.flags(), flags);
624 
625         // Test packet header as-slice access.
626         //
627 
628         assert_eq!(pkt.hdr().len(), VSOCK_PKT_HDR_SIZE);
629 
630         assert_eq!(
631             SRC_CID,
632             LittleEndian::read_u64(&pkt.hdr()[HDROFF_SRC_CID..])
633         );
634         assert_eq!(
635             DST_CID,
636             LittleEndian::read_u64(&pkt.hdr()[HDROFF_DST_CID..])
637         );
638         assert_eq!(
639             SRC_PORT,
640             LittleEndian::read_u32(&pkt.hdr()[HDROFF_SRC_PORT..])
641         );
642         assert_eq!(
643             DST_PORT,
644             LittleEndian::read_u32(&pkt.hdr()[HDROFF_DST_PORT..])
645         );
646         assert_eq!(LEN, LittleEndian::read_u32(&pkt.hdr()[HDROFF_LEN..]));
647         assert_eq!(TYPE, LittleEndian::read_u16(&pkt.hdr()[HDROFF_TYPE..]));
648         assert_eq!(OP, LittleEndian::read_u16(&pkt.hdr()[HDROFF_OP..]));
649         assert_eq!(FLAGS, LittleEndian::read_u32(&pkt.hdr()[HDROFF_FLAGS..]));
650         assert_eq!(
651             BUF_ALLOC,
652             LittleEndian::read_u32(&pkt.hdr()[HDROFF_BUF_ALLOC..])
653         );
654         assert_eq!(
655             FWD_CNT,
656             LittleEndian::read_u32(&pkt.hdr()[HDROFF_FWD_CNT..])
657         );
658 
659         assert_eq!(pkt.hdr_mut().len(), VSOCK_PKT_HDR_SIZE);
660         for b in pkt.hdr_mut() {
661             *b = 0;
662         }
663         assert_eq!(pkt.src_cid(), 0);
664         assert_eq!(pkt.dst_cid(), 0);
665         assert_eq!(pkt.src_port(), 0);
666         assert_eq!(pkt.dst_port(), 0);
667         assert_eq!(pkt.len(), 0);
668         assert_eq!(pkt.type_(), 0);
669         assert_eq!(pkt.op(), 0);
670         assert_eq!(pkt.flags(), 0);
671         assert_eq!(pkt.buf_alloc(), 0);
672         assert_eq!(pkt.fwd_cnt(), 0);
673     }
674 
675     #[test]
676     fn test_packet_buf() {
677         create_context!(test_ctx, handler_ctx);
678         let mut pkt = VsockPacket::from_rx_virtq_head(
679             &mut handler_ctx.handler.queues[0]
680                 .iter()
681                 .unwrap()
682                 .next()
683                 .unwrap(),
684             None,
685         )
686         .unwrap();
687 
688         assert_eq!(
689             pkt.buf().unwrap().len(),
690             handler_ctx.guest_rxvq.dtable[1].len.get() as usize
691         );
692         assert_eq!(
693             pkt.buf_mut().unwrap().len(),
694             handler_ctx.guest_rxvq.dtable[1].len.get() as usize
695         );
696 
697         for i in 0..pkt.buf().unwrap().len() {
698             pkt.buf_mut().unwrap()[i] = (i % 0x100) as u8;
699             assert_eq!(pkt.buf().unwrap()[i], (i % 0x100) as u8);
700         }
701     }
702 }
703