xref: /cloud-hypervisor/virtio-devices/src/vsock/packet.rs (revision 6f8bd27cf7629733582d930519e98d19e90afb16)
1 // Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
2 // SPDX-License-Identifier: Apache-2.0
3 //
4 
5 /// `VsockPacket` provides a thin wrapper over the buffers exchanged via virtio queues.
6 /// There are two components to a vsock packet, each using its own descriptor in a
7 /// virtio queue:
8 /// - the packet header; and
9 /// - the packet data/buffer.
10 /// There is a 1:1 relation between descriptor chains and packets: the first (chain head) holds
11 /// the header, and an optional second descriptor holds the data. The second descriptor is only
12 /// present for data packets (VSOCK_OP_RW).
13 ///
14 /// `VsockPacket` wraps these two buffers and provides direct access to the data stored
15 /// in guest memory. This is done to avoid unnecessarily copying data from guest memory
16 /// to temporary buffers, before passing it on to the vsock backend.
17 ///
18 use byteorder::{ByteOrder, LittleEndian};
19 use std::ops::Deref;
20 use std::sync::Arc;
21 
22 use super::defs;
23 use super::{Result, VsockError};
24 use crate::get_host_address_range;
25 use virtio_queue::DescriptorChain;
26 use vm_memory::GuestMemory;
27 use vm_virtio::{AccessPlatform, Translatable};
28 
// The vsock packet header is defined by the C struct:
//
// ```C
// struct virtio_vsock_hdr {
//     le64 src_cid;
//     le64 dst_cid;
//     le32 src_port;
//     le32 dst_port;
//     le32 len;
//     le16 type;
//     le16 op;
//     le32 flags;
//     le32 buf_alloc;
//     le32 fwd_cnt;
// };
// ```
//
// This struct will occupy the buffer pointed to by the head descriptor. We'll be accessing it
// as a byte slice. To that end, we define below the offsets for each struct field, as well as the
// packed struct size, as a bunch of `usize` consts.
// Note that these offsets are only used privately by the `VsockPacket` struct, the public interface
// consisting of getter and setter methods, for each struct field, that will also handle the correct
// endianness.

/// The vsock packet header struct size (when packed).
/// This is the sum of all field sizes above: 2 * 8 + 7 * 4 + 2 * 2 = 44 bytes.
pub const VSOCK_PKT_HDR_SIZE: usize = 44;

// Source CID.
const HDROFF_SRC_CID: usize = 0;

// Destination CID.
const HDROFF_DST_CID: usize = 8;

// Source port.
const HDROFF_SRC_PORT: usize = 16;

// Destination port.
const HDROFF_DST_PORT: usize = 20;

// Data length (in bytes) - may be 0, if there is no data buffer.
const HDROFF_LEN: usize = 24;

// Socket type. Currently, only connection-oriented streams are defined by the vsock protocol.
const HDROFF_TYPE: usize = 28;

// Operation ID - one of the VSOCK_OP_* values; e.g.
// - VSOCK_OP_RW: a data packet;
// - VSOCK_OP_REQUEST: connection request;
// - VSOCK_OP_RST: forceful connection termination;
// etc (see `super::defs::uapi` for the full list).
const HDROFF_OP: usize = 30;

// Additional options (flags) associated with the current operation (`op`).
// Currently, only used with shutdown requests (VSOCK_OP_SHUTDOWN).
const HDROFF_FLAGS: usize = 32;

// Size (in bytes) of the packet sender receive buffer (for the connection to which this packet
// belongs).
const HDROFF_BUF_ALLOC: usize = 36;

// Number of bytes the sender has received and consumed (for the connection to which this packet
// belongs). For instance, for our Unix backend, this counter would be the total number of bytes
// we have successfully written to a backing Unix socket.
const HDROFF_FWD_CNT: usize = 40;
/// The vsock packet, implemented as a wrapper over a virtq descriptor chain:
/// - the chain head, holding the packet header; and
/// - (an optional) data/buffer descriptor, only present for data packets (VSOCK_OP_RW).
///
pub struct VsockPacket {
    // Host-side pointer to the packet header, resolved from guest memory.
    // Always valid for `VSOCK_PKT_HDR_SIZE` bytes: the `from_*_virtq_head()` constructors
    // are the only way to build a `VsockPacket`, and they bounds-check this mapping.
    hdr: *mut u8,
    // Host-side pointer to the optional data buffer; `None` for control packets,
    // which carry no data. Bounds-checked by the constructors, like `hdr`.
    buf: Option<*mut u8>,
    // Size (in bytes) of the buffer behind `buf`, i.e. the descriptor length. This may be
    // larger than the actual packet data length advertised in the header `len` field.
    buf_size: usize,
}
103 
impl VsockPacket {
    /// Create the packet wrapper from a TX virtq chain head.
    ///
    /// The chain head is expected to hold valid packet header data. A following packet buffer
    /// descriptor can optionally end the chain. Bounds and pointer checks are performed when
    /// creating the wrapper, so the `hdr()`/`buf()` accessors can hand out slices safely.
    ///
    /// Errors returned:
    /// - `HdrDescMissing` / `BufDescMissing`: the chain is shorter than required;
    /// - `UnreadableDescriptor`: a TX descriptor is marked write-only;
    /// - `HdrDescTooSmall` / `BufDescTooSmall` / `InvalidPktLen`: size checks failed;
    /// - `GuestMemory`: a guest address could not be resolved to a host address.
    pub fn from_tx_virtq_head<M>(
        desc_chain: &mut DescriptorChain<M>,
        access_platform: Option<&Arc<dyn AccessPlatform>>,
    ) -> Result<Self>
    where
        M: Clone + Deref,
        M::Target: GuestMemory,
    {
        let head = desc_chain.next().ok_or(VsockError::HdrDescMissing)?;

        // All buffers in the TX queue must be readable.
        //
        if head.is_write_only() {
            return Err(VsockError::UnreadableDescriptor);
        }

        // The packet header should fit inside the head descriptor.
        if head.len() < VSOCK_PKT_HDR_SIZE as u32 {
            return Err(VsockError::HdrDescTooSmall(head.len()));
        }

        let mut pkt = Self {
            // Translate the guest address through the optional access platform (e.g. an
            // IOMMU) first, then resolve it to a host virtual address, verifying that the
            // full header size is backed by guest memory.
            hdr: get_host_address_range(
                desc_chain.memory(),
                head.addr()
                    .translate_gva(access_platform, head.len() as usize),
                VSOCK_PKT_HDR_SIZE,
            )
            .ok_or(VsockError::GuestMemory)? as *mut u8,
            buf: None,
            buf_size: 0,
        };

        // No point looking for a data/buffer descriptor, if the packet is zero-length.
        if pkt.is_empty() {
            return Ok(pkt);
        }

        // Reject weirdly-sized packets. The `len` header field is guest-controlled, so cap
        // it before trusting it any further.
        //
        if pkt.len() > defs::MAX_PKT_BUF_SIZE as u32 {
            return Err(VsockError::InvalidPktLen(pkt.len()));
        }

        // If the packet header showed a non-zero length, there should be a data descriptor here.
        let buf_desc = desc_chain.next().ok_or(VsockError::BufDescMissing)?;

        // TX data should be read-only.
        if buf_desc.is_write_only() {
            return Err(VsockError::UnreadableDescriptor);
        }

        // The data buffer should be large enough to fit the size of the data, as described by
        // the header descriptor.
        if buf_desc.len() < pkt.len() {
            return Err(VsockError::BufDescTooSmall);
        }

        pkt.buf_size = buf_desc.len() as usize;
        pkt.buf = Some(
            get_host_address_range(
                desc_chain.memory(),
                buf_desc
                    .addr()
                    .translate_gva(access_platform, buf_desc.len() as usize),
                pkt.buf_size,
            )
            .ok_or(VsockError::GuestMemory)? as *mut u8,
        );

        Ok(pkt)
    }

    /// Create the packet wrapper from an RX virtq chain head.
    ///
    /// There must be two descriptors in the chain, both writable: a header descriptor and a data
    /// descriptor. Bounds and pointer checks are performed when creating the wrapper.
    ///
    /// Errors returned:
    /// - `HdrDescMissing` / `BufDescMissing`: the chain is shorter than required;
    /// - `UnwritableDescriptor`: an RX descriptor is not marked write-only;
    /// - `HdrDescTooSmall`: the head descriptor cannot hold a full header;
    /// - `GuestMemory`: a guest address could not be resolved to a host address.
    pub fn from_rx_virtq_head<M>(
        desc_chain: &mut DescriptorChain<M>,
        access_platform: Option<&Arc<dyn AccessPlatform>>,
    ) -> Result<Self>
    where
        M: Clone + Deref,
        M::Target: GuestMemory,
    {
        let head = desc_chain.next().ok_or(VsockError::HdrDescMissing)?;

        // All RX buffers must be writable.
        //
        if !head.is_write_only() {
            return Err(VsockError::UnwritableDescriptor);
        }

        // The packet header should fit inside the head descriptor.
        if head.len() < VSOCK_PKT_HDR_SIZE as u32 {
            return Err(VsockError::HdrDescTooSmall(head.len()));
        }

        // All RX descriptor chains should have a header and a data descriptor.
        if !head.has_next() {
            return Err(VsockError::BufDescMissing);
        }
        let buf_desc = desc_chain.next().ok_or(VsockError::BufDescMissing)?;
        let buf_size = buf_desc.len() as usize;

        Ok(Self {
            // See `from_tx_virtq_head()`: translate through the access platform, then
            // bounds-check the resolved host address range.
            hdr: get_host_address_range(
                desc_chain.memory(),
                head.addr()
                    .translate_gva(access_platform, head.len() as usize),
                VSOCK_PKT_HDR_SIZE,
            )
            .ok_or(VsockError::GuestMemory)? as *mut u8,
            buf: Some(
                get_host_address_range(
                    desc_chain.memory(),
                    buf_desc
                        .addr()
                        .translate_gva(access_platform, buf_desc.len() as usize),
                    buf_size,
                )
                .ok_or(VsockError::GuestMemory)? as *mut u8,
            ),
            buf_size,
        })
    }

    /// Provides in-place, byte-slice, access to the vsock packet header.
    ///
    pub fn hdr(&self) -> &[u8] {
        // SAFETY: bound checks have already been performed when creating the packet
        // from the virtq descriptor.
        unsafe { std::slice::from_raw_parts(self.hdr as *const u8, VSOCK_PKT_HDR_SIZE) }
    }

    /// Provides in-place, byte-slice, mutable access to the vsock packet header.
    ///
    pub fn hdr_mut(&mut self) -> &mut [u8] {
        // SAFETY: bound checks have already been performed when creating the packet
        // from the virtq descriptor.
        unsafe { std::slice::from_raw_parts_mut(self.hdr, VSOCK_PKT_HDR_SIZE) }
    }

    /// Provides in-place, byte-slice access to the vsock packet data buffer.
    ///
    /// Note: control packets (e.g. connection request or reset) have no data buffer associated.
    ///       For those packets, this method will return `None`.
    /// Also note: calling `len()` on the returned slice will yield the buffer size, which may be
    ///            (and often is) larger than the length of the packet data. The packet data length
    ///            is stored in the packet header, and accessible via `VsockPacket::len()`.
    pub fn buf(&self) -> Option<&[u8]> {
        self.buf.map(|ptr| {
            // SAFETY: bound checks have already been performed when creating the packet
            // from the virtq descriptor.
            unsafe { std::slice::from_raw_parts(ptr as *const u8, self.buf_size) }
        })
    }

    /// Provides in-place, byte-slice, mutable access to the vsock packet data buffer.
    ///
    /// Note: control packets (e.g. connection request or reset) have no data buffer associated.
    ///       For those packets, this method will return `None`.
    /// Also note: calling `len()` on the returned slice will yield the buffer size, which may be
    ///            (and often is) larger than the length of the packet data. The packet data length
    ///            is stored in the packet header, and accessible via `VsockPacket::len()`.
    pub fn buf_mut(&mut self) -> Option<&mut [u8]> {
        self.buf.map(|ptr| {
            // SAFETY: bound checks have already been performed when creating the packet
            // from the virtq descriptor.
            unsafe { std::slice::from_raw_parts_mut(ptr, self.buf_size) }
        })
    }

    // Header field accessors follow. Each getter reads, and each setter writes, the
    // corresponding little-endian field directly in the guest-memory-backed header slice;
    // setters return `&mut Self` so calls can be chained.

    pub fn src_cid(&self) -> u64 {
        LittleEndian::read_u64(&self.hdr()[HDROFF_SRC_CID..])
    }

    pub fn set_src_cid(&mut self, cid: u64) -> &mut Self {
        LittleEndian::write_u64(&mut self.hdr_mut()[HDROFF_SRC_CID..], cid);
        self
    }

    pub fn dst_cid(&self) -> u64 {
        LittleEndian::read_u64(&self.hdr()[HDROFF_DST_CID..])
    }

    pub fn set_dst_cid(&mut self, cid: u64) -> &mut Self {
        LittleEndian::write_u64(&mut self.hdr_mut()[HDROFF_DST_CID..], cid);
        self
    }

    pub fn src_port(&self) -> u32 {
        LittleEndian::read_u32(&self.hdr()[HDROFF_SRC_PORT..])
    }

    pub fn set_src_port(&mut self, port: u32) -> &mut Self {
        LittleEndian::write_u32(&mut self.hdr_mut()[HDROFF_SRC_PORT..], port);
        self
    }

    pub fn dst_port(&self) -> u32 {
        LittleEndian::read_u32(&self.hdr()[HDROFF_DST_PORT..])
    }

    pub fn set_dst_port(&mut self, port: u32) -> &mut Self {
        LittleEndian::write_u32(&mut self.hdr_mut()[HDROFF_DST_PORT..], port);
        self
    }

    // `len` is the length of the packet *data*, not of the buffer holding it (`buf_size`).
    pub fn len(&self) -> u32 {
        LittleEndian::read_u32(&self.hdr()[HDROFF_LEN..])
    }

    pub fn is_empty(&self) -> bool {
        self.len() == 0
    }

    pub fn set_len(&mut self, len: u32) -> &mut Self {
        LittleEndian::write_u32(&mut self.hdr_mut()[HDROFF_LEN..], len);
        self
    }

    pub fn type_(&self) -> u16 {
        LittleEndian::read_u16(&self.hdr()[HDROFF_TYPE..])
    }

    pub fn set_type(&mut self, type_: u16) -> &mut Self {
        LittleEndian::write_u16(&mut self.hdr_mut()[HDROFF_TYPE..], type_);
        self
    }

    pub fn op(&self) -> u16 {
        LittleEndian::read_u16(&self.hdr()[HDROFF_OP..])
    }

    pub fn set_op(&mut self, op: u16) -> &mut Self {
        LittleEndian::write_u16(&mut self.hdr_mut()[HDROFF_OP..], op);
        self
    }

    pub fn flags(&self) -> u32 {
        LittleEndian::read_u32(&self.hdr()[HDROFF_FLAGS..])
    }

    pub fn set_flags(&mut self, flags: u32) -> &mut Self {
        LittleEndian::write_u32(&mut self.hdr_mut()[HDROFF_FLAGS..], flags);
        self
    }

    // OR a single flag into the current `flags` field, preserving the other bits.
    pub fn set_flag(&mut self, flag: u32) -> &mut Self {
        self.set_flags(self.flags() | flag);
        self
    }

    pub fn buf_alloc(&self) -> u32 {
        LittleEndian::read_u32(&self.hdr()[HDROFF_BUF_ALLOC..])
    }

    pub fn set_buf_alloc(&mut self, buf_alloc: u32) -> &mut Self {
        LittleEndian::write_u32(&mut self.hdr_mut()[HDROFF_BUF_ALLOC..], buf_alloc);
        self
    }

    pub fn fwd_cnt(&self) -> u32 {
        LittleEndian::read_u32(&self.hdr()[HDROFF_FWD_CNT..])
    }

    pub fn set_fwd_cnt(&mut self, fwd_cnt: u32) -> &mut Self {
        LittleEndian::write_u32(&mut self.hdr_mut()[HDROFF_FWD_CNT..], fwd_cnt);
        self
    }
}
384 
#[cfg(test)]
#[allow(clippy::undocumented_unsafe_blocks)]
mod tests {
    use super::super::tests::TestContext;
    use super::*;
    use crate::vsock::defs::MAX_PKT_BUF_SIZE;
    use crate::GuestMemoryMmap;
    use virtio_bindings::bindings::virtio_ring::VRING_DESC_F_WRITE;
    use virtio_queue::QueueOwnedT;
    use vm_memory::GuestAddress;
    use vm_virtio::queue::testing::VirtqDesc as GuestQDesc;

    // Builds a fresh `TestContext` + epoll handler context pair, and seeds the TX header
    // descriptor with a valid (non-zero) packet length, so packet assembly can succeed
    // unless a test deliberately breaks something.
    macro_rules! create_context {
        ($test_ctx:ident, $handler_ctx:ident) => {
            let $test_ctx = TestContext::new();
            let mut $handler_ctx = $test_ctx.create_epoll_handler_context();
            // For TX packets, hdr.len should be set to a valid value.
            set_pkt_len(1024, &$handler_ctx.guest_txvq.dtable[0], &$test_ctx.mem);
        };
    }

    // Asserts that assembling a packet from the given queue fails with the expected
    // `VsockError` pattern. The `tx`/`rx` entry points select the constructor and the
    // queue index (TX is queue 1, RX is queue 0).
    macro_rules! expect_asm_error {
        (tx, $test_ctx:expr, $handler_ctx:expr, $err:pat) => {
            expect_asm_error!($test_ctx, $handler_ctx, $err, from_tx_virtq_head, 1);
        };
        (rx, $test_ctx:expr, $handler_ctx:expr, $err:pat) => {
            expect_asm_error!($test_ctx, $handler_ctx, $err, from_rx_virtq_head, 0);
        };
        ($test_ctx:expr, $handler_ctx:expr, $err:pat, $ctor:ident, $vq:expr) => {
            match VsockPacket::$ctor(
                &mut $handler_ctx.handler.queues[$vq]
                    .iter(&$test_ctx.mem)
                    .unwrap()
                    .next()
                    .unwrap(),
                None,
            ) {
                Err($err) => (),
                Ok(_) => panic!("Packet assembly should've failed!"),
                Err(other) => panic!("Packet assembly failed with: {:?}", other),
            }
        };
    }

    // Writes `len` into the `len` field of the packet header that `guest_desc` points to,
    // by resolving the guest address to a host pointer and patching the field in place.
    fn set_pkt_len(len: u32, guest_desc: &GuestQDesc, mem: &GuestMemoryMmap) {
        let hdr_gpa = guest_desc.addr.get();
        let hdr_ptr = get_host_address_range(mem, GuestAddress(hdr_gpa), VSOCK_PKT_HDR_SIZE)
            .unwrap() as *mut u8;
        let len_ptr = unsafe { hdr_ptr.add(HDROFF_LEN) };

        LittleEndian::write_u32(unsafe { std::slice::from_raw_parts_mut(len_ptr, 4) }, len);
    }

    #[test]
    #[allow(clippy::cognitive_complexity)]
    fn test_tx_packet_assembly() {
        // Test case: successful TX packet assembly.
        {
            create_context!(test_ctx, handler_ctx);

            let pkt = VsockPacket::from_tx_virtq_head(
                &mut handler_ctx.handler.queues[1]
                    .iter(&test_ctx.mem)
                    .unwrap()
                    .next()
                    .unwrap(),
                None,
            )
            .unwrap();
            assert_eq!(pkt.hdr().len(), VSOCK_PKT_HDR_SIZE);
            assert_eq!(
                pkt.buf().unwrap().len(),
                handler_ctx.guest_txvq.dtable[1].len.get() as usize
            );
        }

        // Test case: error on write-only hdr descriptor.
        {
            create_context!(test_ctx, handler_ctx);
            handler_ctx.guest_txvq.dtable[0]
                .flags
                .set(VRING_DESC_F_WRITE.try_into().unwrap());
            expect_asm_error!(tx, test_ctx, handler_ctx, VsockError::UnreadableDescriptor);
        }

        // Test case: header descriptor has insufficient space to hold the packet header.
        {
            create_context!(test_ctx, handler_ctx);
            handler_ctx.guest_txvq.dtable[0]
                .len
                .set(VSOCK_PKT_HDR_SIZE as u32 - 1);
            expect_asm_error!(tx, test_ctx, handler_ctx, VsockError::HdrDescTooSmall(_));
        }

        // Test case: zero-length TX packet. No data buffer should be mapped.
        {
            create_context!(test_ctx, handler_ctx);
            set_pkt_len(0, &handler_ctx.guest_txvq.dtable[0], &test_ctx.mem);
            let mut pkt = VsockPacket::from_tx_virtq_head(
                &mut handler_ctx.handler.queues[1]
                    .iter(&test_ctx.mem)
                    .unwrap()
                    .next()
                    .unwrap(),
                None,
            )
            .unwrap();
            assert!(pkt.buf().is_none());
            assert!(pkt.buf_mut().is_none());
        }

        // Test case: TX packet has more data than we can handle.
        {
            create_context!(test_ctx, handler_ctx);
            set_pkt_len(
                MAX_PKT_BUF_SIZE as u32 + 1,
                &handler_ctx.guest_txvq.dtable[0],
                &test_ctx.mem,
            );
            expect_asm_error!(tx, test_ctx, handler_ctx, VsockError::InvalidPktLen(_));
        }

        // Test case:
        // - packet header advertises some data length; and
        // - the data descriptor is missing (the NEXT flag is cleared on the head).
        {
            create_context!(test_ctx, handler_ctx);
            set_pkt_len(1024, &handler_ctx.guest_txvq.dtable[0], &test_ctx.mem);
            handler_ctx.guest_txvq.dtable[0].flags.set(0);
            expect_asm_error!(tx, test_ctx, handler_ctx, VsockError::BufDescMissing);
        }

        // Test case: error on write-only buf descriptor.
        {
            create_context!(test_ctx, handler_ctx);
            handler_ctx.guest_txvq.dtable[1]
                .flags
                .set(VRING_DESC_F_WRITE.try_into().unwrap());
            expect_asm_error!(tx, test_ctx, handler_ctx, VsockError::UnreadableDescriptor);
        }

        // Test case: the buffer descriptor cannot fit all the data advertised by the
        // packet header `len` field.
        {
            create_context!(test_ctx, handler_ctx);
            set_pkt_len(8 * 1024, &handler_ctx.guest_txvq.dtable[0], &test_ctx.mem);
            handler_ctx.guest_txvq.dtable[1].len.set(4 * 1024);
            expect_asm_error!(tx, test_ctx, handler_ctx, VsockError::BufDescTooSmall);
        }
    }

    #[test]
    fn test_rx_packet_assembly() {
        // Test case: successful RX packet assembly.
        {
            create_context!(test_ctx, handler_ctx);
            let pkt = VsockPacket::from_rx_virtq_head(
                &mut handler_ctx.handler.queues[0]
                    .iter(&test_ctx.mem)
                    .unwrap()
                    .next()
                    .unwrap(),
                None,
            )
            .unwrap();
            assert_eq!(pkt.hdr().len(), VSOCK_PKT_HDR_SIZE);
            assert_eq!(
                pkt.buf().unwrap().len(),
                handler_ctx.guest_rxvq.dtable[1].len.get() as usize
            );
        }

        // Test case: read-only RX packet header (clearing the WRITE flag must be rejected).
        {
            create_context!(test_ctx, handler_ctx);
            handler_ctx.guest_rxvq.dtable[0].flags.set(0);
            expect_asm_error!(rx, test_ctx, handler_ctx, VsockError::UnwritableDescriptor);
        }

        // Test case: RX descriptor head cannot fit the entire packet header.
        {
            create_context!(test_ctx, handler_ctx);
            handler_ctx.guest_rxvq.dtable[0]
                .len
                .set(VSOCK_PKT_HDR_SIZE as u32 - 1);
            expect_asm_error!(rx, test_ctx, handler_ctx, VsockError::HdrDescTooSmall(_));
        }

        // Test case: RX descriptor chain is missing the packet buffer descriptor
        // (WRITE flag set, but no NEXT flag on the head descriptor).
        {
            create_context!(test_ctx, handler_ctx);
            handler_ctx.guest_rxvq.dtable[0]
                .flags
                .set(VRING_DESC_F_WRITE.try_into().unwrap());
            expect_asm_error!(rx, test_ctx, handler_ctx, VsockError::BufDescMissing);
        }
    }

    #[test]
    #[allow(clippy::cognitive_complexity)]
    fn test_packet_hdr_accessors() {
        // Distinct sentinel values, one per header field, so a getter reading the wrong
        // offset would be detected.
        const SRC_CID: u64 = 1;
        const DST_CID: u64 = 2;
        const SRC_PORT: u32 = 3;
        const DST_PORT: u32 = 4;
        const LEN: u32 = 5;
        const TYPE: u16 = 6;
        const OP: u16 = 7;
        const FLAGS: u32 = 8;
        const BUF_ALLOC: u32 = 9;
        const FWD_CNT: u32 = 10;

        create_context!(test_ctx, handler_ctx);
        let mut pkt = VsockPacket::from_rx_virtq_head(
            &mut handler_ctx.handler.queues[0]
                .iter(&test_ctx.mem)
                .unwrap()
                .next()
                .unwrap(),
            None,
        )
        .unwrap();

        // Test field accessors.
        pkt.set_src_cid(SRC_CID)
            .set_dst_cid(DST_CID)
            .set_src_port(SRC_PORT)
            .set_dst_port(DST_PORT)
            .set_len(LEN)
            .set_type(TYPE)
            .set_op(OP)
            .set_flags(FLAGS)
            .set_buf_alloc(BUF_ALLOC)
            .set_fwd_cnt(FWD_CNT);

        assert_eq!(pkt.src_cid(), SRC_CID);
        assert_eq!(pkt.dst_cid(), DST_CID);
        assert_eq!(pkt.src_port(), SRC_PORT);
        assert_eq!(pkt.dst_port(), DST_PORT);
        assert_eq!(pkt.len(), LEN);
        assert_eq!(pkt.type_(), TYPE);
        assert_eq!(pkt.op(), OP);
        assert_eq!(pkt.flags(), FLAGS);
        assert_eq!(pkt.buf_alloc(), BUF_ALLOC);
        assert_eq!(pkt.fwd_cnt(), FWD_CNT);

        // Test individual flag setting (must preserve previously set bits).
        let flags = pkt.flags() | 0b1000;
        pkt.set_flag(0b1000);
        assert_eq!(pkt.flags(), flags);

        // Test packet header as-slice access: the raw little-endian bytes must match what
        // the setters wrote through the typed accessors.
        //

        assert_eq!(pkt.hdr().len(), VSOCK_PKT_HDR_SIZE);

        assert_eq!(
            SRC_CID,
            LittleEndian::read_u64(&pkt.hdr()[HDROFF_SRC_CID..])
        );
        assert_eq!(
            DST_CID,
            LittleEndian::read_u64(&pkt.hdr()[HDROFF_DST_CID..])
        );
        assert_eq!(
            SRC_PORT,
            LittleEndian::read_u32(&pkt.hdr()[HDROFF_SRC_PORT..])
        );
        assert_eq!(
            DST_PORT,
            LittleEndian::read_u32(&pkt.hdr()[HDROFF_DST_PORT..])
        );
        assert_eq!(LEN, LittleEndian::read_u32(&pkt.hdr()[HDROFF_LEN..]));
        assert_eq!(TYPE, LittleEndian::read_u16(&pkt.hdr()[HDROFF_TYPE..]));
        assert_eq!(OP, LittleEndian::read_u16(&pkt.hdr()[HDROFF_OP..]));
        assert_eq!(FLAGS, LittleEndian::read_u32(&pkt.hdr()[HDROFF_FLAGS..]));
        assert_eq!(
            BUF_ALLOC,
            LittleEndian::read_u32(&pkt.hdr()[HDROFF_BUF_ALLOC..])
        );
        assert_eq!(
            FWD_CNT,
            LittleEndian::read_u32(&pkt.hdr()[HDROFF_FWD_CNT..])
        );

        // Mutable slice access: zeroing the header must zero every field as seen through
        // the typed getters.
        assert_eq!(pkt.hdr_mut().len(), VSOCK_PKT_HDR_SIZE);
        for b in pkt.hdr_mut() {
            *b = 0;
        }
        assert_eq!(pkt.src_cid(), 0);
        assert_eq!(pkt.dst_cid(), 0);
        assert_eq!(pkt.src_port(), 0);
        assert_eq!(pkt.dst_port(), 0);
        assert_eq!(pkt.len(), 0);
        assert_eq!(pkt.type_(), 0);
        assert_eq!(pkt.op(), 0);
        assert_eq!(pkt.flags(), 0);
        assert_eq!(pkt.buf_alloc(), 0);
        assert_eq!(pkt.fwd_cnt(), 0);
    }

    #[test]
    fn test_packet_buf() {
        create_context!(test_ctx, handler_ctx);
        let mut pkt = VsockPacket::from_rx_virtq_head(
            &mut handler_ctx.handler.queues[0]
                .iter(&test_ctx.mem)
                .unwrap()
                .next()
                .unwrap(),
            None,
        )
        .unwrap();

        // Both buffer views must report the full descriptor length.
        assert_eq!(
            pkt.buf().unwrap().len(),
            handler_ctx.guest_rxvq.dtable[1].len.get() as usize
        );
        assert_eq!(
            pkt.buf_mut().unwrap().len(),
            handler_ctx.guest_rxvq.dtable[1].len.get() as usize
        );

        // Writes through the mutable view must be observable through the shared view,
        // byte for byte (both alias the same guest memory).
        for i in 0..pkt.buf().unwrap().len() {
            pkt.buf_mut().unwrap()[i] = (i % 0x100) as u8;
            assert_eq!(pkt.buf().unwrap()[i], (i % 0x100) as u8);
        }
    }
}
714