xref: /cloud-hypervisor/virtio-devices/src/vsock/packet.rs (revision 07d1208dd53a207a65b649b8952780dfd0ca59d9)
1 // Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
2 // SPDX-License-Identifier: Apache-2.0
3 //
4 
5 //! `VsockPacket` provides a thin wrapper over the buffers exchanged via virtio queues.
6 //! There are two components to a vsock packet, each using its own descriptor in a
7 //! virtio queue:
8 //! - the packet header; and
9 //! - the packet data/buffer.
10 //! There is a 1:1 relation between descriptor chains and packets: the first (chain head) holds
11 //! the header, and an optional second descriptor holds the data. The second descriptor is only
12 //! present for data packets (VSOCK_OP_RW).
13 //!
14 //! `VsockPacket` wraps these two buffers and provides direct access to the data stored
15 //! in guest memory. This is done to avoid unnecessarily copying data from guest memory
16 //! to temporary buffers, before passing it on to the vsock backend.
17 
18 use byteorder::{ByteOrder, LittleEndian};
19 use std::ops::Deref;
20 use std::sync::Arc;
21 
22 use super::defs;
23 use super::{Result, VsockError};
24 use crate::get_host_address_range;
25 use virtio_queue::DescriptorChain;
26 use vm_memory::{Address, GuestMemory};
27 use vm_virtio::{AccessPlatform, Translatable};
28 
29 // The vsock packet header is defined by the C struct:
30 //
31 // ```C
32 // struct virtio_vsock_hdr {
33 //     le64 src_cid;
34 //     le64 dst_cid;
35 //     le32 src_port;
36 //     le32 dst_port;
37 //     le32 len;
38 //     le16 type;
39 //     le16 op;
40 //     le32 flags;
41 //     le32 buf_alloc;
42 //     le32 fwd_cnt;
43 // };
44 // ```
45 //
46 // This struct will occupy the buffer pointed to by the head descriptor. We'll be accessing it
47 // as a byte slice. To that end, we define below the offsets for each field struct, as well as the
48 // packed struct size, as a bunch of `usize` consts.
49 // Note that these offsets are only used privately by the `VsockPacket` struct, the public interface
50 // consisting of getter and setter methods, for each struct field, that will also handle the correct
51 // endianness.
52 
/// The vsock packet header struct size (when packed).
pub const VSOCK_PKT_HDR_SIZE: usize = 44;

// Byte offsets of each `virtio_vsock_hdr` field within the packed header buffer.
// All fields are little-endian on the wire; the getters/setters below handle the conversion.

// Source CID.
const HDROFF_SRC_CID: usize = 0;

// Destination CID.
const HDROFF_DST_CID: usize = 8;

// Source port.
const HDROFF_SRC_PORT: usize = 16;

// Destination port.
const HDROFF_DST_PORT: usize = 20;

// Data length (in bytes) - may be 0, if there is no data buffer.
const HDROFF_LEN: usize = 24;

// Socket type. Currently, only connection-oriented streams are defined by the vsock protocol.
const HDROFF_TYPE: usize = 28;

// Operation ID - one of the VSOCK_OP_* values; e.g.
// - VSOCK_OP_RW: a data packet;
// - VSOCK_OP_REQUEST: connection request;
// - VSOCK_OP_RST: forceful connection termination;
// etc (see `super::defs::uapi` for the full list).
const HDROFF_OP: usize = 30;

// Additional options (flags) associated with the current operation (`op`).
// Currently, only used with shutdown requests (VSOCK_OP_SHUTDOWN).
const HDROFF_FLAGS: usize = 32;

// Size (in bytes) of the packet sender receive buffer (for the connection to which this packet
// belongs).
const HDROFF_BUF_ALLOC: usize = 36;

// Number of bytes the sender has received and consumed (for the connection to which this packet
// belongs). For instance, for our Unix backend, this counter would be the total number of bytes
// we have successfully written to a backing Unix socket.
const HDROFF_FWD_CNT: usize = 40;
93 
/// The vsock packet, implemented as a wrapper over a virtq descriptor chain:
/// - the chain head, holding the packet header; and
/// - (an optional) data/buffer descriptor, only present for data packets (VSOCK_OP_RW).
///
pub struct VsockPacket {
    // Host virtual address of the packet header inside guest memory. Only valid for as long as
    // the guest memory mapping it was resolved from stays alive — NOTE(review): the struct does
    // not hold that mapping; the caller is responsible for not outliving it.
    hdr: *mut u8,
    // Host virtual address of the packet data buffer; `None` for control packets, which carry
    // no data.
    buf: Option<*mut u8>,
    // Size (in bytes) of the buffer behind `buf` — the descriptor length, which may be larger
    // than the packet data length stored in the header (`len` field).
    buf_size: usize,
}
103 
104 impl VsockPacket {
105     /// Create the packet wrapper from a TX virtq chain head.
106     ///
107     /// The chain head is expected to hold valid packet header data. A following packet buffer
108     /// descriptor can optionally end the chain. Bounds and pointer checks are performed when
109     /// creating the wrapper.
110     ///
111     pub fn from_tx_virtq_head<M>(
112         desc_chain: &mut DescriptorChain<M>,
113         access_platform: Option<&Arc<dyn AccessPlatform>>,
114     ) -> Result<Self>
115     where
116         M: Clone + Deref,
117         M::Target: GuestMemory,
118     {
119         let head = desc_chain.next().ok_or(VsockError::HdrDescMissing)?;
120 
121         // All buffers in the TX queue must be readable.
122         //
123         if head.is_write_only() {
124             return Err(VsockError::UnreadableDescriptor);
125         }
126 
127         // The packet header should fit inside the head descriptor.
128         if head.len() < VSOCK_PKT_HDR_SIZE as u32 {
129             return Err(VsockError::HdrDescTooSmall(head.len()));
130         }
131 
132         let mut pkt = Self {
133             hdr: get_host_address_range(
134                 desc_chain.memory(),
135                 head.addr()
136                     .translate_gva(access_platform, VSOCK_PKT_HDR_SIZE),
137                 VSOCK_PKT_HDR_SIZE,
138             )
139             .ok_or(VsockError::GuestMemory)?,
140             buf: None,
141             buf_size: 0,
142         };
143 
144         // No point looking for a data/buffer descriptor, if the packet is zero-lengthed.
145         if pkt.is_empty() {
146             return Ok(pkt);
147         }
148 
149         // Reject weirdly-sized packets.
150         //
151         if pkt.len() > defs::MAX_PKT_BUF_SIZE as u32 {
152             return Err(VsockError::InvalidPktLen(pkt.len()));
153         }
154 
155         // Prior to Linux v6.3 there are two descriptors
156         if head.has_next() {
157             let buf_desc = desc_chain.next().ok_or(VsockError::BufDescMissing)?;
158 
159             // TX data should be read-only.
160             if buf_desc.is_write_only() {
161                 return Err(VsockError::UnreadableDescriptor);
162             }
163 
164             // The data buffer should be large enough to fit the size of the data, as described by
165             // the header descriptor.
166             if buf_desc.len() < pkt.len() {
167                 return Err(VsockError::BufDescTooSmall);
168             }
169             let buf_size = buf_desc.len() as usize;
170             pkt.buf_size = buf_size;
171             pkt.buf = Some(
172                 get_host_address_range(
173                     desc_chain.memory(),
174                     buf_desc.addr().translate_gva(access_platform, buf_size),
175                     pkt.buf_size,
176                 )
177                 .ok_or(VsockError::GuestMemory)?,
178             );
179         } else {
180             let buf_size: usize = head.len() as usize - VSOCK_PKT_HDR_SIZE;
181             pkt.buf_size = buf_size;
182             pkt.buf = Some(
183                 get_host_address_range(
184                     desc_chain.memory(),
185                     head.addr()
186                         .checked_add(VSOCK_PKT_HDR_SIZE as u64)
187                         .unwrap()
188                         .translate_gva(access_platform, buf_size),
189                     buf_size,
190                 )
191                 .ok_or(VsockError::GuestMemory)?,
192             );
193         }
194 
195         Ok(pkt)
196     }
197 
198     /// Create the packet wrapper from an RX virtq chain head.
199     ///
200     /// There must be two descriptors in the chain, both writable: a header descriptor and a data
201     /// descriptor. Bounds and pointer checks are performed when creating the wrapper.
202     ///
203     pub fn from_rx_virtq_head<M>(
204         desc_chain: &mut DescriptorChain<M>,
205         access_platform: Option<&Arc<dyn AccessPlatform>>,
206     ) -> Result<Self>
207     where
208         M: Clone + Deref,
209         M::Target: GuestMemory,
210     {
211         let head = desc_chain.next().ok_or(VsockError::HdrDescMissing)?;
212 
213         // All RX buffers must be writable.
214         //
215         if !head.is_write_only() {
216             return Err(VsockError::UnwritableDescriptor);
217         }
218 
219         // The packet header should fit inside the head descriptor.
220         if head.len() < VSOCK_PKT_HDR_SIZE as u32 {
221             return Err(VsockError::HdrDescTooSmall(head.len()));
222         }
223 
224         // Prior to Linux v6.3 there are two descriptors
225         if head.has_next() {
226             let buf_desc = desc_chain.next().ok_or(VsockError::BufDescMissing)?;
227             let buf_size = buf_desc.len() as usize;
228 
229             Ok(Self {
230                 hdr: get_host_address_range(
231                     desc_chain.memory(),
232                     head.addr()
233                         .translate_gva(access_platform, VSOCK_PKT_HDR_SIZE),
234                     VSOCK_PKT_HDR_SIZE,
235                 )
236                 .ok_or(VsockError::GuestMemory)?,
237                 buf: Some(
238                     get_host_address_range(
239                         desc_chain.memory(),
240                         buf_desc.addr().translate_gva(access_platform, buf_size),
241                         buf_size,
242                     )
243                     .ok_or(VsockError::GuestMemory)?,
244                 ),
245                 buf_size,
246             })
247         } else {
248             let buf_size: usize = head.len() as usize - VSOCK_PKT_HDR_SIZE;
249             Ok(Self {
250                 hdr: get_host_address_range(
251                     desc_chain.memory(),
252                     head.addr()
253                         .translate_gva(access_platform, VSOCK_PKT_HDR_SIZE),
254                     VSOCK_PKT_HDR_SIZE,
255                 )
256                 .ok_or(VsockError::GuestMemory)?,
257                 buf: Some(
258                     get_host_address_range(
259                         desc_chain.memory(),
260                         head.addr()
261                             .checked_add(VSOCK_PKT_HDR_SIZE as u64)
262                             .unwrap()
263                             .translate_gva(access_platform, buf_size),
264                         buf_size,
265                     )
266                     .ok_or(VsockError::GuestMemory)?,
267                 ),
268                 buf_size,
269             })
270         }
271     }
272 
273     /// Provides in-place, byte-slice, access to the vsock packet header.
274     ///
275     pub fn hdr(&self) -> &[u8] {
276         // SAFETY: bound checks have already been performed when creating the packet
277         // from the virtq descriptor.
278         unsafe { std::slice::from_raw_parts(self.hdr as *const u8, VSOCK_PKT_HDR_SIZE) }
279     }
280 
281     /// Provides in-place, byte-slice, mutable access to the vsock packet header.
282     ///
283     pub fn hdr_mut(&mut self) -> &mut [u8] {
284         // SAFETY: bound checks have already been performed when creating the packet
285         // from the virtq descriptor.
286         unsafe { std::slice::from_raw_parts_mut(self.hdr, VSOCK_PKT_HDR_SIZE) }
287     }
288 
289     /// Provides in-place, byte-slice access to the vsock packet data buffer.
290     ///
291     /// Note: control packets (e.g. connection request or reset) have no data buffer associated.
292     ///       For those packets, this method will return `None`.
293     /// Also note: calling `len()` on the returned slice will yield the buffer size, which may be
294     ///            (and often is) larger than the length of the packet data. The packet data length
295     ///            is stored in the packet header, and accessible via `VsockPacket::len()`.
296     pub fn buf(&self) -> Option<&[u8]> {
297         self.buf.map(|ptr| {
298             // SAFETY: bound checks have already been performed when creating the packet
299             // from the virtq descriptor.
300             unsafe { std::slice::from_raw_parts(ptr as *const u8, self.buf_size) }
301         })
302     }
303 
304     /// Provides in-place, byte-slice, mutable access to the vsock packet data buffer.
305     ///
306     /// Note: control packets (e.g. connection request or reset) have no data buffer associated.
307     ///       For those packets, this method will return `None`.
308     /// Also note: calling `len()` on the returned slice will yield the buffer size, which may be
309     ///            (and often is) larger than the length of the packet data. The packet data length
310     ///            is stored in the packet header, and accessible via `VsockPacket::len()`.
311     pub fn buf_mut(&mut self) -> Option<&mut [u8]> {
312         self.buf.map(|ptr| {
313             // SAFETY: bound checks have already been performed when creating the packet
314             // from the virtq descriptor.
315             unsafe { std::slice::from_raw_parts_mut(ptr, self.buf_size) }
316         })
317     }
318 
319     pub fn src_cid(&self) -> u64 {
320         LittleEndian::read_u64(&self.hdr()[HDROFF_SRC_CID..])
321     }
322 
323     pub fn set_src_cid(&mut self, cid: u64) -> &mut Self {
324         LittleEndian::write_u64(&mut self.hdr_mut()[HDROFF_SRC_CID..], cid);
325         self
326     }
327 
328     pub fn dst_cid(&self) -> u64 {
329         LittleEndian::read_u64(&self.hdr()[HDROFF_DST_CID..])
330     }
331 
332     pub fn set_dst_cid(&mut self, cid: u64) -> &mut Self {
333         LittleEndian::write_u64(&mut self.hdr_mut()[HDROFF_DST_CID..], cid);
334         self
335     }
336 
337     pub fn src_port(&self) -> u32 {
338         LittleEndian::read_u32(&self.hdr()[HDROFF_SRC_PORT..])
339     }
340 
341     pub fn set_src_port(&mut self, port: u32) -> &mut Self {
342         LittleEndian::write_u32(&mut self.hdr_mut()[HDROFF_SRC_PORT..], port);
343         self
344     }
345 
346     pub fn dst_port(&self) -> u32 {
347         LittleEndian::read_u32(&self.hdr()[HDROFF_DST_PORT..])
348     }
349 
350     pub fn set_dst_port(&mut self, port: u32) -> &mut Self {
351         LittleEndian::write_u32(&mut self.hdr_mut()[HDROFF_DST_PORT..], port);
352         self
353     }
354 
355     pub fn len(&self) -> u32 {
356         LittleEndian::read_u32(&self.hdr()[HDROFF_LEN..])
357     }
358 
359     pub fn is_empty(&self) -> bool {
360         self.len() == 0
361     }
362 
363     pub fn set_len(&mut self, len: u32) -> &mut Self {
364         LittleEndian::write_u32(&mut self.hdr_mut()[HDROFF_LEN..], len);
365         self
366     }
367 
368     pub fn type_(&self) -> u16 {
369         LittleEndian::read_u16(&self.hdr()[HDROFF_TYPE..])
370     }
371 
372     pub fn set_type(&mut self, type_: u16) -> &mut Self {
373         LittleEndian::write_u16(&mut self.hdr_mut()[HDROFF_TYPE..], type_);
374         self
375     }
376 
377     pub fn op(&self) -> u16 {
378         LittleEndian::read_u16(&self.hdr()[HDROFF_OP..])
379     }
380 
381     pub fn set_op(&mut self, op: u16) -> &mut Self {
382         LittleEndian::write_u16(&mut self.hdr_mut()[HDROFF_OP..], op);
383         self
384     }
385 
386     pub fn flags(&self) -> u32 {
387         LittleEndian::read_u32(&self.hdr()[HDROFF_FLAGS..])
388     }
389 
390     pub fn set_flags(&mut self, flags: u32) -> &mut Self {
391         LittleEndian::write_u32(&mut self.hdr_mut()[HDROFF_FLAGS..], flags);
392         self
393     }
394 
395     pub fn set_flag(&mut self, flag: u32) -> &mut Self {
396         self.set_flags(self.flags() | flag);
397         self
398     }
399 
400     pub fn buf_alloc(&self) -> u32 {
401         LittleEndian::read_u32(&self.hdr()[HDROFF_BUF_ALLOC..])
402     }
403 
404     pub fn set_buf_alloc(&mut self, buf_alloc: u32) -> &mut Self {
405         LittleEndian::write_u32(&mut self.hdr_mut()[HDROFF_BUF_ALLOC..], buf_alloc);
406         self
407     }
408 
409     pub fn fwd_cnt(&self) -> u32 {
410         LittleEndian::read_u32(&self.hdr()[HDROFF_FWD_CNT..])
411     }
412 
413     pub fn set_fwd_cnt(&mut self, fwd_cnt: u32) -> &mut Self {
414         LittleEndian::write_u32(&mut self.hdr_mut()[HDROFF_FWD_CNT..], fwd_cnt);
415         self
416     }
417 }
418 
#[cfg(test)]
#[allow(clippy::undocumented_unsafe_blocks)]
mod tests {
    use super::super::tests::TestContext;
    use super::*;
    use crate::vsock::defs::MAX_PKT_BUF_SIZE;
    use crate::GuestMemoryMmap;
    use virtio_bindings::virtio_ring::VRING_DESC_F_WRITE;
    use virtio_queue::QueueOwnedT;
    use vm_memory::GuestAddress;
    use vm_virtio::queue::testing::VirtqDesc as GuestQDesc;

    // Builds a fresh test context plus an epoll handler context, and seeds the TX header
    // descriptor with a valid packet length so packet assembly succeeds by default.
    macro_rules! create_context {
        ($test_ctx:ident, $handler_ctx:ident) => {
            let $test_ctx = TestContext::new();
            let mut $handler_ctx = $test_ctx.create_epoll_handler_context();
            // For TX packets, hdr.len should be set to a valid value.
            set_pkt_len(1024, &$handler_ctx.guest_txvq.dtable[0], &$test_ctx.mem);
        };
    }

    // Asserts that assembling a packet from the given virtq (tx = queue 1, rx = queue 0)
    // fails with the expected `VsockError` pattern; panics on success or on a different error.
    macro_rules! expect_asm_error {
        (tx, $test_ctx:expr, $handler_ctx:expr, $err:pat) => {
            expect_asm_error!($test_ctx, $handler_ctx, $err, from_tx_virtq_head, 1);
        };
        (rx, $test_ctx:expr, $handler_ctx:expr, $err:pat) => {
            expect_asm_error!($test_ctx, $handler_ctx, $err, from_rx_virtq_head, 0);
        };
        ($test_ctx:expr, $handler_ctx:expr, $err:pat, $ctor:ident, $vq:expr) => {
            match VsockPacket::$ctor(
                &mut $handler_ctx.handler.queues[$vq]
                    .iter(&$test_ctx.mem)
                    .unwrap()
                    .next()
                    .unwrap(),
                None,
            ) {
                Err($err) => (),
                Ok(_) => panic!("Packet assembly should've failed!"),
                Err(other) => panic!("Packet assembly failed with: {:?}", other),
            }
        };
    }

    // Writes `len` directly into the `len` field of the packet header that `guest_desc`
    // points at, by resolving the guest address to a host pointer.
    fn set_pkt_len(len: u32, guest_desc: &GuestQDesc, mem: &GuestMemoryMmap) {
        let hdr_gpa = guest_desc.addr.get();
        let hdr_ptr =
            get_host_address_range(mem, GuestAddress(hdr_gpa), VSOCK_PKT_HDR_SIZE).unwrap();
        let len_ptr = unsafe { hdr_ptr.add(HDROFF_LEN) };

        LittleEndian::write_u32(unsafe { std::slice::from_raw_parts_mut(len_ptr, 4) }, len);
    }

    // Covers TX packet assembly: the happy path and each size/permission validation error.
    #[test]
    fn test_tx_packet_assembly() {
        // Test case: successful TX packet assembly.
        {
            create_context!(test_ctx, handler_ctx);

            let pkt = VsockPacket::from_tx_virtq_head(
                &mut handler_ctx.handler.queues[1]
                    .iter(&test_ctx.mem)
                    .unwrap()
                    .next()
                    .unwrap(),
                None,
            )
            .unwrap();
            assert_eq!(pkt.hdr().len(), VSOCK_PKT_HDR_SIZE);
            assert_eq!(
                pkt.buf().unwrap().len(),
                handler_ctx.guest_txvq.dtable[1].len.get() as usize
            );
        }

        // Test case: error on write-only hdr descriptor.
        {
            create_context!(test_ctx, handler_ctx);
            handler_ctx.guest_txvq.dtable[0]
                .flags
                .set(VRING_DESC_F_WRITE.try_into().unwrap());
            expect_asm_error!(tx, test_ctx, handler_ctx, VsockError::UnreadableDescriptor);
        }

        // Test case: header descriptor has insufficient space to hold the packet header.
        {
            create_context!(test_ctx, handler_ctx);
            handler_ctx.guest_txvq.dtable[0]
                .len
                .set(VSOCK_PKT_HDR_SIZE as u32 - 1);
            expect_asm_error!(tx, test_ctx, handler_ctx, VsockError::HdrDescTooSmall(_));
        }

        // Test case: zero-length TX packet.
        {
            create_context!(test_ctx, handler_ctx);
            set_pkt_len(0, &handler_ctx.guest_txvq.dtable[0], &test_ctx.mem);
            let mut pkt = VsockPacket::from_tx_virtq_head(
                &mut handler_ctx.handler.queues[1]
                    .iter(&test_ctx.mem)
                    .unwrap()
                    .next()
                    .unwrap(),
                None,
            )
            .unwrap();
            // A zero-length packet must be assembled without a data buffer.
            assert!(pkt.buf().is_none());
            assert!(pkt.buf_mut().is_none());
        }

        // Test case: TX packet has more data than we can handle.
        {
            create_context!(test_ctx, handler_ctx);
            set_pkt_len(
                MAX_PKT_BUF_SIZE as u32 + 1,
                &handler_ctx.guest_txvq.dtable[0],
                &test_ctx.mem,
            );
            expect_asm_error!(tx, test_ctx, handler_ctx, VsockError::InvalidPktLen(_));
        }

        // Test case: error on write-only buf descriptor.
        {
            create_context!(test_ctx, handler_ctx);
            handler_ctx.guest_txvq.dtable[1]
                .flags
                .set(VRING_DESC_F_WRITE.try_into().unwrap());
            expect_asm_error!(tx, test_ctx, handler_ctx, VsockError::UnreadableDescriptor);
        }

        // Test case: the buffer descriptor cannot fit all the data advertised by the
        // packet header `len` field.
        {
            create_context!(test_ctx, handler_ctx);
            set_pkt_len(8 * 1024, &handler_ctx.guest_txvq.dtable[0], &test_ctx.mem);
            handler_ctx.guest_txvq.dtable[1].len.set(4 * 1024);
            expect_asm_error!(tx, test_ctx, handler_ctx, VsockError::BufDescTooSmall);
        }
    }

    // Covers RX packet assembly: the happy path plus writability and size validation errors.
    #[test]
    fn test_rx_packet_assembly() {
        // Test case: successful RX packet assembly.
        {
            create_context!(test_ctx, handler_ctx);
            let pkt = VsockPacket::from_rx_virtq_head(
                &mut handler_ctx.handler.queues[0]
                    .iter(&test_ctx.mem)
                    .unwrap()
                    .next()
                    .unwrap(),
                None,
            )
            .unwrap();
            assert_eq!(pkt.hdr().len(), VSOCK_PKT_HDR_SIZE);
            assert_eq!(
                pkt.buf().unwrap().len(),
                handler_ctx.guest_rxvq.dtable[1].len.get() as usize
            );
        }

        // Test case: read-only RX packet header.
        {
            create_context!(test_ctx, handler_ctx);
            handler_ctx.guest_rxvq.dtable[0].flags.set(0);
            expect_asm_error!(rx, test_ctx, handler_ctx, VsockError::UnwritableDescriptor);
        }

        // Test case: RX descriptor head cannot fit the entire packet header.
        {
            create_context!(test_ctx, handler_ctx);
            handler_ctx.guest_rxvq.dtable[0]
                .len
                .set(VSOCK_PKT_HDR_SIZE as u32 - 1);
            expect_asm_error!(rx, test_ctx, handler_ctx, VsockError::HdrDescTooSmall(_));
        }
    }

    // Exercises every header field getter/setter pair, the raw header slice views, and the
    // round-trip consistency between the two.
    #[test]
    fn test_packet_hdr_accessors() {
        const SRC_CID: u64 = 1;
        const DST_CID: u64 = 2;
        const SRC_PORT: u32 = 3;
        const DST_PORT: u32 = 4;
        const LEN: u32 = 5;
        const TYPE: u16 = 6;
        const OP: u16 = 7;
        const FLAGS: u32 = 8;
        const BUF_ALLOC: u32 = 9;
        const FWD_CNT: u32 = 10;

        create_context!(test_ctx, handler_ctx);
        let mut pkt = VsockPacket::from_rx_virtq_head(
            &mut handler_ctx.handler.queues[0]
                .iter(&test_ctx.mem)
                .unwrap()
                .next()
                .unwrap(),
            None,
        )
        .unwrap();

        // Test field accessors.
        pkt.set_src_cid(SRC_CID)
            .set_dst_cid(DST_CID)
            .set_src_port(SRC_PORT)
            .set_dst_port(DST_PORT)
            .set_len(LEN)
            .set_type(TYPE)
            .set_op(OP)
            .set_flags(FLAGS)
            .set_buf_alloc(BUF_ALLOC)
            .set_fwd_cnt(FWD_CNT);

        assert_eq!(pkt.src_cid(), SRC_CID);
        assert_eq!(pkt.dst_cid(), DST_CID);
        assert_eq!(pkt.src_port(), SRC_PORT);
        assert_eq!(pkt.dst_port(), DST_PORT);
        assert_eq!(pkt.len(), LEN);
        assert_eq!(pkt.type_(), TYPE);
        assert_eq!(pkt.op(), OP);
        assert_eq!(pkt.flags(), FLAGS);
        assert_eq!(pkt.buf_alloc(), BUF_ALLOC);
        assert_eq!(pkt.fwd_cnt(), FWD_CNT);

        // Test individual flag setting.
        let flags = pkt.flags() | 0b1000;
        pkt.set_flag(0b1000);
        assert_eq!(pkt.flags(), flags);

        // Test packet header as-slice access.
        //

        assert_eq!(pkt.hdr().len(), VSOCK_PKT_HDR_SIZE);

        // Each field written via the setters must read back, little-endian, at its HDROFF_*
        // offset in the raw header slice.
        assert_eq!(
            SRC_CID,
            LittleEndian::read_u64(&pkt.hdr()[HDROFF_SRC_CID..])
        );
        assert_eq!(
            DST_CID,
            LittleEndian::read_u64(&pkt.hdr()[HDROFF_DST_CID..])
        );
        assert_eq!(
            SRC_PORT,
            LittleEndian::read_u32(&pkt.hdr()[HDROFF_SRC_PORT..])
        );
        assert_eq!(
            DST_PORT,
            LittleEndian::read_u32(&pkt.hdr()[HDROFF_DST_PORT..])
        );
        assert_eq!(LEN, LittleEndian::read_u32(&pkt.hdr()[HDROFF_LEN..]));
        assert_eq!(TYPE, LittleEndian::read_u16(&pkt.hdr()[HDROFF_TYPE..]));
        assert_eq!(OP, LittleEndian::read_u16(&pkt.hdr()[HDROFF_OP..]));
        assert_eq!(FLAGS, LittleEndian::read_u32(&pkt.hdr()[HDROFF_FLAGS..]));
        assert_eq!(
            BUF_ALLOC,
            LittleEndian::read_u32(&pkt.hdr()[HDROFF_BUF_ALLOC..])
        );
        assert_eq!(
            FWD_CNT,
            LittleEndian::read_u32(&pkt.hdr()[HDROFF_FWD_CNT..])
        );

        // Zeroing the header through the mutable slice view must be reflected by the getters.
        assert_eq!(pkt.hdr_mut().len(), VSOCK_PKT_HDR_SIZE);
        for b in pkt.hdr_mut() {
            *b = 0;
        }
        assert_eq!(pkt.src_cid(), 0);
        assert_eq!(pkt.dst_cid(), 0);
        assert_eq!(pkt.src_port(), 0);
        assert_eq!(pkt.dst_port(), 0);
        assert_eq!(pkt.len(), 0);
        assert_eq!(pkt.type_(), 0);
        assert_eq!(pkt.op(), 0);
        assert_eq!(pkt.flags(), 0);
        assert_eq!(pkt.buf_alloc(), 0);
        assert_eq!(pkt.fwd_cnt(), 0);
    }

    // Verifies that buf()/buf_mut() expose the full descriptor-sized buffer and that writes
    // through the mutable view are visible through the shared view.
    #[test]
    fn test_packet_buf() {
        create_context!(test_ctx, handler_ctx);
        let mut pkt = VsockPacket::from_rx_virtq_head(
            &mut handler_ctx.handler.queues[0]
                .iter(&test_ctx.mem)
                .unwrap()
                .next()
                .unwrap(),
            None,
        )
        .unwrap();

        assert_eq!(
            pkt.buf().unwrap().len(),
            handler_ctx.guest_rxvq.dtable[1].len.get() as usize
        );
        assert_eq!(
            pkt.buf_mut().unwrap().len(),
            handler_ctx.guest_rxvq.dtable[1].len.get() as usize
        );

        for i in 0..pkt.buf().unwrap().len() {
            pkt.buf_mut().unwrap()[i] = (i % 0x100) as u8;
            assert_eq!(pkt.buf().unwrap()[i], (i % 0x100) as u8);
        }
    }
}
727