xref: /cloud-hypervisor/virtio-devices/src/vsock/packet.rs (revision 88a9f799449c04180c6b9a21d3b9c0c4b57e2bd6)
1 // Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
2 // SPDX-License-Identifier: Apache-2.0
3 //
4 
5 //! `VsockPacket` provides a thin wrapper over the buffers exchanged via virtio queues.
6 //! There are two components to a vsock packet, each using its own descriptor in a
7 //! virtio queue:
8 //! - the packet header; and
9 //! - the packet data/buffer.
10 //!
11 //! There is a 1:1 relation between descriptor chains and packets: the first (chain head) holds
12 //! the header, and an optional second descriptor holds the data. The second descriptor is only
13 //! present for data packets (VSOCK_OP_RW).
14 //!
15 //! `VsockPacket` wraps these two buffers and provides direct access to the data stored
16 //! in guest memory. This is done to avoid unnecessarily copying data from guest memory
17 //! to temporary buffers, before passing it on to the vsock backend.
18 
19 use std::ops::Deref;
20 use std::sync::Arc;
21 
22 use byteorder::{ByteOrder, LittleEndian};
23 use virtio_queue::DescriptorChain;
24 use vm_memory::{Address, GuestMemory};
25 use vm_virtio::{AccessPlatform, Translatable};
26 
27 use super::defs;
28 use super::{Result, VsockError};
29 use crate::get_host_address_range;
30 
31 // The vsock packet header is defined by the C struct:
32 //
33 // ```C
34 // struct virtio_vsock_hdr {
35 //     le64 src_cid;
36 //     le64 dst_cid;
37 //     le32 src_port;
38 //     le32 dst_port;
39 //     le32 len;
40 //     le16 type;
41 //     le16 op;
42 //     le32 flags;
43 //     le32 buf_alloc;
44 //     le32 fwd_cnt;
45 // };
46 // ```
47 //
48 // This struct will occupy the buffer pointed to by the head descriptor. We'll be accessing it
49 // as a byte slice. To that end, we define below the offsets for each field struct, as well as the
50 // packed struct size, as a bunch of `usize` consts.
51 // Note that these offsets are only used privately by the `VsockPacket` struct, the public interface
52 // consisting of getter and setter methods, for each struct field, that will also handle the correct
53 // endianness.
54 
/// The vsock packet header struct size (when packed).
pub const VSOCK_PKT_HDR_SIZE: usize = 44;

// All `HDROFF_*` values below are byte offsets into the packed `virtio_vsock_hdr`
// struct laid out above; each field is read/written little-endian at its offset.

// Source CID.
const HDROFF_SRC_CID: usize = 0;

// Destination CID.
const HDROFF_DST_CID: usize = 8;

// Source port.
const HDROFF_SRC_PORT: usize = 16;

// Destination port.
const HDROFF_DST_PORT: usize = 20;

// Data length (in bytes) - may be 0, if there is no data buffer.
const HDROFF_LEN: usize = 24;

// Socket type. Currently, only connection-oriented streams are defined by the vsock protocol.
const HDROFF_TYPE: usize = 28;

// Operation ID - one of the VSOCK_OP_* values; e.g.
// - VSOCK_OP_RW: a data packet;
// - VSOCK_OP_REQUEST: connection request;
// - VSOCK_OP_RST: forceful connection termination;
// etc (see `super::defs::uapi` for the full list).
const HDROFF_OP: usize = 30;

// Additional options (flags) associated with the current operation (`op`).
// Currently, only used with shutdown requests (VSOCK_OP_SHUTDOWN).
const HDROFF_FLAGS: usize = 32;

// Size (in bytes) of the packet sender receive buffer (for the connection to which this packet
// belongs).
const HDROFF_BUF_ALLOC: usize = 36;

// Number of bytes the sender has received and consumed (for the connection to which this packet
// belongs). For instance, for our Unix backend, this counter would be the total number of bytes
// we have successfully written to a backing Unix socket.
const HDROFF_FWD_CNT: usize = 40;
95 
/// The vsock packet, implemented as a wrapper over a virtq descriptor chain:
/// - the chain head, holding the packet header; and
/// - (an optional) data/buffer descriptor, only present for data packets (VSOCK_OP_RW).
pub struct VsockPacket {
    // Host-side pointer to the mapped packet header; always `VSOCK_PKT_HDR_SIZE` bytes,
    // bounds-checked against the descriptor at construction time.
    // NOTE(review): raw pointers are only valid while the backing guest memory mapping
    // stays alive — presumably guaranteed by the device lifetime; verify against callers.
    hdr: *mut u8,
    // Host-side pointer to the mapped data buffer, or `None` for control packets.
    buf: Option<*mut u8>,
    // Size (in bytes) of the region behind `buf`; 0 when `buf` is `None`. May be larger
    // than the packet data length stored in the header (`len` field).
    buf_size: usize,
}
105 
106 impl VsockPacket {
107     /// Create the packet wrapper from a TX virtq chain head.
108     ///
109     /// The chain head is expected to hold valid packet header data. A following packet buffer
110     /// descriptor can optionally end the chain. Bounds and pointer checks are performed when
111     /// creating the wrapper.
112     ///
113     pub fn from_tx_virtq_head<M>(
114         desc_chain: &mut DescriptorChain<M>,
115         access_platform: Option<&Arc<dyn AccessPlatform>>,
116     ) -> Result<Self>
117     where
118         M: Clone + Deref,
119         M::Target: GuestMemory,
120     {
121         let head = desc_chain.next().ok_or(VsockError::HdrDescMissing)?;
122 
123         // All buffers in the TX queue must be readable.
124         //
125         if head.is_write_only() {
126             return Err(VsockError::UnreadableDescriptor);
127         }
128 
129         // The packet header should fit inside the head descriptor.
130         if head.len() < VSOCK_PKT_HDR_SIZE as u32 {
131             return Err(VsockError::HdrDescTooSmall(head.len()));
132         }
133 
134         let mut pkt = Self {
135             hdr: get_host_address_range(
136                 desc_chain.memory(),
137                 head.addr()
138                     .translate_gva(access_platform, VSOCK_PKT_HDR_SIZE),
139                 VSOCK_PKT_HDR_SIZE,
140             )
141             .ok_or(VsockError::GuestMemory)?,
142             buf: None,
143             buf_size: 0,
144         };
145 
146         // No point looking for a data/buffer descriptor, if the packet is zero-length.
147         if pkt.is_empty() {
148             return Ok(pkt);
149         }
150 
151         // Reject weirdly-sized packets.
152         //
153         if pkt.len() > defs::MAX_PKT_BUF_SIZE as u32 {
154             return Err(VsockError::InvalidPktLen(pkt.len()));
155         }
156 
157         // Prior to Linux v6.3 there are two descriptors
158         if head.has_next() {
159             let buf_desc = desc_chain.next().ok_or(VsockError::BufDescMissing)?;
160 
161             // TX data should be read-only.
162             if buf_desc.is_write_only() {
163                 return Err(VsockError::UnreadableDescriptor);
164             }
165 
166             // The data buffer should be large enough to fit the size of the data, as described by
167             // the header descriptor.
168             if buf_desc.len() < pkt.len() {
169                 return Err(VsockError::BufDescTooSmall);
170             }
171             let buf_size = buf_desc.len() as usize;
172             pkt.buf_size = buf_size;
173             pkt.buf = Some(
174                 get_host_address_range(
175                     desc_chain.memory(),
176                     buf_desc.addr().translate_gva(access_platform, buf_size),
177                     pkt.buf_size,
178                 )
179                 .ok_or(VsockError::GuestMemory)?,
180             );
181         } else {
182             let buf_size: usize = head.len() as usize - VSOCK_PKT_HDR_SIZE;
183             pkt.buf_size = buf_size;
184             pkt.buf = Some(
185                 get_host_address_range(
186                     desc_chain.memory(),
187                     head.addr()
188                         .checked_add(VSOCK_PKT_HDR_SIZE as u64)
189                         .unwrap()
190                         .translate_gva(access_platform, buf_size),
191                     buf_size,
192                 )
193                 .ok_or(VsockError::GuestMemory)?,
194             );
195         }
196 
197         Ok(pkt)
198     }
199 
200     /// Create the packet wrapper from an RX virtq chain head.
201     ///
202     /// There must be two descriptors in the chain, both writable: a header descriptor and a data
203     /// descriptor. Bounds and pointer checks are performed when creating the wrapper.
204     ///
205     pub fn from_rx_virtq_head<M>(
206         desc_chain: &mut DescriptorChain<M>,
207         access_platform: Option<&Arc<dyn AccessPlatform>>,
208     ) -> Result<Self>
209     where
210         M: Clone + Deref,
211         M::Target: GuestMemory,
212     {
213         let head = desc_chain.next().ok_or(VsockError::HdrDescMissing)?;
214 
215         // All RX buffers must be writable.
216         //
217         if !head.is_write_only() {
218             return Err(VsockError::UnwritableDescriptor);
219         }
220 
221         // The packet header should fit inside the head descriptor.
222         if head.len() < VSOCK_PKT_HDR_SIZE as u32 {
223             return Err(VsockError::HdrDescTooSmall(head.len()));
224         }
225 
226         // Prior to Linux v6.3 there are two descriptors
227         if head.has_next() {
228             let buf_desc = desc_chain.next().ok_or(VsockError::BufDescMissing)?;
229             let buf_size = buf_desc.len() as usize;
230 
231             Ok(Self {
232                 hdr: get_host_address_range(
233                     desc_chain.memory(),
234                     head.addr()
235                         .translate_gva(access_platform, VSOCK_PKT_HDR_SIZE),
236                     VSOCK_PKT_HDR_SIZE,
237                 )
238                 .ok_or(VsockError::GuestMemory)?,
239                 buf: Some(
240                     get_host_address_range(
241                         desc_chain.memory(),
242                         buf_desc.addr().translate_gva(access_platform, buf_size),
243                         buf_size,
244                     )
245                     .ok_or(VsockError::GuestMemory)?,
246                 ),
247                 buf_size,
248             })
249         } else {
250             let buf_size: usize = head.len() as usize - VSOCK_PKT_HDR_SIZE;
251             Ok(Self {
252                 hdr: get_host_address_range(
253                     desc_chain.memory(),
254                     head.addr()
255                         .translate_gva(access_platform, VSOCK_PKT_HDR_SIZE),
256                     VSOCK_PKT_HDR_SIZE,
257                 )
258                 .ok_or(VsockError::GuestMemory)?,
259                 buf: Some(
260                     get_host_address_range(
261                         desc_chain.memory(),
262                         head.addr()
263                             .checked_add(VSOCK_PKT_HDR_SIZE as u64)
264                             .unwrap()
265                             .translate_gva(access_platform, buf_size),
266                         buf_size,
267                     )
268                     .ok_or(VsockError::GuestMemory)?,
269                 ),
270                 buf_size,
271             })
272         }
273     }
274 
275     /// Provides in-place, byte-slice, access to the vsock packet header.
276     ///
277     pub fn hdr(&self) -> &[u8] {
278         // SAFETY: bound checks have already been performed when creating the packet
279         // from the virtq descriptor.
280         unsafe { std::slice::from_raw_parts(self.hdr as *const u8, VSOCK_PKT_HDR_SIZE) }
281     }
282 
283     /// Provides in-place, byte-slice, mutable access to the vsock packet header.
284     ///
285     pub fn hdr_mut(&mut self) -> &mut [u8] {
286         // SAFETY: bound checks have already been performed when creating the packet
287         // from the virtq descriptor.
288         unsafe { std::slice::from_raw_parts_mut(self.hdr, VSOCK_PKT_HDR_SIZE) }
289     }
290 
291     /// Provides in-place, byte-slice access to the vsock packet data buffer.
292     ///
293     /// Note: control packets (e.g. connection request or reset) have no data buffer associated.
294     ///       For those packets, this method will return `None`.
295     /// Also note: calling `len()` on the returned slice will yield the buffer size, which may be
296     ///            (and often is) larger than the length of the packet data. The packet data length
297     ///            is stored in the packet header, and accessible via `VsockPacket::len()`.
298     pub fn buf(&self) -> Option<&[u8]> {
299         self.buf.map(|ptr| {
300             // SAFETY: bound checks have already been performed when creating the packet
301             // from the virtq descriptor.
302             unsafe { std::slice::from_raw_parts(ptr as *const u8, self.buf_size) }
303         })
304     }
305 
306     /// Provides in-place, byte-slice, mutable access to the vsock packet data buffer.
307     ///
308     /// Note: control packets (e.g. connection request or reset) have no data buffer associated.
309     ///       For those packets, this method will return `None`.
310     /// Also note: calling `len()` on the returned slice will yield the buffer size, which may be
311     ///            (and often is) larger than the length of the packet data. The packet data length
312     ///            is stored in the packet header, and accessible via `VsockPacket::len()`.
313     pub fn buf_mut(&mut self) -> Option<&mut [u8]> {
314         self.buf.map(|ptr| {
315             // SAFETY: bound checks have already been performed when creating the packet
316             // from the virtq descriptor.
317             unsafe { std::slice::from_raw_parts_mut(ptr, self.buf_size) }
318         })
319     }
320 
321     pub fn src_cid(&self) -> u64 {
322         LittleEndian::read_u64(&self.hdr()[HDROFF_SRC_CID..])
323     }
324 
325     pub fn set_src_cid(&mut self, cid: u64) -> &mut Self {
326         LittleEndian::write_u64(&mut self.hdr_mut()[HDROFF_SRC_CID..], cid);
327         self
328     }
329 
330     pub fn dst_cid(&self) -> u64 {
331         LittleEndian::read_u64(&self.hdr()[HDROFF_DST_CID..])
332     }
333 
334     pub fn set_dst_cid(&mut self, cid: u64) -> &mut Self {
335         LittleEndian::write_u64(&mut self.hdr_mut()[HDROFF_DST_CID..], cid);
336         self
337     }
338 
339     pub fn src_port(&self) -> u32 {
340         LittleEndian::read_u32(&self.hdr()[HDROFF_SRC_PORT..])
341     }
342 
343     pub fn set_src_port(&mut self, port: u32) -> &mut Self {
344         LittleEndian::write_u32(&mut self.hdr_mut()[HDROFF_SRC_PORT..], port);
345         self
346     }
347 
348     pub fn dst_port(&self) -> u32 {
349         LittleEndian::read_u32(&self.hdr()[HDROFF_DST_PORT..])
350     }
351 
352     pub fn set_dst_port(&mut self, port: u32) -> &mut Self {
353         LittleEndian::write_u32(&mut self.hdr_mut()[HDROFF_DST_PORT..], port);
354         self
355     }
356 
357     pub fn len(&self) -> u32 {
358         LittleEndian::read_u32(&self.hdr()[HDROFF_LEN..])
359     }
360 
361     pub fn is_empty(&self) -> bool {
362         self.len() == 0
363     }
364 
365     pub fn set_len(&mut self, len: u32) -> &mut Self {
366         LittleEndian::write_u32(&mut self.hdr_mut()[HDROFF_LEN..], len);
367         self
368     }
369 
370     pub fn type_(&self) -> u16 {
371         LittleEndian::read_u16(&self.hdr()[HDROFF_TYPE..])
372     }
373 
374     pub fn set_type(&mut self, type_: u16) -> &mut Self {
375         LittleEndian::write_u16(&mut self.hdr_mut()[HDROFF_TYPE..], type_);
376         self
377     }
378 
379     pub fn op(&self) -> u16 {
380         LittleEndian::read_u16(&self.hdr()[HDROFF_OP..])
381     }
382 
383     pub fn set_op(&mut self, op: u16) -> &mut Self {
384         LittleEndian::write_u16(&mut self.hdr_mut()[HDROFF_OP..], op);
385         self
386     }
387 
388     pub fn flags(&self) -> u32 {
389         LittleEndian::read_u32(&self.hdr()[HDROFF_FLAGS..])
390     }
391 
392     pub fn set_flags(&mut self, flags: u32) -> &mut Self {
393         LittleEndian::write_u32(&mut self.hdr_mut()[HDROFF_FLAGS..], flags);
394         self
395     }
396 
397     pub fn set_flag(&mut self, flag: u32) -> &mut Self {
398         self.set_flags(self.flags() | flag);
399         self
400     }
401 
402     pub fn buf_alloc(&self) -> u32 {
403         LittleEndian::read_u32(&self.hdr()[HDROFF_BUF_ALLOC..])
404     }
405 
406     pub fn set_buf_alloc(&mut self, buf_alloc: u32) -> &mut Self {
407         LittleEndian::write_u32(&mut self.hdr_mut()[HDROFF_BUF_ALLOC..], buf_alloc);
408         self
409     }
410 
411     pub fn fwd_cnt(&self) -> u32 {
412         LittleEndian::read_u32(&self.hdr()[HDROFF_FWD_CNT..])
413     }
414 
415     pub fn set_fwd_cnt(&mut self, fwd_cnt: u32) -> &mut Self {
416         LittleEndian::write_u32(&mut self.hdr_mut()[HDROFF_FWD_CNT..], fwd_cnt);
417         self
418     }
419 }
420 
#[cfg(test)]
#[allow(clippy::undocumented_unsafe_blocks)]
mod tests {
    use virtio_bindings::virtio_ring::VRING_DESC_F_WRITE;
    use virtio_queue::QueueOwnedT;
    use vm_memory::GuestAddress;
    use vm_virtio::queue::testing::VirtqDesc as GuestQDesc;

    use super::super::tests::TestContext;
    use super::*;
    use crate::vsock::defs::MAX_PKT_BUF_SIZE;
    use crate::GuestMemoryMmap;

    // Builds a fresh `TestContext` and epoll-handler context, and initializes the TX packet
    // header's `len` field (stored in the first TX descriptor) to a valid value.
    macro_rules! create_context {
        ($test_ctx:ident, $handler_ctx:ident) => {
            let $test_ctx = TestContext::new();
            let mut $handler_ctx = $test_ctx.create_epoll_handler_context();
            // For TX packets, hdr.len should be set to a valid value.
            set_pkt_len(1024, &$handler_ctx.guest_txvq.dtable[0], &$test_ctx.mem);
        };
    }

    // Asserts that packet assembly from the given virtq (tx = queue 1, rx = queue 0)
    // fails with the expected `VsockError` variant.
    macro_rules! expect_asm_error {
        (tx, $test_ctx:expr, $handler_ctx:expr, $err:pat) => {
            expect_asm_error!($test_ctx, $handler_ctx, $err, from_tx_virtq_head, 1);
        };
        (rx, $test_ctx:expr, $handler_ctx:expr, $err:pat) => {
            expect_asm_error!($test_ctx, $handler_ctx, $err, from_rx_virtq_head, 0);
        };
        ($test_ctx:expr, $handler_ctx:expr, $err:pat, $ctor:ident, $vq:expr) => {
            match VsockPacket::$ctor(
                &mut $handler_ctx.handler.queues[$vq]
                    .iter(&$test_ctx.mem)
                    .unwrap()
                    .next()
                    .unwrap(),
                None,
            ) {
                Err($err) => (),
                Ok(_) => panic!("Packet assembly should've failed!"),
                Err(other) => panic!("Packet assembly failed with: {:?}", other),
            }
        };
    }

    // Writes `len` (little-endian) into the `len` field of the packet header whose guest
    // address is held by `guest_desc`, going through the host mapping of that header.
    fn set_pkt_len(len: u32, guest_desc: &GuestQDesc, mem: &GuestMemoryMmap) {
        let hdr_gpa = guest_desc.addr.get();
        let hdr_ptr =
            get_host_address_range(mem, GuestAddress(hdr_gpa), VSOCK_PKT_HDR_SIZE).unwrap();
        let len_ptr = unsafe { hdr_ptr.add(HDROFF_LEN) };

        LittleEndian::write_u32(unsafe { std::slice::from_raw_parts_mut(len_ptr, 4) }, len);
    }

    #[test]
    fn test_tx_packet_assembly() {
        // Test case: successful TX packet assembly.
        {
            create_context!(test_ctx, handler_ctx);

            let pkt = VsockPacket::from_tx_virtq_head(
                &mut handler_ctx.handler.queues[1]
                    .iter(&test_ctx.mem)
                    .unwrap()
                    .next()
                    .unwrap(),
                None,
            )
            .unwrap();
            assert_eq!(pkt.hdr().len(), VSOCK_PKT_HDR_SIZE);
            assert_eq!(
                pkt.buf().unwrap().len(),
                handler_ctx.guest_txvq.dtable[1].len.get() as usize
            );
        }

        // Test case: error on write-only hdr descriptor.
        {
            create_context!(test_ctx, handler_ctx);
            handler_ctx.guest_txvq.dtable[0]
                .flags
                .set(VRING_DESC_F_WRITE.try_into().unwrap());
            expect_asm_error!(tx, test_ctx, handler_ctx, VsockError::UnreadableDescriptor);
        }

        // Test case: header descriptor has insufficient space to hold the packet header.
        {
            create_context!(test_ctx, handler_ctx);
            handler_ctx.guest_txvq.dtable[0]
                .len
                .set(VSOCK_PKT_HDR_SIZE as u32 - 1);
            expect_asm_error!(tx, test_ctx, handler_ctx, VsockError::HdrDescTooSmall(_));
        }

        // Test case: zero-length TX packet.
        {
            create_context!(test_ctx, handler_ctx);
            set_pkt_len(0, &handler_ctx.guest_txvq.dtable[0], &test_ctx.mem);
            let mut pkt = VsockPacket::from_tx_virtq_head(
                &mut handler_ctx.handler.queues[1]
                    .iter(&test_ctx.mem)
                    .unwrap()
                    .next()
                    .unwrap(),
                None,
            )
            .unwrap();
            // Zero-length packets must not map a data buffer.
            assert!(pkt.buf().is_none());
            assert!(pkt.buf_mut().is_none());
        }

        // Test case: TX packet has more data than we can handle.
        {
            create_context!(test_ctx, handler_ctx);
            set_pkt_len(
                MAX_PKT_BUF_SIZE as u32 + 1,
                &handler_ctx.guest_txvq.dtable[0],
                &test_ctx.mem,
            );
            expect_asm_error!(tx, test_ctx, handler_ctx, VsockError::InvalidPktLen(_));
        }

        // Test case: error on write-only buf descriptor.
        {
            create_context!(test_ctx, handler_ctx);
            handler_ctx.guest_txvq.dtable[1]
                .flags
                .set(VRING_DESC_F_WRITE.try_into().unwrap());
            expect_asm_error!(tx, test_ctx, handler_ctx, VsockError::UnreadableDescriptor);
        }

        // Test case: the buffer descriptor cannot fit all the data advertised by the
        // packet header `len` field.
        {
            create_context!(test_ctx, handler_ctx);
            set_pkt_len(8 * 1024, &handler_ctx.guest_txvq.dtable[0], &test_ctx.mem);
            handler_ctx.guest_txvq.dtable[1].len.set(4 * 1024);
            expect_asm_error!(tx, test_ctx, handler_ctx, VsockError::BufDescTooSmall);
        }
    }

    #[test]
    fn test_rx_packet_assembly() {
        // Test case: successful RX packet assembly.
        {
            create_context!(test_ctx, handler_ctx);
            let pkt = VsockPacket::from_rx_virtq_head(
                &mut handler_ctx.handler.queues[0]
                    .iter(&test_ctx.mem)
                    .unwrap()
                    .next()
                    .unwrap(),
                None,
            )
            .unwrap();
            assert_eq!(pkt.hdr().len(), VSOCK_PKT_HDR_SIZE);
            assert_eq!(
                pkt.buf().unwrap().len(),
                handler_ctx.guest_rxvq.dtable[1].len.get() as usize
            );
        }

        // Test case: read-only RX packet header.
        {
            create_context!(test_ctx, handler_ctx);
            // Clearing the flags drops VRING_DESC_F_WRITE, making the header read-only.
            handler_ctx.guest_rxvq.dtable[0].flags.set(0);
            expect_asm_error!(rx, test_ctx, handler_ctx, VsockError::UnwritableDescriptor);
        }

        // Test case: RX descriptor head cannot fit the entire packet header.
        {
            create_context!(test_ctx, handler_ctx);
            handler_ctx.guest_rxvq.dtable[0]
                .len
                .set(VSOCK_PKT_HDR_SIZE as u32 - 1);
            expect_asm_error!(rx, test_ctx, handler_ctx, VsockError::HdrDescTooSmall(_));
        }
    }

    #[test]
    fn test_packet_hdr_accessors() {
        const SRC_CID: u64 = 1;
        const DST_CID: u64 = 2;
        const SRC_PORT: u32 = 3;
        const DST_PORT: u32 = 4;
        const LEN: u32 = 5;
        const TYPE: u16 = 6;
        const OP: u16 = 7;
        const FLAGS: u32 = 8;
        const BUF_ALLOC: u32 = 9;
        const FWD_CNT: u32 = 10;

        create_context!(test_ctx, handler_ctx);
        let mut pkt = VsockPacket::from_rx_virtq_head(
            &mut handler_ctx.handler.queues[0]
                .iter(&test_ctx.mem)
                .unwrap()
                .next()
                .unwrap(),
            None,
        )
        .unwrap();

        // Test field accessors.
        pkt.set_src_cid(SRC_CID)
            .set_dst_cid(DST_CID)
            .set_src_port(SRC_PORT)
            .set_dst_port(DST_PORT)
            .set_len(LEN)
            .set_type(TYPE)
            .set_op(OP)
            .set_flags(FLAGS)
            .set_buf_alloc(BUF_ALLOC)
            .set_fwd_cnt(FWD_CNT);

        assert_eq!(pkt.src_cid(), SRC_CID);
        assert_eq!(pkt.dst_cid(), DST_CID);
        assert_eq!(pkt.src_port(), SRC_PORT);
        assert_eq!(pkt.dst_port(), DST_PORT);
        assert_eq!(pkt.len(), LEN);
        assert_eq!(pkt.type_(), TYPE);
        assert_eq!(pkt.op(), OP);
        assert_eq!(pkt.flags(), FLAGS);
        assert_eq!(pkt.buf_alloc(), BUF_ALLOC);
        assert_eq!(pkt.fwd_cnt(), FWD_CNT);

        // Test individual flag setting.
        let flags = pkt.flags() | 0b1000;
        pkt.set_flag(0b1000);
        assert_eq!(pkt.flags(), flags);

        // Test packet header as-slice access: each field must land at its HDROFF_* offset,
        // little-endian encoded.

        assert_eq!(pkt.hdr().len(), VSOCK_PKT_HDR_SIZE);

        assert_eq!(
            SRC_CID,
            LittleEndian::read_u64(&pkt.hdr()[HDROFF_SRC_CID..])
        );
        assert_eq!(
            DST_CID,
            LittleEndian::read_u64(&pkt.hdr()[HDROFF_DST_CID..])
        );
        assert_eq!(
            SRC_PORT,
            LittleEndian::read_u32(&pkt.hdr()[HDROFF_SRC_PORT..])
        );
        assert_eq!(
            DST_PORT,
            LittleEndian::read_u32(&pkt.hdr()[HDROFF_DST_PORT..])
        );
        assert_eq!(LEN, LittleEndian::read_u32(&pkt.hdr()[HDROFF_LEN..]));
        assert_eq!(TYPE, LittleEndian::read_u16(&pkt.hdr()[HDROFF_TYPE..]));
        assert_eq!(OP, LittleEndian::read_u16(&pkt.hdr()[HDROFF_OP..]));
        assert_eq!(FLAGS, LittleEndian::read_u32(&pkt.hdr()[HDROFF_FLAGS..]));
        assert_eq!(
            BUF_ALLOC,
            LittleEndian::read_u32(&pkt.hdr()[HDROFF_BUF_ALLOC..])
        );
        assert_eq!(
            FWD_CNT,
            LittleEndian::read_u32(&pkt.hdr()[HDROFF_FWD_CNT..])
        );

        // Zeroing the header through the mutable slice must be visible via every getter.
        assert_eq!(pkt.hdr_mut().len(), VSOCK_PKT_HDR_SIZE);
        for b in pkt.hdr_mut() {
            *b = 0;
        }
        assert_eq!(pkt.src_cid(), 0);
        assert_eq!(pkt.dst_cid(), 0);
        assert_eq!(pkt.src_port(), 0);
        assert_eq!(pkt.dst_port(), 0);
        assert_eq!(pkt.len(), 0);
        assert_eq!(pkt.type_(), 0);
        assert_eq!(pkt.op(), 0);
        assert_eq!(pkt.flags(), 0);
        assert_eq!(pkt.buf_alloc(), 0);
        assert_eq!(pkt.fwd_cnt(), 0);
    }

    #[test]
    fn test_packet_buf() {
        create_context!(test_ctx, handler_ctx);
        let mut pkt = VsockPacket::from_rx_virtq_head(
            &mut handler_ctx.handler.queues[0]
                .iter(&test_ctx.mem)
                .unwrap()
                .next()
                .unwrap(),
            None,
        )
        .unwrap();

        assert_eq!(
            pkt.buf().unwrap().len(),
            handler_ctx.guest_rxvq.dtable[1].len.get() as usize
        );
        assert_eq!(
            pkt.buf_mut().unwrap().len(),
            handler_ctx.guest_rxvq.dtable[1].len.get() as usize
        );

        // Writes through `buf_mut()` must be readable back through `buf()`.
        for i in 0..pkt.buf().unwrap().len() {
            pkt.buf_mut().unwrap()[i] = (i % 0x100) as u8;
            assert_eq!(pkt.buf().unwrap()[i], (i % 0x100) as u8);
        }
    }
}
730