xref: /cloud-hypervisor/virtio-devices/src/vsock/packet.rs (revision 8803e4a2e7f8e9596b72f81d3c916390e5b10fbd)
1 // Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
2 // SPDX-License-Identifier: Apache-2.0
3 //
4 
5 //! `VsockPacket` provides a thin wrapper over the buffers exchanged via virtio queues.
6 //! There are two components to a vsock packet, each using its own descriptor in a
7 //! virtio queue:
8 //! - the packet header; and
9 //! - the packet data/buffer.
10 //!
11 //! There is a 1:1 relation between descriptor chains and packets: the first (chain head) holds
12 //! the header, and an optional second descriptor holds the data. The second descriptor is only
13 //! present for data packets (VSOCK_OP_RW).
14 //!
15 //! `VsockPacket` wraps these two buffers and provides direct access to the data stored
16 //! in guest memory. This is done to avoid unnecessarily copying data from guest memory
17 //! to temporary buffers, before passing it on to the vsock backend.
18 
19 use byteorder::{ByteOrder, LittleEndian};
20 use std::ops::Deref;
21 use std::sync::Arc;
22 
23 use super::defs;
24 use super::{Result, VsockError};
25 use crate::get_host_address_range;
26 use virtio_queue::DescriptorChain;
27 use vm_memory::{Address, GuestMemory};
28 use vm_virtio::{AccessPlatform, Translatable};
29 
30 // The vsock packet header is defined by the C struct:
31 //
32 // ```C
33 // struct virtio_vsock_hdr {
34 //     le64 src_cid;
35 //     le64 dst_cid;
36 //     le32 src_port;
37 //     le32 dst_port;
38 //     le32 len;
39 //     le16 type;
40 //     le16 op;
41 //     le32 flags;
42 //     le32 buf_alloc;
43 //     le32 fwd_cnt;
44 // };
45 // ```
46 //
47 // This struct will occupy the buffer pointed to by the head descriptor. We'll be accessing it
// as a byte slice. To that end, we define below the offsets for each struct field, as well as the
49 // packed struct size, as a bunch of `usize` consts.
50 // Note that these offsets are only used privately by the `VsockPacket` struct, the public interface
51 // consisting of getter and setter methods, for each struct field, that will also handle the correct
52 // endianness.
53 
/// The vsock packet header struct size (when packed).
/// This is the sum of all the field widths below: 8 + 8 + 4 + 4 + 4 + 2 + 2 + 4 + 4 + 4 = 44.
pub const VSOCK_PKT_HDR_SIZE: usize = 44;

// Source CID (le64), at byte offset 0.
const HDROFF_SRC_CID: usize = 0;

// Destination CID (le64), at byte offset 8.
const HDROFF_DST_CID: usize = 8;

// Source port (le32), at byte offset 16.
const HDROFF_SRC_PORT: usize = 16;

// Destination port (le32), at byte offset 20.
const HDROFF_DST_PORT: usize = 20;

// Data length (le32, in bytes) - may be 0, if there is no data buffer.
const HDROFF_LEN: usize = 24;

// Socket type (le16). Currently, only connection-oriented streams are defined by the vsock
// protocol.
const HDROFF_TYPE: usize = 28;

// Operation ID (le16) - one of the VSOCK_OP_* values; e.g.
// - VSOCK_OP_RW: a data packet;
// - VSOCK_OP_REQUEST: connection request;
// - VSOCK_OP_RST: forceful connection termination;
// etc (see `super::defs::uapi` for the full list).
const HDROFF_OP: usize = 30;

// Additional options (flags, le32) associated with the current operation (`op`).
// Currently, only used with shutdown requests (VSOCK_OP_SHUTDOWN).
const HDROFF_FLAGS: usize = 32;

// Size (le32, in bytes) of the packet sender receive buffer (for the connection to which this
// packet belongs).
const HDROFF_BUF_ALLOC: usize = 36;

// Number of bytes (le32) the sender has received and consumed (for the connection to which this
// packet belongs). For instance, for our Unix backend, this counter would be the total number of
// bytes we have successfully written to a backing Unix socket.
const HDROFF_FWD_CNT: usize = 40;
94 
/// The vsock packet, implemented as a wrapper over a virtq descriptor chain:
/// - the chain head, holding the packet header; and
/// - (an optional) data/buffer descriptor, only present for data packets (VSOCK_OP_RW).
///
pub struct VsockPacket {
    // Host pointer to the packet header; always points at a region of
    // VSOCK_PKT_HDR_SIZE bytes that was mapped (and bounds-checked) via
    // `get_host_address_range` when the packet was assembled.
    hdr: *mut u8,
    // Host pointer to the packet data buffer, if any. `None` for control
    // packets, which carry no data.
    buf: Option<*mut u8>,
    // Size, in bytes, of the region `buf` points to; 0 when `buf` is `None`.
    buf_size: usize,
}
104 
105 impl VsockPacket {
106     /// Create the packet wrapper from a TX virtq chain head.
107     ///
108     /// The chain head is expected to hold valid packet header data. A following packet buffer
109     /// descriptor can optionally end the chain. Bounds and pointer checks are performed when
110     /// creating the wrapper.
111     ///
112     pub fn from_tx_virtq_head<M>(
113         desc_chain: &mut DescriptorChain<M>,
114         access_platform: Option<&Arc<dyn AccessPlatform>>,
115     ) -> Result<Self>
116     where
117         M: Clone + Deref,
118         M::Target: GuestMemory,
119     {
120         let head = desc_chain.next().ok_or(VsockError::HdrDescMissing)?;
121 
122         // All buffers in the TX queue must be readable.
123         //
124         if head.is_write_only() {
125             return Err(VsockError::UnreadableDescriptor);
126         }
127 
128         // The packet header should fit inside the head descriptor.
129         if head.len() < VSOCK_PKT_HDR_SIZE as u32 {
130             return Err(VsockError::HdrDescTooSmall(head.len()));
131         }
132 
133         let mut pkt = Self {
134             hdr: get_host_address_range(
135                 desc_chain.memory(),
136                 head.addr()
137                     .translate_gva(access_platform, VSOCK_PKT_HDR_SIZE),
138                 VSOCK_PKT_HDR_SIZE,
139             )
140             .ok_or(VsockError::GuestMemory)?,
141             buf: None,
142             buf_size: 0,
143         };
144 
145         // No point looking for a data/buffer descriptor, if the packet is zero-length.
146         if pkt.is_empty() {
147             return Ok(pkt);
148         }
149 
150         // Reject weirdly-sized packets.
151         //
152         if pkt.len() > defs::MAX_PKT_BUF_SIZE as u32 {
153             return Err(VsockError::InvalidPktLen(pkt.len()));
154         }
155 
156         // Prior to Linux v6.3 there are two descriptors
157         if head.has_next() {
158             let buf_desc = desc_chain.next().ok_or(VsockError::BufDescMissing)?;
159 
160             // TX data should be read-only.
161             if buf_desc.is_write_only() {
162                 return Err(VsockError::UnreadableDescriptor);
163             }
164 
165             // The data buffer should be large enough to fit the size of the data, as described by
166             // the header descriptor.
167             if buf_desc.len() < pkt.len() {
168                 return Err(VsockError::BufDescTooSmall);
169             }
170             let buf_size = buf_desc.len() as usize;
171             pkt.buf_size = buf_size;
172             pkt.buf = Some(
173                 get_host_address_range(
174                     desc_chain.memory(),
175                     buf_desc.addr().translate_gva(access_platform, buf_size),
176                     pkt.buf_size,
177                 )
178                 .ok_or(VsockError::GuestMemory)?,
179             );
180         } else {
181             let buf_size: usize = head.len() as usize - VSOCK_PKT_HDR_SIZE;
182             pkt.buf_size = buf_size;
183             pkt.buf = Some(
184                 get_host_address_range(
185                     desc_chain.memory(),
186                     head.addr()
187                         .checked_add(VSOCK_PKT_HDR_SIZE as u64)
188                         .unwrap()
189                         .translate_gva(access_platform, buf_size),
190                     buf_size,
191                 )
192                 .ok_or(VsockError::GuestMemory)?,
193             );
194         }
195 
196         Ok(pkt)
197     }
198 
199     /// Create the packet wrapper from an RX virtq chain head.
200     ///
201     /// There must be two descriptors in the chain, both writable: a header descriptor and a data
202     /// descriptor. Bounds and pointer checks are performed when creating the wrapper.
203     ///
204     pub fn from_rx_virtq_head<M>(
205         desc_chain: &mut DescriptorChain<M>,
206         access_platform: Option<&Arc<dyn AccessPlatform>>,
207     ) -> Result<Self>
208     where
209         M: Clone + Deref,
210         M::Target: GuestMemory,
211     {
212         let head = desc_chain.next().ok_or(VsockError::HdrDescMissing)?;
213 
214         // All RX buffers must be writable.
215         //
216         if !head.is_write_only() {
217             return Err(VsockError::UnwritableDescriptor);
218         }
219 
220         // The packet header should fit inside the head descriptor.
221         if head.len() < VSOCK_PKT_HDR_SIZE as u32 {
222             return Err(VsockError::HdrDescTooSmall(head.len()));
223         }
224 
225         // Prior to Linux v6.3 there are two descriptors
226         if head.has_next() {
227             let buf_desc = desc_chain.next().ok_or(VsockError::BufDescMissing)?;
228             let buf_size = buf_desc.len() as usize;
229 
230             Ok(Self {
231                 hdr: get_host_address_range(
232                     desc_chain.memory(),
233                     head.addr()
234                         .translate_gva(access_platform, VSOCK_PKT_HDR_SIZE),
235                     VSOCK_PKT_HDR_SIZE,
236                 )
237                 .ok_or(VsockError::GuestMemory)?,
238                 buf: Some(
239                     get_host_address_range(
240                         desc_chain.memory(),
241                         buf_desc.addr().translate_gva(access_platform, buf_size),
242                         buf_size,
243                     )
244                     .ok_or(VsockError::GuestMemory)?,
245                 ),
246                 buf_size,
247             })
248         } else {
249             let buf_size: usize = head.len() as usize - VSOCK_PKT_HDR_SIZE;
250             Ok(Self {
251                 hdr: get_host_address_range(
252                     desc_chain.memory(),
253                     head.addr()
254                         .translate_gva(access_platform, VSOCK_PKT_HDR_SIZE),
255                     VSOCK_PKT_HDR_SIZE,
256                 )
257                 .ok_or(VsockError::GuestMemory)?,
258                 buf: Some(
259                     get_host_address_range(
260                         desc_chain.memory(),
261                         head.addr()
262                             .checked_add(VSOCK_PKT_HDR_SIZE as u64)
263                             .unwrap()
264                             .translate_gva(access_platform, buf_size),
265                         buf_size,
266                     )
267                     .ok_or(VsockError::GuestMemory)?,
268                 ),
269                 buf_size,
270             })
271         }
272     }
273 
274     /// Provides in-place, byte-slice, access to the vsock packet header.
275     ///
276     pub fn hdr(&self) -> &[u8] {
277         // SAFETY: bound checks have already been performed when creating the packet
278         // from the virtq descriptor.
279         unsafe { std::slice::from_raw_parts(self.hdr as *const u8, VSOCK_PKT_HDR_SIZE) }
280     }
281 
282     /// Provides in-place, byte-slice, mutable access to the vsock packet header.
283     ///
284     pub fn hdr_mut(&mut self) -> &mut [u8] {
285         // SAFETY: bound checks have already been performed when creating the packet
286         // from the virtq descriptor.
287         unsafe { std::slice::from_raw_parts_mut(self.hdr, VSOCK_PKT_HDR_SIZE) }
288     }
289 
290     /// Provides in-place, byte-slice access to the vsock packet data buffer.
291     ///
292     /// Note: control packets (e.g. connection request or reset) have no data buffer associated.
293     ///       For those packets, this method will return `None`.
294     /// Also note: calling `len()` on the returned slice will yield the buffer size, which may be
295     ///            (and often is) larger than the length of the packet data. The packet data length
296     ///            is stored in the packet header, and accessible via `VsockPacket::len()`.
297     pub fn buf(&self) -> Option<&[u8]> {
298         self.buf.map(|ptr| {
299             // SAFETY: bound checks have already been performed when creating the packet
300             // from the virtq descriptor.
301             unsafe { std::slice::from_raw_parts(ptr as *const u8, self.buf_size) }
302         })
303     }
304 
305     /// Provides in-place, byte-slice, mutable access to the vsock packet data buffer.
306     ///
307     /// Note: control packets (e.g. connection request or reset) have no data buffer associated.
308     ///       For those packets, this method will return `None`.
309     /// Also note: calling `len()` on the returned slice will yield the buffer size, which may be
310     ///            (and often is) larger than the length of the packet data. The packet data length
311     ///            is stored in the packet header, and accessible via `VsockPacket::len()`.
312     pub fn buf_mut(&mut self) -> Option<&mut [u8]> {
313         self.buf.map(|ptr| {
314             // SAFETY: bound checks have already been performed when creating the packet
315             // from the virtq descriptor.
316             unsafe { std::slice::from_raw_parts_mut(ptr, self.buf_size) }
317         })
318     }
319 
320     pub fn src_cid(&self) -> u64 {
321         LittleEndian::read_u64(&self.hdr()[HDROFF_SRC_CID..])
322     }
323 
324     pub fn set_src_cid(&mut self, cid: u64) -> &mut Self {
325         LittleEndian::write_u64(&mut self.hdr_mut()[HDROFF_SRC_CID..], cid);
326         self
327     }
328 
329     pub fn dst_cid(&self) -> u64 {
330         LittleEndian::read_u64(&self.hdr()[HDROFF_DST_CID..])
331     }
332 
333     pub fn set_dst_cid(&mut self, cid: u64) -> &mut Self {
334         LittleEndian::write_u64(&mut self.hdr_mut()[HDROFF_DST_CID..], cid);
335         self
336     }
337 
338     pub fn src_port(&self) -> u32 {
339         LittleEndian::read_u32(&self.hdr()[HDROFF_SRC_PORT..])
340     }
341 
342     pub fn set_src_port(&mut self, port: u32) -> &mut Self {
343         LittleEndian::write_u32(&mut self.hdr_mut()[HDROFF_SRC_PORT..], port);
344         self
345     }
346 
347     pub fn dst_port(&self) -> u32 {
348         LittleEndian::read_u32(&self.hdr()[HDROFF_DST_PORT..])
349     }
350 
351     pub fn set_dst_port(&mut self, port: u32) -> &mut Self {
352         LittleEndian::write_u32(&mut self.hdr_mut()[HDROFF_DST_PORT..], port);
353         self
354     }
355 
356     pub fn len(&self) -> u32 {
357         LittleEndian::read_u32(&self.hdr()[HDROFF_LEN..])
358     }
359 
360     pub fn is_empty(&self) -> bool {
361         self.len() == 0
362     }
363 
364     pub fn set_len(&mut self, len: u32) -> &mut Self {
365         LittleEndian::write_u32(&mut self.hdr_mut()[HDROFF_LEN..], len);
366         self
367     }
368 
369     pub fn type_(&self) -> u16 {
370         LittleEndian::read_u16(&self.hdr()[HDROFF_TYPE..])
371     }
372 
373     pub fn set_type(&mut self, type_: u16) -> &mut Self {
374         LittleEndian::write_u16(&mut self.hdr_mut()[HDROFF_TYPE..], type_);
375         self
376     }
377 
378     pub fn op(&self) -> u16 {
379         LittleEndian::read_u16(&self.hdr()[HDROFF_OP..])
380     }
381 
382     pub fn set_op(&mut self, op: u16) -> &mut Self {
383         LittleEndian::write_u16(&mut self.hdr_mut()[HDROFF_OP..], op);
384         self
385     }
386 
387     pub fn flags(&self) -> u32 {
388         LittleEndian::read_u32(&self.hdr()[HDROFF_FLAGS..])
389     }
390 
391     pub fn set_flags(&mut self, flags: u32) -> &mut Self {
392         LittleEndian::write_u32(&mut self.hdr_mut()[HDROFF_FLAGS..], flags);
393         self
394     }
395 
396     pub fn set_flag(&mut self, flag: u32) -> &mut Self {
397         self.set_flags(self.flags() | flag);
398         self
399     }
400 
401     pub fn buf_alloc(&self) -> u32 {
402         LittleEndian::read_u32(&self.hdr()[HDROFF_BUF_ALLOC..])
403     }
404 
405     pub fn set_buf_alloc(&mut self, buf_alloc: u32) -> &mut Self {
406         LittleEndian::write_u32(&mut self.hdr_mut()[HDROFF_BUF_ALLOC..], buf_alloc);
407         self
408     }
409 
410     pub fn fwd_cnt(&self) -> u32 {
411         LittleEndian::read_u32(&self.hdr()[HDROFF_FWD_CNT..])
412     }
413 
414     pub fn set_fwd_cnt(&mut self, fwd_cnt: u32) -> &mut Self {
415         LittleEndian::write_u32(&mut self.hdr_mut()[HDROFF_FWD_CNT..], fwd_cnt);
416         self
417     }
418 }
419 
#[cfg(test)]
// Unsafe blocks in these tests operate on pointers produced by
// `get_host_address_range` over test-owned guest memory.
#[allow(clippy::undocumented_unsafe_blocks)]
mod tests {
    use super::super::tests::TestContext;
    use super::*;
    use crate::vsock::defs::MAX_PKT_BUF_SIZE;
    use crate::GuestMemoryMmap;
    use virtio_bindings::virtio_ring::VRING_DESC_F_WRITE;
    use virtio_queue::QueueOwnedT;
    use vm_memory::GuestAddress;
    use vm_virtio::queue::testing::VirtqDesc as GuestQDesc;

    // Builds a fresh `TestContext` / epoll-handler-context pair, and primes the first
    // TX descriptor's header with a valid (non-zero) packet length, so TX assembly
    // tests start from a well-formed descriptor chain.
    macro_rules! create_context {
        ($test_ctx:ident, $handler_ctx:ident) => {
            let $test_ctx = TestContext::new();
            let mut $handler_ctx = $test_ctx.create_epoll_handler_context();
            // For TX packets, hdr.len should be set to a valid value.
            set_pkt_len(1024, &$handler_ctx.guest_txvq.dtable[0], &$test_ctx.mem);
        };
    }

    // Asserts that assembling a packet from the given queue fails with the expected
    // `VsockError` pattern. The `tx`/`rx` shorthands pick the constructor and the
    // queue index (1 = TX queue, 0 = RX queue).
    macro_rules! expect_asm_error {
        (tx, $test_ctx:expr, $handler_ctx:expr, $err:pat) => {
            expect_asm_error!($test_ctx, $handler_ctx, $err, from_tx_virtq_head, 1);
        };
        (rx, $test_ctx:expr, $handler_ctx:expr, $err:pat) => {
            expect_asm_error!($test_ctx, $handler_ctx, $err, from_rx_virtq_head, 0);
        };
        ($test_ctx:expr, $handler_ctx:expr, $err:pat, $ctor:ident, $vq:expr) => {
            match VsockPacket::$ctor(
                &mut $handler_ctx.handler.queues[$vq]
                    .iter(&$test_ctx.mem)
                    .unwrap()
                    .next()
                    .unwrap(),
                None,
            ) {
                // Expected failure: pass.
                Err($err) => (),
                Ok(_) => panic!("Packet assembly should've failed!"),
                // Any other error is also a test failure.
                Err(other) => panic!("Packet assembly failed with: {:?}", other),
            }
        };
    }

    // Writes `len` into the `len` field of the packet header that `guest_desc` points
    // to in guest memory, by mapping the header and patching it in place.
    fn set_pkt_len(len: u32, guest_desc: &GuestQDesc, mem: &GuestMemoryMmap) {
        let hdr_gpa = guest_desc.addr.get();
        let hdr_ptr =
            get_host_address_range(mem, GuestAddress(hdr_gpa), VSOCK_PKT_HDR_SIZE).unwrap();
        let len_ptr = unsafe { hdr_ptr.add(HDROFF_LEN) };

        LittleEndian::write_u32(unsafe { std::slice::from_raw_parts_mut(len_ptr, 4) }, len);
    }

    #[test]
    fn test_tx_packet_assembly() {
        // Test case: successful TX packet assembly.
        {
            create_context!(test_ctx, handler_ctx);

            let pkt = VsockPacket::from_tx_virtq_head(
                &mut handler_ctx.handler.queues[1]
                    .iter(&test_ctx.mem)
                    .unwrap()
                    .next()
                    .unwrap(),
                None,
            )
            .unwrap();
            assert_eq!(pkt.hdr().len(), VSOCK_PKT_HDR_SIZE);
            assert_eq!(
                pkt.buf().unwrap().len(),
                handler_ctx.guest_txvq.dtable[1].len.get() as usize
            );
        }

        // Test case: error on write-only hdr descriptor.
        {
            create_context!(test_ctx, handler_ctx);
            handler_ctx.guest_txvq.dtable[0]
                .flags
                .set(VRING_DESC_F_WRITE.try_into().unwrap());
            expect_asm_error!(tx, test_ctx, handler_ctx, VsockError::UnreadableDescriptor);
        }

        // Test case: header descriptor has insufficient space to hold the packet header.
        {
            create_context!(test_ctx, handler_ctx);
            handler_ctx.guest_txvq.dtable[0]
                .len
                .set(VSOCK_PKT_HDR_SIZE as u32 - 1);
            expect_asm_error!(tx, test_ctx, handler_ctx, VsockError::HdrDescTooSmall(_));
        }

        // Test case: zero-length TX packet.
        {
            create_context!(test_ctx, handler_ctx);
            set_pkt_len(0, &handler_ctx.guest_txvq.dtable[0], &test_ctx.mem);
            let mut pkt = VsockPacket::from_tx_virtq_head(
                &mut handler_ctx.handler.queues[1]
                    .iter(&test_ctx.mem)
                    .unwrap()
                    .next()
                    .unwrap(),
                None,
            )
            .unwrap();
            assert!(pkt.buf().is_none());
            assert!(pkt.buf_mut().is_none());
        }

        // Test case: TX packet has more data than we can handle.
        {
            create_context!(test_ctx, handler_ctx);
            set_pkt_len(
                MAX_PKT_BUF_SIZE as u32 + 1,
                &handler_ctx.guest_txvq.dtable[0],
                &test_ctx.mem,
            );
            expect_asm_error!(tx, test_ctx, handler_ctx, VsockError::InvalidPktLen(_));
        }

        // Test case: error on write-only buf descriptor.
        {
            create_context!(test_ctx, handler_ctx);
            handler_ctx.guest_txvq.dtable[1]
                .flags
                .set(VRING_DESC_F_WRITE.try_into().unwrap());
            expect_asm_error!(tx, test_ctx, handler_ctx, VsockError::UnreadableDescriptor);
        }

        // Test case: the buffer descriptor cannot fit all the data advertised by the
        // packet header `len` field.
        {
            create_context!(test_ctx, handler_ctx);
            set_pkt_len(8 * 1024, &handler_ctx.guest_txvq.dtable[0], &test_ctx.mem);
            handler_ctx.guest_txvq.dtable[1].len.set(4 * 1024);
            expect_asm_error!(tx, test_ctx, handler_ctx, VsockError::BufDescTooSmall);
        }
    }

    #[test]
    fn test_rx_packet_assembly() {
        // Test case: successful RX packet assembly.
        {
            create_context!(test_ctx, handler_ctx);
            let pkt = VsockPacket::from_rx_virtq_head(
                &mut handler_ctx.handler.queues[0]
                    .iter(&test_ctx.mem)
                    .unwrap()
                    .next()
                    .unwrap(),
                None,
            )
            .unwrap();
            assert_eq!(pkt.hdr().len(), VSOCK_PKT_HDR_SIZE);
            assert_eq!(
                pkt.buf().unwrap().len(),
                handler_ctx.guest_rxvq.dtable[1].len.get() as usize
            );
        }

        // Test case: read-only RX packet header.
        {
            create_context!(test_ctx, handler_ctx);
            handler_ctx.guest_rxvq.dtable[0].flags.set(0);
            expect_asm_error!(rx, test_ctx, handler_ctx, VsockError::UnwritableDescriptor);
        }

        // Test case: RX descriptor head cannot fit the entire packet header.
        {
            create_context!(test_ctx, handler_ctx);
            handler_ctx.guest_rxvq.dtable[0]
                .len
                .set(VSOCK_PKT_HDR_SIZE as u32 - 1);
            expect_asm_error!(rx, test_ctx, handler_ctx, VsockError::HdrDescTooSmall(_));
        }
    }

    #[test]
    fn test_packet_hdr_accessors() {
        const SRC_CID: u64 = 1;
        const DST_CID: u64 = 2;
        const SRC_PORT: u32 = 3;
        const DST_PORT: u32 = 4;
        const LEN: u32 = 5;
        const TYPE: u16 = 6;
        const OP: u16 = 7;
        const FLAGS: u32 = 8;
        const BUF_ALLOC: u32 = 9;
        const FWD_CNT: u32 = 10;

        create_context!(test_ctx, handler_ctx);
        let mut pkt = VsockPacket::from_rx_virtq_head(
            &mut handler_ctx.handler.queues[0]
                .iter(&test_ctx.mem)
                .unwrap()
                .next()
                .unwrap(),
            None,
        )
        .unwrap();

        // Test field accessors.
        pkt.set_src_cid(SRC_CID)
            .set_dst_cid(DST_CID)
            .set_src_port(SRC_PORT)
            .set_dst_port(DST_PORT)
            .set_len(LEN)
            .set_type(TYPE)
            .set_op(OP)
            .set_flags(FLAGS)
            .set_buf_alloc(BUF_ALLOC)
            .set_fwd_cnt(FWD_CNT);

        assert_eq!(pkt.src_cid(), SRC_CID);
        assert_eq!(pkt.dst_cid(), DST_CID);
        assert_eq!(pkt.src_port(), SRC_PORT);
        assert_eq!(pkt.dst_port(), DST_PORT);
        assert_eq!(pkt.len(), LEN);
        assert_eq!(pkt.type_(), TYPE);
        assert_eq!(pkt.op(), OP);
        assert_eq!(pkt.flags(), FLAGS);
        assert_eq!(pkt.buf_alloc(), BUF_ALLOC);
        assert_eq!(pkt.fwd_cnt(), FWD_CNT);

        // Test individual flag setting.
        let flags = pkt.flags() | 0b1000;
        pkt.set_flag(0b1000);
        assert_eq!(pkt.flags(), flags);

        // Test packet header as-slice access.
        //

        assert_eq!(pkt.hdr().len(), VSOCK_PKT_HDR_SIZE);

        assert_eq!(
            SRC_CID,
            LittleEndian::read_u64(&pkt.hdr()[HDROFF_SRC_CID..])
        );
        assert_eq!(
            DST_CID,
            LittleEndian::read_u64(&pkt.hdr()[HDROFF_DST_CID..])
        );
        assert_eq!(
            SRC_PORT,
            LittleEndian::read_u32(&pkt.hdr()[HDROFF_SRC_PORT..])
        );
        assert_eq!(
            DST_PORT,
            LittleEndian::read_u32(&pkt.hdr()[HDROFF_DST_PORT..])
        );
        assert_eq!(LEN, LittleEndian::read_u32(&pkt.hdr()[HDROFF_LEN..]));
        assert_eq!(TYPE, LittleEndian::read_u16(&pkt.hdr()[HDROFF_TYPE..]));
        assert_eq!(OP, LittleEndian::read_u16(&pkt.hdr()[HDROFF_OP..]));
        assert_eq!(FLAGS, LittleEndian::read_u32(&pkt.hdr()[HDROFF_FLAGS..]));
        assert_eq!(
            BUF_ALLOC,
            LittleEndian::read_u32(&pkt.hdr()[HDROFF_BUF_ALLOC..])
        );
        assert_eq!(
            FWD_CNT,
            LittleEndian::read_u32(&pkt.hdr()[HDROFF_FWD_CNT..])
        );

        // Zeroing the header through `hdr_mut()` must be visible via every getter.
        assert_eq!(pkt.hdr_mut().len(), VSOCK_PKT_HDR_SIZE);
        for b in pkt.hdr_mut() {
            *b = 0;
        }
        assert_eq!(pkt.src_cid(), 0);
        assert_eq!(pkt.dst_cid(), 0);
        assert_eq!(pkt.src_port(), 0);
        assert_eq!(pkt.dst_port(), 0);
        assert_eq!(pkt.len(), 0);
        assert_eq!(pkt.type_(), 0);
        assert_eq!(pkt.op(), 0);
        assert_eq!(pkt.flags(), 0);
        assert_eq!(pkt.buf_alloc(), 0);
        assert_eq!(pkt.fwd_cnt(), 0);
    }

    #[test]
    fn test_packet_buf() {
        create_context!(test_ctx, handler_ctx);
        let mut pkt = VsockPacket::from_rx_virtq_head(
            &mut handler_ctx.handler.queues[0]
                .iter(&test_ctx.mem)
                .unwrap()
                .next()
                .unwrap(),
            None,
        )
        .unwrap();

        assert_eq!(
            pkt.buf().unwrap().len(),
            handler_ctx.guest_rxvq.dtable[1].len.get() as usize
        );
        assert_eq!(
            pkt.buf_mut().unwrap().len(),
            handler_ctx.guest_rxvq.dtable[1].len.get() as usize
        );

        // Writes through `buf_mut()` must be readable back through `buf()`.
        for i in 0..pkt.buf().unwrap().len() {
            pkt.buf_mut().unwrap()[i] = (i % 0x100) as u8;
            assert_eq!(pkt.buf().unwrap()[i], (i % 0x100) as u8);
        }
    }
}
728