1 // Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. 2 // SPDX-License-Identifier: Apache-2.0 3 // 4 5 /// `VsockPacket` provides a thin wrapper over the buffers exchanged via virtio queues. 6 /// There are two components to a vsock packet, each using its own descriptor in a 7 /// virtio queue: 8 /// - the packet header; and 9 /// - the packet data/buffer. 10 /// There is a 1:1 relation between descriptor chains and packets: the first (chain head) holds 11 /// the header, and an optional second descriptor holds the data. The second descriptor is only 12 /// present for data packets (VSOCK_OP_RW). 13 /// 14 /// `VsockPacket` wraps these two buffers and provides direct access to the data stored 15 /// in guest memory. This is done to avoid unnecessarily copying data from guest memory 16 /// to temporary buffers, before passing it on to the vsock backend. 17 /// 18 use byteorder::{ByteOrder, LittleEndian}; 19 use std::ops::Deref; 20 use std::sync::Arc; 21 22 use super::defs; 23 use super::{Result, VsockError}; 24 use crate::get_host_address_range; 25 use virtio_queue::DescriptorChain; 26 use vm_memory::GuestMemory; 27 use vm_virtio::{AccessPlatform, Translatable}; 28 29 // The vsock packet header is defined by the C struct: 30 // 31 // ```C 32 // struct virtio_vsock_hdr { 33 // le64 src_cid; 34 // le64 dst_cid; 35 // le32 src_port; 36 // le32 dst_port; 37 // le32 len; 38 // le16 type; 39 // le16 op; 40 // le32 flags; 41 // le32 buf_alloc; 42 // le32 fwd_cnt; 43 // }; 44 // ``` 45 // 46 // This structed will occupy the buffer pointed to by the head descriptor. We'll be accessing it 47 // as a byte slice. To that end, we define below the offsets for each field struct, as well as the 48 // packed struct size, as a bunch of `usize` consts. 
// Note that these offsets are only used privately by the `VsockPacket` struct, the public interface
// consisting of getter and setter methods, for each struct field, that will also handle the correct
// endianness.
//
// Invariant: each HDROFF_* value is the byte offset of the corresponding field in the packed
// `virtio_vsock_hdr` C struct above, and the last offset plus its field width (40 + 4) equals
// `VSOCK_PKT_HDR_SIZE`.

/// The vsock packet header struct size (when packed).
pub const VSOCK_PKT_HDR_SIZE: usize = 44;

// Source CID (u64).
const HDROFF_SRC_CID: usize = 0;

// Destination CID (u64).
const HDROFF_DST_CID: usize = 8;

// Source port (u32).
const HDROFF_SRC_PORT: usize = 16;

// Destination port (u32).
const HDROFF_DST_PORT: usize = 20;

// Data length (in bytes) - may be 0, if there is no data buffer.
const HDROFF_LEN: usize = 24;

// Socket type. Currently, only connection-oriented streams are defined by the vsock protocol.
const HDROFF_TYPE: usize = 28;

// Operation ID - one of the VSOCK_OP_* values; e.g.
// - VSOCK_OP_RW: a data packet;
// - VSOCK_OP_REQUEST: connection request;
// - VSOCK_OP_RST: forceful connection termination;
// etc (see `super::defs::uapi` for the full list).
const HDROFF_OP: usize = 30;

// Additional options (flags) associated with the current operation (`op`).
// Currently, only used with shutdown requests (VSOCK_OP_SHUTDOWN).
const HDROFF_FLAGS: usize = 32;

// Size (in bytes) of the packet sender receive buffer (for the connection to which this packet
// belongs).
const HDROFF_BUF_ALLOC: usize = 36;

// Number of bytes the sender has received and consumed (for the connection to which this packet
// belongs). For instance, for our Unix backend, this counter would be the total number of bytes
// we have successfully written to a backing Unix socket.
const HDROFF_FWD_CNT: usize = 40;

/// The vsock packet, implemented as a wrapper over a virtq descriptor chain:
/// - the chain head, holding the packet header; and
/// - (an optional) data/buffer descriptor, only present for data packets (VSOCK_OP_RW).
///
pub struct VsockPacket {
    // Host-mapped pointer to the packet header (head descriptor's buffer). Always valid for
    // `VSOCK_PKT_HDR_SIZE` bytes: the constructors bail out before building `Self` otherwise.
    hdr: *mut u8,
    // Host-mapped pointer to the data buffer (second descriptor), if the chain has one.
    // `None` for zero-length TX control packets.
    buf: Option<*mut u8>,
    // Size in bytes of the buffer behind `buf`. 0 whenever `buf` is `None`.
    buf_size: usize,
}

impl VsockPacket {
    /// Create the packet wrapper from a TX virtq chain head.
    ///
    /// The chain head is expected to hold valid packet header data. A following packet buffer
    /// descriptor can optionally end the chain. Bounds and pointer checks are performed when
    /// creating the wrapper.
    ///
    /// Errors returned (all `VsockError`): `HdrDescMissing`, `UnreadableDescriptor`,
    /// `HdrDescTooSmall`, `InvalidPktLen`, `BufDescMissing`, `BufDescTooSmall`, `GuestMemory`.
    ///
    pub fn from_tx_virtq_head<M>(
        desc_chain: &mut DescriptorChain<M>,
        access_platform: Option<&Arc<dyn AccessPlatform>>,
    ) -> Result<Self>
    where
        M: Clone + Deref,
        M::Target: GuestMemory,
    {
        let head = desc_chain.next().ok_or(VsockError::HdrDescMissing)?;

        // All buffers in the TX queue must be readable.
        //
        if head.is_write_only() {
            return Err(VsockError::UnreadableDescriptor);
        }

        // The packet header should fit inside the head descriptor. This check is what makes the
        // raw-slice accessors (`hdr()`/`hdr_mut()`) sound later on.
        if head.len() < VSOCK_PKT_HDR_SIZE as u32 {
            return Err(VsockError::HdrDescTooSmall(head.len()));
        }

        let mut pkt = Self {
            // Translate the guest address (through the access platform, e.g. for TDX/SEV or
            // vIOMMU setups) and map it to a host pointer, validating that the whole header
            // range is backed by guest memory.
            hdr: get_host_address_range(
                desc_chain.memory(),
                head.addr()
                    .translate_gva(access_platform, head.len() as usize),
                VSOCK_PKT_HDR_SIZE,
            )
            .ok_or(VsockError::GuestMemory)? as *mut u8,
            buf: None,
            buf_size: 0,
        };

        // No point looking for a data/buffer descriptor, if the packet is zero-length.
        // `is_empty()` reads the `len` header field we just mapped.
        if pkt.is_empty() {
            return Ok(pkt);
        }

        // Reject weirdly-sized packets.
        //
        if pkt.len() > defs::MAX_PKT_BUF_SIZE as u32 {
            return Err(VsockError::InvalidPktLen(pkt.len()));
        }

        // If the packet header showed a non-zero length, there should be a data descriptor here.
        let buf_desc = desc_chain.next().ok_or(VsockError::BufDescMissing)?;

        // TX data should be read-only.
        if buf_desc.is_write_only() {
            return Err(VsockError::UnreadableDescriptor);
        }

        // The data buffer should be large enough to fit the size of the data, as described by
        // the header descriptor.
        if buf_desc.len() < pkt.len() {
            return Err(VsockError::BufDescTooSmall);
        }

        pkt.buf_size = buf_desc.len() as usize;
        pkt.buf = Some(
            get_host_address_range(
                desc_chain.memory(),
                buf_desc
                    .addr()
                    .translate_gva(access_platform, buf_desc.len() as usize),
                pkt.buf_size,
            )
            .ok_or(VsockError::GuestMemory)? as *mut u8,
        );

        Ok(pkt)
    }

    /// Create the packet wrapper from an RX virtq chain head.
    ///
    /// There must be two descriptors in the chain, both writable: a header descriptor and a data
    /// descriptor. Bounds and pointer checks are performed when creating the wrapper.
    ///
    /// Errors returned (all `VsockError`): `HdrDescMissing`, `UnwritableDescriptor`,
    /// `HdrDescTooSmall`, `BufDescMissing`, `GuestMemory`.
    ///
    pub fn from_rx_virtq_head<M>(
        desc_chain: &mut DescriptorChain<M>,
        access_platform: Option<&Arc<dyn AccessPlatform>>,
    ) -> Result<Self>
    where
        M: Clone + Deref,
        M::Target: GuestMemory,
    {
        let head = desc_chain.next().ok_or(VsockError::HdrDescMissing)?;

        // All RX buffers must be writable.
        //
        if !head.is_write_only() {
            return Err(VsockError::UnwritableDescriptor);
        }

        // The packet header should fit inside the head descriptor.
        if head.len() < VSOCK_PKT_HDR_SIZE as u32 {
            return Err(VsockError::HdrDescTooSmall(head.len()));
        }

        // All RX descriptor chains should have a header and a data descriptor.
        if !head.has_next() {
            return Err(VsockError::BufDescMissing);
        }
        let buf_desc = desc_chain.next().ok_or(VsockError::BufDescMissing)?;
        // NOTE: unlike the TX path, the full descriptor length is used as the buffer size here;
        // the actual amount of data written is communicated back via the header `len` field.
        let buf_size = buf_desc.len() as usize;

        Ok(Self {
            hdr: get_host_address_range(
                desc_chain.memory(),
                head.addr()
                    .translate_gva(access_platform, head.len() as usize),
                VSOCK_PKT_HDR_SIZE,
            )
            .ok_or(VsockError::GuestMemory)? as *mut u8,
            buf: Some(
                get_host_address_range(
                    desc_chain.memory(),
                    buf_desc
                        .addr()
                        .translate_gva(access_platform, buf_desc.len() as usize),
                    buf_size,
                )
                .ok_or(VsockError::GuestMemory)? as *mut u8,
            ),
            buf_size,
        })
    }

    /// Provides in-place, byte-slice, access to the vsock packet header.
    ///
    pub fn hdr(&self) -> &[u8] {
        // SAFETY: bound checks have already been performed when creating the packet
        // from the virtq descriptor: `hdr` is a host mapping valid for VSOCK_PKT_HDR_SIZE bytes.
        unsafe { std::slice::from_raw_parts(self.hdr as *const u8, VSOCK_PKT_HDR_SIZE) }
    }

    /// Provides in-place, byte-slice, mutable access to the vsock packet header.
    ///
    pub fn hdr_mut(&mut self) -> &mut [u8] {
        // SAFETY: bound checks have already been performed when creating the packet
        // from the virtq descriptor; `&mut self` guarantees exclusive access to the mapping
        // through this wrapper.
        unsafe { std::slice::from_raw_parts_mut(self.hdr, VSOCK_PKT_HDR_SIZE) }
    }

    /// Provides in-place, byte-slice access to the vsock packet data buffer.
    ///
    /// Note: control packets (e.g. connection request or reset) have no data buffer associated.
    /// For those packets, this method will return `None`.
    /// Also note: calling `len()` on the returned slice will yield the buffer size, which may be
    /// (and often is) larger than the length of the packet data. The packet data length
    /// is stored in the packet header, and accessible via `VsockPacket::len()`.
    pub fn buf(&self) -> Option<&[u8]> {
        self.buf.map(|ptr| {
            // SAFETY: bound checks have already been performed when creating the packet
            // from the virtq descriptor: `ptr` is valid for `buf_size` bytes.
            unsafe { std::slice::from_raw_parts(ptr as *const u8, self.buf_size) }
        })
    }

    /// Provides in-place, byte-slice, mutable access to the vsock packet data buffer.
    ///
    /// Note: control packets (e.g. connection request or reset) have no data buffer associated.
    /// For those packets, this method will return `None`.
    /// Also note: calling `len()` on the returned slice will yield the buffer size, which may be
    /// (and often is) larger than the length of the packet data. The packet data length
    /// is stored in the packet header, and accessible via `VsockPacket::len()`.
    pub fn buf_mut(&mut self) -> Option<&mut [u8]> {
        self.buf.map(|ptr| {
            // SAFETY: bound checks have already been performed when creating the packet
            // from the virtq descriptor; `&mut self` guarantees exclusive access.
            unsafe { std::slice::from_raw_parts_mut(ptr, self.buf_size) }
        })
    }

    // Header field accessors. Each getter reads, and each setter writes, the little-endian
    // field at its HDROFF_* offset, directly in guest memory. Setters return `&mut Self` so
    // calls can be chained.

    /// Source CID header field.
    pub fn src_cid(&self) -> u64 {
        LittleEndian::read_u64(&self.hdr()[HDROFF_SRC_CID..])
    }

    /// Set the source CID header field.
    pub fn set_src_cid(&mut self, cid: u64) -> &mut Self {
        LittleEndian::write_u64(&mut self.hdr_mut()[HDROFF_SRC_CID..], cid);
        self
    }

    /// Destination CID header field.
    pub fn dst_cid(&self) -> u64 {
        LittleEndian::read_u64(&self.hdr()[HDROFF_DST_CID..])
    }

    /// Set the destination CID header field.
    pub fn set_dst_cid(&mut self, cid: u64) -> &mut Self {
        LittleEndian::write_u64(&mut self.hdr_mut()[HDROFF_DST_CID..], cid);
        self
    }

    /// Source port header field.
    pub fn src_port(&self) -> u32 {
        LittleEndian::read_u32(&self.hdr()[HDROFF_SRC_PORT..])
    }

    /// Set the source port header field.
    pub fn set_src_port(&mut self, port: u32) -> &mut Self {
        LittleEndian::write_u32(&mut self.hdr_mut()[HDROFF_SRC_PORT..], port);
        self
    }

    /// Destination port header field.
    pub fn dst_port(&self) -> u32 {
        LittleEndian::read_u32(&self.hdr()[HDROFF_DST_PORT..])
    }

    /// Set the destination port header field.
    pub fn set_dst_port(&mut self, port: u32) -> &mut Self {
        LittleEndian::write_u32(&mut self.hdr_mut()[HDROFF_DST_PORT..], port);
        self
    }

    /// Data length header field (bytes of packet data; may be less than the buffer size).
    pub fn len(&self) -> u32 {
        LittleEndian::read_u32(&self.hdr()[HDROFF_LEN..])
    }

    /// Whether the packet carries no data (`len` field is 0).
    pub fn is_empty(&self) -> bool {
        self.len() == 0
    }

    /// Set the data length header field.
    pub fn set_len(&mut self, len: u32) -> &mut Self {
        LittleEndian::write_u32(&mut self.hdr_mut()[HDROFF_LEN..], len);
        self
    }

    /// Socket type header field (trailing underscore avoids the `type` keyword).
    pub fn type_(&self) -> u16 {
        LittleEndian::read_u16(&self.hdr()[HDROFF_TYPE..])
    }

    /// Set the socket type header field.
    pub fn set_type(&mut self, type_: u16) -> &mut Self {
        LittleEndian::write_u16(&mut self.hdr_mut()[HDROFF_TYPE..], type_);
        self
    }

    /// Operation ID header field (one of the VSOCK_OP_* values).
    pub fn op(&self) -> u16 {
        LittleEndian::read_u16(&self.hdr()[HDROFF_OP..])
    }

    /// Set the operation ID header field.
    pub fn set_op(&mut self, op: u16) -> &mut Self {
        LittleEndian::write_u16(&mut self.hdr_mut()[HDROFF_OP..], op);
        self
    }

    /// Flags header field.
    pub fn flags(&self) -> u32 {
        LittleEndian::read_u32(&self.hdr()[HDROFF_FLAGS..])
    }

    /// Overwrite the flags header field.
    pub fn set_flags(&mut self, flags: u32) -> &mut Self {
        LittleEndian::write_u32(&mut self.hdr_mut()[HDROFF_FLAGS..], flags);
        self
    }

    /// OR a single flag into the flags header field, preserving the other bits.
    pub fn set_flag(&mut self, flag: u32) -> &mut Self {
        self.set_flags(self.flags() | flag);
        self
    }

    /// Sender receive-buffer size header field.
    pub fn buf_alloc(&self) -> u32 {
        LittleEndian::read_u32(&self.hdr()[HDROFF_BUF_ALLOC..])
    }

    /// Set the sender receive-buffer size header field.
    pub fn set_buf_alloc(&mut self, buf_alloc: u32) -> &mut Self {
        LittleEndian::write_u32(&mut self.hdr_mut()[HDROFF_BUF_ALLOC..], buf_alloc);
        self
    }

    /// Forwarded-byte counter header field.
    pub fn fwd_cnt(&self) -> u32 {
        LittleEndian::read_u32(&self.hdr()[HDROFF_FWD_CNT..])
    }

    /// Set the forwarded-byte counter header field.
    pub fn set_fwd_cnt(&mut self, fwd_cnt: u32) -> &mut Self {
        LittleEndian::write_u32(&mut self.hdr_mut()[HDROFF_FWD_CNT..], fwd_cnt);
        self
    }
}

#[cfg(test)]
mod tests {
    use super::super::tests::TestContext;
    use super::*;
    use crate::vsock::defs::MAX_PKT_BUF_SIZE;
    use crate::GuestMemoryMmap;
    use virtio_bindings::bindings::virtio_ring::VRING_DESC_F_WRITE;
    use virtio_queue::QueueOwnedT;
    use vm_memory::GuestAddress;
    use vm_virtio::queue::testing::VirtqDesc as GuestQDesc;

    // Build a fresh `TestContext` / epoll-handler context pair, with the TX header descriptor
    // pre-populated with a valid (non-zero) packet length.
    macro_rules! create_context {
        ($test_ctx:ident, $handler_ctx:ident) => {
            let $test_ctx = TestContext::new();
            let mut $handler_ctx = $test_ctx.create_epoll_handler_context();
            // For TX packets, hdr.len should be set to a valid value.
            set_pkt_len(1024, &$handler_ctx.guest_txvq.dtable[0], &$test_ctx.mem);
        };
    }

    // Assemble a packet from the TX (queue 1) or RX (queue 0) chain head and assert that
    // assembly fails with the expected `VsockError` pattern.
    macro_rules! expect_asm_error {
        (tx, $test_ctx:expr, $handler_ctx:expr, $err:pat) => {
            expect_asm_error!($test_ctx, $handler_ctx, $err, from_tx_virtq_head, 1);
        };
        (rx, $test_ctx:expr, $handler_ctx:expr, $err:pat) => {
            expect_asm_error!($test_ctx, $handler_ctx, $err, from_rx_virtq_head, 0);
        };
        ($test_ctx:expr, $handler_ctx:expr, $err:pat, $ctor:ident, $vq:expr) => {
            match VsockPacket::$ctor(
                &mut $handler_ctx.handler.queues[$vq]
                    .iter(&$test_ctx.mem)
                    .unwrap()
                    .next()
                    .unwrap(),
                None,
            ) {
                Err($err) => (),
                Ok(_) => panic!("Packet assembly should've failed!"),
                Err(other) => panic!("Packet assembly failed with: {:?}", other),
            }
        };
    }

    // Write `len` into the `len` field of the packet header that `guest_desc` points at,
    // going through the host mapping of the descriptor's guest address.
    fn set_pkt_len(len: u32, guest_desc: &GuestQDesc, mem: &GuestMemoryMmap) {
        let hdr_gpa = guest_desc.addr.get();
        let hdr_ptr = get_host_address_range(mem, GuestAddress(hdr_gpa), VSOCK_PKT_HDR_SIZE)
            .unwrap() as *mut u8;
        let len_ptr = unsafe { hdr_ptr.add(HDROFF_LEN) };

        LittleEndian::write_u32(unsafe { std::slice::from_raw_parts_mut(len_ptr, 4) }, len);
    }

    #[test]
    #[allow(clippy::cognitive_complexity)]
    fn test_tx_packet_assembly() {
        // Test case: successful TX packet assembly.
        {
            create_context!(test_ctx, handler_ctx);

            let pkt = VsockPacket::from_tx_virtq_head(
                &mut handler_ctx.handler.queues[1]
                    .iter(&test_ctx.mem)
                    .unwrap()
                    .next()
                    .unwrap(),
                None,
            )
            .unwrap();
            assert_eq!(pkt.hdr().len(), VSOCK_PKT_HDR_SIZE);
            assert_eq!(
                pkt.buf().unwrap().len(),
                handler_ctx.guest_txvq.dtable[1].len.get() as usize
            );
        }

        // Test case: error on write-only hdr descriptor.
        {
            create_context!(test_ctx, handler_ctx);
            handler_ctx.guest_txvq.dtable[0]
                .flags
                .set(VRING_DESC_F_WRITE.try_into().unwrap());
            expect_asm_error!(tx, test_ctx, handler_ctx, VsockError::UnreadableDescriptor);
        }

        // Test case: header descriptor has insufficient space to hold the packet header.
        {
            create_context!(test_ctx, handler_ctx);
            handler_ctx.guest_txvq.dtable[0]
                .len
                .set(VSOCK_PKT_HDR_SIZE as u32 - 1);
            expect_asm_error!(tx, test_ctx, handler_ctx, VsockError::HdrDescTooSmall(_));
        }

        // Test case: zero-length TX packet (no data descriptor expected, so `buf` is None).
        {
            create_context!(test_ctx, handler_ctx);
            set_pkt_len(0, &handler_ctx.guest_txvq.dtable[0], &test_ctx.mem);
            let mut pkt = VsockPacket::from_tx_virtq_head(
                &mut handler_ctx.handler.queues[1]
                    .iter(&test_ctx.mem)
                    .unwrap()
                    .next()
                    .unwrap(),
                None,
            )
            .unwrap();
            assert!(pkt.buf().is_none());
            assert!(pkt.buf_mut().is_none());
        }

        // Test case: TX packet has more data than we can handle.
        {
            create_context!(test_ctx, handler_ctx);
            set_pkt_len(
                MAX_PKT_BUF_SIZE as u32 + 1,
                &handler_ctx.guest_txvq.dtable[0],
                &test_ctx.mem,
            );
            expect_asm_error!(tx, test_ctx, handler_ctx, VsockError::InvalidPktLen(_));
        }

        // Test case:
        // - packet header advertises some data length; and
        // - the data descriptor is missing.
        {
            create_context!(test_ctx, handler_ctx);
            set_pkt_len(1024, &handler_ctx.guest_txvq.dtable[0], &test_ctx.mem);
            // Clearing the flags drops VRING_DESC_F_NEXT, truncating the chain to one descriptor.
            handler_ctx.guest_txvq.dtable[0].flags.set(0);
            expect_asm_error!(tx, test_ctx, handler_ctx, VsockError::BufDescMissing);
        }

        // Test case: error on write-only buf descriptor.
        {
            create_context!(test_ctx, handler_ctx);
            handler_ctx.guest_txvq.dtable[1]
                .flags
                .set(VRING_DESC_F_WRITE.try_into().unwrap());
            expect_asm_error!(tx, test_ctx, handler_ctx, VsockError::UnreadableDescriptor);
        }

        // Test case: the buffer descriptor cannot fit all the data advertised by the
        // packet header `len` field.
        {
            create_context!(test_ctx, handler_ctx);
            set_pkt_len(8 * 1024, &handler_ctx.guest_txvq.dtable[0], &test_ctx.mem);
            handler_ctx.guest_txvq.dtable[1].len.set(4 * 1024);
            expect_asm_error!(tx, test_ctx, handler_ctx, VsockError::BufDescTooSmall);
        }
    }

    #[test]
    fn test_rx_packet_assembly() {
        // Test case: successful RX packet assembly.
        {
            create_context!(test_ctx, handler_ctx);
            let pkt = VsockPacket::from_rx_virtq_head(
                &mut handler_ctx.handler.queues[0]
                    .iter(&test_ctx.mem)
                    .unwrap()
                    .next()
                    .unwrap(),
                None,
            )
            .unwrap();
            assert_eq!(pkt.hdr().len(), VSOCK_PKT_HDR_SIZE);
            assert_eq!(
                pkt.buf().unwrap().len(),
                handler_ctx.guest_rxvq.dtable[1].len.get() as usize
            );
        }

        // Test case: read-only RX packet header.
        {
            create_context!(test_ctx, handler_ctx);
            handler_ctx.guest_rxvq.dtable[0].flags.set(0);
            expect_asm_error!(rx, test_ctx, handler_ctx, VsockError::UnwritableDescriptor);
        }

        // Test case: RX descriptor head cannot fit the entire packet header.
        {
            create_context!(test_ctx, handler_ctx);
            handler_ctx.guest_rxvq.dtable[0]
                .len
                .set(VSOCK_PKT_HDR_SIZE as u32 - 1);
            expect_asm_error!(rx, test_ctx, handler_ctx, VsockError::HdrDescTooSmall(_));
        }

        // Test case: RX descriptor chain is missing the packet buffer descriptor.
        // (Setting only F_WRITE drops F_NEXT, so the chain ends at the header descriptor.)
        {
            create_context!(test_ctx, handler_ctx);
            handler_ctx.guest_rxvq.dtable[0]
                .flags
                .set(VRING_DESC_F_WRITE.try_into().unwrap());
            expect_asm_error!(rx, test_ctx, handler_ctx, VsockError::BufDescMissing);
        }
    }

    #[test]
    #[allow(clippy::cognitive_complexity)]
    fn test_packet_hdr_accessors() {
        const SRC_CID: u64 = 1;
        const DST_CID: u64 = 2;
        const SRC_PORT: u32 = 3;
        const DST_PORT: u32 = 4;
        const LEN: u32 = 5;
        const TYPE: u16 = 6;
        const OP: u16 = 7;
        const FLAGS: u32 = 8;
        const BUF_ALLOC: u32 = 9;
        const FWD_CNT: u32 = 10;

        create_context!(test_ctx, handler_ctx);
        let mut pkt = VsockPacket::from_rx_virtq_head(
            &mut handler_ctx.handler.queues[0]
                .iter(&test_ctx.mem)
                .unwrap()
                .next()
                .unwrap(),
            None,
        )
        .unwrap();

        // Test field accessors.
        pkt.set_src_cid(SRC_CID)
            .set_dst_cid(DST_CID)
            .set_src_port(SRC_PORT)
            .set_dst_port(DST_PORT)
            .set_len(LEN)
            .set_type(TYPE)
            .set_op(OP)
            .set_flags(FLAGS)
            .set_buf_alloc(BUF_ALLOC)
            .set_fwd_cnt(FWD_CNT);

        assert_eq!(pkt.src_cid(), SRC_CID);
        assert_eq!(pkt.dst_cid(), DST_CID);
        assert_eq!(pkt.src_port(), SRC_PORT);
        assert_eq!(pkt.dst_port(), DST_PORT);
        assert_eq!(pkt.len(), LEN);
        assert_eq!(pkt.type_(), TYPE);
        assert_eq!(pkt.op(), OP);
        assert_eq!(pkt.flags(), FLAGS);
        assert_eq!(pkt.buf_alloc(), BUF_ALLOC);
        assert_eq!(pkt.fwd_cnt(), FWD_CNT);

        // Test individual flag setting.
        let flags = pkt.flags() | 0b1000;
        pkt.set_flag(0b1000);
        assert_eq!(pkt.flags(), flags);

        // Test packet header as-slice access: the raw bytes must match what the typed
        // accessors wrote, at the documented offsets and in little-endian order.
        //

        assert_eq!(pkt.hdr().len(), VSOCK_PKT_HDR_SIZE);

        assert_eq!(
            SRC_CID,
            LittleEndian::read_u64(&pkt.hdr()[HDROFF_SRC_CID..])
        );
        assert_eq!(
            DST_CID,
            LittleEndian::read_u64(&pkt.hdr()[HDROFF_DST_CID..])
        );
        assert_eq!(
            SRC_PORT,
            LittleEndian::read_u32(&pkt.hdr()[HDROFF_SRC_PORT..])
        );
        assert_eq!(
            DST_PORT,
            LittleEndian::read_u32(&pkt.hdr()[HDROFF_DST_PORT..])
        );
        assert_eq!(LEN, LittleEndian::read_u32(&pkt.hdr()[HDROFF_LEN..]));
        assert_eq!(TYPE, LittleEndian::read_u16(&pkt.hdr()[HDROFF_TYPE..]));
        assert_eq!(OP, LittleEndian::read_u16(&pkt.hdr()[HDROFF_OP..]));
        assert_eq!(FLAGS, LittleEndian::read_u32(&pkt.hdr()[HDROFF_FLAGS..]));
        assert_eq!(
            BUF_ALLOC,
            LittleEndian::read_u32(&pkt.hdr()[HDROFF_BUF_ALLOC..])
        );
        assert_eq!(
            FWD_CNT,
            LittleEndian::read_u32(&pkt.hdr()[HDROFF_FWD_CNT..])
        );

        // Zeroing the header through the mutable slice view must be observable via the getters.
        assert_eq!(pkt.hdr_mut().len(), VSOCK_PKT_HDR_SIZE);
        for b in pkt.hdr_mut() {
            *b = 0;
        }
        assert_eq!(pkt.src_cid(), 0);
        assert_eq!(pkt.dst_cid(), 0);
        assert_eq!(pkt.src_port(), 0);
        assert_eq!(pkt.dst_port(), 0);
        assert_eq!(pkt.len(), 0);
        assert_eq!(pkt.type_(), 0);
        assert_eq!(pkt.op(), 0);
        assert_eq!(pkt.flags(), 0);
        assert_eq!(pkt.buf_alloc(), 0);
        assert_eq!(pkt.fwd_cnt(), 0);
    }

    #[test]
    fn test_packet_buf() {
        create_context!(test_ctx, handler_ctx);
        let mut pkt = VsockPacket::from_rx_virtq_head(
            &mut handler_ctx.handler.queues[0]
                .iter(&test_ctx.mem)
                .unwrap()
                .next()
                .unwrap(),
            None,
        )
        .unwrap();

        // Both buffer views expose the full descriptor length.
        assert_eq!(
            pkt.buf().unwrap().len(),
            handler_ctx.guest_rxvq.dtable[1].len.get() as usize
        );
        assert_eq!(
            pkt.buf_mut().unwrap().len(),
            handler_ctx.guest_rxvq.dtable[1].len.get() as usize
        );

        // Writes through the mutable view must be readable back through the shared view.
        for i in 0..pkt.buf().unwrap().len() {
            pkt.buf_mut().unwrap()[i] = (i % 0x100) as u8;
            assert_eq!(pkt.buf().unwrap()[i], (i % 0x100) as u8);
        }
    }
}