/*
 * QEMU RX packets abstractions
 *
 * Copyright (c) 2012 Ravello Systems LTD (http://ravellosystems.com)
 *
 * Developed by Daynix Computing LTD (http://www.daynix.com)
 *
 * Authors:
 * Dmitry Fleytman <dmitry@daynix.com>
 * Tamir Shomer <tamirs@daynix.com>
 * Yan Vugenfirer <yan@daynix.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "trace.h"
#include "net_rx_pkt.h"
#include "net/checksum.h"
#include "net/tap.h"

struct NetRxPkt {
    struct virtio_net_hdr virt_hdr;
    uint8_t ehdr_buf[sizeof(struct eth_header) + sizeof(struct vlan_header)];
    struct iovec *vec;
    uint16_t vec_len_total;
    uint16_t vec_len;
    uint32_t tot_len;
    uint16_t tci;
    size_t ehdr_buf_len;
    eth_pkt_types_e packet_type;

    /* Analysis results */
    bool hasip4;
    bool hasip6;

    size_t l3hdr_off;
    size_t l4hdr_off;
    size_t l5hdr_off;

    eth_ip6_hdr_info ip6hdr_info;
    eth_ip4_hdr_info ip4hdr_info;
    eth_l4_hdr_info l4hdr_info;
};

void net_rx_pkt_init(struct NetRxPkt **pkt)
{
    struct NetRxPkt *p = g_malloc0(sizeof *p);
    p->vec = NULL;
    p->vec_len_total = 0;
    *pkt = p;
}

void net_rx_pkt_uninit(struct NetRxPkt *pkt)
{
    if (pkt->vec_len_total != 0) {
        g_free(pkt->vec);
    }

    g_free(pkt);
}

struct virtio_net_hdr *net_rx_pkt_get_vhdr(struct NetRxPkt *pkt)
{
    assert(pkt);
    return &pkt->virt_hdr;
}

static inline void
net_rx_pkt_iovec_realloc(struct NetRxPkt *pkt,
                         int new_iov_len)
{
    if (pkt->vec_len_total < new_iov_len) {
        g_free(pkt->vec);
        pkt->vec = g_malloc(sizeof(*pkt->vec) * new_iov_len);
        pkt->vec_len_total = new_iov_len;
    }
}

static void
net_rx_pkt_pull_data(struct NetRxPkt *pkt,
                     const struct iovec *iov, int iovcnt,
                     size_t ploff)
{
    uint32_t pllen = iov_size(iov, iovcnt) - ploff;

    if (pkt->ehdr_buf_len) {
        /* Prepend the stripped Ethernet (+ VLAN) header kept in ehdr_buf as vec[0] */
        net_rx_pkt_iovec_realloc(pkt, iovcnt + 1);

        pkt->vec[0].iov_base = pkt->ehdr_buf;
        pkt->vec[0].iov_len = pkt->ehdr_buf_len;

        pkt->tot_len = pllen + pkt->ehdr_buf_len;
        pkt->vec_len = iov_copy(pkt->vec + 1, pkt->vec_len_total - 1,
                                iov, iovcnt, ploff, pllen) + 1;
    } else {
        net_rx_pkt_iovec_realloc(pkt, iovcnt);

        pkt->tot_len = pllen;
        pkt->vec_len = iov_copy(pkt->vec, pkt->vec_len_total,
                                iov, iovcnt, ploff, pkt->tot_len);
    }

    eth_get_protocols(pkt->vec, pkt->vec_len, 0, &pkt->hasip4, &pkt->hasip6,
                      &pkt->l3hdr_off, &pkt->l4hdr_off, &pkt->l5hdr_off,
                      &pkt->ip6hdr_info, &pkt->ip4hdr_info, &pkt->l4hdr_info);

    trace_net_rx_pkt_parsed(pkt->hasip4, pkt->hasip6, pkt->l4hdr_info.proto,
                            pkt->l3hdr_off, pkt->l4hdr_off, pkt->l5hdr_off);
}

void net_rx_pkt_attach_iovec(struct NetRxPkt *pkt,
                             const struct iovec *iov, int iovcnt,
                             size_t iovoff, bool strip_vlan)
{
    uint16_t tci = 0;
    uint16_t ploff = iovoff;
    assert(pkt);

    if (strip_vlan) {
        pkt->ehdr_buf_len = eth_strip_vlan(iov, iovcnt, iovoff, pkt->ehdr_buf,
                                           &ploff, &tci);
    } else {
        pkt->ehdr_buf_len = 0;
    }

    pkt->tci = tci;

    net_rx_pkt_pull_data(pkt, iov, iovcnt, ploff);
}

void net_rx_pkt_attach_iovec_ex(struct NetRxPkt *pkt,
                                const struct iovec *iov, int iovcnt,
                                size_t iovoff, bool strip_vlan,
                                uint16_t vet)
{
    uint16_t tci = 0;
    uint16_t ploff = iovoff;
    assert(pkt);

    if (strip_vlan) {
        pkt->ehdr_buf_len = eth_strip_vlan_ex(iov, iovcnt, iovoff, vet,
                                              pkt->ehdr_buf,
                                              &ploff, &tci);
    } else {
        pkt->ehdr_buf_len = 0;
    }

    pkt->tci = tci;

    net_rx_pkt_pull_data(pkt, iov, iovcnt, ploff);
}

void net_rx_pkt_dump(struct NetRxPkt *pkt)
{
#ifdef NET_RX_PKT_DEBUG
    assert(pkt);

    printf("RX PKT: tot_len: %u, ehdr_buf_len: %zu, vlan_tag: %u\n",
           pkt->tot_len, pkt->ehdr_buf_len, pkt->tci);
#endif
}

void net_rx_pkt_set_packet_type(struct NetRxPkt *pkt,
                                eth_pkt_types_e packet_type)
{
    assert(pkt);

    pkt->packet_type = packet_type;
}

eth_pkt_types_e net_rx_pkt_get_packet_type(struct NetRxPkt *pkt)
{
    assert(pkt);

    return pkt->packet_type;
}

size_t net_rx_pkt_get_total_len(struct NetRxPkt *pkt)
{
    assert(pkt);

    return pkt->tot_len;
}

void net_rx_pkt_set_protocols(struct NetRxPkt *pkt,
                              const struct iovec *iov, size_t iovcnt,
                              size_t iovoff)
{
    assert(pkt);

    eth_get_protocols(iov, iovcnt, iovoff, &pkt->hasip4, &pkt->hasip6,
                      &pkt->l3hdr_off, &pkt->l4hdr_off, &pkt->l5hdr_off,
                      &pkt->ip6hdr_info, &pkt->ip4hdr_info, &pkt->l4hdr_info);
}

void net_rx_pkt_get_protocols(struct NetRxPkt *pkt,
                              bool *hasip4, bool *hasip6,
                              EthL4HdrProto *l4hdr_proto)
{
    assert(pkt);

    *hasip4 = pkt->hasip4;
    *hasip6 = pkt->hasip6;
    *l4hdr_proto = pkt->l4hdr_info.proto;
}

size_t net_rx_pkt_get_l3_hdr_offset(struct NetRxPkt *pkt)
{
    assert(pkt);
    return pkt->l3hdr_off;
}

size_t net_rx_pkt_get_l4_hdr_offset(struct NetRxPkt *pkt)
{
    assert(pkt);
    return pkt->l4hdr_off;
}

size_t net_rx_pkt_get_l5_hdr_offset(struct NetRxPkt *pkt)
{
    assert(pkt);
    return pkt->l5hdr_off;
}

eth_ip6_hdr_info *net_rx_pkt_get_ip6_info(struct NetRxPkt *pkt)
{
    return &pkt->ip6hdr_info;
}

eth_ip4_hdr_info *net_rx_pkt_get_ip4_info(struct NetRxPkt *pkt)
{
    return &pkt->ip4hdr_info;
}

static inline void
_net_rx_rss_add_chunk(uint8_t *rss_input, size_t *bytes_written,
                      void *ptr, size_t size)
{
    memcpy(&rss_input[*bytes_written], ptr, size);
    trace_net_rx_pkt_rss_add_chunk(ptr, size, *bytes_written);
    *bytes_written += size;
}

static inline void
_net_rx_rss_prepare_ip4(uint8_t *rss_input,
                        struct NetRxPkt *pkt,
                        size_t *bytes_written)
{
    struct ip_header *ip4_hdr = &pkt->ip4hdr_info.ip4_hdr;

    _net_rx_rss_add_chunk(rss_input, bytes_written,
                          &ip4_hdr->ip_src, sizeof(uint32_t));

    _net_rx_rss_add_chunk(rss_input, bytes_written,
                          &ip4_hdr->ip_dst, sizeof(uint32_t));
}

static inline void
_net_rx_rss_prepare_ip6(uint8_t *rss_input,
                        struct NetRxPkt *pkt,
                        bool ipv6ex, size_t *bytes_written)
{
    eth_ip6_hdr_info *ip6info = &pkt->ip6hdr_info;

    _net_rx_rss_add_chunk(rss_input, bytes_written,
                          (ipv6ex && ip6info->rss_ex_src_valid) ?
                              &ip6info->rss_ex_src :
                              &ip6info->ip6_hdr.ip6_src,
                          sizeof(struct in6_address));

    _net_rx_rss_add_chunk(rss_input, bytes_written,
                          (ipv6ex && ip6info->rss_ex_dst_valid) ?
                              &ip6info->rss_ex_dst :
                              &ip6info->ip6_hdr.ip6_dst,
                          sizeof(struct in6_address));
}

static inline void
_net_rx_rss_prepare_tcp(uint8_t *rss_input,
                        struct NetRxPkt *pkt,
                        size_t *bytes_written)
{
    struct tcp_header *tcphdr = &pkt->l4hdr_info.hdr.tcp;

    _net_rx_rss_add_chunk(rss_input, bytes_written,
                          &tcphdr->th_sport, sizeof(uint16_t));

    _net_rx_rss_add_chunk(rss_input, bytes_written,
                          &tcphdr->th_dport, sizeof(uint16_t));
}

static inline void
_net_rx_rss_prepare_udp(uint8_t *rss_input,
                        struct NetRxPkt *pkt,
                        size_t *bytes_written)
{
    struct udp_header *udphdr = &pkt->l4hdr_info.hdr.udp;

    _net_rx_rss_add_chunk(rss_input, bytes_written,
                          &udphdr->uh_sport, sizeof(uint16_t));

    _net_rx_rss_add_chunk(rss_input, bytes_written,
                          &udphdr->uh_dport, sizeof(uint16_t));
}

uint32_t
net_rx_pkt_calc_rss_hash(struct NetRxPkt *pkt,
                         NetRxPktRssType type,
                         uint8_t *key)
{
    uint8_t rss_input[36];
    size_t rss_length = 0;
    uint32_t rss_hash = 0;
    net_toeplitz_key key_data;

    switch (type) {
    case NetPktRssIpV4:
        assert(pkt->hasip4);
        trace_net_rx_pkt_rss_ip4();
        _net_rx_rss_prepare_ip4(&rss_input[0], pkt, &rss_length);
        break;
    case NetPktRssIpV4Tcp:
        assert(pkt->hasip4);
        assert(pkt->l4hdr_info.proto == ETH_L4_HDR_PROTO_TCP);
        trace_net_rx_pkt_rss_ip4_tcp();
        _net_rx_rss_prepare_ip4(&rss_input[0], pkt, &rss_length);
        _net_rx_rss_prepare_tcp(&rss_input[0], pkt, &rss_length);
        break;
    case NetPktRssIpV6Tcp:
        assert(pkt->hasip6);
        assert(pkt->l4hdr_info.proto == ETH_L4_HDR_PROTO_TCP);
        trace_net_rx_pkt_rss_ip6_tcp();
        _net_rx_rss_prepare_ip6(&rss_input[0], pkt, false, &rss_length);
        _net_rx_rss_prepare_tcp(&rss_input[0], pkt, &rss_length);
        break;
    case NetPktRssIpV6:
        assert(pkt->hasip6);
        trace_net_rx_pkt_rss_ip6();
        _net_rx_rss_prepare_ip6(&rss_input[0], pkt, false, &rss_length);
        break;
    case NetPktRssIpV6Ex:
        assert(pkt->hasip6);
        trace_net_rx_pkt_rss_ip6_ex();
        _net_rx_rss_prepare_ip6(&rss_input[0], pkt, true, &rss_length);
        break;
    case NetPktRssIpV6TcpEx:
        assert(pkt->hasip6);
        assert(pkt->l4hdr_info.proto == ETH_L4_HDR_PROTO_TCP);
        trace_net_rx_pkt_rss_ip6_ex_tcp();
        _net_rx_rss_prepare_ip6(&rss_input[0], pkt, true, &rss_length);
        _net_rx_rss_prepare_tcp(&rss_input[0], pkt, &rss_length);
        break;
    case NetPktRssIpV4Udp:
        assert(pkt->hasip4);
        assert(pkt->l4hdr_info.proto == ETH_L4_HDR_PROTO_UDP);
        trace_net_rx_pkt_rss_ip4_udp();
        _net_rx_rss_prepare_ip4(&rss_input[0], pkt, &rss_length);
        _net_rx_rss_prepare_udp(&rss_input[0], pkt, &rss_length);
        break;
    case NetPktRssIpV6Udp:
        assert(pkt->hasip6);
        assert(pkt->l4hdr_info.proto == ETH_L4_HDR_PROTO_UDP);
        trace_net_rx_pkt_rss_ip6_udp();
        _net_rx_rss_prepare_ip6(&rss_input[0], pkt, false, &rss_length);
        _net_rx_rss_prepare_udp(&rss_input[0], pkt, &rss_length);
        break;
    case NetPktRssIpV6UdpEx:
        assert(pkt->hasip6);
        assert(pkt->l4hdr_info.proto == ETH_L4_HDR_PROTO_UDP);
        trace_net_rx_pkt_rss_ip6_ex_udp();
        _net_rx_rss_prepare_ip6(&rss_input[0], pkt, true, &rss_length);
        _net_rx_rss_prepare_udp(&rss_input[0], pkt, &rss_length);
        break;
    default:
        assert(false);
        break;
    }

    net_toeplitz_key_init(&key_data, key);
    net_toeplitz_add(&rss_hash, rss_input, rss_length, &key_data);

    trace_net_rx_pkt_rss_hash(rss_length,
                              rss_hash);

    return rss_hash;
}

uint16_t net_rx_pkt_get_ip_id(struct NetRxPkt *pkt)
{
    assert(pkt);

    if (pkt->hasip4) {
        return be16_to_cpu(pkt->ip4hdr_info.ip4_hdr.ip_id);
    }

    return 0;
}

bool net_rx_pkt_is_tcp_ack(struct NetRxPkt *pkt)
{
    assert(pkt);

    if (pkt->l4hdr_info.proto == ETH_L4_HDR_PROTO_TCP) {
        return TCP_HEADER_FLAGS(&pkt->l4hdr_info.hdr.tcp) & TCP_FLAG_ACK;
    }

    return false;
}

bool net_rx_pkt_has_tcp_data(struct NetRxPkt *pkt)
{
    assert(pkt);

    if (pkt->l4hdr_info.proto == ETH_L4_HDR_PROTO_TCP) {
        return pkt->l4hdr_info.has_tcp_data;
    }

    return false;
}

struct iovec *net_rx_pkt_get_iovec(struct NetRxPkt *pkt)
{
    assert(pkt);

    return pkt->vec;
}

uint16_t net_rx_pkt_get_iovec_len(struct NetRxPkt *pkt)
{
    assert(pkt);

    return pkt->vec_len;
}

void net_rx_pkt_set_vhdr(struct NetRxPkt *pkt,
                         struct virtio_net_hdr *vhdr)
{
    assert(pkt);

    memcpy(&pkt->virt_hdr, vhdr, sizeof pkt->virt_hdr);
}

void net_rx_pkt_set_vhdr_iovec(struct NetRxPkt *pkt,
                               const struct iovec *iov, int iovcnt)
{
    assert(pkt);

    iov_to_buf(iov, iovcnt, 0, &pkt->virt_hdr, sizeof pkt->virt_hdr);
}

void net_rx_pkt_unset_vhdr(struct NetRxPkt *pkt)
{
    assert(pkt);

    memset(&pkt->virt_hdr, 0, sizeof(pkt->virt_hdr));
}

bool net_rx_pkt_is_vlan_stripped(struct NetRxPkt *pkt)
{
    assert(pkt);

    return pkt->ehdr_buf_len ? true : false;
}

uint16_t net_rx_pkt_get_vlan_tag(struct NetRxPkt *pkt)
{
    assert(pkt);

    return pkt->tci;
}

bool net_rx_pkt_validate_l3_csum(struct NetRxPkt *pkt, bool *csum_valid)
{
    uint32_t cntr;
    uint16_t csum;
    uint32_t csl;

    trace_net_rx_pkt_l3_csum_validate_entry();

    if (!pkt->hasip4) {
        trace_net_rx_pkt_l3_csum_validate_not_ip4();
        return false;
    }

    csl = pkt->l4hdr_off - pkt->l3hdr_off;

    cntr = net_checksum_add_iov(pkt->vec, pkt->vec_len,
                                pkt->l3hdr_off,
                                csl, 0);

    csum = net_checksum_finish(cntr);

    *csum_valid = (csum == 0);

    trace_net_rx_pkt_l3_csum_validate_csum(pkt->l3hdr_off, csl,
                                           cntr, csum, *csum_valid);

    return true;
}

static uint16_t
_net_rx_pkt_calc_l4_csum(struct NetRxPkt *pkt)
{
    uint32_t cntr;
    uint16_t csum;
    uint16_t csl;
    uint32_t cso;

    trace_net_rx_pkt_l4_csum_calc_entry();

    if (pkt->hasip4) {
        if (pkt->l4hdr_info.proto == ETH_L4_HDR_PROTO_UDP) {
            csl = be16_to_cpu(pkt->l4hdr_info.hdr.udp.uh_ulen);
            trace_net_rx_pkt_l4_csum_calc_ip4_udp();
        } else {
            csl = be16_to_cpu(pkt->ip4hdr_info.ip4_hdr.ip_len) -
                  IP_HDR_GET_LEN(&pkt->ip4hdr_info.ip4_hdr);
            trace_net_rx_pkt_l4_csum_calc_ip4_tcp();
        }

        cntr = eth_calc_ip4_pseudo_hdr_csum(&pkt->ip4hdr_info.ip4_hdr,
                                            csl, &cso);
        trace_net_rx_pkt_l4_csum_calc_ph_csum(cntr, csl);
    } else {
        if (pkt->l4hdr_info.proto == ETH_L4_HDR_PROTO_UDP) {
            csl = be16_to_cpu(pkt->l4hdr_info.hdr.udp.uh_ulen);
            trace_net_rx_pkt_l4_csum_calc_ip6_udp();
        } else {
            struct ip6_header *ip6hdr = &pkt->ip6hdr_info.ip6_hdr;
            size_t full_ip6hdr_len = pkt->l4hdr_off - pkt->l3hdr_off;
            size_t ip6opts_len = full_ip6hdr_len - sizeof(struct ip6_header);

            csl = be16_to_cpu(ip6hdr->ip6_ctlun.ip6_un1.ip6_un1_plen) -
                  ip6opts_len;
            trace_net_rx_pkt_l4_csum_calc_ip6_tcp();
        }

        cntr = eth_calc_ip6_pseudo_hdr_csum(&pkt->ip6hdr_info.ip6_hdr, csl,
                                            pkt->ip6hdr_info.l4proto, &cso);
        trace_net_rx_pkt_l4_csum_calc_ph_csum(cntr, csl);
    }

    cntr += net_checksum_add_iov(pkt->vec, pkt->vec_len,
                                 pkt->l4hdr_off, csl, cso);

    csum = net_checksum_finish_nozero(cntr);

    trace_net_rx_pkt_l4_csum_calc_csum(pkt->l4hdr_off, csl, cntr, csum);

    return csum;
}

bool net_rx_pkt_validate_l4_csum(struct NetRxPkt *pkt, bool *csum_valid)
{
    uint16_t csum;

    trace_net_rx_pkt_l4_csum_validate_entry();

    if (pkt->l4hdr_info.proto != ETH_L4_HDR_PROTO_TCP &&
        pkt->l4hdr_info.proto != ETH_L4_HDR_PROTO_UDP) {
        trace_net_rx_pkt_l4_csum_validate_not_xxp();
        return false;
    }

    if (pkt->l4hdr_info.proto == ETH_L4_HDR_PROTO_UDP &&
        pkt->l4hdr_info.hdr.udp.uh_sum == 0) {
        trace_net_rx_pkt_l4_csum_validate_udp_with_no_checksum();
        return false;
    }

    if (pkt->hasip4 && pkt->ip4hdr_info.fragment) {
        trace_net_rx_pkt_l4_csum_validate_ip4_fragment();
        return false;
    }

    csum = _net_rx_pkt_calc_l4_csum(pkt);

    *csum_valid = ((csum == 0) || (csum == 0xFFFF));

    trace_net_rx_pkt_l4_csum_validate_csum(*csum_valid);

    return true;
}

bool net_rx_pkt_fix_l4_csum(struct NetRxPkt *pkt)
{
    uint16_t csum = 0;
    uint32_t l4_cso;

    trace_net_rx_pkt_l4_csum_fix_entry();

    switch (pkt->l4hdr_info.proto) {
    case ETH_L4_HDR_PROTO_TCP:
        l4_cso = offsetof(struct tcp_header, th_sum);
        trace_net_rx_pkt_l4_csum_fix_tcp(l4_cso);
        break;

    case ETH_L4_HDR_PROTO_UDP:
        if (pkt->l4hdr_info.hdr.udp.uh_sum == 0) {
            trace_net_rx_pkt_l4_csum_fix_udp_with_no_checksum();
            return false;
        }
        l4_cso = offsetof(struct udp_header, uh_sum);
        trace_net_rx_pkt_l4_csum_fix_udp(l4_cso);
        break;

    default:
        trace_net_rx_pkt_l4_csum_fix_not_xxp();
        return false;
    }

    if (pkt->hasip4 && pkt->ip4hdr_info.fragment) {
        trace_net_rx_pkt_l4_csum_fix_ip4_fragment();
        return false;
    }

    /* Set zero to checksum word */
    iov_from_buf(pkt->vec, pkt->vec_len,
                 pkt->l4hdr_off + l4_cso,
                 &csum, sizeof(csum));

    /* Calculate L4 checksum */
    csum = cpu_to_be16(_net_rx_pkt_calc_l4_csum(pkt));

    /* Set calculated checksum to checksum word */
    iov_from_buf(pkt->vec, pkt->vec_len,
                 pkt->l4hdr_off + l4_cso,
                 &csum, sizeof(csum));

    trace_net_rx_pkt_l4_csum_fix_csum(pkt->l4hdr_off + l4_cso, csum);

    return true;
}