/*
 * QEMU TX packets abstractions
 *
 * Copyright (c) 2012 Ravello Systems LTD (http://ravellosystems.com)
 *
 * Developed by Daynix Computing LTD (http://www.daynix.com)
 *
 * Authors:
 * Dmitry Fleytman <dmitry@daynix.com>
 * Tamir Shomer <tamirs@daynix.com>
 * Yan Vugenfirer <yan@daynix.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "net_tx_pkt.h"
#include "net/eth.h"
#include "net/checksum.h"
#include "net/tap.h"
#include "net/net.h"
#include "hw/pci/pci_device.h"

enum {
    NET_TX_PKT_VHDR_FRAG = 0,
    NET_TX_PKT_L2HDR_FRAG,
    NET_TX_PKT_L3HDR_FRAG,
    NET_TX_PKT_PL_START_FRAG
};

/* TX packet private context */
struct NetTxPkt {
    PCIDevice *pci_dev;

    struct virtio_net_hdr virt_hdr;

    struct iovec *raw;
    uint32_t raw_frags;
    uint32_t max_raw_frags;

    struct iovec *vec;

    uint8_t l2_hdr[ETH_MAX_L2_HDR_LEN];
    uint8_t l3_hdr[ETH_MAX_IP_DGRAM_LEN];

    uint32_t payload_len;

    uint32_t payload_frags;
    uint32_t max_payload_frags;

    uint16_t hdr_len;
    eth_pkt_types_e packet_type;
    uint8_t l4proto;
};

void net_tx_pkt_init(struct NetTxPkt **pkt, PCIDevice *pci_dev,
    uint32_t max_frags)
{
    struct NetTxPkt *p = g_malloc0(sizeof *p);

    p->pci_dev = pci_dev;

    p->vec = g_new(struct iovec, max_frags + NET_TX_PKT_PL_START_FRAG);

    p->raw = g_new(struct iovec, max_frags);

    p->max_payload_frags = max_frags;
    p->max_raw_frags = max_frags;
    p->vec[NET_TX_PKT_VHDR_FRAG].iov_base = &p->virt_hdr;
    p->vec[NET_TX_PKT_VHDR_FRAG].iov_len = sizeof p->virt_hdr;
    p->vec[NET_TX_PKT_L2HDR_FRAG].iov_base = &p->l2_hdr;
    p->vec[NET_TX_PKT_L3HDR_FRAG].iov_base = &p->l3_hdr;

    *pkt = p;
}

void net_tx_pkt_uninit(struct NetTxPkt *pkt)
{
    if (pkt) {
        g_free(pkt->vec);
        g_free(pkt->raw);
        g_free(pkt);
    }
}

void net_tx_pkt_update_ip_hdr_checksum(struct NetTxPkt *pkt)
{
    uint16_t csum;
    struct ip_header *ip_hdr;

    assert(pkt);

    ip_hdr = pkt->vec[NET_TX_PKT_L3HDR_FRAG].iov_base;

    ip_hdr->ip_len = cpu_to_be16(pkt->payload_len +
        pkt->vec[NET_TX_PKT_L3HDR_FRAG].iov_len);

    ip_hdr->ip_sum = 0;
    csum = net_raw_checksum((uint8_t *)ip_hdr,
        pkt->vec[NET_TX_PKT_L3HDR_FRAG].iov_len);
    ip_hdr->ip_sum = cpu_to_be16(csum);
}

void net_tx_pkt_update_ip_checksums(struct NetTxPkt *pkt)
{
    uint16_t csum;
    uint32_t cntr, cso;
    uint8_t gso_type;
    void *ip_hdr;

    assert(pkt);

    gso_type = pkt->virt_hdr.gso_type & ~VIRTIO_NET_HDR_GSO_ECN;
    ip_hdr = pkt->vec[NET_TX_PKT_L3HDR_FRAG].iov_base;

    if (pkt->payload_len + pkt->vec[NET_TX_PKT_L3HDR_FRAG].iov_len >
        ETH_MAX_IP_DGRAM_LEN) {
        return;
    }

    if (gso_type == VIRTIO_NET_HDR_GSO_TCPV4 ||
        gso_type == VIRTIO_NET_HDR_GSO_UDP) {
        /* Calculate IP header checksum */
        net_tx_pkt_update_ip_hdr_checksum(pkt);

        /* Calculate IP pseudo header checksum */
        cntr = eth_calc_ip4_pseudo_hdr_csum(ip_hdr, pkt->payload_len, &cso);
        csum = cpu_to_be16(~net_checksum_finish(cntr));
    } else if (gso_type == VIRTIO_NET_HDR_GSO_TCPV6) {
        /* Calculate IP pseudo header checksum */
        cntr = eth_calc_ip6_pseudo_hdr_csum(ip_hdr, pkt->payload_len,
            IP_PROTO_TCP, &cso);
        csum = cpu_to_be16(~net_checksum_finish(cntr));
    } else {
        return;
    }

    iov_from_buf(&pkt->vec[NET_TX_PKT_PL_START_FRAG], pkt->payload_frags,
                 pkt->virt_hdr.csum_offset, &csum, sizeof(csum));
}
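/*
 * The checksum written at csum_offset above is not the final L4 checksum
 * but the complemented pseudo header sum, i.e. the seed that a
 * VIRTIO_NET_HDR_F_NEEDS_CSUM consumer completes over the L4 payload.
 * A minimal equivalent for a contiguous IPv4/TCP buffer might look as
 * follows (sketch only; "tcp" is a hypothetical pointer to the TCP
 * header inside that buffer):
 *
 *     uint32_t cntr = eth_calc_ip4_pseudo_hdr_csum(ip_hdr, payload_len,
 *                                                  &cso);
 *     tcp->th_sum = cpu_to_be16(~net_checksum_finish(cntr));
 */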
static void net_tx_pkt_calculate_hdr_len(struct NetTxPkt *pkt)
{
    pkt->hdr_len = pkt->vec[NET_TX_PKT_L2HDR_FRAG].iov_len +
        pkt->vec[NET_TX_PKT_L3HDR_FRAG].iov_len;
}

static bool net_tx_pkt_parse_headers(struct NetTxPkt *pkt)
{
    struct iovec *l2_hdr, *l3_hdr;
    size_t bytes_read;
    size_t full_ip6hdr_len;
    uint16_t l3_proto;

    assert(pkt);

    l2_hdr = &pkt->vec[NET_TX_PKT_L2HDR_FRAG];
    l3_hdr = &pkt->vec[NET_TX_PKT_L3HDR_FRAG];

    bytes_read = iov_to_buf(pkt->raw, pkt->raw_frags, 0, l2_hdr->iov_base,
                            ETH_MAX_L2_HDR_LEN);
    if (bytes_read < sizeof(struct eth_header)) {
        l2_hdr->iov_len = 0;
        return false;
    }

    l2_hdr->iov_len = sizeof(struct eth_header);
    switch (be16_to_cpu(PKT_GET_ETH_HDR(l2_hdr->iov_base)->h_proto)) {
    case ETH_P_VLAN:
        l2_hdr->iov_len += sizeof(struct vlan_header);
        break;
    case ETH_P_DVLAN:
        l2_hdr->iov_len += 2 * sizeof(struct vlan_header);
        break;
    }

    if (bytes_read < l2_hdr->iov_len) {
        l2_hdr->iov_len = 0;
        l3_hdr->iov_len = 0;
        pkt->packet_type = ETH_PKT_UCAST;
        return false;
    } else {
        l2_hdr->iov_len = eth_get_l2_hdr_length(l2_hdr->iov_base);
        pkt->packet_type = get_eth_packet_type(l2_hdr->iov_base);
    }

    l3_proto = eth_get_l3_proto(l2_hdr, 1, l2_hdr->iov_len);

    switch (l3_proto) {
    case ETH_P_IP:
        bytes_read = iov_to_buf(pkt->raw, pkt->raw_frags, l2_hdr->iov_len,
                                l3_hdr->iov_base, sizeof(struct ip_header));

        if (bytes_read < sizeof(struct ip_header)) {
            l3_hdr->iov_len = 0;
            return false;
        }

        l3_hdr->iov_len = IP_HDR_GET_LEN(l3_hdr->iov_base);

        if (l3_hdr->iov_len < sizeof(struct ip_header)) {
            l3_hdr->iov_len = 0;
            return false;
        }

        pkt->l4proto = IP_HDR_GET_P(l3_hdr->iov_base);

        if (IP_HDR_GET_LEN(l3_hdr->iov_base) != sizeof(struct ip_header)) {
            /* copy optional IPv4 header data, if any */
            bytes_read = iov_to_buf(pkt->raw, pkt->raw_frags,
                                    l2_hdr->iov_len + sizeof(struct ip_header),
                                    l3_hdr->iov_base + sizeof(struct ip_header),
                                    l3_hdr->iov_len - sizeof(struct ip_header));
            if (bytes_read < l3_hdr->iov_len - sizeof(struct ip_header)) {
                l3_hdr->iov_len = 0;
                return false;
            }
        }

        break;

    case ETH_P_IPV6:
    {
        eth_ip6_hdr_info hdrinfo;

        if (!eth_parse_ipv6_hdr(pkt->raw, pkt->raw_frags, l2_hdr->iov_len,
                                &hdrinfo)) {
            l3_hdr->iov_len = 0;
            return false;
        }

        pkt->l4proto = hdrinfo.l4proto;
        full_ip6hdr_len = hdrinfo.full_hdr_len;

        if (full_ip6hdr_len > ETH_MAX_IP_DGRAM_LEN) {
            l3_hdr->iov_len = 0;
            return false;
        }

        bytes_read = iov_to_buf(pkt->raw, pkt->raw_frags, l2_hdr->iov_len,
                                l3_hdr->iov_base, full_ip6hdr_len);

        if (bytes_read < full_ip6hdr_len) {
            l3_hdr->iov_len = 0;
            return false;
        } else {
            l3_hdr->iov_len = full_ip6hdr_len;
        }
        break;
    }
    default:
        l3_hdr->iov_len = 0;
        break;
    }

    net_tx_pkt_calculate_hdr_len(pkt);
    return true;
}

static void net_tx_pkt_rebuild_payload(struct NetTxPkt *pkt)
{
    pkt->payload_len = iov_size(pkt->raw, pkt->raw_frags) - pkt->hdr_len;
    pkt->payload_frags = iov_copy(&pkt->vec[NET_TX_PKT_PL_START_FRAG],
                                  pkt->max_payload_frags,
                                  pkt->raw, pkt->raw_frags,
                                  pkt->hdr_len, pkt->payload_len);
}
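/*
 * After net_tx_pkt_parse() succeeds, pkt->vec[] is laid out as below
 * (a summary of the frag enum at the top of this file, for reference):
 *
 *     vec[NET_TX_PKT_VHDR_FRAG]       -> pkt->virt_hdr
 *     vec[NET_TX_PKT_L2HDR_FRAG]      -> pkt->l2_hdr (Ethernet + VLAN tags)
 *     vec[NET_TX_PKT_L3HDR_FRAG]      -> pkt->l3_hdr (IPv4/IPv6 + options)
 *     vec[NET_TX_PKT_PL_START_FRAG..] -> payload slices of pkt->raw
 */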
bool net_tx_pkt_parse(struct NetTxPkt *pkt)
{
    if (net_tx_pkt_parse_headers(pkt)) {
        net_tx_pkt_rebuild_payload(pkt);
        return true;
    } else {
        return false;
    }
}

struct virtio_net_hdr *net_tx_pkt_get_vhdr(struct NetTxPkt *pkt)
{
    assert(pkt);
    return &pkt->virt_hdr;
}

static uint8_t net_tx_pkt_get_gso_type(struct NetTxPkt *pkt,
                                       bool tso_enable)
{
    uint8_t rc = VIRTIO_NET_HDR_GSO_NONE;
    uint16_t l3_proto;

    l3_proto = eth_get_l3_proto(&pkt->vec[NET_TX_PKT_L2HDR_FRAG], 1,
        pkt->vec[NET_TX_PKT_L2HDR_FRAG].iov_len);

    if (!tso_enable) {
        goto func_exit;
    }

    rc = eth_get_gso_type(l3_proto, pkt->vec[NET_TX_PKT_L3HDR_FRAG].iov_base,
                          pkt->l4proto);

func_exit:
    return rc;
}

bool net_tx_pkt_build_vheader(struct NetTxPkt *pkt, bool tso_enable,
    bool csum_enable, uint32_t gso_size)
{
    struct tcp_hdr l4hdr;
    size_t bytes_read;
    assert(pkt);

    /* csum has to be enabled if tso is. */
    assert(csum_enable || !tso_enable);

    pkt->virt_hdr.gso_type = net_tx_pkt_get_gso_type(pkt, tso_enable);

    switch (pkt->virt_hdr.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
    case VIRTIO_NET_HDR_GSO_NONE:
        pkt->virt_hdr.hdr_len = 0;
        pkt->virt_hdr.gso_size = 0;
        break;

    case VIRTIO_NET_HDR_GSO_UDP:
        pkt->virt_hdr.gso_size = gso_size;
        pkt->virt_hdr.hdr_len = pkt->hdr_len + sizeof(struct udp_header);
        break;

    case VIRTIO_NET_HDR_GSO_TCPV4:
    case VIRTIO_NET_HDR_GSO_TCPV6:
        bytes_read = iov_to_buf(&pkt->vec[NET_TX_PKT_PL_START_FRAG],
                                pkt->payload_frags, 0, &l4hdr, sizeof(l4hdr));
        if (bytes_read < sizeof(l4hdr)) {
            return false;
        }

        pkt->virt_hdr.hdr_len = pkt->hdr_len + l4hdr.th_off * sizeof(uint32_t);
        pkt->virt_hdr.gso_size = gso_size;
        break;

    default:
        g_assert_not_reached();
    }

    if (csum_enable) {
        switch (pkt->l4proto) {
        case IP_PROTO_TCP:
            pkt->virt_hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
            pkt->virt_hdr.csum_start = pkt->hdr_len;
            pkt->virt_hdr.csum_offset = offsetof(struct tcp_hdr, th_sum);
            break;
        case IP_PROTO_UDP:
            pkt->virt_hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
            pkt->virt_hdr.csum_start = pkt->hdr_len;
            pkt->virt_hdr.csum_offset = offsetof(struct udp_hdr, uh_sum);
            break;
        default:
            break;
        }
    }

    return true;
}

void net_tx_pkt_setup_vlan_header_ex(struct NetTxPkt *pkt,
    uint16_t vlan, uint16_t vlan_ethtype)
{
    bool is_new;
    assert(pkt);

    eth_setup_vlan_headers_ex(pkt->vec[NET_TX_PKT_L2HDR_FRAG].iov_base,
        vlan, vlan_ethtype, &is_new);

    /* update l2hdrlen */
    if (is_new) {
        pkt->hdr_len += sizeof(struct vlan_header);
        pkt->vec[NET_TX_PKT_L2HDR_FRAG].iov_len +=
            sizeof(struct vlan_header);
    }
}

bool net_tx_pkt_add_raw_fragment(struct NetTxPkt *pkt, hwaddr pa,
    size_t len)
{
    hwaddr mapped_len = 0;
    struct iovec *ventry;
    assert(pkt);

    if (pkt->raw_frags >= pkt->max_raw_frags) {
        return false;
    }

    if (!len) {
        return true;
    }

    ventry = &pkt->raw[pkt->raw_frags];
    mapped_len = len;

    ventry->iov_base = pci_dma_map(pkt->pci_dev, pa,
                                   &mapped_len, DMA_DIRECTION_TO_DEVICE);

    if ((ventry->iov_base != NULL) && (len == mapped_len)) {
        ventry->iov_len = mapped_len;
        pkt->raw_frags++;
        return true;
    } else {
        return false;
    }
}

bool net_tx_pkt_has_fragments(struct NetTxPkt *pkt)
{
    return pkt->raw_frags > 0;
}
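/*
 * Typical per-packet call sequence for a device model using this API
 * (a sketch, not taken from a particular device; the in-tree users such
 * as e1000e and vmxnet3 differ in details):
 *
 *     net_tx_pkt_init(&pkt, pci_dev, max_frags);
 *     ...
 *     net_tx_pkt_reset(pkt);
 *     while (more_tx_buffers) {                  // hypothetical condition
 *         net_tx_pkt_add_raw_fragment(pkt, buf_pa, buf_len);
 *     }
 *     if (net_tx_pkt_parse(pkt)) {
 *         net_tx_pkt_build_vheader(pkt, tso, csum, mss);
 *         net_tx_pkt_send(pkt, nc);
 *     }
 *     net_tx_pkt_reset(pkt);                     // unmaps the DMA buffers
 *     ...
 *     net_tx_pkt_uninit(pkt);
 */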
eth_pkt_types_e net_tx_pkt_get_packet_type(struct NetTxPkt *pkt)
{
    assert(pkt);

    return pkt->packet_type;
}

size_t net_tx_pkt_get_total_len(struct NetTxPkt *pkt)
{
    assert(pkt);

    return pkt->hdr_len + pkt->payload_len;
}

void net_tx_pkt_dump(struct NetTxPkt *pkt)
{
#ifdef NET_TX_PKT_DEBUG
    assert(pkt);

    printf("TX PKT: hdr_len: %d, pkt_type: 0x%X, l2hdr_len: %zu, "
        "l3hdr_len: %zu, payload_len: %u\n", pkt->hdr_len, pkt->packet_type,
        pkt->vec[NET_TX_PKT_L2HDR_FRAG].iov_len,
        pkt->vec[NET_TX_PKT_L3HDR_FRAG].iov_len, pkt->payload_len);
#endif
}

void net_tx_pkt_reset(struct NetTxPkt *pkt)
{
    int i;

    /* no assert, as reset can be called before tx_pkt_init */
    if (!pkt) {
        return;
    }

    memset(&pkt->virt_hdr, 0, sizeof(pkt->virt_hdr));

    assert(pkt->vec);

    pkt->payload_len = 0;
    pkt->payload_frags = 0;

    if (pkt->max_raw_frags > 0) {
        assert(pkt->raw);
        for (i = 0; i < pkt->raw_frags; i++) {
            assert(pkt->raw[i].iov_base);
            pci_dma_unmap(pkt->pci_dev, pkt->raw[i].iov_base,
                          pkt->raw[i].iov_len, DMA_DIRECTION_TO_DEVICE, 0);
        }
    }
    pkt->raw_frags = 0;

    pkt->hdr_len = 0;
    pkt->l4proto = 0;
}

static void net_tx_pkt_do_sw_csum(struct NetTxPkt *pkt)
{
    struct iovec *iov = &pkt->vec[NET_TX_PKT_L2HDR_FRAG];
    uint32_t csum_cntr;
    uint16_t csum = 0;
    uint32_t cso;
    /* num of iovec without vhdr */
    uint32_t iov_len = pkt->payload_frags + NET_TX_PKT_PL_START_FRAG - 1;
    uint16_t csl;
    size_t csum_offset = pkt->virt_hdr.csum_start + pkt->virt_hdr.csum_offset;
    uint16_t l3_proto = eth_get_l3_proto(iov, 1, iov->iov_len);

    /* Zero the checksum field */
    iov_from_buf(iov, iov_len, csum_offset, &csum, sizeof csum);

    /* Calculate L4 TCP/UDP checksum */
    csl = pkt->payload_len;

    csum_cntr = 0;
    cso = 0;
    /* add the pseudo header to the checksum */
    if (l3_proto == ETH_P_IP) {
        csum_cntr = eth_calc_ip4_pseudo_hdr_csum(
            pkt->vec[NET_TX_PKT_L3HDR_FRAG].iov_base,
            csl, &cso);
    } else if (l3_proto == ETH_P_IPV6) {
        csum_cntr = eth_calc_ip6_pseudo_hdr_csum(
            pkt->vec[NET_TX_PKT_L3HDR_FRAG].iov_base,
            csl, pkt->l4proto, &cso);
    }

    /* data checksum */
    csum_cntr +=
        net_checksum_add_iov(iov, iov_len, pkt->virt_hdr.csum_start, csl, cso);

    /* Put the checksum obtained into the packet */
    csum = cpu_to_be16(net_checksum_finish_nozero(csum_cntr));
    iov_from_buf(iov, iov_len, csum_offset, &csum, sizeof csum);
}

#define NET_MAX_FRAG_SG_LIST (64)

static size_t net_tx_pkt_fetch_fragment(struct NetTxPkt *pkt,
    int *src_idx, size_t *src_offset, struct iovec *dst, int *dst_idx)
{
    size_t fetched = 0;
    struct iovec *src = pkt->vec;

    *dst_idx = NET_TX_PKT_PL_START_FRAG;

    while (fetched < IP_FRAG_ALIGN_SIZE(pkt->virt_hdr.gso_size)) {

        /* no more room in the fragment iov */
        if (*dst_idx == NET_MAX_FRAG_SG_LIST) {
            break;
        }

        /* no more data in the source iovec */
        if (*src_idx == (pkt->payload_frags + NET_TX_PKT_PL_START_FRAG)) {
            break;
        }

        dst[*dst_idx].iov_base = src[*src_idx].iov_base + *src_offset;
        dst[*dst_idx].iov_len = MIN(src[*src_idx].iov_len - *src_offset,
            IP_FRAG_ALIGN_SIZE(pkt->virt_hdr.gso_size) - fetched);

        *src_offset += dst[*dst_idx].iov_len;
        fetched += dst[*dst_idx].iov_len;

        if (*src_offset == src[*src_idx].iov_len) {
            *src_offset = 0;
            (*src_idx)++;
        }

        (*dst_idx)++;
    }

    return fetched;
}
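/*
 * Worked example for net_tx_pkt_fetch_fragment() (assumed numbers, for
 * illustration only): with IP_FRAG_ALIGN_SIZE(gso_size) == 1400 and
 * payload fragments of 1000 and 2000 bytes, successive calls gather
 *
 *     call 1: 1000 bytes of frag A + 400 of frag B  (fetched == 1400)
 *     call 2: next 1400 bytes of frag B             (fetched == 1400)
 *     call 3: last 200 bytes of frag B              (fetched == 200)
 *
 * src_idx/src_offset persist across calls, which lets the do/while loop
 * in net_tx_pkt_do_sw_fragmentation() below walk the payload exactly once.
 */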
static void net_tx_pkt_sendv(
    void *opaque, const struct iovec *iov, int iov_cnt,
    const struct iovec *virt_iov, int virt_iov_cnt)
{
    NetClientState *nc = opaque;

    if (qemu_get_using_vnet_hdr(nc->peer)) {
        qemu_sendv_packet(nc, virt_iov, virt_iov_cnt);
    } else {
        qemu_sendv_packet(nc, iov, iov_cnt);
    }
}

static bool net_tx_pkt_do_sw_fragmentation(struct NetTxPkt *pkt,
                                           NetTxPktCallback callback,
                                           void *context)
{
    struct iovec fragment[NET_MAX_FRAG_SG_LIST];
    size_t fragment_len = 0;
    bool more_frags = false;

    /* some pointers for shorter code */
    void *l2_iov_base, *l3_iov_base;
    size_t l2_iov_len, l3_iov_len;
    int src_idx = NET_TX_PKT_PL_START_FRAG, dst_idx;
    size_t src_offset = 0;
    size_t fragment_offset = 0;
    struct virtio_net_hdr virt_hdr = {
        .flags = pkt->virt_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM ?
            VIRTIO_NET_HDR_F_DATA_VALID : 0
    };

    l2_iov_base = pkt->vec[NET_TX_PKT_L2HDR_FRAG].iov_base;
    l2_iov_len = pkt->vec[NET_TX_PKT_L2HDR_FRAG].iov_len;
    l3_iov_base = pkt->vec[NET_TX_PKT_L3HDR_FRAG].iov_base;
    l3_iov_len = pkt->vec[NET_TX_PKT_L3HDR_FRAG].iov_len;

    /* Copy headers */
    fragment[NET_TX_PKT_VHDR_FRAG].iov_base = &virt_hdr;
    fragment[NET_TX_PKT_VHDR_FRAG].iov_len = sizeof(virt_hdr);
    fragment[NET_TX_PKT_L2HDR_FRAG].iov_base = l2_iov_base;
    fragment[NET_TX_PKT_L2HDR_FRAG].iov_len = l2_iov_len;
    fragment[NET_TX_PKT_L3HDR_FRAG].iov_base = l3_iov_base;
    fragment[NET_TX_PKT_L3HDR_FRAG].iov_len = l3_iov_len;

    /* Put as much data as possible and send */
    do {
        fragment_len = net_tx_pkt_fetch_fragment(pkt, &src_idx, &src_offset,
            fragment, &dst_idx);

        more_frags = (fragment_offset + fragment_len < pkt->payload_len);

        eth_setup_ip4_fragmentation(l2_iov_base, l2_iov_len, l3_iov_base,
            l3_iov_len, fragment_len, fragment_offset, more_frags);

        eth_fix_ip4_checksum(l3_iov_base, l3_iov_len);

        callback(context,
            fragment + NET_TX_PKT_L2HDR_FRAG, dst_idx - NET_TX_PKT_L2HDR_FRAG,
            fragment + NET_TX_PKT_VHDR_FRAG, dst_idx - NET_TX_PKT_VHDR_FRAG);

        fragment_offset += fragment_len;

    } while (fragment_len && more_frags);

    return true;
}

bool net_tx_pkt_send(struct NetTxPkt *pkt, NetClientState *nc)
{
    bool offload = qemu_get_using_vnet_hdr(nc->peer);
    return net_tx_pkt_send_custom(pkt, offload, net_tx_pkt_sendv, nc);
}

bool net_tx_pkt_send_custom(struct NetTxPkt *pkt, bool offload,
                            NetTxPktCallback callback, void *context)
{
    assert(pkt);

    if (!offload && pkt->virt_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
        net_tx_pkt_do_sw_csum(pkt);
    }

    /*
     * Since the underlying infrastructure does not support IP datagrams
     * longer than 64K, drop such packets and do not even try to send them.
     */
    if (VIRTIO_NET_HDR_GSO_NONE != pkt->virt_hdr.gso_type) {
        if (pkt->payload_len >
            ETH_MAX_IP_DGRAM_LEN -
            pkt->vec[NET_TX_PKT_L3HDR_FRAG].iov_len) {
            return false;
        }
    }

    if (offload || pkt->virt_hdr.gso_type == VIRTIO_NET_HDR_GSO_NONE) {
        net_tx_pkt_fix_ip6_payload_len(pkt);
        callback(context, pkt->vec + NET_TX_PKT_L2HDR_FRAG,
            pkt->payload_frags + NET_TX_PKT_PL_START_FRAG -
            NET_TX_PKT_L2HDR_FRAG,
            pkt->vec + NET_TX_PKT_VHDR_FRAG,
            pkt->payload_frags + NET_TX_PKT_PL_START_FRAG -
            NET_TX_PKT_VHDR_FRAG);
        return true;
    }

    return net_tx_pkt_do_sw_fragmentation(pkt, callback, context);
}
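/*
 * Summary of the paths through net_tx_pkt_send_custom() above:
 *
 *     !offload && NEEDS_CSUM          -> checksum in software first
 *     GSO packet over 64K IP datagram -> dropped (returns false)
 *     offload || GSO_NONE             -> single send, vnet header intact
 *     otherwise                       -> software IPv4 fragmentation
 */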
void net_tx_pkt_fix_ip6_payload_len(struct NetTxPkt *pkt)
{
    struct iovec *l2 = &pkt->vec[NET_TX_PKT_L2HDR_FRAG];
    if (eth_get_l3_proto(l2, 1, l2->iov_len) == ETH_P_IPV6) {
        struct ip6_header *ip6 = (struct ip6_header *) pkt->l3_hdr;
        /*
         * TODO: if QEMU ever supports >64K packets, add a jumbo option
         * check, along the lines of:
         * 'if (ip6->ip6_plen == 0 && !has_jumbo_option(ip6)) {'
         */
        if (ip6->ip6_plen == 0) {
            if (pkt->payload_len <= ETH_MAX_IP_DGRAM_LEN) {
                ip6->ip6_plen = htons(pkt->payload_len);
            }
            /*
             * TODO: if QEMU ever supports >64K packets, add the jumbo
             * option for packets greater than 65,535 bytes.
             */
        }
    }
}
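/*
 * Note on the ip6_plen == 0 case above: RFC 2675 requires a zero Payload
 * Length for jumbograms, where the real length travels in a hop-by-hop
 * Jumbo Payload option. Since QEMU caps IP datagrams at 64K, patching in
 * pkt->payload_len (e.g. htons(9000) for an assumed 9000-byte payload)
 * is sufficient here; anything larger is left for future jumbo support.
 */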