/*
 * QEMU TX packets abstractions
 *
 * Copyright (c) 2012 Ravello Systems LTD (http://ravellosystems.com)
 *
 * Developed by Daynix Computing LTD (http://www.daynix.com)
 *
 * Authors:
 * Dmitry Fleytman <dmitry@daynix.com>
 * Tamir Shomer <tamirs@daynix.com>
 * Yan Vugenfirer <yan@daynix.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "net_tx_pkt.h"
#include "net/eth.h"
#include "net/checksum.h"
#include "net/tap.h"
#include "net/net.h"
#include "hw/pci/pci_device.h"

enum {
    NET_TX_PKT_VHDR_FRAG = 0,
    NET_TX_PKT_L2HDR_FRAG,
    NET_TX_PKT_L3HDR_FRAG,
    NET_TX_PKT_PL_START_FRAG
};

/* TX packet private context */
struct NetTxPkt {
    PCIDevice *pci_dev;

    struct virtio_net_hdr virt_hdr;

    struct iovec *raw;
    uint32_t raw_frags;
    uint32_t max_raw_frags;

    struct iovec *vec;

    uint8_t l2_hdr[ETH_MAX_L2_HDR_LEN];
    uint8_t l3_hdr[ETH_MAX_IP_DGRAM_LEN];

    uint32_t payload_len;

    uint32_t payload_frags;
    uint32_t max_payload_frags;

    uint16_t hdr_len;
    eth_pkt_types_e packet_type;
    uint8_t l4proto;
};

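/*
 * Typical call sequence from a device model (an illustrative sketch only;
 * desc_addr, desc_len, do_tso, do_csum and mss are placeholder names, not
 * taken from any real caller):
 *
 *     net_tx_pkt_init(&pkt, pci_dev, max_frags);
 *     ...
 *     net_tx_pkt_reset(pkt);
 *     net_tx_pkt_add_raw_fragment(pkt, desc_addr, desc_len);
 *     if (net_tx_pkt_parse(pkt) &&
 *         net_tx_pkt_build_vheader(pkt, do_tso, do_csum, mss)) {
 *         net_tx_pkt_send(pkt, nc);
 *     }
 */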
void net_tx_pkt_init(struct NetTxPkt **pkt, PCIDevice *pci_dev,
    uint32_t max_frags)
{
    struct NetTxPkt *p = g_malloc0(sizeof *p);

    p->pci_dev = pci_dev;

    p->vec = g_new(struct iovec, max_frags + NET_TX_PKT_PL_START_FRAG);

    p->raw = g_new(struct iovec, max_frags);

    p->max_payload_frags = max_frags;
    p->max_raw_frags = max_frags;
    p->vec[NET_TX_PKT_VHDR_FRAG].iov_base = &p->virt_hdr;
    p->vec[NET_TX_PKT_VHDR_FRAG].iov_len = sizeof p->virt_hdr;
    p->vec[NET_TX_PKT_L2HDR_FRAG].iov_base = &p->l2_hdr;
    p->vec[NET_TX_PKT_L3HDR_FRAG].iov_base = &p->l3_hdr;

    *pkt = p;
}

void net_tx_pkt_uninit(struct NetTxPkt *pkt)
{
    if (pkt) {
        g_free(pkt->vec);
        g_free(pkt->raw);
        g_free(pkt);
    }
}

void net_tx_pkt_update_ip_hdr_checksum(struct NetTxPkt *pkt)
{
    uint16_t csum;
    assert(pkt);
    struct ip_header *ip_hdr;
    ip_hdr = pkt->vec[NET_TX_PKT_L3HDR_FRAG].iov_base;

    ip_hdr->ip_len = cpu_to_be16(pkt->payload_len +
        pkt->vec[NET_TX_PKT_L3HDR_FRAG].iov_len);

    ip_hdr->ip_sum = 0;
    csum = net_raw_checksum((uint8_t *)ip_hdr,
        pkt->vec[NET_TX_PKT_L3HDR_FRAG].iov_len);
    ip_hdr->ip_sum = cpu_to_be16(csum);
}

void net_tx_pkt_update_ip_checksums(struct NetTxPkt *pkt)
{
    uint16_t csum;
    uint32_t cntr, cso;
    assert(pkt);
    uint8_t gso_type = pkt->virt_hdr.gso_type & ~VIRTIO_NET_HDR_GSO_ECN;
    void *ip_hdr = pkt->vec[NET_TX_PKT_L3HDR_FRAG].iov_base;

    if (pkt->payload_len + pkt->vec[NET_TX_PKT_L3HDR_FRAG].iov_len >
        ETH_MAX_IP_DGRAM_LEN) {
        return;
    }

    if (gso_type == VIRTIO_NET_HDR_GSO_TCPV4 ||
        gso_type == VIRTIO_NET_HDR_GSO_UDP) {
        /* Calculate IP header checksum */
        net_tx_pkt_update_ip_hdr_checksum(pkt);

        /* Calculate IP pseudo header checksum */
        cntr = eth_calc_ip4_pseudo_hdr_csum(ip_hdr, pkt->payload_len, &cso);
        csum = cpu_to_be16(~net_checksum_finish(cntr));
    } else if (gso_type == VIRTIO_NET_HDR_GSO_TCPV6) {
        /* Calculate IP pseudo header checksum */
        cntr = eth_calc_ip6_pseudo_hdr_csum(ip_hdr, pkt->payload_len,
                                            IP_PROTO_TCP, &cso);
        csum = cpu_to_be16(~net_checksum_finish(cntr));
    } else {
        return;
    }

    iov_from_buf(&pkt->vec[NET_TX_PKT_PL_START_FRAG], pkt->payload_frags,
                 pkt->virt_hdr.csum_offset, &csum, sizeof(csum));
}

static void net_tx_pkt_calculate_hdr_len(struct NetTxPkt *pkt)
{
    pkt->hdr_len = pkt->vec[NET_TX_PKT_L2HDR_FRAG].iov_len +
                   pkt->vec[NET_TX_PKT_L3HDR_FRAG].iov_len;
}

static bool net_tx_pkt_parse_headers(struct NetTxPkt *pkt)
{
    struct iovec *l2_hdr, *l3_hdr;
    size_t bytes_read;
    size_t full_ip6hdr_len;
    uint16_t l3_proto;

    assert(pkt);

    l2_hdr = &pkt->vec[NET_TX_PKT_L2HDR_FRAG];
    l3_hdr = &pkt->vec[NET_TX_PKT_L3HDR_FRAG];

    bytes_read = iov_to_buf(pkt->raw, pkt->raw_frags, 0, l2_hdr->iov_base,
                            ETH_MAX_L2_HDR_LEN);
    if (bytes_read < sizeof(struct eth_header)) {
        l2_hdr->iov_len = 0;
        return false;
    }

    l2_hdr->iov_len = sizeof(struct eth_header);
    switch (be16_to_cpu(PKT_GET_ETH_HDR(l2_hdr->iov_base)->h_proto)) {
    case ETH_P_VLAN:
        l2_hdr->iov_len += sizeof(struct vlan_header);
        break;
    case ETH_P_DVLAN:
        l2_hdr->iov_len += 2 * sizeof(struct vlan_header);
        break;
    }

    if (bytes_read < l2_hdr->iov_len) {
        l2_hdr->iov_len = 0;
        l3_hdr->iov_len = 0;
        pkt->packet_type = ETH_PKT_UCAST;
        return false;
    } else {
        l2_hdr->iov_len = ETH_MAX_L2_HDR_LEN;
        l2_hdr->iov_len = eth_get_l2_hdr_length(l2_hdr->iov_base);
        pkt->packet_type = get_eth_packet_type(l2_hdr->iov_base);
    }

    l3_proto = eth_get_l3_proto(l2_hdr, 1, l2_hdr->iov_len);

    switch (l3_proto) {
    case ETH_P_IP:
        bytes_read = iov_to_buf(pkt->raw, pkt->raw_frags, l2_hdr->iov_len,
                                l3_hdr->iov_base, sizeof(struct ip_header));

        if (bytes_read < sizeof(struct ip_header)) {
            l3_hdr->iov_len = 0;
            return false;
        }

        l3_hdr->iov_len = IP_HDR_GET_LEN(l3_hdr->iov_base);

        if (l3_hdr->iov_len < sizeof(struct ip_header)) {
            l3_hdr->iov_len = 0;
            return false;
        }

        pkt->l4proto = IP_HDR_GET_P(l3_hdr->iov_base);

        if (IP_HDR_GET_LEN(l3_hdr->iov_base) != sizeof(struct ip_header)) {
            /* copy optional IPv4 header data if any */
            bytes_read = iov_to_buf(pkt->raw, pkt->raw_frags,
                                    l2_hdr->iov_len + sizeof(struct ip_header),
                                    l3_hdr->iov_base + sizeof(struct ip_header),
                                    l3_hdr->iov_len - sizeof(struct ip_header));
            if (bytes_read < l3_hdr->iov_len - sizeof(struct ip_header)) {
                l3_hdr->iov_len = 0;
                return false;
            }
        }

        break;

    case ETH_P_IPV6:
    {
        eth_ip6_hdr_info hdrinfo;

        if (!eth_parse_ipv6_hdr(pkt->raw, pkt->raw_frags, l2_hdr->iov_len,
                                &hdrinfo)) {
            l3_hdr->iov_len = 0;
            return false;
        }

        pkt->l4proto = hdrinfo.l4proto;
        full_ip6hdr_len = hdrinfo.full_hdr_len;

        if (full_ip6hdr_len > ETH_MAX_IP_DGRAM_LEN) {
            l3_hdr->iov_len = 0;
            return false;
        }

        bytes_read = iov_to_buf(pkt->raw, pkt->raw_frags, l2_hdr->iov_len,
                                l3_hdr->iov_base, full_ip6hdr_len);

        if (bytes_read < full_ip6hdr_len) {
            l3_hdr->iov_len = 0;
            return false;
        } else {
            l3_hdr->iov_len = full_ip6hdr_len;
        }
        break;
    }
    default:
        l3_hdr->iov_len = 0;
        break;
    }

    net_tx_pkt_calculate_hdr_len(pkt);
    return true;
}

static void net_tx_pkt_rebuild_payload(struct NetTxPkt *pkt)
{
    pkt->payload_len = iov_size(pkt->raw, pkt->raw_frags) - pkt->hdr_len;
    pkt->payload_frags = iov_copy(&pkt->vec[NET_TX_PKT_PL_START_FRAG],
                                  pkt->max_payload_frags,
                                  pkt->raw, pkt->raw_frags,
                                  pkt->hdr_len, pkt->payload_len);
}

bool net_tx_pkt_parse(struct NetTxPkt *pkt)
{
    if (net_tx_pkt_parse_headers(pkt)) {
        net_tx_pkt_rebuild_payload(pkt);
        return true;
    } else {
        return false;
    }
}

struct virtio_net_hdr *net_tx_pkt_get_vhdr(struct NetTxPkt *pkt)
{
    assert(pkt);
    return &pkt->virt_hdr;
}

static uint8_t net_tx_pkt_get_gso_type(struct NetTxPkt *pkt,
                                       bool tso_enable)
{
    uint8_t rc = VIRTIO_NET_HDR_GSO_NONE;
    uint16_t l3_proto;

    l3_proto = eth_get_l3_proto(&pkt->vec[NET_TX_PKT_L2HDR_FRAG], 1,
                                pkt->vec[NET_TX_PKT_L2HDR_FRAG].iov_len);

    if (!tso_enable) {
        goto func_exit;
    }

    rc = eth_get_gso_type(l3_proto, pkt->vec[NET_TX_PKT_L3HDR_FRAG].iov_base,
                          pkt->l4proto);

func_exit:
    return rc;
}

bool net_tx_pkt_build_vheader(struct NetTxPkt *pkt, bool tso_enable,
    bool csum_enable, uint32_t gso_size)
{
    struct tcp_hdr l4hdr;
    size_t bytes_read;
    assert(pkt);

    /* csum has to be enabled if tso is. */
    assert(csum_enable || !tso_enable);

    pkt->virt_hdr.gso_type = net_tx_pkt_get_gso_type(pkt, tso_enable);

    switch (pkt->virt_hdr.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
    case VIRTIO_NET_HDR_GSO_NONE:
        pkt->virt_hdr.hdr_len = 0;
        pkt->virt_hdr.gso_size = 0;
        break;

    case VIRTIO_NET_HDR_GSO_UDP:
        pkt->virt_hdr.gso_size = gso_size;
        pkt->virt_hdr.hdr_len = pkt->hdr_len + sizeof(struct udp_header);
        break;

    case VIRTIO_NET_HDR_GSO_TCPV4:
    case VIRTIO_NET_HDR_GSO_TCPV6:
        bytes_read = iov_to_buf(&pkt->vec[NET_TX_PKT_PL_START_FRAG],
                                pkt->payload_frags, 0, &l4hdr, sizeof(l4hdr));
        if (bytes_read < sizeof(l4hdr) ||
            l4hdr.th_off * sizeof(uint32_t) < sizeof(l4hdr)) {
            return false;
        }

        pkt->virt_hdr.hdr_len = pkt->hdr_len + l4hdr.th_off * sizeof(uint32_t);
        pkt->virt_hdr.gso_size = gso_size;
        break;

    default:
        g_assert_not_reached();
    }

    if (csum_enable) {
        switch (pkt->l4proto) {
        case IP_PROTO_TCP:
            pkt->virt_hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
            pkt->virt_hdr.csum_start = pkt->hdr_len;
            pkt->virt_hdr.csum_offset = offsetof(struct tcp_hdr, th_sum);
            break;
        case IP_PROTO_UDP:
            pkt->virt_hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
            pkt->virt_hdr.csum_start = pkt->hdr_len;
            pkt->virt_hdr.csum_offset = offsetof(struct udp_hdr, uh_sum);
            break;
        default:
            break;
        }
    }

    return true;
}

void net_tx_pkt_setup_vlan_header_ex(struct NetTxPkt *pkt,
    uint16_t vlan, uint16_t vlan_ethtype)
{
    bool is_new;
    assert(pkt);

    eth_setup_vlan_headers_ex(pkt->vec[NET_TX_PKT_L2HDR_FRAG].iov_base,
        vlan, vlan_ethtype, &is_new);

    /* update l2hdrlen */
    if (is_new) {
        pkt->hdr_len += sizeof(struct vlan_header);
        pkt->vec[NET_TX_PKT_L2HDR_FRAG].iov_len +=
            sizeof(struct vlan_header);
    }
}

bool net_tx_pkt_add_raw_fragment(struct NetTxPkt *pkt, hwaddr pa,
    size_t len)
{
    hwaddr mapped_len = 0;
    struct iovec *ventry;
    assert(pkt);

    if (pkt->raw_frags >= pkt->max_raw_frags) {
        return false;
    }

    if (!len) {
        return true;
    }

    ventry = &pkt->raw[pkt->raw_frags];
    mapped_len = len;

    ventry->iov_base = pci_dma_map(pkt->pci_dev, pa,
                                   &mapped_len, DMA_DIRECTION_TO_DEVICE);

    if ((ventry->iov_base != NULL) && (len == mapped_len)) {
        ventry->iov_len = mapped_len;
        pkt->raw_frags++;
        return true;
    } else {
        return false;
    }
}

bool net_tx_pkt_has_fragments(struct NetTxPkt *pkt)
{
    return pkt->raw_frags > 0;
}

eth_pkt_types_e net_tx_pkt_get_packet_type(struct NetTxPkt *pkt)
{
    assert(pkt);

    return pkt->packet_type;
}

size_t net_tx_pkt_get_total_len(struct NetTxPkt *pkt)
{
    assert(pkt);

    return pkt->hdr_len + pkt->payload_len;
}

void net_tx_pkt_dump(struct NetTxPkt *pkt)
{
#ifdef NET_TX_PKT_DEBUG
    assert(pkt);

    printf("TX PKT: hdr_len: %d, pkt_type: 0x%X, l2hdr_len: %lu, "
        "l3hdr_len: %lu, payload_len: %u\n", pkt->hdr_len, pkt->packet_type,
        pkt->vec[NET_TX_PKT_L2HDR_FRAG].iov_len,
        pkt->vec[NET_TX_PKT_L3HDR_FRAG].iov_len, pkt->payload_len);
#endif
}

void net_tx_pkt_reset(struct NetTxPkt *pkt)
{
    int i;

    /* no assert, as reset can be called before tx_pkt_init */
    if (!pkt) {
        return;
    }

    memset(&pkt->virt_hdr, 0, sizeof(pkt->virt_hdr));

    assert(pkt->vec);

    pkt->payload_len = 0;
    pkt->payload_frags = 0;

    if (pkt->max_raw_frags > 0) {
        assert(pkt->raw);
        for (i = 0; i < pkt->raw_frags; i++) {
            assert(pkt->raw[i].iov_base);
            pci_dma_unmap(pkt->pci_dev, pkt->raw[i].iov_base,
                          pkt->raw[i].iov_len, DMA_DIRECTION_TO_DEVICE, 0);
        }
    }
    pkt->raw_frags = 0;

    pkt->hdr_len = 0;
    pkt->l4proto = 0;
}

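/*
 * Software checksum fallback (descriptive note, not from the original
 * source): zero the L4 checksum field, accumulate the IPv4/IPv6 pseudo
 * header for 'csl' bytes, add the data starting at virt_hdr.csum_start,
 * and store the result at csum_start + csum_offset.
 */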
static void net_tx_pkt_do_sw_csum(struct NetTxPkt *pkt,
                                  struct iovec *iov, uint32_t iov_len,
                                  uint16_t csl)
{
    uint32_t csum_cntr;
    uint16_t csum = 0;
    uint32_t cso;
    /* num of iovec without vhdr */
    size_t csum_offset = pkt->virt_hdr.csum_start + pkt->virt_hdr.csum_offset;
    uint16_t l3_proto = eth_get_l3_proto(iov, 1, iov->iov_len);

    /* Put zero to checksum field */
    iov_from_buf(iov, iov_len, csum_offset, &csum, sizeof csum);

    /* Calculate L4 TCP/UDP checksum */
    csum_cntr = 0;
    cso = 0;
    /* add pseudo header to csum */
    if (l3_proto == ETH_P_IP) {
        csum_cntr = eth_calc_ip4_pseudo_hdr_csum(
            pkt->vec[NET_TX_PKT_L3HDR_FRAG].iov_base,
            csl, &cso);
    } else if (l3_proto == ETH_P_IPV6) {
        csum_cntr = eth_calc_ip6_pseudo_hdr_csum(
            pkt->vec[NET_TX_PKT_L3HDR_FRAG].iov_base,
            csl, pkt->l4proto, &cso);
    }

    /* data checksum */
    csum_cntr +=
        net_checksum_add_iov(iov, iov_len, pkt->virt_hdr.csum_start, csl, cso);

    /* Put the checksum obtained into the packet */
    csum = cpu_to_be16(net_checksum_finish_nozero(csum_cntr));
    iov_from_buf(iov, iov_len, csum_offset, &csum, sizeof csum);
}

#define NET_MAX_FRAG_SG_LIST (64)

static size_t net_tx_pkt_fetch_fragment(struct NetTxPkt *pkt,
    int *src_idx, size_t *src_offset, size_t src_len,
    struct iovec *dst, int *dst_idx)
{
    size_t fetched = 0;
    struct iovec *src = pkt->vec;

    while (fetched < src_len) {

        /* no more place in fragment iov */
        if (*dst_idx == NET_MAX_FRAG_SG_LIST) {
            break;
        }

        /* no more data in iovec */
        if (*src_idx == (pkt->payload_frags + NET_TX_PKT_PL_START_FRAG)) {
            break;
        }

        dst[*dst_idx].iov_base = src[*src_idx].iov_base + *src_offset;
        dst[*dst_idx].iov_len = MIN(src[*src_idx].iov_len - *src_offset,
                                    src_len - fetched);

        *src_offset += dst[*dst_idx].iov_len;
        fetched += dst[*dst_idx].iov_len;

        if (*src_offset == src[*src_idx].iov_len) {
            *src_offset = 0;
            (*src_idx)++;
        }

        (*dst_idx)++;
    }

    return fetched;
}

static void net_tx_pkt_sendv(
    void *opaque, const struct iovec *iov, int iov_cnt,
    const struct iovec *virt_iov, int virt_iov_cnt)
{
    NetClientState *nc = opaque;

    if (qemu_get_using_vnet_hdr(nc->peer)) {
        qemu_sendv_packet(nc, virt_iov, virt_iov_cnt);
    } else {
        qemu_sendv_packet(nc, iov, iov_cnt);
    }
}

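/*
 * TCP segmentation helpers (descriptive note, not from the original
 * source): _init() copies the TCP header, including options, out of the
 * payload iovec into its own buffer so each emitted segment carries a
 * private header copy; _fix() patches the per-segment L3 length and IPv4
 * header checksum; _advance() bumps the IPv4 ID and TCP sequence number
 * for the next segment.
 */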
static bool net_tx_pkt_tcp_fragment_init(struct NetTxPkt *pkt,
                                         struct iovec *fragment,
                                         int *pl_idx,
                                         size_t *l4hdr_len,
                                         int *src_idx,
                                         size_t *src_offset,
                                         size_t *src_len)
{
    struct iovec *l4 = fragment + NET_TX_PKT_PL_START_FRAG;
    size_t bytes_read = 0;
    struct tcp_hdr *th;

    if (!pkt->payload_frags) {
        return false;
    }

    l4->iov_len = pkt->virt_hdr.hdr_len - pkt->hdr_len;
    l4->iov_base = g_malloc(l4->iov_len);

    *src_idx = NET_TX_PKT_PL_START_FRAG;
    while (pkt->vec[*src_idx].iov_len < l4->iov_len - bytes_read) {
        memcpy((char *)l4->iov_base + bytes_read, pkt->vec[*src_idx].iov_base,
               pkt->vec[*src_idx].iov_len);

        bytes_read += pkt->vec[*src_idx].iov_len;

        (*src_idx)++;
        if (*src_idx >= pkt->payload_frags + NET_TX_PKT_PL_START_FRAG) {
            g_free(l4->iov_base);
            return false;
        }
    }

    *src_offset = l4->iov_len - bytes_read;
    memcpy((char *)l4->iov_base + bytes_read, pkt->vec[*src_idx].iov_base,
           *src_offset);

    th = l4->iov_base;
    th->th_flags &= ~(TH_FIN | TH_PUSH);

    *pl_idx = NET_TX_PKT_PL_START_FRAG + 1;
    *l4hdr_len = l4->iov_len;
    *src_len = pkt->virt_hdr.gso_size;

    return true;
}

static void net_tx_pkt_tcp_fragment_deinit(struct iovec *fragment)
{
    g_free(fragment[NET_TX_PKT_PL_START_FRAG].iov_base);
}

static void net_tx_pkt_tcp_fragment_fix(struct NetTxPkt *pkt,
                                        struct iovec *fragment,
                                        size_t fragment_len,
                                        uint8_t gso_type)
{
    struct iovec *l3hdr = fragment + NET_TX_PKT_L3HDR_FRAG;
    struct iovec *l4hdr = fragment + NET_TX_PKT_PL_START_FRAG;
    struct ip_header *ip = l3hdr->iov_base;
    struct ip6_header *ip6 = l3hdr->iov_base;
    size_t len = l3hdr->iov_len + l4hdr->iov_len + fragment_len;

    switch (gso_type) {
    case VIRTIO_NET_HDR_GSO_TCPV4:
        ip->ip_len = cpu_to_be16(len);
        eth_fix_ip4_checksum(l3hdr->iov_base, l3hdr->iov_len);
        break;

    case VIRTIO_NET_HDR_GSO_TCPV6:
        len -= sizeof(struct ip6_header);
        ip6->ip6_ctlun.ip6_un1.ip6_un1_plen = cpu_to_be16(len);
        break;
    }
}

static void net_tx_pkt_tcp_fragment_advance(struct NetTxPkt *pkt,
                                            struct iovec *fragment,
                                            size_t fragment_len,
                                            uint8_t gso_type)
{
    struct iovec *l3hdr = fragment + NET_TX_PKT_L3HDR_FRAG;
    struct iovec *l4hdr = fragment + NET_TX_PKT_PL_START_FRAG;
    struct ip_header *ip = l3hdr->iov_base;
    struct tcp_hdr *th = l4hdr->iov_base;

    if (gso_type == VIRTIO_NET_HDR_GSO_TCPV4) {
        ip->ip_id = cpu_to_be16(be16_to_cpu(ip->ip_id) + 1);
    }

    th->th_seq = cpu_to_be32(be32_to_cpu(th->th_seq) + fragment_len);
    th->th_flags &= ~TH_CWR;
}

static void net_tx_pkt_udp_fragment_init(struct NetTxPkt *pkt,
                                         int *pl_idx,
                                         size_t *l4hdr_len,
                                         int *src_idx, size_t *src_offset,
                                         size_t *src_len)
{
    *pl_idx = NET_TX_PKT_PL_START_FRAG;
    *l4hdr_len = 0;
    *src_idx = NET_TX_PKT_PL_START_FRAG;
    *src_offset = 0;
    *src_len = IP_FRAG_ALIGN_SIZE(pkt->virt_hdr.gso_size);
}

static void net_tx_pkt_udp_fragment_fix(struct NetTxPkt *pkt,
                                        struct iovec *fragment,
                                        size_t fragment_offset,
                                        size_t fragment_len)
{
    bool more_frags = fragment_offset + fragment_len < pkt->payload_len;
    uint16_t orig_flags;
    struct iovec *l3hdr = fragment + NET_TX_PKT_L3HDR_FRAG;
    struct ip_header *ip = l3hdr->iov_base;
    uint16_t frag_off_units = fragment_offset / IP_FRAG_UNIT_SIZE;
    uint16_t new_ip_off;

    assert(fragment_offset % IP_FRAG_UNIT_SIZE == 0);
    assert((frag_off_units & ~IP_OFFMASK) == 0);

    orig_flags = be16_to_cpu(ip->ip_off) & ~(IP_OFFMASK | IP_MF);
    new_ip_off = frag_off_units | orig_flags | (more_frags ? IP_MF : 0);
    ip->ip_off = cpu_to_be16(new_ip_off);
    ip->ip_len = cpu_to_be16(l3hdr->iov_len + fragment_len);

    eth_fix_ip4_checksum(l3hdr->iov_base, l3hdr->iov_len);
}

static bool net_tx_pkt_do_sw_fragmentation(struct NetTxPkt *pkt,
                                           NetTxPktCallback callback,
                                           void *context)
{
    uint8_t gso_type = pkt->virt_hdr.gso_type & ~VIRTIO_NET_HDR_GSO_ECN;

    struct iovec fragment[NET_MAX_FRAG_SG_LIST];
    size_t fragment_len;
    size_t l4hdr_len;
    size_t src_len;

    int src_idx, dst_idx, pl_idx;
    size_t src_offset;
    size_t fragment_offset = 0;
    struct virtio_net_hdr virt_hdr = {
        .flags = pkt->virt_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM ?
                 VIRTIO_NET_HDR_F_DATA_VALID : 0
    };

    /* Copy headers */
    fragment[NET_TX_PKT_VHDR_FRAG].iov_base = &virt_hdr;
    fragment[NET_TX_PKT_VHDR_FRAG].iov_len = sizeof(virt_hdr);
    fragment[NET_TX_PKT_L2HDR_FRAG] = pkt->vec[NET_TX_PKT_L2HDR_FRAG];
    fragment[NET_TX_PKT_L3HDR_FRAG] = pkt->vec[NET_TX_PKT_L3HDR_FRAG];

    switch (gso_type) {
    case VIRTIO_NET_HDR_GSO_TCPV4:
    case VIRTIO_NET_HDR_GSO_TCPV6:
        if (!net_tx_pkt_tcp_fragment_init(pkt, fragment, &pl_idx, &l4hdr_len,
                                          &src_idx, &src_offset, &src_len)) {
            return false;
        }
        break;

    case VIRTIO_NET_HDR_GSO_UDP:
        net_tx_pkt_do_sw_csum(pkt, &pkt->vec[NET_TX_PKT_L2HDR_FRAG],
                              pkt->payload_frags + NET_TX_PKT_PL_START_FRAG - 1,
                              pkt->payload_len);
        net_tx_pkt_udp_fragment_init(pkt, &pl_idx, &l4hdr_len,
                                     &src_idx, &src_offset, &src_len);
        break;

    default:
        abort();
    }

    /* Put as much data as possible and send */
    while (true) {
        dst_idx = pl_idx;
        fragment_len = net_tx_pkt_fetch_fragment(pkt,
            &src_idx, &src_offset, src_len, fragment, &dst_idx);
        if (!fragment_len) {
            break;
        }

        switch (gso_type) {
        case VIRTIO_NET_HDR_GSO_TCPV4:
        case VIRTIO_NET_HDR_GSO_TCPV6:
            net_tx_pkt_tcp_fragment_fix(pkt, fragment, fragment_len, gso_type);
            net_tx_pkt_do_sw_csum(pkt, fragment + NET_TX_PKT_L2HDR_FRAG,
                                  dst_idx - NET_TX_PKT_L2HDR_FRAG,
                                  l4hdr_len + fragment_len);
            break;

        case VIRTIO_NET_HDR_GSO_UDP:
            net_tx_pkt_udp_fragment_fix(pkt, fragment, fragment_offset,
                                        fragment_len);
            break;
        }

        callback(context,
                 fragment + NET_TX_PKT_L2HDR_FRAG, dst_idx - NET_TX_PKT_L2HDR_FRAG,
                 fragment + NET_TX_PKT_VHDR_FRAG, dst_idx - NET_TX_PKT_VHDR_FRAG);

        if (gso_type == VIRTIO_NET_HDR_GSO_TCPV4 ||
            gso_type == VIRTIO_NET_HDR_GSO_TCPV6) {
            net_tx_pkt_tcp_fragment_advance(pkt, fragment, fragment_len,
                                            gso_type);
        }

        fragment_offset += fragment_len;
    }

    if (gso_type == VIRTIO_NET_HDR_GSO_TCPV4 ||
        gso_type == VIRTIO_NET_HDR_GSO_TCPV6) {
        net_tx_pkt_tcp_fragment_deinit(fragment);
    }

    return true;
}

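/*
 * Transmission entry points (descriptive note, not from the original
 * source): net_tx_pkt_send() hands the packet to a NetClientState, while
 * net_tx_pkt_send_custom() lets the caller supply its own delivery
 * callback; both fall back to software checksumming and GSO fragmentation
 * when the virtio-net header cannot be passed through.
 */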
bool net_tx_pkt_send(struct NetTxPkt *pkt, NetClientState *nc)
{
    bool offload = qemu_get_using_vnet_hdr(nc->peer);
    return net_tx_pkt_send_custom(pkt, offload, net_tx_pkt_sendv, nc);
}

bool net_tx_pkt_send_custom(struct NetTxPkt *pkt, bool offload,
                            NetTxPktCallback callback, void *context)
{
    assert(pkt);

    /*
     * Since the underlying infrastructure does not support IP datagrams
     * longer than 64K, drop such packets and do not even try to send them.
     */
    if (VIRTIO_NET_HDR_GSO_NONE != pkt->virt_hdr.gso_type) {
        if (pkt->payload_len >
            ETH_MAX_IP_DGRAM_LEN -
            pkt->vec[NET_TX_PKT_L3HDR_FRAG].iov_len) {
            return false;
        }
    }

    if (offload || pkt->virt_hdr.gso_type == VIRTIO_NET_HDR_GSO_NONE) {
        if (!offload && pkt->virt_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
            net_tx_pkt_do_sw_csum(pkt, &pkt->vec[NET_TX_PKT_L2HDR_FRAG],
                                  pkt->payload_frags + NET_TX_PKT_PL_START_FRAG - 1,
                                  pkt->payload_len);
        }

        net_tx_pkt_fix_ip6_payload_len(pkt);
        callback(context, pkt->vec + NET_TX_PKT_L2HDR_FRAG,
                 pkt->payload_frags + NET_TX_PKT_PL_START_FRAG - NET_TX_PKT_L2HDR_FRAG,
                 pkt->vec + NET_TX_PKT_VHDR_FRAG,
                 pkt->payload_frags + NET_TX_PKT_PL_START_FRAG - NET_TX_PKT_VHDR_FRAG);
        return true;
    }

    return net_tx_pkt_do_sw_fragmentation(pkt, callback, context);
}

void net_tx_pkt_fix_ip6_payload_len(struct NetTxPkt *pkt)
{
    struct iovec *l2 = &pkt->vec[NET_TX_PKT_L2HDR_FRAG];
    if (eth_get_l3_proto(l2, 1, l2->iov_len) == ETH_P_IPV6) {
        struct ip6_header *ip6 = (struct ip6_header *) pkt->l3_hdr;
        /*
         * TODO: if QEMU ever supports >64K packets, add a jumbo option
         * check, something like:
         * 'if (ip6->ip6_plen == 0 && !has_jumbo_option(ip6)) {'
         */
        if (ip6->ip6_plen == 0) {
            if (pkt->payload_len <= ETH_MAX_IP_DGRAM_LEN) {
                ip6->ip6_plen = htons(pkt->payload_len);
            }
            /*
             * TODO: if QEMU ever supports >64K packets, add the jumbo
             * option for packets greater than 65,535 bytes
             */
        }
    }
}