/*
 * QEMU TX packets abstractions
 *
 * Copyright (c) 2012 Ravello Systems LTD (http://ravellosystems.com)
 *
 * Developed by Daynix Computing LTD (http://www.daynix.com)
 *
 * Authors:
 * Dmitry Fleytman <dmitry@daynix.com>
 * Tamir Shomer <tamirs@daynix.com>
 * Yan Vugenfirer <yan@daynix.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "net_tx_pkt.h"
#include "net/eth.h"
#include "net/checksum.h"
#include "net/tap.h"
#include "net/net.h"
#include "hw/pci/pci.h"

enum {
    NET_TX_PKT_VHDR_FRAG = 0,
    NET_TX_PKT_L2HDR_FRAG,
    NET_TX_PKT_L3HDR_FRAG,
    NET_TX_PKT_PL_START_FRAG
};

/* TX packet private context */
struct NetTxPkt {
    PCIDevice *pci_dev;

    struct virtio_net_hdr virt_hdr;
    bool has_virt_hdr;

    struct iovec *raw;
    uint32_t raw_frags;
    uint32_t max_raw_frags;

    struct iovec *vec;

    uint8_t l2_hdr[ETH_MAX_L2_HDR_LEN];
    uint8_t l3_hdr[ETH_MAX_IP_DGRAM_LEN];

    uint32_t payload_len;

    uint32_t payload_frags;
    uint32_t max_payload_frags;

    uint16_t hdr_len;
    eth_pkt_types_e packet_type;
    uint8_t l4proto;

    bool is_loopback;
};

void net_tx_pkt_init(struct NetTxPkt **pkt, PCIDevice *pci_dev,
    uint32_t max_frags, bool has_virt_hdr)
{
    struct NetTxPkt *p = g_malloc0(sizeof *p);

    p->pci_dev = pci_dev;

    p->vec = g_new(struct iovec, max_frags + NET_TX_PKT_PL_START_FRAG);

    p->raw = g_new(struct iovec, max_frags);

    p->max_payload_frags = max_frags;
    p->max_raw_frags = max_frags;
    p->has_virt_hdr = has_virt_hdr;
    p->vec[NET_TX_PKT_VHDR_FRAG].iov_base = &p->virt_hdr;
    p->vec[NET_TX_PKT_VHDR_FRAG].iov_len =
        p->has_virt_hdr ? sizeof p->virt_hdr : 0;
    p->vec[NET_TX_PKT_L2HDR_FRAG].iov_base = &p->l2_hdr;
    p->vec[NET_TX_PKT_L3HDR_FRAG].iov_base = &p->l3_hdr;

    *pkt = p;
}

void net_tx_pkt_uninit(struct NetTxPkt *pkt)
{
    if (pkt) {
        g_free(pkt->vec);
        g_free(pkt->raw);
        g_free(pkt);
    }
}

/*
 * Refresh ip_len and recompute the IPv4 header checksum once the payload
 * length is known.
 */
void net_tx_pkt_update_ip_hdr_checksum(struct NetTxPkt *pkt)
{
    uint16_t csum;
    assert(pkt);
    struct ip_header *ip_hdr;
    ip_hdr = pkt->vec[NET_TX_PKT_L3HDR_FRAG].iov_base;

    ip_hdr->ip_len = cpu_to_be16(pkt->payload_len +
        pkt->vec[NET_TX_PKT_L3HDR_FRAG].iov_len);

    ip_hdr->ip_sum = 0;
    csum = net_raw_checksum((uint8_t *)ip_hdr,
        pkt->vec[NET_TX_PKT_L3HDR_FRAG].iov_len);
    ip_hdr->ip_sum = cpu_to_be16(csum);
}

/*
 * For GSO packets: fix up the IPv4 header (TCPv4/UDP) and seed the L4
 * checksum field with the pseudo-header checksum, as GSO-capable backends
 * expect.
 */
void net_tx_pkt_update_ip_checksums(struct NetTxPkt *pkt)
{
    uint16_t csum;
    uint32_t cntr, cso;
    assert(pkt);
    uint8_t gso_type = pkt->virt_hdr.gso_type & ~VIRTIO_NET_HDR_GSO_ECN;
    void *ip_hdr = pkt->vec[NET_TX_PKT_L3HDR_FRAG].iov_base;

    if (pkt->payload_len + pkt->vec[NET_TX_PKT_L3HDR_FRAG].iov_len >
        ETH_MAX_IP_DGRAM_LEN) {
        return;
    }

    if (gso_type == VIRTIO_NET_HDR_GSO_TCPV4 ||
        gso_type == VIRTIO_NET_HDR_GSO_UDP) {
        /* Calculate IP header checksum */
        net_tx_pkt_update_ip_hdr_checksum(pkt);

        /* Calculate IP pseudo header checksum */
        cntr = eth_calc_ip4_pseudo_hdr_csum(ip_hdr, pkt->payload_len, &cso);
        csum = cpu_to_be16(~net_checksum_finish(cntr));
    } else if (gso_type == VIRTIO_NET_HDR_GSO_TCPV6) {
        /* Calculate IP pseudo header checksum */
        cntr = eth_calc_ip6_pseudo_hdr_csum(ip_hdr, pkt->payload_len,
                                            IP_PROTO_TCP, &cso);
        csum = cpu_to_be16(~net_checksum_finish(cntr));
    } else {
        return;
    }

    iov_from_buf(&pkt->vec[NET_TX_PKT_PL_START_FRAG], pkt->payload_frags,
                 pkt->virt_hdr.csum_offset, &csum, sizeof(csum));
}
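
/*
 * Layout of pkt->vec as used by the parsing and sending code below (a
 * summary derived from the code, kept here for orientation only):
 *
 *   vec[NET_TX_PKT_VHDR_FRAG]       - virtio-net header (length 0 when
 *                                     has_virt_hdr is false)
 *   vec[NET_TX_PKT_L2HDR_FRAG]      - copy of the Ethernet (+ VLAN) header
 *   vec[NET_TX_PKT_L3HDR_FRAG]      - copy of the IPv4/IPv6 header
 *   vec[NET_TX_PKT_PL_START_FRAG..] - payload iovecs pointing into the
 *                                     guest buffers mapped in pkt->raw,
 *                                     starting at offset pkt->hdr_len
 */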

/*
 * hdr_len is the number of bytes occupied by the copied L2 + L3 headers;
 * the payload starts right after them in the raw fragments.
 */
static void net_tx_pkt_calculate_hdr_len(struct NetTxPkt *pkt)
{
    pkt->hdr_len = pkt->vec[NET_TX_PKT_L2HDR_FRAG].iov_len +
        pkt->vec[NET_TX_PKT_L3HDR_FRAG].iov_len;
}

/*
 * Copy the L2 and L3 headers out of the raw guest fragments into the fixed
 * header slots of pkt->vec and record the packet type and L4 protocol.
 */
static bool net_tx_pkt_parse_headers(struct NetTxPkt *pkt)
{
    struct iovec *l2_hdr, *l3_hdr;
    size_t bytes_read;
    size_t full_ip6hdr_len;
    uint16_t l3_proto;

    assert(pkt);

    l2_hdr = &pkt->vec[NET_TX_PKT_L2HDR_FRAG];
    l3_hdr = &pkt->vec[NET_TX_PKT_L3HDR_FRAG];

    bytes_read = iov_to_buf(pkt->raw, pkt->raw_frags, 0, l2_hdr->iov_base,
                            ETH_MAX_L2_HDR_LEN);
    if (bytes_read < sizeof(struct eth_header)) {
        l2_hdr->iov_len = 0;
        return false;
    }

    l2_hdr->iov_len = sizeof(struct eth_header);
    switch (be16_to_cpu(PKT_GET_ETH_HDR(l2_hdr->iov_base)->h_proto)) {
    case ETH_P_VLAN:
        l2_hdr->iov_len += sizeof(struct vlan_header);
        break;
    case ETH_P_DVLAN:
        l2_hdr->iov_len += 2 * sizeof(struct vlan_header);
        break;
    }

    if (bytes_read < l2_hdr->iov_len) {
        l2_hdr->iov_len = 0;
        l3_hdr->iov_len = 0;
        pkt->packet_type = ETH_PKT_UCAST;
        return false;
    } else {
        l2_hdr->iov_len = ETH_MAX_L2_HDR_LEN;
        l2_hdr->iov_len = eth_get_l2_hdr_length(l2_hdr->iov_base);
        pkt->packet_type = get_eth_packet_type(l2_hdr->iov_base);
    }

    l3_proto = eth_get_l3_proto(l2_hdr, 1, l2_hdr->iov_len);

    switch (l3_proto) {
    case ETH_P_IP:
        bytes_read = iov_to_buf(pkt->raw, pkt->raw_frags, l2_hdr->iov_len,
                                l3_hdr->iov_base, sizeof(struct ip_header));

        if (bytes_read < sizeof(struct ip_header)) {
            l3_hdr->iov_len = 0;
            return false;
        }

        l3_hdr->iov_len = IP_HDR_GET_LEN(l3_hdr->iov_base);

        if (l3_hdr->iov_len < sizeof(struct ip_header)) {
            l3_hdr->iov_len = 0;
            return false;
        }

        pkt->l4proto = IP_HDR_GET_P(l3_hdr->iov_base);

        if (IP_HDR_GET_LEN(l3_hdr->iov_base) != sizeof(struct ip_header)) {
            /* copy optional IPv4 header data, if any */
            bytes_read = iov_to_buf(pkt->raw, pkt->raw_frags,
                                    l2_hdr->iov_len + sizeof(struct ip_header),
                                    l3_hdr->iov_base + sizeof(struct ip_header),
                                    l3_hdr->iov_len - sizeof(struct ip_header));
            if (bytes_read < l3_hdr->iov_len - sizeof(struct ip_header)) {
                l3_hdr->iov_len = 0;
                return false;
            }
        }

        break;

    case ETH_P_IPV6:
    {
        eth_ip6_hdr_info hdrinfo;

        if (!eth_parse_ipv6_hdr(pkt->raw, pkt->raw_frags, l2_hdr->iov_len,
                                &hdrinfo)) {
            l3_hdr->iov_len = 0;
            return false;
        }

        pkt->l4proto = hdrinfo.l4proto;
        full_ip6hdr_len = hdrinfo.full_hdr_len;

        if (full_ip6hdr_len > ETH_MAX_IP_DGRAM_LEN) {
            l3_hdr->iov_len = 0;
            return false;
        }

        bytes_read = iov_to_buf(pkt->raw, pkt->raw_frags, l2_hdr->iov_len,
                                l3_hdr->iov_base, full_ip6hdr_len);

        if (bytes_read < full_ip6hdr_len) {
            l3_hdr->iov_len = 0;
            return false;
        } else {
            l3_hdr->iov_len = full_ip6hdr_len;
        }
        break;
    }
    default:
        l3_hdr->iov_len = 0;
        break;
    }

    net_tx_pkt_calculate_hdr_len(pkt);
    return true;
}

static void net_tx_pkt_rebuild_payload(struct NetTxPkt *pkt)
{
    pkt->payload_len = iov_size(pkt->raw, pkt->raw_frags) - pkt->hdr_len;
    pkt->payload_frags = iov_copy(&pkt->vec[NET_TX_PKT_PL_START_FRAG],
                                  pkt->max_payload_frags,
                                  pkt->raw, pkt->raw_frags,
                                  pkt->hdr_len, pkt->payload_len);
}

bool net_tx_pkt_parse(struct NetTxPkt *pkt)
{
    if (net_tx_pkt_parse_headers(pkt)) {
        net_tx_pkt_rebuild_payload(pkt);
        return true;
    } else {
        return false;
    }
}

struct virtio_net_hdr *net_tx_pkt_get_vhdr(struct NetTxPkt *pkt)
{
    assert(pkt);
    return &pkt->virt_hdr;
}

static uint8_t net_tx_pkt_get_gso_type(struct NetTxPkt *pkt,
                                       bool tso_enable)
{
    uint8_t rc = VIRTIO_NET_HDR_GSO_NONE;
    uint16_t l3_proto;

    l3_proto = eth_get_l3_proto(&pkt->vec[NET_TX_PKT_L2HDR_FRAG], 1,
        pkt->vec[NET_TX_PKT_L2HDR_FRAG].iov_len);

    if (!tso_enable) {
        goto func_exit;
    }

    rc = eth_get_gso_type(l3_proto, pkt->vec[NET_TX_PKT_L3HDR_FRAG].iov_base,
                          pkt->l4proto);

func_exit:
    return rc;
}

void net_tx_pkt_build_vheader(struct NetTxPkt *pkt, bool tso_enable,
    bool csum_enable, uint32_t gso_size)
{
    struct tcp_hdr l4hdr;
    assert(pkt);

    /* csum has to be enabled if tso is. */
    assert(csum_enable || !tso_enable);

    pkt->virt_hdr.gso_type = net_tx_pkt_get_gso_type(pkt, tso_enable);

    switch (pkt->virt_hdr.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
    case VIRTIO_NET_HDR_GSO_NONE:
        pkt->virt_hdr.hdr_len = 0;
        pkt->virt_hdr.gso_size = 0;
        break;

    case VIRTIO_NET_HDR_GSO_UDP:
        pkt->virt_hdr.gso_size = gso_size;
        pkt->virt_hdr.hdr_len = pkt->hdr_len + sizeof(struct udp_header);
        break;

    case VIRTIO_NET_HDR_GSO_TCPV4:
    case VIRTIO_NET_HDR_GSO_TCPV6:
        iov_to_buf(&pkt->vec[NET_TX_PKT_PL_START_FRAG], pkt->payload_frags,
                   0, &l4hdr, sizeof(l4hdr));
        pkt->virt_hdr.hdr_len = pkt->hdr_len + l4hdr.th_off * sizeof(uint32_t);
        pkt->virt_hdr.gso_size = gso_size;
        break;

    default:
        g_assert_not_reached();
    }

    if (csum_enable) {
        switch (pkt->l4proto) {
        case IP_PROTO_TCP:
            pkt->virt_hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
            pkt->virt_hdr.csum_start = pkt->hdr_len;
            pkt->virt_hdr.csum_offset = offsetof(struct tcp_hdr, th_sum);
            break;
        case IP_PROTO_UDP:
            pkt->virt_hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
            pkt->virt_hdr.csum_start = pkt->hdr_len;
            pkt->virt_hdr.csum_offset = offsetof(struct udp_hdr, uh_sum);
            break;
        default:
            break;
        }
    }
}
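
/*
 * Illustrative example (a sketch, not taken from a real capture) of the
 * virtio header that net_tx_pkt_build_vheader() produces for a TCPv4 packet
 * with TSO and checksum offload requested, assuming a 14-byte Ethernet
 * header, a 20-byte IPv4 header, a 20-byte TCP header and an MSS of 1460:
 *
 *   gso_type    = VIRTIO_NET_HDR_GSO_TCPV4
 *   hdr_len     = 14 + 20 + 20 = 54
 *   gso_size    = 1460
 *   flags       = VIRTIO_NET_HDR_F_NEEDS_CSUM
 *   csum_start  = 34               (pkt->hdr_len, i.e. L2 + L3 headers)
 *   csum_offset = offsetof(struct tcp_hdr, th_sum)
 */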

void net_tx_pkt_setup_vlan_header_ex(struct NetTxPkt *pkt,
    uint16_t vlan, uint16_t vlan_ethtype)
{
    bool is_new;
    assert(pkt);

    eth_setup_vlan_headers_ex(pkt->vec[NET_TX_PKT_L2HDR_FRAG].iov_base,
        vlan, vlan_ethtype, &is_new);

    /* update l2hdrlen */
    if (is_new) {
        pkt->hdr_len += sizeof(struct vlan_header);
        pkt->vec[NET_TX_PKT_L2HDR_FRAG].iov_len +=
            sizeof(struct vlan_header);
    }
}

bool net_tx_pkt_add_raw_fragment(struct NetTxPkt *pkt, hwaddr pa,
    size_t len)
{
    hwaddr mapped_len = 0;
    struct iovec *ventry;
    assert(pkt);
    assert(pkt->max_raw_frags > pkt->raw_frags);

    if (!len) {
        return true;
    }

    ventry = &pkt->raw[pkt->raw_frags];
    mapped_len = len;

    ventry->iov_base = pci_dma_map(pkt->pci_dev, pa,
                                   &mapped_len, DMA_DIRECTION_TO_DEVICE);

    if ((ventry->iov_base != NULL) && (len == mapped_len)) {
        ventry->iov_len = mapped_len;
        pkt->raw_frags++;
        return true;
    } else {
        return false;
    }
}

bool net_tx_pkt_has_fragments(struct NetTxPkt *pkt)
{
    return pkt->raw_frags > 0;
}

eth_pkt_types_e net_tx_pkt_get_packet_type(struct NetTxPkt *pkt)
{
    assert(pkt);

    return pkt->packet_type;
}

size_t net_tx_pkt_get_total_len(struct NetTxPkt *pkt)
{
    assert(pkt);

    return pkt->hdr_len + pkt->payload_len;
}

void net_tx_pkt_dump(struct NetTxPkt *pkt)
{
#ifdef NET_TX_PKT_DEBUG
    assert(pkt);

    printf("TX PKT: hdr_len: %d, pkt_type: 0x%X, l2hdr_len: %zu, "
           "l3hdr_len: %zu, payload_len: %u\n", pkt->hdr_len, pkt->packet_type,
           pkt->vec[NET_TX_PKT_L2HDR_FRAG].iov_len,
           pkt->vec[NET_TX_PKT_L3HDR_FRAG].iov_len, pkt->payload_len);
#endif
}

void net_tx_pkt_reset(struct NetTxPkt *pkt)
{
    int i;

    /* no assert, as reset can be called before tx_pkt_init */
    if (!pkt) {
        return;
    }

    memset(&pkt->virt_hdr, 0, sizeof(pkt->virt_hdr));

    assert(pkt->vec);

    pkt->payload_len = 0;
    pkt->payload_frags = 0;

    assert(pkt->raw);
    for (i = 0; i < pkt->raw_frags; i++) {
        assert(pkt->raw[i].iov_base);
        pci_dma_unmap(pkt->pci_dev, pkt->raw[i].iov_base, pkt->raw[i].iov_len,
                      DMA_DIRECTION_TO_DEVICE, 0);
    }
    pkt->raw_frags = 0;

    pkt->hdr_len = 0;
    pkt->l4proto = 0;
}

/*
 * Compute the L4 (TCP/UDP) checksum in software, including the IPv4
 * pseudo-header, and store it at csum_offset.  Used when the packet was
 * built without a virtio header, so checksum offload cannot be requested.
 */
static void net_tx_pkt_do_sw_csum(struct NetTxPkt *pkt)
{
    struct iovec *iov = &pkt->vec[NET_TX_PKT_L2HDR_FRAG];
    uint32_t csum_cntr;
    uint16_t csum = 0;
    uint32_t cso;
    /* num of iovec without vhdr */
    uint32_t iov_len = pkt->payload_frags + NET_TX_PKT_PL_START_FRAG - 1;
    uint16_t csl;
    struct ip_header *iphdr;
    size_t csum_offset = pkt->virt_hdr.csum_start + pkt->virt_hdr.csum_offset;

    /* Zero the checksum field before summing */
    iov_from_buf(iov, iov_len, csum_offset, &csum, sizeof csum);

    /* Calculate L4 TCP/UDP checksum */
    csl = pkt->payload_len;

    /* add pseudo header to csum */
    iphdr = pkt->vec[NET_TX_PKT_L3HDR_FRAG].iov_base;
    csum_cntr = eth_calc_ip4_pseudo_hdr_csum(iphdr, csl, &cso);

    /* data checksum */
    csum_cntr +=
        net_checksum_add_iov(iov, iov_len, pkt->virt_hdr.csum_start, csl, cso);

    /* Put the checksum obtained into the packet */
    csum = cpu_to_be16(net_checksum_finish_nozero(csum_cntr));
    iov_from_buf(iov, iov_len, csum_offset, &csum, sizeof csum);
}
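
/*
 * Software IPv4 fragmentation path, used when the packet needs segmentation
 * but the peer cannot accept a virtio GSO header.  Each fragment reuses the
 * copied L2 and L3 headers and carries one gso_size-sized slice of payload:
 *
 *   fragment[NET_TX_PKT_FRAGMENT_L2_HDR_POS]   - shared L2 header
 *   fragment[NET_TX_PKT_FRAGMENT_L3_HDR_POS]   - IPv4 header, re-patched
 *                                                (fragment offset, MF flag,
 *                                                checksum) before every send
 *   fragment[NET_TX_PKT_FRAGMENT_HEADER_NUM..] - payload slice gathered by
 *                                                net_tx_pkt_fetch_fragment()
 */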
enum {
    NET_TX_PKT_FRAGMENT_L2_HDR_POS = 0,
    NET_TX_PKT_FRAGMENT_L3_HDR_POS,
    NET_TX_PKT_FRAGMENT_HEADER_NUM
};

#define NET_MAX_FRAG_SG_LIST (64)

/*
 * Gather up to IP_FRAG_ALIGN_SIZE(gso_size) bytes of payload from pkt->vec
 * into dst[], starting after the header slots; returns the bytes gathered.
 */
static size_t net_tx_pkt_fetch_fragment(struct NetTxPkt *pkt,
    int *src_idx, size_t *src_offset, struct iovec *dst, int *dst_idx)
{
    size_t fetched = 0;
    struct iovec *src = pkt->vec;

    *dst_idx = NET_TX_PKT_FRAGMENT_HEADER_NUM;

    while (fetched < IP_FRAG_ALIGN_SIZE(pkt->virt_hdr.gso_size)) {

        /* no more place in fragment iov */
        if (*dst_idx == NET_MAX_FRAG_SG_LIST) {
            break;
        }

        /* no more data in iovec */
        if (*src_idx == (pkt->payload_frags + NET_TX_PKT_PL_START_FRAG)) {
            break;
        }

        dst[*dst_idx].iov_base = src[*src_idx].iov_base + *src_offset;
        dst[*dst_idx].iov_len = MIN(src[*src_idx].iov_len - *src_offset,
            IP_FRAG_ALIGN_SIZE(pkt->virt_hdr.gso_size) - fetched);

        *src_offset += dst[*dst_idx].iov_len;
        fetched += dst[*dst_idx].iov_len;

        if (*src_offset == src[*src_idx].iov_len) {
            *src_offset = 0;
            (*src_idx)++;
        }

        (*dst_idx)++;
    }

    return fetched;
}

/* Deliver either back into our own receive path (loopback) or to the peer */
static inline void net_tx_pkt_sendv(struct NetTxPkt *pkt,
    NetClientState *nc, const struct iovec *iov, int iov_cnt)
{
    if (pkt->is_loopback) {
        nc->info->receive_iov(nc, iov, iov_cnt);
    } else {
        qemu_sendv_packet(nc, iov, iov_cnt);
    }
}

static bool net_tx_pkt_do_sw_fragmentation(struct NetTxPkt *pkt,
    NetClientState *nc)
{
    struct iovec fragment[NET_MAX_FRAG_SG_LIST];
    size_t fragment_len = 0;
    bool more_frags = false;

    /* some pointers for shorter code */
    void *l2_iov_base, *l3_iov_base;
    size_t l2_iov_len, l3_iov_len;
    int src_idx = NET_TX_PKT_PL_START_FRAG, dst_idx;
    size_t src_offset = 0;
    size_t fragment_offset = 0;

    l2_iov_base = pkt->vec[NET_TX_PKT_L2HDR_FRAG].iov_base;
    l2_iov_len = pkt->vec[NET_TX_PKT_L2HDR_FRAG].iov_len;
    l3_iov_base = pkt->vec[NET_TX_PKT_L3HDR_FRAG].iov_base;
    l3_iov_len = pkt->vec[NET_TX_PKT_L3HDR_FRAG].iov_len;

    /* Copy headers */
    fragment[NET_TX_PKT_FRAGMENT_L2_HDR_POS].iov_base = l2_iov_base;
    fragment[NET_TX_PKT_FRAGMENT_L2_HDR_POS].iov_len = l2_iov_len;
    fragment[NET_TX_PKT_FRAGMENT_L3_HDR_POS].iov_base = l3_iov_base;
    fragment[NET_TX_PKT_FRAGMENT_L3_HDR_POS].iov_len = l3_iov_len;

    /* Put as much data as possible and send */
    do {
        fragment_len = net_tx_pkt_fetch_fragment(pkt, &src_idx, &src_offset,
            fragment, &dst_idx);

        more_frags = (fragment_offset + fragment_len < pkt->payload_len);

        eth_setup_ip4_fragmentation(l2_iov_base, l2_iov_len, l3_iov_base,
            l3_iov_len, fragment_len, fragment_offset, more_frags);

        eth_fix_ip4_checksum(l3_iov_base, l3_iov_len);

        net_tx_pkt_sendv(pkt, nc, fragment, dst_idx);

        fragment_offset += fragment_len;

    } while (fragment_len && more_frags);

    return true;
}

bool net_tx_pkt_send(struct NetTxPkt *pkt, NetClientState *nc)
{
    assert(pkt);

    if (!pkt->has_virt_hdr &&
        pkt->virt_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
        net_tx_pkt_do_sw_csum(pkt);
    }

    /*
     * Since the underlying infrastructure does not support IP datagrams
     * longer than 64K, drop such packets instead of trying to send them.
     */
    if (VIRTIO_NET_HDR_GSO_NONE != pkt->virt_hdr.gso_type) {
        if (pkt->payload_len >
            ETH_MAX_IP_DGRAM_LEN -
            pkt->vec[NET_TX_PKT_L3HDR_FRAG].iov_len) {
            return false;
        }
    }

    if (pkt->has_virt_hdr ||
        pkt->virt_hdr.gso_type == VIRTIO_NET_HDR_GSO_NONE) {
        net_tx_pkt_sendv(pkt, nc, pkt->vec,
            pkt->payload_frags + NET_TX_PKT_PL_START_FRAG);
        return true;
    }

    return net_tx_pkt_do_sw_fragmentation(pkt, nc);
}

bool net_tx_pkt_send_loopback(struct NetTxPkt *pkt, NetClientState *nc)
{
    bool res;

    pkt->is_loopback = true;
    res = net_tx_pkt_send(pkt, nc);
    pkt->is_loopback = false;

    return res;
}
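
/*
 * Illustrative usage sketch (hypothetical caller, not taken from a real
 * device model): the typical life cycle of a NetTxPkt in an emulated NIC's
 * transmit path.  Names such as "desc_addr", "desc_len", "tso_on", "csum_on"
 * and "mss" are placeholders for whatever the device's descriptor format
 * provides.
 *
 *   struct NetTxPkt *pkt;
 *
 *   net_tx_pkt_init(&pkt, pci_dev, max_frags, has_vnet_hdr);
 *
 *   // for each transmit descriptor chain:
 *   net_tx_pkt_reset(pkt);               // also unmaps the fragments of the
 *                                        // previously sent packet
 *   for (each buffer in the chain) {
 *       if (!net_tx_pkt_add_raw_fragment(pkt, desc_addr, desc_len)) {
 *           break;                       // DMA mapping failed
 *       }
 *   }
 *   if (net_tx_pkt_parse(pkt)) {
 *       net_tx_pkt_build_vheader(pkt, tso_on, csum_on, mss);
 *       net_tx_pkt_send(pkt, nc);        // or net_tx_pkt_send_loopback()
 *   }
 *
 *   net_tx_pkt_uninit(pkt);
 */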