// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2015-2019 Netronome Systems, Inc. */
 * driver enqueues packets for transmit by advancing the write
 * nfp_nfd3_tx_ring_stop() - stop tx ring

 * nfp_nfd3_tx_tso() - Set up Tx descriptor for LSO
 * @r_vec: per-ring structure
 * Set up Tx descriptor for LSO, do nothing for non-LSO skbs.
        if (!skb->encapsulation) {
                l4_hdrlen = (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) ?
                l4_hdrlen = (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) ?

        txbuf->pkt_cnt = skb_shinfo(skb)->gso_segs;
        txbuf->real_len += hdrlen * (txbuf->pkt_cnt - 1);

        mss = skb_shinfo(skb)->gso_size & NFD3_DESC_TX_MSS_MASK;
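
        /* The device parses the frame it is handed, which starts with the
         * md_bytes of TX metadata prepend; the L3/L4 offsets and the LSO
         * header length below are therefore given relative to that prepend.
         */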
        txd->l3_offset = l3_offset - md_bytes;
        txd->l4_offset = l4_offset - md_bytes;
        txd->lso_hdrlen = hdrlen - md_bytes;
        txd->mss = cpu_to_le16(mss);
        txd->flags |= NFD3_DESC_TX_LSO;

        u64_stats_update_begin(&r_vec->tx_sync);
        r_vec->tx_lso++;
        u64_stats_update_end(&r_vec->tx_sync);

 * nfp_nfd3_tx_csum() - Set TX CSUM offload flags in TX descriptor
 * @r_vec: per-ring structure
        if (!(dp->ctrl & NFP_NET_CFG_CTRL_TXCSUM))
        if (skb->ip_summed != CHECKSUM_PARTIAL)

        txd->flags |= NFD3_DESC_TX_CSUM;
        if (skb->encapsulation)
                txd->flags |= NFD3_DESC_TX_ENCAP;

        iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb);
        ipv6h = skb->encapsulation ? inner_ipv6_hdr(skb) : ipv6_hdr(skb);
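
        /* When the skb is encapsulated, NFD3_DESC_TX_ENCAP has been set above
         * and the headers inspected here are the inner ones, so the checksum
         * flags chosen below describe the inner IP/L4 protocol.
         */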
        if (iph->version == 4) {
                txd->flags |= NFD3_DESC_TX_IP4_CSUM;
                l4_hdr = iph->protocol;
        } else if (ipv6h->version == 6) {
                l4_hdr = ipv6h->nexthdr;
                nn_dp_warn(dp, "partial checksum but ipv=%x!\n", iph->version);
                txd->flags |= NFD3_DESC_TX_TCP_CSUM;
                txd->flags |= NFD3_DESC_TX_UDP_CSUM;
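
        /* The checksum counters below are bumped by txbuf->pkt_cnt rather
         * than by one, so an LSO super-packet is accounted once per segment
         * it will produce on the wire.
         */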
        u64_stats_update_begin(&r_vec->tx_sync);
        if (skb->encapsulation)
                r_vec->hw_csum_tx_inner += txbuf->pkt_cnt;
                r_vec->hw_csum_tx += txbuf->pkt_cnt;
        u64_stats_update_end(&r_vec->tx_sync);

        /* nfp_nfd3_prep_tx_meta() */
        if (unlikely(md_dst && md_dst->type != METADATA_HW_PORT_MUX))
        vlan_insert = skb_vlan_tag_present(skb) && (dp->ctrl & NFP_NET_CFG_CTRL_TXVLAN_V2);
                return -ENOMEM;

                data -= NFP_NET_META_PORTID_SIZE;
                put_unaligned_be32(md_dst->u.port_info.port_id, data);

                data -= NFP_NET_META_CONN_HANDLE_SIZE;

                data -= NFP_NET_META_VLAN_SIZE;
                /* data type of skb->vlan_proto is __be16
                 * so it fills metadata without calling put_unaligned_be16
                 */
                memcpy(data, &skb->vlan_proto, sizeof(skb->vlan_proto));
                put_unaligned_be16(skb_vlan_tag_get(skb), data + sizeof(skb->vlan_proto));

                        data -= NFP_NET_META_IPSEC_SIZE;
                        data -= NFP_NET_META_IPSEC_SIZE;
                        data -= NFP_NET_META_IPSEC_SIZE;
                        put_unaligned_be32(offload_info.handle - 1, data);

        data -= sizeof(meta_id);
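
        /* The TX metadata prepend is built back to front: each field moves
         * data further in front of the packet, and the meta_id word (which
         * appears to record which fields are present) is reserved last, so it
         * ends up first and is the first thing the device parses.
         */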

 * nfp_nfd3_tx() - Main transmit entry point
        dp = &nn->dp;
        tx_ring = &dp->tx_rings[qidx];
        r_vec = tx_ring->r_vec;

        nr_frags = skb_shinfo(skb)->nr_frags;
                        qidx, tx_ring->wr_p, tx_ring->rd_p);
                nd_q = netdev_get_tx_queue(dp->netdev, qidx);
                u64_stats_update_begin(&r_vec->tx_sync);
                r_vec->tx_busy++;
                u64_stats_update_end(&r_vec->tx_sync);

        dma_addr = dma_map_single(dp->dev, skb->data, skb_headlen(skb),
        if (dma_mapping_error(dp->dev, dma_addr))
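
        /* D_IDX() masks a free-running ring pointer down to a slot index.
         * Assuming the ring size is a power of two (as the rest of this file
         * implies), it is presumably equivalent to something like:
         *
         *      #define D_IDX(ring, p)  ((p) & ((ring)->cnt - 1))
         */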
        wr_idx = D_IDX(tx_ring, tx_ring->wr_p);

        txbuf = &tx_ring->txbufs[wr_idx];
        txbuf->skb = skb;
        txbuf->dma_addr = dma_addr;
        txbuf->fidx = -1;
        txbuf->pkt_cnt = 1;
        txbuf->real_len = skb->len;

        txd = &tx_ring->txds[wr_idx];
        txd->offset_eop = (nr_frags ? 0 : NFD3_DESC_TX_EOP) | md_bytes;
        txd->dma_len = cpu_to_le16(skb_headlen(skb));
        txd->data_len = cpu_to_le16(skb->len);
        txd->flags = 0;
        txd->mss = 0;
        txd->lso_hdrlen = 0;

        /* Do not reorder - tso may adjust pkt cnt, vlan may override fields */
        if (skb_vlan_tag_present(skb) && dp->ctrl & NFP_NET_CFG_CTRL_TXVLAN) {
                txd->flags |= NFD3_DESC_TX_VLAN;
                txd->vlan = cpu_to_le16(skb_vlan_tag_get(skb));

                second_half = txd->vals8[1];
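
                /* second_half captures the upper 8 bytes of the head
                 * descriptor (flags, mss, lso_hdrlen, vlan, ...); it is
                 * replayed into every fragment descriptor below so all
                 * descriptors of the packet match except for address, length
                 * and EOP.
                 */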
                        frag = &skb_shinfo(skb)->frags[f];
                        dma_addr = skb_frag_dma_map(dp->dev, frag, 0,
                        if (dma_mapping_error(dp->dev, dma_addr))

                        tx_ring->txbufs[wr_idx].skb = skb;
                        tx_ring->txbufs[wr_idx].dma_addr = dma_addr;
                        tx_ring->txbufs[wr_idx].fidx = f;

                        txd = &tx_ring->txds[wr_idx];
                        txd->dma_len = cpu_to_le16(fsize);
                        txd->offset_eop = md_bytes |
                                ((f == nr_frags - 1) ? NFD3_DESC_TX_EOP : 0);
                        txd->vals8[1] = second_half;

                u64_stats_update_begin(&r_vec->tx_sync);
                r_vec->tx_gather++;
                u64_stats_update_end(&r_vec->tx_sync);

        nd_q = netdev_get_tx_queue(dp->netdev, tx_ring->idx);

        tx_ring->wr_p += nr_frags + 1;
        tx_ring->wr_ptr_add += nr_frags + 1;
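
        /* The hardware write pointer is not bumped per packet; wr_ptr_add
         * accumulates the descriptor count, and the queue doorbell is only
         * rung when __netdev_tx_sent_queue() below asks for a flush (queue
         * stopped by BQL, or xmit_more not set), presumably via a flush
         * helper on that branch.
         */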
        if (__netdev_tx_sent_queue(nd_q, txbuf->real_len, netdev_xmit_more()))
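
        /* Error unwind: fragments mapped so far are released in reverse
         * order and their txbuf slots cleared, then tx_errors is bumped.
         */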
        while (--f >= 0) {
                frag = &skb_shinfo(skb)->frags[f];
                dma_unmap_page(dp->dev, tx_ring->txbufs[wr_idx].dma_addr,
                tx_ring->txbufs[wr_idx].skb = NULL;
                tx_ring->txbufs[wr_idx].dma_addr = 0;
                tx_ring->txbufs[wr_idx].fidx = -2;
                wr_idx = wr_idx - 1;
                        wr_idx += tx_ring->cnt;

        dma_unmap_single(dp->dev, tx_ring->txbufs[wr_idx].dma_addr,
        tx_ring->txbufs[wr_idx].skb = NULL;
        tx_ring->txbufs[wr_idx].dma_addr = 0;
        tx_ring->txbufs[wr_idx].fidx = -2;

        u64_stats_update_begin(&r_vec->tx_sync);
        r_vec->tx_errors++;
        u64_stats_update_end(&r_vec->tx_sync);

 * nfp_nfd3_tx_complete() - Handle completed TX packets
        struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
        struct nfp_net_dp *dp = &r_vec->nfp_net->dp;

        if (tx_ring->wr_p == tx_ring->rd_p)
        if (qcp_rd_p == tx_ring->qcp_rd_p)

        todo = D_IDX(tx_ring, qcp_rd_p - tx_ring->qcp_rd_p);
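
        /* todo is the number of descriptors the device has consumed since the
         * last completion run, derived from the hardware (QCP) read pointer.
         * In the loop below only the buffer with fidx == -1 (the skb head)
         * carries pkt_cnt/real_len, so packets and bytes are accounted once
         * per skb, while every fragment still gets its DMA mapping released;
         * the skb itself appears to be released only when its last fragment
         * (fidx == nr_frags - 1) completes.
         */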
        while (todo--) {
                idx = D_IDX(tx_ring, tx_ring->rd_p++);
                tx_buf = &tx_ring->txbufs[idx];

                skb = tx_buf->skb;
                nr_frags = skb_shinfo(skb)->nr_frags;
                fidx = tx_buf->fidx;

                if (fidx == -1) {
                        dma_unmap_single(dp->dev, tx_buf->dma_addr,
                        done_pkts += tx_buf->pkt_cnt;
                        done_bytes += tx_buf->real_len;
                        frag = &skb_shinfo(skb)->frags[fidx];
                        dma_unmap_page(dp->dev, tx_buf->dma_addr,
                        if (fidx == nr_frags - 1)

                tx_buf->dma_addr = 0;
                tx_buf->skb = NULL;
                tx_buf->fidx = -2;

        tx_ring->qcp_rd_p = qcp_rd_p;

        u64_stats_update_begin(&r_vec->tx_sync);
        r_vec->tx_bytes += done_bytes;
        r_vec->tx_pkts += done_pkts;
        u64_stats_update_end(&r_vec->tx_sync);

        if (!dp->netdev)
        nd_q = netdev_get_tx_queue(dp->netdev, tx_ring->idx);
                /* Make sure TX thread will see updated tx_ring->rd_p */
        WARN_ONCE(tx_ring->wr_p - tx_ring->rd_p > tx_ring->cnt,
                  tx_ring->rd_p, tx_ring->wr_p, tx_ring->cnt);

        /* nfp_nfd3_xdp_complete() */
        struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
        struct nfp_net_dp *dp = &r_vec->nfp_net->dp;

        if (qcp_rd_p == tx_ring->qcp_rd_p)

        todo = D_IDX(tx_ring, qcp_rd_p - tx_ring->qcp_rd_p);
                tx_ring->qcp_rd_p = D_IDX(tx_ring, tx_ring->qcp_rd_p + todo);
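
        /* XDP TX buffers are page fragments owned by the rings themselves, so
         * completion has nothing to free; it only advances rd_p and accounts
         * the transmitted bytes and packets.
         */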
        while (todo--) {
                idx = D_IDX(tx_ring, tx_ring->rd_p);
                tx_ring->rd_p++;

                done_bytes += tx_ring->txbufs[idx].real_len;

        u64_stats_update_begin(&r_vec->tx_sync);
        r_vec->tx_bytes += done_bytes;
        r_vec->tx_pkts += done_pkts;
        u64_stats_update_end(&r_vec->tx_sync);

        WARN_ONCE(tx_ring->wr_p - tx_ring->rd_p > tx_ring->cnt,
                  "XDP TX ring corruption rd_p=%u wr_p=%u cnt=%u\n",
                  tx_ring->rd_p, tx_ring->wr_p, tx_ring->cnt);

        /* nfp_nfd3_napi_alloc_one() */
        if (!dp->xdp_prog) {
                frag = napi_alloc_frag(dp->fl_bufsz);

        if (dma_mapping_error(dp->dev, *dma_addr)) {
                nfp_net_free_frag(frag, dp->xdp_prog);
                nn_dp_warn(dp, "Failed to map DMA RX buffer\n");

 * nfp_nfd3_rx_give_one() - Put mapped skb on the software and hardware rings
 * @rx_ring: RX ring structure
        wr_idx = D_IDX(rx_ring, rx_ring->wr_p);

        rx_ring->rxbufs[wr_idx].frag = frag;
        rx_ring->rxbufs[wr_idx].dma_addr = dma_addr;

        rx_ring->rxds[wr_idx].fld.reserved = 0;
        rx_ring->rxds[wr_idx].fld.meta_len_dd = 0;
        /* DMA address is expanded to 48-bit width in freelist for NFP3800,
         * a 40-bit address since the top 8 bits get set to 0.
         */
        nfp_desc_set_dma_addr_48b(&rx_ring->rxds[wr_idx].fld,
                                  dma_addr + dp->rx_dma_off);

        rx_ring->wr_p++;
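
        /* Freelist descriptors are handed to the device in batches: the QCP
         * write pointer below is only advanced once NFP_NET_FL_BATCH buffers
         * have been queued, which keeps doorbell writes infrequent.
         */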
        if (!(rx_ring->wr_p % NFP_NET_FL_BATCH)) {
                nfp_qcp_wr_ptr_add(rx_ring->qcp_fl, NFP_NET_FL_BATCH);

 * nfp_nfd3_rx_ring_fill_freelist() - Give buffers from the ring to FW
 * @rx_ring: RX ring to fill
        if (nfp_net_has_xsk_pool_slow(dp, rx_ring->idx))

        for (i = 0; i < rx_ring->cnt - 1; i++)
                nfp_nfd3_rx_give_one(dp, rx_ring, rx_ring->rxbufs[i].frag,
                                     rx_ring->rxbufs[i].dma_addr);

 * nfp_nfd3_rx_csum_has_errors() - group check if rxd has any csum errors
 * @flags: RX descriptor flags field in CPU byte order

 * nfp_nfd3_rx_csum() - set SKB checksum field based on RX descriptor flags
 * @r_vec: per-ring structure
 * @rxd: Pointer to RX descriptor
 * @meta: Parsed metadata prepend
        if (!(dp->netdev->features & NETIF_F_RXCSUM))

        if (meta->csum_type) {
                skb->ip_summed = meta->csum_type;
                skb->csum = meta->csum;
                u64_stats_update_begin(&r_vec->rx_sync);
                r_vec->hw_csum_rx_complete++;
                u64_stats_update_end(&r_vec->rx_sync);

        if (nfp_nfd3_rx_csum_has_errors(le16_to_cpu(rxd->rxd.flags))) {
                u64_stats_update_begin(&r_vec->rx_sync);
                r_vec->hw_csum_rx_error++;
                u64_stats_update_end(&r_vec->rx_sync);

        if (rxd->rxd.flags & PCIE_DESC_RX_TCP_CSUM_OK ||
            rxd->rxd.flags & PCIE_DESC_RX_UDP_CSUM_OK) {
                u64_stats_update_begin(&r_vec->rx_sync);
                r_vec->hw_csum_rx_ok++;
                u64_stats_update_end(&r_vec->rx_sync);

        if (rxd->rxd.flags & PCIE_DESC_RX_I_TCP_CSUM_OK ||
            rxd->rxd.flags & PCIE_DESC_RX_I_UDP_CSUM_OK) {
                u64_stats_update_begin(&r_vec->rx_sync);
                r_vec->hw_csum_rx_inner_ok++;
                u64_stats_update_end(&r_vec->rx_sync);

        /* nfp_nfd3_set_hash() */
        if (!(netdev->features & NETIF_F_RXHASH))
                meta->hash_type = PKT_HASH_TYPE_L3;
                meta->hash_type = PKT_HASH_TYPE_L4;
        meta->hash = get_unaligned_be32(hash);

        /* nfp_nfd3_set_hash_desc() */
        if (!(rxd->rxd.flags & PCIE_DESC_RX_RSS))

        nfp_nfd3_set_hash(netdev, meta, get_unaligned_be32(&rx_hash->hash_type),
                          &rx_hash->hash);

        /* nfp_nfd3_parse_meta() */
                        meta->mark = get_unaligned_be32(data);
                        meta->vlan.stripped = true;
                        meta->vlan.tpid = FIELD_GET(NFP_NET_META_VLAN_TPID_MASK,
                        meta->vlan.tci = FIELD_GET(NFP_NET_META_VLAN_TCI_MASK,
                        meta->portid = get_unaligned_be32(data);
                        meta->csum_type = CHECKSUM_COMPLETE;
                        meta->csum =
                        meta->ipsec_saidx = get_unaligned_be32(data) + 1;
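
        /* Chained metadata: a leading 32-bit word appears to pack a sequence
         * of 4-bit field types, and the parser consumes the matching payload
         * for each type in turn - the assignments above are the per-field
         * cases (mark, stripped VLAN, port id, CHECKSUM_COMPLETE, IPsec SA).
         */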

        /* nfp_nfd3_rx_drop() */
        u64_stats_update_begin(&r_vec->rx_sync);
        r_vec->rx_drops++;
                r_vec->rx_replace_buf_alloc_fail++;
        u64_stats_update_end(&r_vec->rx_sync);

        if (skb && rxbuf && skb->head == rxbuf->frag)
                page_ref_inc(virt_to_head_page(rxbuf->frag));
                nfp_nfd3_rx_give_one(dp, rx_ring, rxbuf->frag, rxbuf->dma_addr);

        /* nfp_nfd3_tx_xdp_buf() */
        unsigned int dma_map_sz = dp->fl_bufsz - NFP_NET_RX_BUF_NON_DATA;
                nfp_nfd3_rx_drop(dp, rx_ring->r_vec, rx_ring, rxbuf,

        wr_idx = D_IDX(tx_ring, tx_ring->wr_p);
        txbuf = &tx_ring->txbufs[wr_idx];
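
        /* XDP_TX recycles buffers between the rings: whatever frag currently
         * sits in this TX slot (left there by an earlier XDP_TX) is pushed
         * back onto the RX freelist, and the RX buffer being transmitted
         * takes its place, so no allocation happens on this path.
         */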
        nfp_nfd3_rx_give_one(dp, rx_ring, txbuf->frag, txbuf->dma_addr);

        txbuf->frag = rxbuf->frag;
        txbuf->dma_addr = rxbuf->dma_addr;
        txbuf->fidx = -1;
        txbuf->pkt_cnt = 1;
        txbuf->real_len = pkt_len;

        dma_sync_single_for_device(dp->dev, rxbuf->dma_addr + dma_off,

        txd = &tx_ring->txds[wr_idx];
        txd->offset_eop = NFD3_DESC_TX_EOP;
        txd->dma_len = cpu_to_le16(pkt_len);
        nfp_desc_set_dma_addr_40b(txd, rxbuf->dma_addr + dma_off);
        txd->data_len = cpu_to_le16(pkt_len);

        txd->flags = 0;
        txd->mss = 0;
        txd->lso_hdrlen = 0;

        tx_ring->wr_p++;
        tx_ring->wr_ptr_add++;

 * nfp_nfd3_rx() - receive up to @budget packets on @rx_ring
 * @rx_ring: RX ring to receive from
        struct nfp_net_r_vector *r_vec = rx_ring->r_vec;
        struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
        struct xdp_buff xdp;

        xdp_prog = READ_ONCE(dp->xdp_prog);
        true_bufsz = xdp_prog ? PAGE_SIZE : dp->fl_bufsz;
        xdp_init_buff(&xdp, PAGE_SIZE - NFP_NET_RX_BUF_HEADROOM,
                      &rx_ring->xdp_rxq);
        tx_ring = r_vec->xdp_ring;
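
        /* With an XDP program attached the RX buffers are full pages
         * (true_bufsz == PAGE_SIZE) and XDP_TX frames go out on this vector's
         * dedicated xdp_ring; the xdp_buff is initialized once per poll and
         * reused for every packet in the loop below.
         */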
                idx = D_IDX(rx_ring, rx_ring->rd_p);

                rxd = &rx_ring->rxds[idx];
                if (!(rxd->rxd.meta_len_dd & PCIE_DESC_RX_DD))

                rx_ring->rd_p++;

                rxbuf = &rx_ring->rxbufs[idx];
                 *  <-- [rx_offset] -->
                 *  ---------------------------------------------------------
                 * | [XX] |  metadata  |             packet           | XXXX |
                 *  ---------------------------------------------------------
                 *         <---------------- data_len --------------->
                 * (_RX_OFFSET_DYNAMIC) metadata starts at the beginning of the

                meta_len = rxd->rxd.meta_len_dd & PCIE_DESC_RX_META_LEN_MASK;
                data_len = le16_to_cpu(rxd->rxd.data_len);
                pkt_len = data_len - meta_len;

                pkt_off = NFP_NET_RX_BUF_HEADROOM + dp->rx_dma_off;
                if (dp->rx_offset == NFP_NET_CFG_RX_OFFSET_DYNAMIC)
                        pkt_off += dp->rx_offset;
                meta_off = pkt_off - meta_len;

                u64_stats_update_begin(&r_vec->rx_sync);
                r_vec->rx_pkts++;
                r_vec->rx_bytes += pkt_len;
                u64_stats_update_end(&r_vec->rx_sync);

                             (dp->rx_offset && meta_len > dp->rx_offset))) {
                        nn_dp_warn(dp, "oversized RX packet metadata %u\n",

                nfp_net_dma_sync_cpu_rx(dp, rxbuf->dma_addr + meta_off,

                if (!dp->chained_metadata_format) {
                        nfp_nfd3_set_hash_desc(dp->netdev, &meta,
                                rxbuf->frag + meta_off, rxd);
                        if (unlikely(nfp_nfd3_parse_meta(dp->netdev, &meta,
                                        rxbuf->frag + meta_off,
                                        rxbuf->frag + pkt_off,
                                nn_dp_warn(dp, "invalid RX packet metadata\n");

                        void *orig_data = rxbuf->frag + pkt_off;

                        xdp_prepare_buff(&xdp,
                                rxbuf->frag + NFP_NET_RX_BUF_HEADROOM,
                                pkt_off - NFP_NET_RX_BUF_HEADROOM,

                        act = bpf_prog_run_xdp(xdp_prog, &xdp);

                        pkt_len = xdp.data_end - xdp.data;
                        pkt_off += xdp.data - orig_data;
                                meta_len_xdp = xdp.data - xdp.data_meta;
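
                        /* The program may have moved xdp.data, so pkt_len and
                         * pkt_off are recomputed from the adjusted pointers
                         * above; on XDP_PASS the metadata area the program
                         * reserved (data_meta..data) is measured here,
                         * presumably so it can be carried over to the skb
                         * once it is built.
                         */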
                                dma_off = pkt_off - NFP_NET_RX_BUF_HEADROOM;
                                        trace_xdp_exception(dp->netdev,
                                bpf_warn_invalid_xdp_action(dp->netdev, xdp_prog, act);
                                trace_xdp_exception(dp->netdev, xdp_prog, act);
                                nfp_nfd3_rx_give_one(dp, rx_ring, rxbuf->frag,
                                        rxbuf->dma_addr);

                        netdev = dp->netdev;
                        struct nfp_net *nn = netdev_priv(dp->netdev);

                        nfp_app_ctrl_rx_raw(nn->app, rxbuf->frag + pkt_off,
                        nfp_nfd3_rx_give_one(dp, rx_ring, rxbuf->frag,
                                rxbuf->dma_addr);

                        nn = netdev_priv(dp->netdev);
                        netdev = nfp_app_dev_get(nn->app, meta.portid,
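
                        /* A non-zero port id in the metadata means the frame
                         * belongs to a different logical device (e.g. a port
                         * representor); the app layer resolves it to the
                         * right netdev above, and the skb is assigned to that
                         * device further down.
                         */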
                skb = napi_build_skb(rxbuf->frag, true_bufsz);
                nfp_net_dma_unmap_rx(dp, rxbuf->dma_addr);

                skb->mark = meta.mark;

                skb_record_rx_queue(skb, rx_ring->idx);
                skb->protocol = eth_type_trans(skb, netdev);

                if (rxd->rxd.flags & PCIE_DESC_RX_DECRYPTED) {
                        skb->decrypted = true;
                        u64_stats_update_begin(&r_vec->rx_sync);
                        r_vec->hw_tls_rx++;
                        u64_stats_update_end(&r_vec->rx_sync);

                        napi_gro_receive(&rx_ring->r_vec->napi, skb);
                        skb->dev = netdev;

                if (tx_ring->wr_ptr_add)
                else if (unlikely(tx_ring->wr_p != tx_ring->rd_p) &&

 * nfp_nfd3_poll() - napi poll function
        if (r_vec->tx_ring)
                nfp_nfd3_tx_complete(r_vec->tx_ring, budget);
        if (r_vec->rx_ring)
                pkts_polled = nfp_nfd3_rx(r_vec->rx_ring, budget);

                        nfp_net_irq_unmask(r_vec->nfp_net, r_vec->irq_entry);

        if (r_vec->nfp_net->rx_coalesce_adapt_on && r_vec->rx_ring) {
                        start = u64_stats_fetch_begin(&r_vec->rx_sync);
                        pkts = r_vec->rx_pkts;
                        bytes = r_vec->rx_bytes;
                } while (u64_stats_fetch_retry(&r_vec->rx_sync, start));

                dim_update_sample(r_vec->event_ctr, pkts, bytes, &dim_sample);
                net_dim(&r_vec->rx_dim, dim_sample);

        if (r_vec->nfp_net->tx_coalesce_adapt_on && r_vec->tx_ring) {
                        start = u64_stats_fetch_begin(&r_vec->tx_sync);
                        pkts = r_vec->tx_pkts;
                        bytes = r_vec->tx_bytes;
                } while (u64_stats_fetch_retry(&r_vec->tx_sync, start));

                dim_update_sample(r_vec->event_ctr, pkts, bytes, &dim_sample);
                net_dim(&r_vec->tx_dim, dim_sample);
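
        /* Adaptive interrupt moderation: when coalesce adaptation is enabled
         * the per-vector packet/byte counters are sampled under the u64_stats
         * seqlock and fed to DIM, which may then retune this ring's interrupt
         * moderation settings asynchronously.
         */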

        /* nfp_nfd3_ctrl_tx_one() */
        unsigned int real_len = skb->len, meta_len = 0;
        dp = &r_vec->nfp_net->dp;
        tx_ring = r_vec->tx_ring;

        if (WARN_ON_ONCE(skb_shinfo(skb)->nr_frags)) {

                u64_stats_update_begin(&r_vec->tx_sync);
                r_vec->tx_busy++;
                u64_stats_update_end(&r_vec->tx_sync);
                        __skb_queue_tail(&r_vec->queue, skb);
                        __skb_queue_head(&r_vec->queue, skb);
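
        /* If the control TX ring is busy the message is parked on
         * r_vec->queue (apparently tail for new messages and head when
         * re-queuing) and sent again later from __nfp_ctrl_tx_queued()
         * further down.
         */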

        if (nfp_app_ctrl_has_meta(nn->app)) {

        dma_addr = dma_map_single(dp->dev, skb->data, skb_headlen(skb),
        if (dma_mapping_error(dp->dev, dma_addr))

        wr_idx = D_IDX(tx_ring, tx_ring->wr_p);

        txbuf = &tx_ring->txbufs[wr_idx];
        txbuf->skb = skb;
        txbuf->dma_addr = dma_addr;
        txbuf->fidx = -1;
        txbuf->pkt_cnt = 1;
        txbuf->real_len = real_len;

        txd = &tx_ring->txds[wr_idx];
        txd->offset_eop = meta_len | NFD3_DESC_TX_EOP;
        txd->dma_len = cpu_to_le16(skb_headlen(skb));
        txd->data_len = cpu_to_le16(skb->len);

        txd->flags = 0;
        txd->mss = 0;
        txd->lso_hdrlen = 0;

        tx_ring->wr_p++;
        tx_ring->wr_ptr_add++;

        u64_stats_update_begin(&r_vec->tx_sync);
        r_vec->tx_errors++;
        u64_stats_update_end(&r_vec->tx_sync);

        /* __nfp_ctrl_tx_queued() */
        while ((skb = __skb_dequeue(&r_vec->queue)))
                if (nfp_nfd3_ctrl_tx_one(r_vec->nfp_net, r_vec, skb, true))

        /* nfp_ctrl_meta_ok() */
        if (!nfp_app_ctrl_has_meta(nn->app))

        /* nfp_ctrl_rx_one() */
        idx = D_IDX(rx_ring, rx_ring->rd_p);

        rxd = &rx_ring->rxds[idx];
        if (!(rxd->rxd.meta_len_dd & PCIE_DESC_RX_DD))

        rx_ring->rd_p++;

        rxbuf = &rx_ring->rxbufs[idx];
        meta_len = rxd->rxd.meta_len_dd & PCIE_DESC_RX_META_LEN_MASK;
        data_len = le16_to_cpu(rxd->rxd.data_len);
        pkt_len = data_len - meta_len;

        pkt_off = NFP_NET_RX_BUF_HEADROOM + dp->rx_dma_off;
        if (dp->rx_offset == NFP_NET_CFG_RX_OFFSET_DYNAMIC)
                pkt_off += dp->rx_offset;
        meta_off = pkt_off - meta_len;

        u64_stats_update_begin(&r_vec->rx_sync);
        r_vec->rx_pkts++;
        r_vec->rx_bytes += pkt_len;
        u64_stats_update_end(&r_vec->rx_sync);

        nfp_net_dma_sync_cpu_rx(dp, rxbuf->dma_addr + meta_off, data_len);

        if (unlikely(!nfp_ctrl_meta_ok(nn, rxbuf->frag + meta_off, meta_len))) {
                nn_dp_warn(dp, "incorrect metadata for ctrl packet (%d)\n",

        skb = build_skb(rxbuf->frag, dp->fl_bufsz);
        nfp_net_dma_unmap_rx(dp, rxbuf->dma_addr);

        nfp_app_ctrl_rx(nn->app, skb);

        /* nfp_ctrl_rx() */
        struct nfp_net_rx_ring *rx_ring = r_vec->rx_ring;
        struct nfp_net *nn = r_vec->nfp_net;
        struct nfp_net_dp *dp = &nn->dp;

        while (nfp_ctrl_rx_one(nn, dp, r_vec, rx_ring) && budget--)

        /* nfp_nfd3_ctrl_poll() */
        spin_lock(&r_vec->lock);
        nfp_nfd3_tx_complete(r_vec->tx_ring, 0);
        spin_unlock(&r_vec->lock);

                nfp_net_irq_unmask(r_vec->nfp_net, r_vec->irq_entry);
                tasklet_schedule(&r_vec->tasklet);
                nn_dp_warn(&r_vec->nfp_net->dp,