Lines Matching +full:xdp +full:- +full:rx +full:- +full:metadata
1 // SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
2 /* Copyright (C) 2015-2019 Netronome Systems, Inc. */
47 if (!skb->encapsulation) { in nfp_nfdk_tx_tso()
50 l4_hdrlen = (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) ? in nfp_nfdk_tx_tso()
55 l4_hdrlen = (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) ? in nfp_nfdk_tx_tso()
60 segs = skb_shinfo(skb)->gso_segs; in nfp_nfdk_tx_tso()
61 mss = skb_shinfo(skb)->gso_size & NFDK_DESC_TX_MSS_MASK; in nfp_nfdk_tx_tso()
70 txbuf->pkt_cnt = segs; in nfp_nfdk_tx_tso()
71 txbuf->real_len = skb->len + hdrlen * (txbuf->pkt_cnt - 1); in nfp_nfdk_tx_tso()
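/* With TSO the wire carries 'segs' packets, each with its own copy of the
 * headers, so the true byte count is the skb length plus one extra header
 * length per additional segment.
 */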
73 u64_stats_update_begin(&r_vec->tx_sync); in nfp_nfdk_tx_tso()
74 r_vec->tx_lso++; in nfp_nfdk_tx_tso()
75 u64_stats_update_end(&r_vec->tx_sync); in nfp_nfdk_tx_tso()
87 if (!(dp->ctrl & NFP_NET_CFG_CTRL_TXCSUM)) in nfp_nfdk_tx_csum()
90 if (skb->ip_summed != CHECKSUM_PARTIAL) in nfp_nfdk_tx_csum()
95 iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb); in nfp_nfdk_tx_csum()
96 ipv6h = skb->encapsulation ? inner_ipv6_hdr(skb) : ipv6_hdr(skb); in nfp_nfdk_tx_csum()
99 if (iph->version == 4) { in nfp_nfdk_tx_csum()
101 } else if (ipv6h->version != 6) { in nfp_nfdk_tx_csum()
102 nn_dp_warn(dp, "partial checksum but ipv=%x!\n", iph->version); in nfp_nfdk_tx_csum()
106 u64_stats_update_begin(&r_vec->tx_sync); in nfp_nfdk_tx_csum()
107 if (!skb->encapsulation) { in nfp_nfdk_tx_csum()
108 r_vec->hw_csum_tx += pkt_cnt; in nfp_nfdk_tx_csum()
111 r_vec->hw_csum_tx_inner += pkt_cnt; in nfp_nfdk_tx_csum()
113 u64_stats_update_end(&r_vec->tx_sync); in nfp_nfdk_tx_csum()
131 nr_frags = skb_shinfo(skb)->nr_frags; in nfp_nfdk_tx_maybe_close_block()
132 frag = skb_shinfo(skb)->frags; in nfp_nfdk_tx_maybe_close_block()
145 return -EINVAL; in nfp_nfdk_tx_maybe_close_block()
151 if (round_down(tx_ring->wr_p, NFDK_TX_DESC_BLOCK_CNT) != in nfp_nfdk_tx_maybe_close_block()
152 round_down(tx_ring->wr_p + n_descs, NFDK_TX_DESC_BLOCK_CNT)) in nfp_nfdk_tx_maybe_close_block()
155 if ((u32)tx_ring->data_pending + skb->len > NFDK_TX_MAX_DATA_PER_BLOCK) in nfp_nfdk_tx_maybe_close_block()
161 wr_p = tx_ring->wr_p; in nfp_nfdk_tx_maybe_close_block()
165 tx_ring->ktxbufs[wr_idx].skb = NULL; in nfp_nfdk_tx_maybe_close_block()
166 txd = &tx_ring->ktxds[wr_idx]; in nfp_nfdk_tx_maybe_close_block()
170 tx_ring->data_pending = 0; in nfp_nfdk_tx_maybe_close_block()
171 tx_ring->wr_p += nop_slots; in nfp_nfdk_tx_maybe_close_block()
172 tx_ring->wr_ptr_add += nop_slots; in nfp_nfdk_tx_maybe_close_block()
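/* In short: a packet's descriptors must not straddle an
 * NFDK_TX_DESC_BLOCK_CNT-aligned block boundary, and a block may carry at
 * most NFDK_TX_MAX_DATA_PER_BLOCK bytes of payload; when either limit would
 * be hit, the rest of the current block is padded with NOP descriptors
 * (wr_p and wr_ptr_add advance by nop_slots) so the packet starts in a
 * fresh block.
 */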
193 if (unlikely(md_dst && md_dst->type != METADATA_HW_PORT_MUX)) in nfp_nfdk_prep_tx_meta()
196 vlan_insert = skb_vlan_tag_present(skb) && (dp->ctrl & NFP_NET_CFG_CTRL_TXVLAN_V2); in nfp_nfdk_prep_tx_meta()
207 return -ENOMEM; in nfp_nfdk_prep_tx_meta()
211 data -= NFP_NET_META_PORTID_SIZE; in nfp_nfdk_prep_tx_meta()
212 put_unaligned_be32(md_dst->u.port_info.port_id, data); in nfp_nfdk_prep_tx_meta()
216 data -= NFP_NET_META_VLAN_SIZE; in nfp_nfdk_prep_tx_meta()
217 /* skb->vlan_proto is already __be16 (network byte order), in nfp_nfdk_prep_tx_meta()
218 * so memcpy() fills the metadata correctly without calling put_unaligned_be16 in nfp_nfdk_prep_tx_meta()
220 memcpy(data, &skb->vlan_proto, sizeof(skb->vlan_proto)); in nfp_nfdk_prep_tx_meta()
221 put_unaligned_be16(skb_vlan_tag_get(skb), data + sizeof(skb->vlan_proto)); in nfp_nfdk_prep_tx_meta()
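For clarity, the memcpy() above is byte-for-byte equivalent to the more explicit form below (a sketch only; be16_to_cpu() undoes the network-order representation that put_unaligned_be16() then re-creates in the metadata):

	put_unaligned_be16(be16_to_cpu(skb->vlan_proto), data);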
227 data -= NFP_NET_META_IPSEC_SIZE; in nfp_nfdk_prep_tx_meta()
229 data -= NFP_NET_META_IPSEC_SIZE; in nfp_nfdk_prep_tx_meta()
231 data -= NFP_NET_META_IPSEC_SIZE; in nfp_nfdk_prep_tx_meta()
232 put_unaligned_be32(offload_info.handle - 1, data); in nfp_nfdk_prep_tx_meta()
240 data -= sizeof(meta_id); in nfp_nfdk_prep_tx_meta()
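/* The fields are written at decreasing addresses, so in memory (low to high)
 * the prepend reads: meta_id word, then the IPsec / VLAN / port id fields in
 * the reverse of the order they were emitted above, then the packet data.
 */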
247 * nfp_nfdk_tx() - Main transmit entry point
269 u64 metadata; in nfp_nfdk_tx() local
271 dp = &nn->dp; in nfp_nfdk_tx()
273 tx_ring = &dp->tx_rings[qidx]; in nfp_nfdk_tx()
274 r_vec = tx_ring->r_vec; in nfp_nfdk_tx()
275 nd_q = netdev_get_tx_queue(dp->netdev, qidx); in nfp_nfdk_tx()
280 qidx, tx_ring->wr_p, tx_ring->rd_p); in nfp_nfdk_tx()
283 u64_stats_update_begin(&r_vec->tx_sync); in nfp_nfdk_tx()
284 r_vec->tx_busy++; in nfp_nfdk_tx()
285 u64_stats_update_end(&r_vec->tx_sync); in nfp_nfdk_tx()
289 metadata = nfp_nfdk_prep_tx_meta(dp, nn->app, skb, &ipsec); in nfp_nfdk_tx()
290 if (unlikely((int)metadata < 0)) in nfp_nfdk_tx()
299 nr_frags = skb_shinfo(skb)->nr_frags; in nfp_nfdk_tx()
301 wr_idx = D_IDX(tx_ring, tx_ring->wr_p); in nfp_nfdk_tx()
302 txd = &tx_ring->ktxds[wr_idx]; in nfp_nfdk_tx()
303 txbuf = &tx_ring->ktxbufs[wr_idx]; in nfp_nfdk_tx()
313 dma_addr = dma_map_single(dp->dev, skb->data, dma_len, DMA_TO_DEVICE); in nfp_nfdk_tx()
314 if (dma_mapping_error(dp->dev, dma_addr)) in nfp_nfdk_tx()
317 txbuf->skb = skb; in nfp_nfdk_tx()
320 txbuf->dma_addr = dma_addr; in nfp_nfdk_tx()
324 dma_len -= 1; in nfp_nfdk_tx()
337 txd->dma_len_type = cpu_to_le16(dlen_type); in nfp_nfdk_tx()
347 dma_len -= tmp_dlen; in nfp_nfdk_tx()
354 frag = skb_shinfo(skb)->frags; in nfp_nfdk_tx()
359 dma_len -= 1; in nfp_nfdk_tx()
362 txd->dma_len_type = cpu_to_le16(dlen_type); in nfp_nfdk_tx()
365 dma_len -= dlen_type; in nfp_nfdk_tx()
374 dma_addr = skb_frag_dma_map(dp->dev, frag, 0, dma_len, in nfp_nfdk_tx()
376 if (dma_mapping_error(dp->dev, dma_addr)) in nfp_nfdk_tx()
379 txbuf->dma_addr = dma_addr; in nfp_nfdk_tx()
385 (txd - 1)->dma_len_type = cpu_to_le16(dlen_type | NFDK_DESC_TX_EOP); in nfp_nfdk_tx()
388 metadata = nfp_nfdk_ipsec_tx(metadata, skb); in nfp_nfdk_tx()
391 real_len = skb->len; in nfp_nfdk_tx()
392 /* Metadata desc */ in nfp_nfdk_tx()
394 metadata = nfp_nfdk_tx_csum(dp, r_vec, 1, skb, metadata); in nfp_nfdk_tx()
395 txd->raw = cpu_to_le64(metadata); in nfp_nfdk_tx()
398 /* lso desc should be placed after metadata desc */ in nfp_nfdk_tx()
399 (txd + 1)->raw = nfp_nfdk_tx_tso(r_vec, txbuf, skb); in nfp_nfdk_tx()
400 real_len = txbuf->real_len; in nfp_nfdk_tx()
401 /* Metadata desc */ in nfp_nfdk_tx()
403 metadata = nfp_nfdk_tx_csum(dp, r_vec, txbuf->pkt_cnt, skb, metadata); in nfp_nfdk_tx()
404 txd->raw = cpu_to_le64(metadata); in nfp_nfdk_tx()
409 cnt = txd - tx_ring->ktxds - wr_idx; in nfp_nfdk_tx()
411 round_down(wr_idx + cnt - 1, NFDK_TX_DESC_BLOCK_CNT))) in nfp_nfdk_tx()
416 tx_ring->wr_p += cnt; in nfp_nfdk_tx()
417 if (tx_ring->wr_p % NFDK_TX_DESC_BLOCK_CNT) in nfp_nfdk_tx()
418 tx_ring->data_pending += skb->len; in nfp_nfdk_tx()
420 tx_ring->data_pending = 0; in nfp_nfdk_tx()
425 tx_ring->wr_ptr_add += cnt; in nfp_nfdk_tx()
435 txbuf--; in nfp_nfdk_tx()
437 /* txbuf points to the next-to-use buffer */ in nfp_nfdk_tx()
440 txbuf = &tx_ring->ktxbufs[wr_idx + 1]; in nfp_nfdk_tx()
442 dma_unmap_single(dp->dev, txbuf->dma_addr, in nfp_nfdk_tx()
444 txbuf->raw = 0; in nfp_nfdk_tx()
447 frag = skb_shinfo(skb)->frags; in nfp_nfdk_tx()
449 dma_unmap_page(dp->dev, txbuf->dma_addr, in nfp_nfdk_tx()
451 txbuf->raw = 0; in nfp_nfdk_tx()
459 u64_stats_update_begin(&r_vec->tx_sync); in nfp_nfdk_tx()
460 r_vec->tx_errors++; in nfp_nfdk_tx()
461 u64_stats_update_end(&r_vec->tx_sync); in nfp_nfdk_tx()
467 * nfp_nfdk_tx_complete() - Handle completed TX packets in nfp_nfdk_tx_complete()
473 struct nfp_net_r_vector *r_vec = tx_ring->r_vec; in nfp_nfdk_tx_complete()
474 struct nfp_net_dp *dp = &r_vec->nfp_net->dp; in nfp_nfdk_tx_complete()
477 struct device *dev = dp->dev; in nfp_nfdk_tx_complete()
482 rd_p = tx_ring->rd_p; in nfp_nfdk_tx_complete()
483 if (tx_ring->wr_p == rd_p) in nfp_nfdk_tx_complete()
489 if (qcp_rd_p == tx_ring->qcp_rd_p) in nfp_nfdk_tx_complete()
492 todo = D_IDX(tx_ring, qcp_rd_p - tx_ring->qcp_rd_p); in nfp_nfdk_tx_complete()
493 ktxbufs = tx_ring->ktxbufs; in nfp_nfdk_tx_complete()
502 skb = txbuf->skb; in nfp_nfdk_tx_complete()
514 dma_unmap_single(dev, txbuf->dma_addr, size, DMA_TO_DEVICE); in nfp_nfdk_tx_complete()
518 frag = skb_shinfo(skb)->frags; in nfp_nfdk_tx_complete()
519 fend = frag + skb_shinfo(skb)->nr_frags; in nfp_nfdk_tx_complete()
524 dma_unmap_page(dev, txbuf->dma_addr, in nfp_nfdk_tx_complete()
530 done_bytes += skb->len; in nfp_nfdk_tx_complete()
533 done_bytes += txbuf->real_len; in nfp_nfdk_tx_complete()
534 done_pkts += txbuf->pkt_cnt; in nfp_nfdk_tx_complete()
541 todo -= n_descs; in nfp_nfdk_tx_complete()
544 tx_ring->rd_p = rd_p; in nfp_nfdk_tx_complete()
545 tx_ring->qcp_rd_p = qcp_rd_p; in nfp_nfdk_tx_complete()
547 u64_stats_update_begin(&r_vec->tx_sync); in nfp_nfdk_tx_complete()
548 r_vec->tx_bytes += done_bytes; in nfp_nfdk_tx_complete()
549 r_vec->tx_pkts += done_pkts; in nfp_nfdk_tx_complete()
550 u64_stats_update_end(&r_vec->tx_sync); in nfp_nfdk_tx_complete()
552 if (!dp->netdev) in nfp_nfdk_tx_complete()
555 nd_q = netdev_get_tx_queue(dp->netdev, tx_ring->idx); in nfp_nfdk_tx_complete()
558 /* Make sure TX thread will see updated tx_ring->rd_p */ in nfp_nfdk_tx_complete()
565 WARN_ONCE(tx_ring->wr_p - tx_ring->rd_p > tx_ring->cnt, in nfp_nfdk_tx_complete()
567 tx_ring->rd_p, tx_ring->wr_p, tx_ring->cnt); in nfp_nfdk_tx_complete()
576 if (!dp->xdp_prog) { in nfp_nfdk_napi_alloc_one()
577 frag = napi_alloc_frag(dp->fl_bufsz); in nfp_nfdk_napi_alloc_one()
590 if (dma_mapping_error(dp->dev, *dma_addr)) { in nfp_nfdk_napi_alloc_one()
591 nfp_net_free_frag(frag, dp->xdp_prog); in nfp_nfdk_napi_alloc_one()
592 nn_dp_warn(dp, "Failed to map DMA RX buffer\n"); in nfp_nfdk_napi_alloc_one()
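/* With an XDP program attached, RX buffers are sized as whole pages (see
 * true_bufsz in nfp_nfdk_rx() below), so the page-frag allocator above is
 * only used in the non-XDP case.
 */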
600 * nfp_nfdk_rx_give_one() - Put mapped buffer on the software and hardware rings in nfp_nfdk_rx_give_one()
602 * @rx_ring: RX ring structure
613 wr_idx = D_IDX(rx_ring, rx_ring->wr_p); in nfp_nfdk_rx_give_one()
618 rx_ring->rxbufs[wr_idx].frag = frag; in nfp_nfdk_rx_give_one()
619 rx_ring->rxbufs[wr_idx].dma_addr = dma_addr; in nfp_nfdk_rx_give_one()
622 rx_ring->rxds[wr_idx].fld.reserved = 0; in nfp_nfdk_rx_give_one()
623 rx_ring->rxds[wr_idx].fld.meta_len_dd = 0; in nfp_nfdk_rx_give_one()
624 nfp_desc_set_dma_addr_48b(&rx_ring->rxds[wr_idx].fld, in nfp_nfdk_rx_give_one()
625 dma_addr + dp->rx_dma_off); in nfp_nfdk_rx_give_one()
627 rx_ring->wr_p++; in nfp_nfdk_rx_give_one()
628 if (!(rx_ring->wr_p % NFP_NET_FL_BATCH)) { in nfp_nfdk_rx_give_one()
633 nfp_qcp_wr_ptr_add(rx_ring->qcp_fl, NFP_NET_FL_BATCH); in nfp_nfdk_rx_give_one()
638 * nfp_nfdk_rx_ring_fill_freelist() - Give buffers from the ring to FW
640 * @rx_ring: RX ring to fill
647 for (i = 0; i < rx_ring->cnt - 1; i++) in nfp_nfdk_rx_ring_fill_freelist()
648 nfp_nfdk_rx_give_one(dp, rx_ring, rx_ring->rxbufs[i].frag, in nfp_nfdk_rx_ring_fill_freelist()
649 rx_ring->rxbufs[i].dma_addr); in nfp_nfdk_rx_ring_fill_freelist()
653 * nfp_nfdk_rx_csum_has_errors() - check whether the RX descriptor has any csum errors in nfp_nfdk_rx_csum_has_errors()
654 * @flags: RX descriptor flags field in CPU byte order
667 * nfp_nfdk_rx_csum() - set SKB checksum field based on RX descriptor flags
669 * @r_vec: per-ring structure
670 * @rxd: Pointer to RX descriptor
671 * @meta: Parsed metadata prepend
681 if (!(dp->netdev->features & NETIF_F_RXCSUM)) in nfp_nfdk_rx_csum()
684 if (meta->csum_type) { in nfp_nfdk_rx_csum()
685 skb->ip_summed = meta->csum_type; in nfp_nfdk_rx_csum()
686 skb->csum = meta->csum; in nfp_nfdk_rx_csum()
687 u64_stats_update_begin(&r_vec->rx_sync); in nfp_nfdk_rx_csum()
688 r_vec->hw_csum_rx_complete++; in nfp_nfdk_rx_csum()
689 u64_stats_update_end(&r_vec->rx_sync); in nfp_nfdk_rx_csum()
693 if (nfp_nfdk_rx_csum_has_errors(le16_to_cpu(rxd->rxd.flags))) { in nfp_nfdk_rx_csum()
694 u64_stats_update_begin(&r_vec->rx_sync); in nfp_nfdk_rx_csum()
695 r_vec->hw_csum_rx_error++; in nfp_nfdk_rx_csum()
696 u64_stats_update_end(&r_vec->rx_sync); in nfp_nfdk_rx_csum()
704 if (rxd->rxd.flags & PCIE_DESC_RX_TCP_CSUM_OK || in nfp_nfdk_rx_csum()
705 rxd->rxd.flags & PCIE_DESC_RX_UDP_CSUM_OK) { in nfp_nfdk_rx_csum()
707 u64_stats_update_begin(&r_vec->rx_sync); in nfp_nfdk_rx_csum()
708 r_vec->hw_csum_rx_ok++; in nfp_nfdk_rx_csum()
709 u64_stats_update_end(&r_vec->rx_sync); in nfp_nfdk_rx_csum()
712 if (rxd->rxd.flags & PCIE_DESC_RX_I_TCP_CSUM_OK || in nfp_nfdk_rx_csum()
713 rxd->rxd.flags & PCIE_DESC_RX_I_UDP_CSUM_OK) { in nfp_nfdk_rx_csum()
715 u64_stats_update_begin(&r_vec->rx_sync); in nfp_nfdk_rx_csum()
716 r_vec->hw_csum_rx_inner_ok++; in nfp_nfdk_rx_csum()
717 u64_stats_update_end(&r_vec->rx_sync); in nfp_nfdk_rx_csum()
725 if (!(netdev->features & NETIF_F_RXHASH)) in nfp_nfdk_set_hash()
732 meta->hash_type = PKT_HASH_TYPE_L3; in nfp_nfdk_set_hash()
735 meta->hash_type = PKT_HASH_TYPE_L4; in nfp_nfdk_set_hash()
739 meta->hash = get_unaligned_be32(hash); in nfp_nfdk_set_hash()
761 meta->mark = get_unaligned_be32(data); in nfp_nfdk_parse_meta()
767 meta->vlan.stripped = true; in nfp_nfdk_parse_meta()
768 meta->vlan.tpid = FIELD_GET(NFP_NET_META_VLAN_TPID_MASK, in nfp_nfdk_parse_meta()
770 meta->vlan.tci = FIELD_GET(NFP_NET_META_VLAN_TCI_MASK, in nfp_nfdk_parse_meta()
776 meta->portid = get_unaligned_be32(data); in nfp_nfdk_parse_meta()
780 meta->csum_type = CHECKSUM_COMPLETE; in nfp_nfdk_parse_meta()
781 meta->csum = in nfp_nfdk_parse_meta()
796 meta->ipsec_saidx = get_unaligned_be32(data) + 1; in nfp_nfdk_parse_meta()
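The branches above are pieces of a walk over the RX metadata prepend. A minimal sketch of the surrounding loop, assuming the metadata header layout and field macros (NFP_NET_META_FIELD_MASK, NFP_NET_META_FIELD_SIZE) used elsewhere in the driver, i.e. a leading big-endian word packing one 4-bit field type per nibble:

	u32 meta_info;

	meta_info = get_unaligned_be32(data);
	data += 4;
	while (meta_info) {
		switch (meta_info & NFP_NET_META_FIELD_MASK) {
		case NFP_NET_META_MARK:
			meta->mark = get_unaligned_be32(data);
			data += 4;
			break;
		/* ... the other field types matched above ... */
		default:
			return NULL;	/* unknown field type */
		}
		meta_info >>= NFP_NET_META_FIELD_SIZE;
	}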
815 u64_stats_update_begin(&r_vec->rx_sync); in nfp_nfdk_rx_drop()
816 r_vec->rx_drops++; in nfp_nfdk_rx_drop()
821 r_vec->rx_replace_buf_alloc_fail++; in nfp_nfdk_rx_drop()
822 u64_stats_update_end(&r_vec->rx_sync); in nfp_nfdk_rx_drop()
827 if (skb && rxbuf && skb->head == rxbuf->frag) in nfp_nfdk_rx_drop()
828 page_ref_inc(virt_to_head_page(rxbuf->frag)); in nfp_nfdk_rx_drop()
830 nfp_nfdk_rx_give_one(dp, rx_ring, rxbuf->frag, rxbuf->dma_addr); in nfp_nfdk_rx_drop()
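/* The buffer is handed straight back to the free list, so if an skb was
 * already built on top of the same frag (skb->head == rxbuf->frag above),
 * an extra page reference is taken first so that freeing the skb does not
 * free the recycled buffer.
 */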
837 struct nfp_net_r_vector *r_vec = tx_ring->r_vec; in nfp_nfdk_xdp_complete()
838 struct nfp_net_dp *dp = &r_vec->nfp_net->dp; in nfp_nfdk_xdp_complete()
846 if (qcp_rd_p == tx_ring->qcp_rd_p) in nfp_nfdk_xdp_complete()
849 todo = D_IDX(tx_ring, qcp_rd_p - tx_ring->qcp_rd_p); in nfp_nfdk_xdp_complete()
854 rx_ring = r_vec->rx_ring; in nfp_nfdk_xdp_complete()
856 int idx = D_IDX(tx_ring, tx_ring->rd_p + done); in nfp_nfdk_xdp_complete()
860 txbuf = &tx_ring->ktxbufs[idx]; in nfp_nfdk_xdp_complete()
861 if (!txbuf->raw) in nfp_nfdk_xdp_complete()
864 if (NFDK_TX_BUF_INFO(txbuf->val) != NFDK_TX_BUF_INFO_SOP) { in nfp_nfdk_xdp_complete()
865 WARN_ONCE(1, "Unexpected TX buffer in XDP TX ring\n"); in nfp_nfdk_xdp_complete()
879 u64_stats_update_begin(&r_vec->tx_sync); in nfp_nfdk_xdp_complete()
881 r_vec->tx_pkts++; in nfp_nfdk_xdp_complete()
882 u64_stats_update_end(&r_vec->tx_sync); in nfp_nfdk_xdp_complete()
884 todo -= step; in nfp_nfdk_xdp_complete()
888 tx_ring->qcp_rd_p = D_IDX(tx_ring, tx_ring->qcp_rd_p + done); in nfp_nfdk_xdp_complete()
889 tx_ring->rd_p += done; in nfp_nfdk_xdp_complete()
891 WARN_ONCE(tx_ring->wr_p - tx_ring->rd_p > tx_ring->cnt, in nfp_nfdk_xdp_complete()
892 "XDP TX ring corruption rd_p=%u wr_p=%u cnt=%u\n", in nfp_nfdk_xdp_complete()
893 tx_ring->rd_p, tx_ring->wr_p, tx_ring->cnt); in nfp_nfdk_xdp_complete()
904 unsigned int dma_map_sz = dp->fl_bufsz - NFP_NET_RX_BUF_NON_DATA; in nfp_nfdk_tx_xdp_buf()
927 nfp_nfdk_rx_drop(dp, rx_ring->r_vec, rx_ring, rxbuf, in nfp_nfdk_tx_xdp_buf()
935 if ((round_down(tx_ring->wr_p, NFDK_TX_DESC_BLOCK_CNT) != in nfp_nfdk_tx_xdp_buf()
936 round_down(tx_ring->wr_p + n_descs, NFDK_TX_DESC_BLOCK_CNT)) || in nfp_nfdk_tx_xdp_buf()
937 ((u32)tx_ring->data_pending + pkt_len > in nfp_nfdk_tx_xdp_buf()
939 unsigned int nop_slots = D_BLOCK_CPL(tx_ring->wr_p); in nfp_nfdk_tx_xdp_buf()
941 wr_idx = D_IDX(tx_ring, tx_ring->wr_p); in nfp_nfdk_tx_xdp_buf()
942 txd = &tx_ring->ktxds[wr_idx]; in nfp_nfdk_tx_xdp_buf()
946 tx_ring->data_pending = 0; in nfp_nfdk_tx_xdp_buf()
947 tx_ring->wr_p += nop_slots; in nfp_nfdk_tx_xdp_buf()
948 tx_ring->wr_ptr_add += nop_slots; in nfp_nfdk_tx_xdp_buf()
951 wr_idx = D_IDX(tx_ring, tx_ring->wr_p); in nfp_nfdk_tx_xdp_buf()
953 txbuf = &tx_ring->ktxbufs[wr_idx]; in nfp_nfdk_tx_xdp_buf()
955 txbuf[0].val = (unsigned long)rxbuf->frag | NFDK_TX_BUF_INFO_SOP; in nfp_nfdk_tx_xdp_buf()
956 txbuf[1].dma_addr = rxbuf->dma_addr; in nfp_nfdk_tx_xdp_buf()
959 dma_sync_single_for_device(dp->dev, rxbuf->dma_addr + dma_off, in nfp_nfdk_tx_xdp_buf()
963 txd = &tx_ring->ktxds[wr_idx]; in nfp_nfdk_tx_xdp_buf()
965 dma_addr = rxbuf->dma_addr + dma_off; in nfp_nfdk_tx_xdp_buf()
973 dma_len -= 1; in nfp_nfdk_tx_xdp_buf()
979 txd->dma_len_type = cpu_to_le16(dlen_type); in nfp_nfdk_tx_xdp_buf()
983 dma_len -= tmp_dlen; in nfp_nfdk_tx_xdp_buf()
988 dma_len -= 1; in nfp_nfdk_tx_xdp_buf()
990 txd->dma_len_type = cpu_to_le16(dlen_type); in nfp_nfdk_tx_xdp_buf()
994 dma_len -= dlen_type; in nfp_nfdk_tx_xdp_buf()
999 (txd - 1)->dma_len_type = cpu_to_le16(dlen_type | NFDK_DESC_TX_EOP); in nfp_nfdk_tx_xdp_buf()
1001 /* Metadata desc */ in nfp_nfdk_tx_xdp_buf()
1002 txd->raw = 0; in nfp_nfdk_tx_xdp_buf()
1005 cnt = txd - tx_ring->ktxds - wr_idx; in nfp_nfdk_tx_xdp_buf()
1006 tx_ring->wr_p += cnt; in nfp_nfdk_tx_xdp_buf()
1007 if (tx_ring->wr_p % NFDK_TX_DESC_BLOCK_CNT) in nfp_nfdk_tx_xdp_buf()
1008 tx_ring->data_pending += pkt_len; in nfp_nfdk_tx_xdp_buf()
1010 tx_ring->data_pending = 0; in nfp_nfdk_tx_xdp_buf()
1012 tx_ring->wr_ptr_add += cnt; in nfp_nfdk_tx_xdp_buf()
1017 * nfp_nfdk_rx() - receive up to @budget packets on @rx_ring
1018 * @rx_ring: RX ring to receive from
1029 struct nfp_net_r_vector *r_vec = rx_ring->r_vec; in nfp_nfdk_rx()
1030 struct nfp_net_dp *dp = &r_vec->nfp_net->dp; in nfp_nfdk_rx()
1037 struct xdp_buff xdp; in nfp_nfdk_rx() local
1040 xdp_prog = READ_ONCE(dp->xdp_prog); in nfp_nfdk_rx()
1041 true_bufsz = xdp_prog ? PAGE_SIZE : dp->fl_bufsz; in nfp_nfdk_rx()
1042 xdp_init_buff(&xdp, PAGE_SIZE - NFP_NET_RX_BUF_HEADROOM, in nfp_nfdk_rx()
1043 &rx_ring->xdp_rxq); in nfp_nfdk_rx()
1044 tx_ring = r_vec->xdp_ring; in nfp_nfdk_rx()
1057 idx = D_IDX(rx_ring, rx_ring->rd_p); in nfp_nfdk_rx()
1059 rxd = &rx_ring->rxds[idx]; in nfp_nfdk_rx()
1060 if (!(rxd->rxd.meta_len_dd & PCIE_DESC_RX_DD)) in nfp_nfdk_rx()
1070 rx_ring->rd_p++; in nfp_nfdk_rx()
1073 rxbuf = &rx_ring->rxbufs[idx]; in nfp_nfdk_rx()
1075 * <-- [rx_offset] --> in nfp_nfdk_rx()
1076 * --------------------------------------------------------- in nfp_nfdk_rx()
1077 * | [XX] | metadata | packet | XXXX | in nfp_nfdk_rx()
1078 * --------------------------------------------------------- in nfp_nfdk_rx()
1079 * <---------------- data_len ---------------> in nfp_nfdk_rx()
1083 * (_RX_OFFSET_DYNAMIC) metadata starts at the beginning of the in nfp_nfdk_rx()
1086 meta_len = rxd->rxd.meta_len_dd & PCIE_DESC_RX_META_LEN_MASK; in nfp_nfdk_rx()
1087 data_len = le16_to_cpu(rxd->rxd.data_len); in nfp_nfdk_rx()
1088 pkt_len = data_len - meta_len; in nfp_nfdk_rx()
1090 pkt_off = NFP_NET_RX_BUF_HEADROOM + dp->rx_dma_off; in nfp_nfdk_rx()
1091 if (dp->rx_offset == NFP_NET_CFG_RX_OFFSET_DYNAMIC) in nfp_nfdk_rx()
1094 pkt_off += dp->rx_offset; in nfp_nfdk_rx()
1095 meta_off = pkt_off - meta_len; in nfp_nfdk_rx()
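A worked example with hypothetical values: rx_dma_off == 0, a fixed rx_offset of 32 and meta_len == 8 give pkt_off = NFP_NET_RX_BUF_HEADROOM + 32 and meta_off = NFP_NET_RX_BUF_HEADROOM + 24, i.e. the 8 bytes of metadata end exactly where the packet begins, matching the layout in the diagram above.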
1098 u64_stats_update_begin(&r_vec->rx_sync); in nfp_nfdk_rx()
1099 r_vec->rx_pkts++; in nfp_nfdk_rx()
1100 r_vec->rx_bytes += pkt_len; in nfp_nfdk_rx()
1101 u64_stats_update_end(&r_vec->rx_sync); in nfp_nfdk_rx()
1104 (dp->rx_offset && meta_len > dp->rx_offset))) { in nfp_nfdk_rx()
1105 nn_dp_warn(dp, "oversized RX packet metadata %u\n", in nfp_nfdk_rx()
1111 nfp_net_dma_sync_cpu_rx(dp, rxbuf->dma_addr + meta_off, in nfp_nfdk_rx()
1115 if (unlikely(nfp_nfdk_parse_meta(dp->netdev, &meta, in nfp_nfdk_rx()
1116 rxbuf->frag + meta_off, in nfp_nfdk_rx()
1117 rxbuf->frag + pkt_off, in nfp_nfdk_rx()
1119 nn_dp_warn(dp, "invalid RX packet metadata\n"); in nfp_nfdk_rx()
1127 void *orig_data = rxbuf->frag + pkt_off; in nfp_nfdk_rx()
1131 xdp_prepare_buff(&xdp, in nfp_nfdk_rx()
1132 rxbuf->frag + NFP_NET_RX_BUF_HEADROOM, in nfp_nfdk_rx()
1133 pkt_off - NFP_NET_RX_BUF_HEADROOM, in nfp_nfdk_rx()
1136 act = bpf_prog_run_xdp(xdp_prog, &xdp); in nfp_nfdk_rx()
1138 pkt_len = xdp.data_end - xdp.data; in nfp_nfdk_rx()
1139 pkt_off += xdp.data - orig_data; in nfp_nfdk_rx()
1143 meta_len_xdp = xdp.data - xdp.data_meta; in nfp_nfdk_rx()
1146 dma_off = pkt_off - NFP_NET_RX_BUF_HEADROOM; in nfp_nfdk_rx()
1153 trace_xdp_exception(dp->netdev, in nfp_nfdk_rx()
1157 bpf_warn_invalid_xdp_action(dp->netdev, xdp_prog, act); in nfp_nfdk_rx()
1160 trace_xdp_exception(dp->netdev, xdp_prog, act); in nfp_nfdk_rx()
1163 nfp_nfdk_rx_give_one(dp, rx_ring, rxbuf->frag, in nfp_nfdk_rx()
1164 rxbuf->dma_addr); in nfp_nfdk_rx()
1170 netdev = dp->netdev; in nfp_nfdk_rx()
1172 struct nfp_net *nn = netdev_priv(dp->netdev); in nfp_nfdk_rx()
1174 nfp_app_ctrl_rx_raw(nn->app, rxbuf->frag + pkt_off, in nfp_nfdk_rx()
1176 nfp_nfdk_rx_give_one(dp, rx_ring, rxbuf->frag, in nfp_nfdk_rx()
1177 rxbuf->dma_addr); in nfp_nfdk_rx()
1182 nn = netdev_priv(dp->netdev); in nfp_nfdk_rx()
1183 netdev = nfp_app_dev_get(nn->app, meta.portid, in nfp_nfdk_rx()
1195 skb = napi_build_skb(rxbuf->frag, true_bufsz); in nfp_nfdk_rx()
1206 nfp_net_dma_unmap_rx(dp, rxbuf->dma_addr); in nfp_nfdk_rx()
1213 skb->mark = meta.mark; in nfp_nfdk_rx()
1216 skb_record_rx_queue(skb, rx_ring->idx); in nfp_nfdk_rx()
1217 skb->protocol = eth_type_trans(skb, netdev); in nfp_nfdk_rx()
1237 napi_gro_receive(&rx_ring->r_vec->napi, skb); in nfp_nfdk_rx()
1239 skb->dev = netdev; in nfp_nfdk_rx()
1247 if (tx_ring->wr_ptr_add) in nfp_nfdk_rx()
1249 else if (unlikely(tx_ring->wr_p != tx_ring->rd_p) && in nfp_nfdk_rx()
1259 * nfp_nfdk_poll() - napi poll function
1271 if (r_vec->tx_ring) in nfp_nfdk_poll()
1272 nfp_nfdk_tx_complete(r_vec->tx_ring, budget); in nfp_nfdk_poll()
1273 if (r_vec->rx_ring) in nfp_nfdk_poll()
1274 pkts_polled = nfp_nfdk_rx(r_vec->rx_ring, budget); in nfp_nfdk_poll()
1278 nfp_net_irq_unmask(r_vec->nfp_net, r_vec->irq_entry); in nfp_nfdk_poll()
1280 if (r_vec->nfp_net->rx_coalesce_adapt_on && r_vec->rx_ring) { in nfp_nfdk_poll()
1286 start = u64_stats_fetch_begin(&r_vec->rx_sync); in nfp_nfdk_poll()
1287 pkts = r_vec->rx_pkts; in nfp_nfdk_poll()
1288 bytes = r_vec->rx_bytes; in nfp_nfdk_poll()
1289 } while (u64_stats_fetch_retry(&r_vec->rx_sync, start)); in nfp_nfdk_poll()
1291 dim_update_sample(r_vec->event_ctr, pkts, bytes, &dim_sample); in nfp_nfdk_poll()
1292 net_dim(&r_vec->rx_dim, dim_sample); in nfp_nfdk_poll()
1295 if (r_vec->nfp_net->tx_coalesce_adapt_on && r_vec->tx_ring) { in nfp_nfdk_poll()
1301 start = u64_stats_fetch_begin(&r_vec->tx_sync); in nfp_nfdk_poll()
1302 pkts = r_vec->tx_pkts; in nfp_nfdk_poll()
1303 bytes = r_vec->tx_bytes; in nfp_nfdk_poll()
1304 } while (u64_stats_fetch_retry(&r_vec->tx_sync, start)); in nfp_nfdk_poll()
1306 dim_update_sample(r_vec->event_ctr, pkts, bytes, &dim_sample); in nfp_nfdk_poll()
1307 net_dim(&r_vec->tx_dim, dim_sample); in nfp_nfdk_poll()
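/* The fetch_begin()/fetch_retry() loops above pair with the
 * u64_stats_update_begin()/end() sections used on the datapath, so the
 * packet/byte counters are read as a consistent snapshot; both samples are
 * then fed through dim_update_sample()/net_dim(), the kernel's dynamic
 * interrupt moderation library, which adapts the RX and TX coalescing
 * settings to the observed load.
 */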
1327 u64 metadata = 0; in nfp_nfdk_ctrl_tx_one() local
1330 dp = &r_vec->nfp_net->dp; in nfp_nfdk_ctrl_tx_one()
1331 tx_ring = r_vec->tx_ring; in nfp_nfdk_ctrl_tx_one()
1333 if (WARN_ON_ONCE(skb_shinfo(skb)->nr_frags)) { in nfp_nfdk_ctrl_tx_one()
1340 u64_stats_update_begin(&r_vec->tx_sync); in nfp_nfdk_ctrl_tx_one()
1341 r_vec->tx_busy++; in nfp_nfdk_ctrl_tx_one()
1342 u64_stats_update_end(&r_vec->tx_sync); in nfp_nfdk_ctrl_tx_one()
1344 __skb_queue_tail(&r_vec->queue, skb); in nfp_nfdk_ctrl_tx_one()
1346 __skb_queue_head(&r_vec->queue, skb); in nfp_nfdk_ctrl_tx_one()
1350 if (nfp_app_ctrl_has_meta(nn->app)) { in nfp_nfdk_ctrl_tx_one()
1355 metadata = NFDK_DESC_TX_CHAIN_META; in nfp_nfdk_ctrl_tx_one()
1367 wr_idx = D_IDX(tx_ring, tx_ring->wr_p); in nfp_nfdk_ctrl_tx_one()
1368 txd = &tx_ring->ktxds[wr_idx]; in nfp_nfdk_ctrl_tx_one()
1369 txbuf = &tx_ring->ktxbufs[wr_idx]; in nfp_nfdk_ctrl_tx_one()
1377 dma_addr = dma_map_single(dp->dev, skb->data, dma_len, DMA_TO_DEVICE); in nfp_nfdk_ctrl_tx_one()
1378 if (dma_mapping_error(dp->dev, dma_addr)) in nfp_nfdk_ctrl_tx_one()
1381 txbuf->skb = skb; in nfp_nfdk_ctrl_tx_one()
1384 txbuf->dma_addr = dma_addr; in nfp_nfdk_ctrl_tx_one()
1387 dma_len -= 1; in nfp_nfdk_ctrl_tx_one()
1393 txd->dma_len_type = cpu_to_le16(dlen_type); in nfp_nfdk_ctrl_tx_one()
1397 dma_len -= tmp_dlen; in nfp_nfdk_ctrl_tx_one()
1402 dma_len -= 1; in nfp_nfdk_ctrl_tx_one()
1404 txd->dma_len_type = cpu_to_le16(dlen_type); in nfp_nfdk_ctrl_tx_one()
1408 dma_len -= dlen_type; in nfp_nfdk_ctrl_tx_one()
1413 (txd - 1)->dma_len_type = cpu_to_le16(dlen_type | NFDK_DESC_TX_EOP); in nfp_nfdk_ctrl_tx_one()
1415 /* Metadata desc */ in nfp_nfdk_ctrl_tx_one()
1416 txd->raw = cpu_to_le64(metadata); in nfp_nfdk_ctrl_tx_one()
1419 cnt = txd - tx_ring->ktxds - wr_idx; in nfp_nfdk_ctrl_tx_one()
1421 round_down(wr_idx + cnt - 1, NFDK_TX_DESC_BLOCK_CNT))) in nfp_nfdk_ctrl_tx_one()
1424 tx_ring->wr_p += cnt; in nfp_nfdk_ctrl_tx_one()
1425 if (tx_ring->wr_p % NFDK_TX_DESC_BLOCK_CNT) in nfp_nfdk_ctrl_tx_one()
1426 tx_ring->data_pending += skb->len; in nfp_nfdk_ctrl_tx_one()
1428 tx_ring->data_pending = 0; in nfp_nfdk_ctrl_tx_one()
1430 tx_ring->wr_ptr_add += cnt; in nfp_nfdk_ctrl_tx_one()
1438 txbuf--; in nfp_nfdk_ctrl_tx_one()
1439 dma_unmap_single(dp->dev, txbuf->dma_addr, in nfp_nfdk_ctrl_tx_one()
1441 txbuf->raw = 0; in nfp_nfdk_ctrl_tx_one()
1445 u64_stats_update_begin(&r_vec->tx_sync); in nfp_nfdk_ctrl_tx_one()
1446 r_vec->tx_errors++; in nfp_nfdk_ctrl_tx_one()
1447 u64_stats_update_end(&r_vec->tx_sync); in nfp_nfdk_ctrl_tx_one()
1456 while ((skb = __skb_dequeue(&r_vec->queue))) in __nfp_ctrl_tx_queued()
1457 if (nfp_nfdk_ctrl_tx_one(r_vec->nfp_net, r_vec, skb, true)) in __nfp_ctrl_tx_queued()
1466 if (!nfp_app_ctrl_has_meta(nn->app)) in nfp_ctrl_meta_ok()
1491 idx = D_IDX(rx_ring, rx_ring->rd_p); in nfp_ctrl_rx_one()
1493 rxd = &rx_ring->rxds[idx]; in nfp_ctrl_rx_one()
1494 if (!(rxd->rxd.meta_len_dd & PCIE_DESC_RX_DD)) in nfp_ctrl_rx_one()
1502 rx_ring->rd_p++; in nfp_ctrl_rx_one()
1504 rxbuf = &rx_ring->rxbufs[idx]; in nfp_ctrl_rx_one()
1505 meta_len = rxd->rxd.meta_len_dd & PCIE_DESC_RX_META_LEN_MASK; in nfp_ctrl_rx_one()
1506 data_len = le16_to_cpu(rxd->rxd.data_len); in nfp_ctrl_rx_one()
1507 pkt_len = data_len - meta_len; in nfp_ctrl_rx_one()
1509 pkt_off = NFP_NET_RX_BUF_HEADROOM + dp->rx_dma_off; in nfp_ctrl_rx_one()
1510 if (dp->rx_offset == NFP_NET_CFG_RX_OFFSET_DYNAMIC) in nfp_ctrl_rx_one()
1513 pkt_off += dp->rx_offset; in nfp_ctrl_rx_one()
1514 meta_off = pkt_off - meta_len; in nfp_ctrl_rx_one()
1517 u64_stats_update_begin(&r_vec->rx_sync); in nfp_ctrl_rx_one()
1518 r_vec->rx_pkts++; in nfp_ctrl_rx_one()
1519 r_vec->rx_bytes += pkt_len; in nfp_ctrl_rx_one()
1520 u64_stats_update_end(&r_vec->rx_sync); in nfp_ctrl_rx_one()
1522 nfp_net_dma_sync_cpu_rx(dp, rxbuf->dma_addr + meta_off, data_len); in nfp_ctrl_rx_one()
1524 if (unlikely(!nfp_ctrl_meta_ok(nn, rxbuf->frag + meta_off, meta_len))) { in nfp_ctrl_rx_one()
1525 nn_dp_warn(dp, "incorrect metadata for ctrl packet (%d)\n", in nfp_ctrl_rx_one()
1531 skb = build_skb(rxbuf->frag, dp->fl_bufsz); in nfp_ctrl_rx_one()
1542 nfp_net_dma_unmap_rx(dp, rxbuf->dma_addr); in nfp_ctrl_rx_one()
1549 nfp_app_ctrl_rx(nn->app, skb); in nfp_ctrl_rx_one()
1556 struct nfp_net_rx_ring *rx_ring = r_vec->rx_ring; in nfp_ctrl_rx()
1557 struct nfp_net *nn = r_vec->nfp_net; in nfp_ctrl_rx()
1558 struct nfp_net_dp *dp = &nn->dp; in nfp_ctrl_rx()
1561 while (nfp_ctrl_rx_one(nn, dp, r_vec, rx_ring) && budget--) in nfp_ctrl_rx()
1571 spin_lock(&r_vec->lock); in nfp_nfdk_ctrl_poll()
1572 nfp_nfdk_tx_complete(r_vec->tx_ring, 0); in nfp_nfdk_ctrl_poll()
1574 spin_unlock(&r_vec->lock); in nfp_nfdk_ctrl_poll()
1577 nfp_net_irq_unmask(r_vec->nfp_net, r_vec->irq_entry); in nfp_nfdk_ctrl_poll()
1579 tasklet_schedule(&r_vec->tasklet); in nfp_nfdk_ctrl_poll()
1580 nn_dp_warn(&r_vec->nfp_net->dp, in nfp_nfdk_ctrl_poll()