Lines matching "xdp-rx-metadata": full-text search hits from the NFP driver's NFD3 AF_XDP data path (drivers/net/ethernet/netronome/nfp/nfd3/xsk.c). The matched lines are grouped by function below; context the search did not capture is marked with "...".

// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)

/* nfp_nfd3_xsk_tx_xdp(): XDP_TX path. The frame is transmitted in place
 * from its AF_XDP (XSK) RX buffer; only a TX descriptor is written.
 */
        struct xsk_buff_pool *pool = r_vec->xsk_pool;
        ...
        xsk_buff_raw_dma_sync_for_device(pool, xrxbuf->dma_addr + pkt_off,
                                         pkt_len);

        wr_idx = D_IDX(tx_ring, tx_ring->wr_p);

        txbuf = &tx_ring->txbufs[wr_idx];
        txbuf->xdp = xrxbuf->xdp;
        txbuf->real_len = pkt_len;
        txbuf->is_xsk_tx = true;        /* recycled on completion, not reported to user space */

        /* Build the NFD3 TX descriptor. */
        txd = &tx_ring->txds[wr_idx];
        txd->offset_eop = NFD3_DESC_TX_EOP;
        txd->dma_len = cpu_to_le16(pkt_len);
        nfp_desc_set_dma_addr_40b(txd, xrxbuf->dma_addr + pkt_off);
        txd->data_len = cpu_to_le16(pkt_len);

        txd->flags = 0;                 /* no checksum offload, no LSO */
        txd->mss = 0;
        txd->lso_hdrlen = 0;

        tx_ring->wr_ptr_add++;
        tx_ring->wr_p++;
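Throughout these functions, wr_p and rd_p are free-running 32-bit counters, and D_IDX() masks them into a power-of-two descriptor ring (in the driver it takes the ring as its first argument). A minimal standalone sketch of that convention; the RING_CNT value and main() harness are illustrative, not driver code:

        #include <assert.h>
        #include <stdint.h>
        #include <stdio.h>

        #define RING_CNT 8u                       /* ring size, must be a power of two */
        #define D_IDX(p)  ((p) & (RING_CNT - 1))  /* free-running pointer -> slot index */

        int main(void)
        {
                uint32_t wr_p = 0, rd_p = 0;

                /* Produce ten entries: the pointer keeps counting up, only the
                 * masked slot index wraps around the ring.
                 */
                for (int i = 0; i < 10; i++) {
                        printf("fill slot %u (wr_p=%u)\n", D_IDX(wr_p), wr_p);
                        wr_p++;
                }

                /* Consume everything: occupancy is wr_p - rd_p, and unsigned
                 * subtraction stays correct even after the counters wrap 2^32.
                 */
                rd_p = wr_p;
                assert(wr_p - rd_p == 0);
                return 0;
        }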
/* nfp_nfd3_xsk_rx_skb(): XDP_PASS path. The frame is copied out of the
 * XSK buffer into a freshly allocated skb, so the buffer can go back to
 * the pool, then handed to the stack via GRO.
 */
        struct nfp_net_r_vector *r_vec = rx_ring->r_vec;
        struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
        ...
        if (likely(!meta->portid)) {
                netdev = dp->netdev;
        } else {
                struct nfp_net *nn = netdev_priv(dp->netdev);

                netdev = nfp_app_dev_get(nn->app, meta->portid, NULL);
                ...
        }

        skb = napi_alloc_skb(&r_vec->napi, pkt_len);
        ...
        skb_put_data(skb, xrxbuf->xdp->data, pkt_len);

        skb->mark = meta->mark;
        skb_set_hash(skb, meta->hash, meta->hash_type);

        skb_record_rx_queue(skb, rx_ring->idx);
        skb->protocol = eth_type_trans(skb, netdev);
        ...
        skb_metadata_set(skb,
                         xrxbuf->xdp->data - xrxbuf->xdp->data_meta);

        napi_gro_receive(&rx_ring->r_vec->napi, skb);
/* nfp_nfd3_xsk_rx(): the AF_XDP RX poll loop. Descriptors are consumed
 * until the budget is spent or the DD ("descriptor done") bit is clear.
 */
        struct nfp_net_r_vector *r_vec = rx_ring->r_vec;
        struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
        ...
        xdp_prog = READ_ONCE(dp->xdp_prog);
        tx_ring = r_vec->xdp_ring;

        while (pkts_polled < budget) {
                ...
                idx = D_IDX(rx_ring, rx_ring->rd_p);

                rxd = &rx_ring->rxds[idx];
                if (!(rxd->rxd.meta_len_dd & PCIE_DESC_RX_DD))
                        break;

                rx_ring->rd_p++;
                ...
                xrxbuf = &rx_ring->xsk_rxbufs[idx];

                /* If starved of RX buffers, "drop" the frame and warn. */
                if (rx_ring->rd_p >= rx_ring->wr_p) {
                        nn_dp_warn(dp, "Starved of RX buffers\n");
                        ...
                }
                ...
                /* Only supporting AF_XDP with dynamic metadata so buffer layout
                 * is always:
                 *
                 *  ---------------------------------------------------------
                 * |  off | metadata  |             packet           | XXXX  |
                 *  ---------------------------------------------------------
                 */
                meta_len = rxd->rxd.meta_len_dd & PCIE_DESC_RX_META_LEN_MASK;
                data_len = le16_to_cpu(rxd->rxd.data_len);
                pkt_len = data_len - meta_len;
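(Aside: the split above is pure pointer math. The NIC prepends meta_len bytes of metadata directly in front of the packet, and data_len covers both. A standalone sketch of the same math; the buffer contents and the 64-byte headroom are made up for illustration:)

        #include <assert.h>
        #include <stdint.h>
        #include <string.h>

        #define HEADROOM 64     /* illustrative stand-in for "off" in the diagram */

        int main(void)
        {
                uint8_t frame[2048];
                unsigned int meta_len = 8, data_len = 8 + 60; /* 60-byte packet */

                /* Hardware-style layout: | off | metadata | packet | unused | */
                uint8_t *meta = frame + HEADROOM;
                uint8_t *data = meta + meta_len;              /* data += meta_len */
                uint8_t *end  = data + (data_len - meta_len); /* data + pkt_len   */

                memset(meta, 0xaa, meta_len);
                memset(data, 0x55, data_len - meta_len);

                /* The parser reads the metadata backwards from 'data', which is
                 * why nfp_nfd3_parse_meta() below is handed data - meta_len.
                 */
                assert(data - meta_len == meta);
                assert((unsigned int)(end - data) == 60);
                return 0;
        }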
                if (unlikely(meta_len > NFP_NET_MAX_PREPEND)) {
                        nn_dp_warn(dp, "Oversized RX packet metadata %u\n",
                                   meta_len);
                        ...
                }

                u64_stats_update_begin(&r_vec->rx_sync);
                r_vec->rx_pkts++;
                r_vec->rx_bytes += pkt_len;
                u64_stats_update_end(&r_vec->rx_sync);

                /* Strip the metadata prepend and sync the frame for the CPU. */
                xrxbuf->xdp->data += meta_len;
                xrxbuf->xdp->data_end = xrxbuf->xdp->data + pkt_len;
                xdp_set_data_meta_invalid(xrxbuf->xdp);
                xsk_buff_dma_sync_for_cpu(xrxbuf->xdp, r_vec->xsk_pool);
                net_prefetch(xrxbuf->xdp->data);

                if (unlikely(nfp_nfd3_parse_meta(dp->netdev, &meta,
                                                 xrxbuf->xdp->data -
                                                 meta_len,
                                                 xrxbuf->xdp->data,
                                                 pkt_len, meta_len))) {
                        nn_dp_warn(dp, "Invalid RX packet metadata\n");
                        ...
                }
                ...
                        struct nfp_net *nn = netdev_priv(dp->netdev);
                        ...
                        /* Control frames go to the app, not to the XDP prog. */
                        nfp_app_ctrl_rx_raw(nn->app, xrxbuf->xdp->data,
                                            pkt_len);
                        ...
                act = bpf_prog_run_xdp(xdp_prog, xrxbuf->xdp);

                /* The program may have adjusted head and tail. */
                pkt_len = xrxbuf->xdp->data_end - xrxbuf->xdp->data;
                pkt_off = xrxbuf->xdp->data - xrxbuf->xdp->data_hard_start;
                ...
                case XDP_REDIRECT:
                        if (xdp_do_redirect(dp->netdev, xrxbuf->xdp, xdp_prog)) {
                                ...
                        }
                        break;
                default:
                        bpf_warn_invalid_xdp_action(dp->netdev, xdp_prog, act);
                        fallthrough;
                case XDP_ABORTED:
                        trace_xdp_exception(dp->netdev, xdp_prog, act);
                        ...
        }

        nfp_net_xsk_rx_ring_fill_freelist(r_vec->rx_ring);
        ...
        if (tx_ring->wr_ptr_add)
                nfp_net_tx_xmit_more_flush(tx_ring);
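For reference, the verdicts returned by bpf_prog_run_xdp() drive four outcomes here: PASS copies into an skb, TX loops the frame back out of the same ring pair, REDIRECT hands it to another target, and everything else drops it, with unknown verdicts warned about and aborted ones traced. A userspace sketch of that dispatch shape; the handler bodies just narrate what the driver does, and the XDP_* values mirror include/uapi/linux/bpf.h:

        #include <stdio.h>

        /* Verdict values as defined in include/uapi/linux/bpf.h. */
        enum xdp_action {
                XDP_ABORTED = 0,
                XDP_DROP,
                XDP_PASS,
                XDP_TX,
                XDP_REDIRECT,
        };

        static void handle_verdict(enum xdp_action act)
        {
                switch (act) {
                case XDP_PASS:
                        puts("copy frame into an skb, recycle the XSK buffer");
                        break;
                case XDP_TX:
                        puts("transmit from the same buffer; drop if TX ring is full");
                        break;
                case XDP_REDIRECT:
                        puts("queue for another netdev/socket; flush after the loop");
                        break;
                default:
                        puts("unknown verdict: warn");
                        /* fall through */
                case XDP_ABORTED:
                        puts("trace the exception");
                        /* fall through */
                case XDP_DROP:
                        puts("return the buffer to the free list");
                        break;
                }
        }

        int main(void)
        {
                for (enum xdp_action a = XDP_ABORTED; a <= XDP_REDIRECT; a++)
                        handle_verdict(a);
                return 0;
        }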
/* nfp_nfd3_xsk_tx_free(): release a completed XDP_TX buffer back to the
 * XSK pool and clear its TX bookkeeping.
 */
        xsk_buff_free(txbuf->xdp);

        txbuf->dma_addr = 0;
        txbuf->xdp = NULL;
/* nfp_nfd3_xsk_complete(): reclaim transmitted descriptors. Returns true
 * when all outstanding completion work fit in one batch.
 */
        struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
        ...
        if (tx_ring->wr_p == tx_ring->rd_p)
                return true;

        /* Work out how many descriptors have been transmitted. */
        qcp_rd_p = nfp_qcp_rd_ptr_read(tx_ring->qcp_q);

        if (qcp_rd_p == tx_ring->qcp_rd_p)
                return true;

        todo = D_IDX(tx_ring, qcp_rd_p - tx_ring->qcp_rd_p);
        ...
        tx_ring->qcp_rd_p = D_IDX(tx_ring, tx_ring->qcp_rd_p + todo);
        ...
        while (todo--) {
                ...
                idx = D_IDX(tx_ring, tx_ring->rd_p);
                tx_ring->rd_p++;

                txbuf = &tx_ring->txbufs[idx];
                if (unlikely(!txbuf->real_len))
                        continue;

                done_bytes += txbuf->real_len;
                txbuf->real_len = 0;

                /* XDP_TX buffers go back to the pool, not to user space. */
                if (txbuf->is_xsk_tx) {
                        ...
                }
        }

        u64_stats_update_begin(&r_vec->tx_sync);
        r_vec->tx_bytes += done_bytes;
        r_vec->tx_pkts += done_pkts;
        u64_stats_update_end(&r_vec->tx_sync);

        /* Only frames that came off the user TX ring count as completed. */
        xsk_tx_completed(r_vec->xsk_pool, done_pkts - reused);

        WARN_ONCE(tx_ring->wr_p - tx_ring->rd_p > tx_ring->cnt,
                  "XDP TX ring corruption rd_p=%u wr_p=%u cnt=%u\n",
                  tx_ring->rd_p, tx_ring->wr_p, tx_ring->cnt);
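The todo computation above relies on unsigned wraparound: both read pointers advance modulo 2^32 and the difference is then masked to the ring size, so new - old is the correct distance even across a wrap. A minimal standalone check of that property; the ring size and pointer values are made up:

        #include <assert.h>
        #include <stdint.h>

        #define CNT 512u
        #define D_IDX(p) ((p) & (CNT - 1u))

        int main(void)
        {
                /* The hardware pointer wrapped past 2^32; ours has not yet. */
                uint32_t hw_rd_p = 5, sw_rd_p = 0xfffffffbu;

                /* 5 - 0xfffffffb == 10 (mod 2^32): the distance survives the
                 * wrap, provided the pointers never drift more than CNT apart.
                 */
                assert(D_IDX(hw_rd_p - sw_rd_p) == 10u);
                return 0;
        }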
/* nfp_nfd3_xsk_tx(): drain the AF_XDP user TX ring in batches, turning
 * each xdp_desc into an NFD3 TX descriptor.
 */
        struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
        ...
        xsk_pool = r_vec->xsk_pool;
        ...
                /* Write-prefetch the descriptor area before filling. */
                wr_idx = D_IDX(tx_ring, tx_ring->wr_p + i);
                prefetchw(&tx_ring->txds[wr_idx]);
                ...
                        wr_idx = D_IDX(tx_ring, tx_ring->wr_p + i);

                        tx_ring->txbufs[wr_idx].real_len = desc[i].len;
                        tx_ring->txbufs[wr_idx].is_xsk_tx = false;

                        /* Build TX descriptor. */
                        txd = &tx_ring->txds[wr_idx];
                        ...
                        txd->offset_eop = NFD3_DESC_TX_EOP;
                        txd->dma_len = cpu_to_le16(desc[i].len);
                        txd->data_len = cpu_to_le16(desc[i].len);
                ...
                tx_ring->wr_p += got;
        ...
        nfp_qcp_wr_ptr_add(tx_ring->qcp_q, pkts);
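prefetchw() issues a prefetch with write intent, so the descriptor cache line is already held exclusive when the fill loop stores to it. In userspace the same idea is available as __builtin_prefetch(addr, 1) on GCC and Clang; a small illustrative sketch, where the descriptor struct, ring size, and batch size are invented:

        #include <stdint.h>
        #include <string.h>

        struct txd {                    /* stand-in for a hardware TX descriptor */
                uint64_t dma_addr;
                uint16_t dma_len;
                uint16_t flags;
                uint32_t pad;
        };

        #define BATCH 16

        static void fill_batch(struct txd *ring, uint32_t wr_p, uint32_t mask,
                               const uint16_t *lens)
        {
                /* Hint that we are about to write this region (1 = write intent). */
                __builtin_prefetch(&ring[(wr_p + BATCH - 1) & mask], 1);

                for (uint32_t i = 0; i < BATCH; i++) {
                        struct txd *d = &ring[(wr_p + i) & mask];

                        memset(d, 0, sizeof(*d));
                        d->dma_len = lens[i];
                }
        }

        int main(void)
        {
                static struct txd ring[64];
                uint16_t lens[BATCH];

                for (int i = 0; i < BATCH; i++)
                        lens[i] = 64;
                fill_batch(ring, 0, 63, lens);
                return 0;
        }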
/* nfp_nfd3_xsk_poll(): NAPI poll callback for an AF_XDP ring vector. RX,
 * TX completion, and XSK TX are all serviced from the same budget.
 */
        pkts_polled = nfp_nfd3_xsk_rx(r_vec->rx_ring, budget, &skbs);
        ...
        if (r_vec->tx_ring)
                nfp_nfd3_tx_complete(r_vec->tx_ring, budget);

        if (!nfp_nfd3_xsk_complete(r_vec->xdp_ring))
                pkts_polled = budget;

        nfp_nfd3_xsk_tx(r_vec->xdp_ring);
        ...
        nfp_net_irq_unmask(r_vec->nfp_net, r_vec->irq_entry);
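The surrounding (elided) logic follows the standard NAPI contract: returning the full budget keeps the poll scheduled, while returning less and completing NAPI re-enables the interrupt, which is what the nfp_net_irq_unmask() call above does. A toy event-loop sketch of that contract; poll_fn and the pending counter are invented stand-ins:

        #include <stdbool.h>
        #include <stdio.h>

        static int pending = 70;        /* pretend packets waiting in the ring */

        /* Stand-in for a NAPI poll callback: consume up to 'budget' packets. */
        static int poll_fn(int budget)
        {
                int done = pending < budget ? pending : budget;

                pending -= done;
                return done;
        }

        int main(void)
        {
                const int budget = 32;
                bool irq_masked = true; /* the IRQ stays masked while polling */

                while (irq_masked) {
                        int done = poll_fn(budget);

                        if (done < budget) {
                                /* Under budget: complete NAPI, unmask the IRQ. */
                                printf("done=%d -> unmask IRQ\n", done);
                                irq_masked = false;
                        } else {
                                /* Budget exhausted: stay scheduled, poll again. */
                                printf("done=%d -> repoll\n", done);
                        }
                }
                return 0;
        }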