Lines matching full:xdp (full-word matches for the identifier xdp in the bnxt driver's XDP support code)

29 struct xdp_buff *xdp)
39 if (xdp && xdp_buff_has_frags(xdp)) {
40 sinfo = xdp_get_shared_info_from_buff(xdp);
48 if (xdp)
49 tx_buf->page = virt_to_head_page(xdp->data);
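The bnxt_xmit_bd() matches above (lines 29-49) show the TX side pinning pages: the head page behind xdp->data is recorded on the tx_buf, and when the buff carries fragments the shared info is fetched so each frag page can be tied to its own descriptor. Below is a minimal sketch of that page-collection step, using only the core XDP helpers; sketch_collect_tx_pages and its pages/max parameters are illustrative, not bnxt code.

#include <linux/mm.h>
#include <linux/skbuff.h>
#include <net/xdp.h>

static int sketch_collect_tx_pages(struct xdp_buff *xdp, struct page **pages,
				   int max)
{
	struct skb_shared_info *sinfo;
	int n = 0, i;

	/* line 49: the head page backing xdp->data is kept for completion */
	if (n < max)
		pages[n++] = virt_to_head_page(xdp->data);

	/* lines 39-40: frag pages exist only when the buff carries frags */
	if (!xdp_buff_has_frags(xdp))
		return n;

	sinfo = xdp_get_shared_info_from_buff(xdp);
	for (i = 0; i < sinfo->nr_frags && n < max; i++)
		pages[n++] = skb_frag_page(&sinfo->frags[i]);

	return n;
}
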
97 struct xdp_buff *xdp)
101 tx_buf = bnxt_xmit_bd(bp, txr, mapping, len, xdp);
184 struct xdp_buff *xdp)
199 xdp_init_buff(xdp, buflen, &rxr->xdp_rxq);
200 xdp_prepare_buff(xdp, data_ptr - offset, offset, len, true);
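Lines 199-200 are the canonical two-step receive-side setup: xdp_init_buff() binds the buff to the ring's xdp_rxq_info and records the frame size, then xdp_prepare_buff() points it at the current packet. A sketch of that pairing, with the parameters named as in the matched lines; sketch_buff_init itself is illustrative.

#include <net/xdp.h>

static void sketch_buff_init(struct xdp_buff *xdp, struct xdp_rxq_info *rxq,
			     u32 buflen, u8 *data_ptr, u32 offset, u32 len)
{
	/* frame_sz (buflen) covers headroom + data + tailroom */
	xdp_init_buff(xdp, buflen, rxq);
	/* hard_start is the true buffer start; the packet begins offset
	 * bytes in, and the final 'true' marks the metadata area valid
	 */
	xdp_prepare_buff(xdp, data_ptr - offset, offset, len, true);
}
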
204 struct xdp_buff *xdp)
209 if (!xdp || !xdp_buff_has_frags(xdp))
211 shinfo = xdp_get_shared_info_from_buff(xdp);
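The bnxt_xdp_buff_frags_free() matches (lines 204-211) show the guard-then-walk shape for releasing fragment pages when a multi-buffer packet will not be passed on. A sketch under the assumption that the pages come from a page_pool; the recycle call and the pool parameter are assumptions, not the driver's exact code.

#include <linux/skbuff.h>
#include <net/page_pool/helpers.h>
#include <net/xdp.h>

static void sketch_frags_free(struct page_pool *pool, struct xdp_buff *xdp)
{
	struct skb_shared_info *shinfo;
	int i;

	/* line 209: nothing to do for a NULL or single-buffer xdp_buff */
	if (!xdp || !xdp_buff_has_frags(xdp))
		return;

	shinfo = xdp_get_shared_info_from_buff(xdp);
	for (i = 0; i < shinfo->nr_frags; i++)
		page_pool_recycle_direct(pool,
					 skb_frag_page(&shinfo->frags[i]));
	shinfo->nr_frags = 0;
}
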
221 * true - packet consumed by XDP and new buffer is allocated.
225 struct xdp_buff *xdp, struct page *page, u8 **data_ptr,
246 /* BNXT_RX_PAGE_MODE(bp) when XDP enabled */
247 orig_data = xdp->data;
249 act = bpf_prog_run_xdp(xdp_prog, xdp);
258 *len = xdp->data_end - xdp->data;
259 if (orig_data != xdp->data) {
260 offset = xdp->data - xdp->data_hard_start;
261 *data_ptr = xdp->data_hard_start + offset;
273 if (unlikely(xdp_buff_has_frags(xdp))) {
274 struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
282 bnxt_xdp_buff_frags_free(rxr, xdp);
292 NEXT_RX(rxr->rx_prod), xdp);
304 bnxt_xdp_buff_frags_free(rxr, xdp);
309 if (xdp_do_redirect(bp->dev, xdp, xdp_prog)) {
324 bnxt_xdp_buff_frags_free(rxr, xdp);
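The bnxt_rx_xdp() matches (lines 221-324) trace the standard verdict flow: run the program, re-read data/data_end because bpf_xdp_adjust_head()/_tail() may have moved them (lines 258-261), then branch on the action, freeing fragment pages whenever the packet will not reach the stack. A sketch of that canonical switch, with the driver-specific TX queuing and buffer recycling reduced to comments:

#include <linux/bpf_trace.h>
#include <linux/filter.h>
#include <linux/netdevice.h>
#include <net/xdp.h>

/* Returns true when the packet was consumed by XDP, mirroring the
 * kernel-doc comment matched at line 221.
 */
static bool sketch_run_xdp(struct net_device *dev, struct bpf_prog *prog,
			   struct xdp_buff *xdp)
{
	u32 act = bpf_prog_run_xdp(prog, xdp);

	/* the program may have moved xdp->data/data_end; callers must
	 * recompute their cached pointer and length, as lines 258-261 do
	 */
	switch (act) {
	case XDP_PASS:
		return false;	/* hand the (possibly reshaped) packet up */
	case XDP_TX:
		/* map xdp->data and any frags onto a TX ring (line 292) */
		return true;
	case XDP_REDIRECT:
		if (xdp_do_redirect(dev, xdp, prog)) {
			/* redirect failed: free frags, as line 324 does */
			trace_xdp_exception(dev, prog, act);
		}
		return true;
	default:
		bpf_warn_invalid_xdp_action(dev, prog, act);
		fallthrough;
	case XDP_ABORTED:
		trace_xdp_exception(dev, prog, act);
		fallthrough;
	case XDP_DROP:
		/* recycle the buffer and frag pages (lines 282, 304) */
		return true;
	}
}
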
358 struct xdp_frame *xdp = frames[i];
363 mapping = dma_map_single(&pdev->dev, xdp->data, xdp->len,
369 __bnxt_xmit_xdp_redirect(bp, txr, mapping, xdp->len, xdp);
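Lines 358-369 are the heart of the driver's .ndo_xdp_xmit loop: each xdp_frame is DMA-mapped and handed to the BD writer. A skeleton of that contract, assuming the netdev's parent device performs DMA; sketch_xdp_xmit and the commented-out descriptor write are illustrative.

#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <net/xdp.h>

static int sketch_xdp_xmit(struct net_device *dev, int num_frames,
			   struct xdp_frame **frames, u32 flags)
{
	struct device *dma_dev = dev->dev.parent;	/* assumed DMA device */
	int nxmit = 0, i;

	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
		return -EINVAL;

	for (i = 0; i < num_frames; i++) {
		struct xdp_frame *xdpf = frames[i];
		dma_addr_t mapping;

		/* line 363: map the frame's data for device TX */
		mapping = dma_map_single(dma_dev, xdpf->data, xdpf->len,
					 DMA_TO_DEVICE);
		if (dma_mapping_error(dma_dev, mapping))
			break;	/* stop early; caller frees the rest */

		/* line 369: write the TX descriptor for this frame, e.g.
		 * sketch_tx_one(dev, mapping, xdpf->len, xdpf);
		 */
		nxmit++;
	}

	/* with XDP_XMIT_FLUSH set, the TX doorbell would be rung here */
	return nxmit;	/* number of frames accepted for transmit */
}
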
395 netdev_warn(dev, "MTU %d larger than %d without XDP frag support.\n",
400 netdev_warn(dev, "XDP is disallowed when HDS is enabled.\n");
404 netdev_warn(dev, "ethtool rx/tx channels must be combined to support XDP.\n");
416 netdev_warn(dev, "Unable to reserve enough TX rings to support XDP.\n");
446 int bnxt_xdp(struct net_device *dev, struct netdev_bpf *xdp)
451 switch (xdp->command) {
453 rc = bnxt_xdp_set(bp, xdp->prog);
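Lines 446-453 show the standard .ndo_bpf entry point: a switch on netdev_bpf->command with XDP_SETUP_PROG routed to the driver's setter, which is where the MTU, HDS, and ring-count constraints behind the warnings at lines 395-416 are enforced. A minimal dispatch sketch; sketch_setup_prog stands in for bnxt_xdp_set() and is illustrative.

#include <linux/bpf.h>
#include <linux/netdevice.h>

static int sketch_setup_prog(struct net_device *dev, struct bpf_prog *prog)
{
	/* validate MTU/HDS/ring constraints, then swap in the program */
	return 0;
}

static int sketch_ndo_bpf(struct net_device *dev, struct netdev_bpf *xdp)
{
	switch (xdp->command) {
	case XDP_SETUP_PROG:
		return sketch_setup_prog(dev, xdp->prog);
	default:
		return -EINVAL;
	}
}
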
464 struct page_pool *pool, struct xdp_buff *xdp)
466 struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
474 xdp_buff_is_frag_pfmemalloc(xdp));
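The final matches (lines 464-474) come from skb construction for a multi-buffer packet: the shared info is read off the xdp_buff and its frags are transplanted onto the skb, carrying the pfmemalloc state across. A sketch of that step; the one-page-per-frag truesize is a simplifying assumption, not bnxt's arithmetic.

#include <linux/skbuff.h>
#include <net/xdp.h>

static void sketch_attach_frags(struct sk_buff *skb, struct xdp_buff *xdp)
{
	struct skb_shared_info *sinfo;

	if (!xdp_buff_has_frags(xdp))
		return;

	sinfo = xdp_get_shared_info_from_buff(xdp);
	/* line 474: propagate frag-page pfmemalloc state onto the skb */
	xdp_update_skb_shared_info(skb, sinfo->nr_frags,
				   sinfo->xdp_frags_size,
				   sinfo->nr_frags * PAGE_SIZE,
				   xdp_buff_is_frag_pfmemalloc(xdp));
}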