
Full-text search for "xdp_tx": results 1 – 25 of 117, sorted by relevance.


/linux/include/net/libeth/
xdp.h
111 * for lockless sending (``XDP_TX``, .ndo_xdp_xmit()). Cap the maximum of these
282 * @LIBETH_XDP_TX_XSK: whether the function is called for ``XDP_TX`` for XSk
295 * @LIBETH_XDP_TX_LEN: only for ``XDP_TX``, [15:0] of ::len_fl is actual length
301 * @LIBETH_XDP_TX_FLAGS: only for ``XDP_TX``, [31:16] of ::len_fl is flags
318 * @data: frame start pointer for ``XDP_TX``
319 * @len_fl: ``XDP_TX``, combined flags [31:16] and len [15:0] field for speed
320 * @soff: ``XDP_TX``, offset from @data to the start of &skb_shared_info
321 * @frag: one (non-head) frag for ``XDP_TX``
324 * @xsk: ``XDP_TX`` for XSk, XDP buffer for any frag
325 * @len: frag length for XSk ``XDP_TX`` an…
429 u32 *xdp_tx; global() member
… (more matches in this file not shown)
xsk.h
15 /* ``XDP_TX`` bulking */
18 * libeth_xsk_tx_queue_head - internal helper for queueing XSk ``XDP_TX`` head
42 * libeth_xsk_tx_queue_frag - internal helper for queueing XSk ``XDP_TX`` frag
56 * libeth_xsk_tx_queue_bulk - internal helper for queueing XSk ``XDP_TX`` frame
102 * libeth_xsk_tx_fill_buf - internal helper to fill XSk ``XDP_TX`` &libeth_sqe
140 * libeth_xsk_tx_flush_bulk - wrapper to define flush of XSk ``XDP_TX`` bulk
146 * Use via LIBETH_XSK_DEFINE_FLUSH_TX() to define an XSk ``XDP_TX`` driver
380 * @bq: buffer bulk for ``XDP_TX`` queueing
411 * @bq: XDP Tx bulk to queue ``XDP_TX`` buffers
412 * @fl: driver ``XDP_TX`` bul…
… (more matches in this file not shown)
tx.h
15 * @LIBETH_SQE_EMPTY: unused/empty OR XDP_TX/XSk frame, no action required
48 * @sinfo: skb shared info of an XDP_TX frame
50 * @xsk: XSk Rx frame from XDP_TX action
96 * @xdp_tx: number of XDP-not-XSk frames processed
111 u32 xdp_tx; member
/linux/tools/testing/selftests/bpf/prog_tests/
xdp_adjust_tail.c
34 ASSERT_EQ(topts.retval, XDP_TX, "ipv6 retval"); in test_xdp_adjust_tail_shrink()
74 ASSERT_EQ(topts.retval, XDP_TX, "ipv6 retval"); in test_xdp_adjust_tail_grow()
116 ASSERT_EQ(tattr.retval, XDP_TX, "case-64 retval"); in test_xdp_adjust_tail_grow2()
135 ASSERT_EQ(tattr.retval, XDP_TX, "case-128 retval"); in test_xdp_adjust_tail_grow2()
187 ASSERT_EQ(topts.retval, XDP_TX, "9Kb-10b retval"); in test_xdp_adjust_frags_tail_shrink()
198 ASSERT_EQ(topts.retval, XDP_TX, "9Kb-4Kb retval"); in test_xdp_adjust_frags_tail_shrink()
208 ASSERT_EQ(topts.retval, XDP_TX, "9Kb-9Kb retval"); in test_xdp_adjust_frags_tail_shrink()
251 ASSERT_EQ(topts.retval, XDP_TX, "9Kb+10b retval"); in test_xdp_adjust_frags_tail_grow_4k()
322 ASSERT_EQ(topts.retval, XDP_TX, "90Kb+10b retval"); in test_xdp_adjust_frags_tail_grow_64k()
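The xdp_adjust_tail.c hits above all follow the same selftest pattern: the XDP program is executed via BPF_PROG_TEST_RUN on a crafted packet and the test asserts that the returned verdict is XDP_TX. A condensed sketch of that pattern; the packet layout, the prog_fd plumbing and the function name are illustrative assumptions, only the retval check mirrors the matched lines:

/* Sketch: run an XDP program on a dummy frame and expect an XDP_TX verdict. */
#include <linux/bpf.h>
#include <bpf/bpf.h>
#include "test_progs.h"

static void expect_xdp_tx(int prog_fd)
{
        char pkt[128] = {};     /* dummy frame; the real tests build IPv4/IPv6 headers */
        LIBBPF_OPTS(bpf_test_run_opts, topts,
                .data_in = pkt,
                .data_size_in = sizeof(pkt),
                .repeat = 1,
        );

        ASSERT_OK(bpf_prog_test_run_opts(prog_fd, &topts), "test_run");
        ASSERT_EQ(topts.retval, XDP_TX, "retval");
}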
test_xdp_veth.c
22 * (XDP_PASS) (XDP_TX) (XDP_PASS)
59 #include "xdp_tx.skel.h"
240 .remote_name = "xdp_tx", in xdp_veth_redirect()
256 struct xdp_tx *xdp_tx; in xdp_veth_redirect() local
264 xdp_tx = xdp_tx__open_and_load(); in xdp_veth_redirect()
265 if (!ASSERT_OK_PTR(xdp_tx, "xdp_tx__open_and_load")) in xdp_veth_redirect()
281 bpf_objs[1] = xdp_tx->obj; in xdp_veth_redirect()
314 xdp_tx__destroy(xdp_tx); in xdp_veth_redirect()
xdp_bonding.c
7 * and verifies that XDP_TX program loaded on a bond device
8 * are correctly loaded onto the slave devices and XDP_TX'd
25 #include "xdp_tx.skel.h"
92 struct xdp_tx *xdp_tx; member
183 if (xdp_attach(skeletons, skeletons->xdp_tx->progs.xdp_tx, "bond2")) in bonding_setup()
652 skeletons.xdp_tx = xdp_tx__open_and_load(); in serial_test_xdp_bonding()
653 if (!ASSERT_OK_PTR(skeletons.xdp_tx, "xdp_tx__open_and_load")) in serial_test_xdp_bonding()
685 xdp_tx__destroy(skeletons.xdp_tx); in serial_test_xdp_bonding()
… (more matches in this file not shown)
xdp.c
38 ASSERT_EQ(topts.retval, XDP_TX, "ipv4 test_run retval"); in test_xdp()
49 ASSERT_EQ(topts.retval, XDP_TX, "ipv6 test_run retval"); in test_xdp()
/linux/tools/testing/selftests/bpf/progs/
xdping_kern.c
87 return XDP_TX; in icmp_check()
106 if (ret != XDP_TX) in xdping_client()
150 return XDP_TX; in xdping_client()
165 if (ret != XDP_TX) in xdping_server()
180 return XDP_TX; in xdping_server()
xdp_tx.c
7 int xdp_tx(struct xdp_md *xdp) in xdp_tx() function
9 return XDP_TX; in xdp_tx()
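The two xdp_tx.c matches are essentially the whole program: a selftest XDP program that returns XDP_TX for every frame, bouncing it back out of the interface it arrived on. A minimal sketch; the SEC() annotation and license line are the usual boilerplate assumed around the two matched lines:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("xdp")
int xdp_tx(struct xdp_md *xdp)
{
        /* bounce every packet back out the interface it arrived on */
        return XDP_TX;
}

char _license[] SEC("license") = "GPL";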
xdp_synproxy_kern.c
427 return XDP_TX; in tcp_dissect()
479 return XDP_TX; in tcp_lookup()
728 return XDP_TX; in syncookie_handle_syn()
756 if (ret != XDP_TX) in syncookie_part1()
760 if (ret != XDP_TX) in syncookie_part1()
784 return XDP_TX; in syncookie_part1()
830 if (ret != XDP_TX) in syncookie_xdp()
848 if (ret != XDP_TX) in syncookie_tc()
858 case XDP_TX: in syncookie_tc()
test_xdp_adjust_tail_shrink.c
49 return XDP_TX; in _xdp_adjust_tail_shrink()
test_xdp_loop.c
145 return XDP_TX; in handle_ipv4()
205 return XDP_TX; in handle_ipv6()
test_xdp.c
149 return XDP_TX; in handle_ipv4()
209 return XDP_TX; in handle_ipv6()
test_xdp_adjust_tail_grow.c
46 return XDP_TX; in _xdp_adjust_tail_grow()
/linux/tools/testing/selftests/bpf/
test_xdp_features.sh
74 ## XDP_TX
75 ./xdp_features -f XDP_TX -D ::ffff:$V1_IP4 -T ::ffff:$V0_IP4 v1 &
77 ip netns exec ${NS} ./xdp_features -t -f XDP_TX \
xdp_features.c
75 "- XDP_TX\n";
98 } else if (!strcmp(arg, "XDP_TX")) { in get_xdp_feature()
100 env.feature.action = XDP_TX; in get_xdp_feature()
122 case XDP_TX: in get_xdp_feature_str()
123 return YELLOW("XDP_TX"); in get_xdp_feature_str()
301 case XDP_TX: in dut_attach_xdp_prog()
512 case XDP_TX: in tester_collect_detected_cap()
610 env.feature.action == XDP_TX) in tester_run()
/linux/drivers/net/ethernet/amazon/ena/
ena_xdp.h
95 case XDP_TX: in ena_xdp_execute()
107 /* The XDP queues are shared between XDP_TX and XDP_REDIRECT */ in ena_xdp_execute()
115 xdp_stat = &rx_ring->rx_stats.xdp_tx; in ena_xdp_execute()
/linux/drivers/net/ethernet/qlogic/qede/
qede_fp.c
344 struct qede_tx_queue *xdp_tx; in qede_xdp_transmit() local
357 xdp_tx = edev->fp_array[i].xdp_tx; in qede_xdp_transmit()
359 spin_lock(&xdp_tx->xdp_tx_lock); in qede_xdp_transmit()
369 if (unlikely(qede_xdp_xmit(xdp_tx, mapping, 0, xdpf->len, in qede_xdp_transmit()
376 xdp_prod = qed_chain_get_prod_idx(&xdp_tx->tx_pbl); in qede_xdp_transmit()
378 xdp_tx->tx_db.data.bd_prod = cpu_to_le16(xdp_prod); in qede_xdp_transmit()
379 qede_update_tx_producer(xdp_tx); in qede_xdp_transmit()
382 spin_unlock(&xdp_tx->xdp_tx_lock); in qede_xdp_transmit()
1107 case XDP_TX in qede_rx_xdp()
… (more matches in this file not shown)
/linux/drivers/net/ethernet/pensando/ionic/
ionic_stats.c
33 IONIC_LIF_STAT_DESC(xdp_tx),
162 IONIC_RX_STAT_DESC(xdp_tx),
204 stats->xdp_tx += rxstats->xdp_tx; in ionic_add_lif_rxq_stats()
/linux/drivers/net/ethernet/intel/libeth/
xsk.c
12 /* ``XDP_TX`` bulking */
87 * @bq: Tx bulk for queueing on ``XDP_TX``
107 case XDP_TX: in __libeth_xsk_run_prog_slow()
xdp.c
82 /* ``XDP_TX`` bulking */
130 libeth_trace_xdp_exception(bq->dev, bq->prog, XDP_TX); in libeth_xdp_tx_exception()
331 * completion of ``XDP_TX`` buffers and allows to free them in same bulks
/linux/samples/bpf/
xdp_tx_iptunnel_kern.c
9 * and then XDP_TX it out.
149 return XDP_TX; in handle_ipv4()
212 return XDP_TX; in handle_ipv6()
/linux/Documentation/bpf/
bpf_prog_run.rst
84 though it arrived on that ifindex, and if it returns ``XDP_TX``, the packet
87 ``XDP_TX`` is actually turned into the same action as an ``XDP_REDIRECT`` to
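The bpf_prog_run.rst fragments describe the live-frames test mode: the frame is processed as though it arrived on the ifindex supplied in the context, and an ``XDP_TX`` verdict is handled like an ``XDP_REDIRECT`` back to that same interface. A hedged userspace sketch of driving that mode; the wrapper name, frame contents and ifindex value are placeholders:

#include <linux/bpf.h>
#include <bpf/bpf.h>

/* Run an XDP program in live-frames mode: verdicts such as XDP_TX are
 * acted on by the kernel rather than reported back to the caller. */
static int run_xdp_live(int prog_fd, void *frame, __u32 len, __u32 ifindex)
{
        struct xdp_md ctx = { .ingress_ifindex = ifindex };
        LIBBPF_OPTS(bpf_test_run_opts, opts,
                .data_in = frame,
                .data_size_in = len,
                .ctx_in = &ctx,
                .ctx_size_in = sizeof(ctx),
                .flags = BPF_F_TEST_XDP_LIVE_FRAMES,
                .repeat = 1,
        );

        return bpf_prog_test_run_opts(prog_fd, &opts);
}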
/linux/drivers/net/ethernet/microsoft/mana/
mana_ethtool.c
197 u64 xdp_tx; in mana_get_ethtool_stats() local
233 xdp_tx = rx_stats->xdp_tx; in mana_get_ethtool_stats()
240 data[i++] = xdp_tx; in mana_get_ethtool_stats()
/linux/drivers/net/
veth.c
50 u64 xdp_tx; member
104 { "xdp_tx", VETH_RQ_STAT(xdp_tx) },
590 rq->stats.vs.xdp_tx += sent; in veth_xdp_flush_bq()
662 case XDP_TX: in veth_xdp_rcv_one()
671 stats->xdp_tx++; in veth_xdp_rcv_one()
822 case XDP_TX: in veth_xdp_rcv_skb()
831 stats->xdp_tx++; in veth_xdp_rcv_skb()
995 if (stats.xdp_tx > 0) in veth_poll()
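The driver hits above (ena, qede, veth and the libeth helpers) share one Rx-path shape: run the attached program, switch on its verdict, and for XDP_TX queue the frame on the device's own XDP Tx ring while bumping an xdp_tx counter. A schematic kernel-style sketch; every mydrv_* name and the ring layout are hypothetical, only the verdict handling mirrors the listed drivers:

static u32 mydrv_run_xdp(struct mydrv_rx_ring *rxq, struct xdp_buff *xdp)
{
        u32 act = bpf_prog_run_xdp(rxq->xdp_prog, xdp);

        switch (act) {
        case XDP_PASS:
                break;
        case XDP_TX:
                /* transmit the frame back out of this device's XDP Tx queue */
                if (unlikely(mydrv_xmit_xdp_buff(rxq->xdp_txq, xdp)))
                        goto drop;
                rxq->stats.xdp_tx++;
                break;
        case XDP_REDIRECT:
                if (unlikely(xdp_do_redirect(rxq->netdev, xdp, rxq->xdp_prog)))
                        goto drop;
                break;
        default:
                bpf_warn_invalid_xdp_action(rxq->netdev, rxq->xdp_prog, act);
                fallthrough;
        case XDP_ABORTED:
                trace_xdp_exception(rxq->netdev, rxq->xdp_prog, act);
                fallthrough;
        case XDP_DROP:
drop:
                act = XDP_DROP;     /* caller recycles the Rx buffer */
                break;
        }

        return act;
}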
