Lines matching +full:data +full:- +full:mapping in drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c

/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2016-2017 Broadcom Limited
 */
struct bnxt_sw_tx_bd *bnxt_xmit_bd(struct bnxt *bp,
				   struct bnxt_tx_ring_info *txr,
				   dma_addr_t mapping, u32 len)
{
	u16 prod = txr->tx_prod;
	struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[prod];
	struct tx_bd *txbd;
	u32 flags;

	/* Fill the hardware descriptor for this producer slot. */
	txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
	flags = (len << TX_BD_LEN_SHIFT) | (1 << TX_BD_FLAGS_BD_CNT_SHIFT) |
		TX_BD_FLAGS_PACKET_END | bnxt_lhint_arr[len >> 9];
	txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
	txbd->tx_bd_opaque = prod;
	txbd->tx_bd_haddr = cpu_to_le64(mapping);

	txr->tx_prod = NEXT_TX(prod);
	return tx_buf;
}
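
The TX_RING()/TX_IDX() pair above splits the logical producer index into a page number and a slot within that page, because the descriptor ring is allocated as an array of DMA-coherent pages rather than one contiguous buffer. A minimal, self-contained sketch of that two-level indexing (DESC_PER_PAGE, ring_page() and ring_idx() are hypothetical stand-ins, not the driver's real macros):

	#include <stdint.h>

	#define DESC_PER_PAGE 128	/* hypothetical descriptors per ring page */

	/* Page that holds logical descriptor index i. */
	static inline uint32_t ring_page(uint16_t i)
	{
		return i / DESC_PER_PAGE;
	}

	/* Slot within that page; with a power-of-two DESC_PER_PAGE this
	 * reduces to a mask, which is what the real TX_IDX() does. */
	static inline uint32_t ring_idx(uint16_t i)
	{
		return i % DESC_PER_PAGE;
	}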
static void __bnxt_xmit_xdp(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
			    dma_addr_t mapping, u32 len, u16 rx_prod)
{
	struct bnxt_sw_tx_bd *tx_buf = bnxt_xmit_bd(bp, txr, mapping, len);

	tx_buf->rx_prod = rx_prod;	/* RX slot to recycle at completion */
	tx_buf->action = XDP_TX;
}
static void __bnxt_xmit_xdp_redirect(struct bnxt *bp,
				     struct bnxt_tx_ring_info *txr,
				     dma_addr_t mapping, u32 len,
				     struct xdp_frame *xdpf)
{
	struct bnxt_sw_tx_bd *tx_buf = bnxt_xmit_bd(bp, txr, mapping, len);

	tx_buf->action = XDP_REDIRECT;
	tx_buf->xdpf = xdpf;
	/* Record the DMA address so TX completion can unmap the frame. */
	dma_unmap_addr_set(tx_buf, mapping, mapping);
}
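
Only the redirect path records the DMA address in the software descriptor: XDP_TX reuses an RX-ring page that stays mapped, while redirected frames are mapped in this driver and must be unmapped at completion. A minimal sketch of the dma_unmap_addr bookkeeping pattern (struct sw_tx_buf and sw_tx_buf_record() are hypothetical; the driver's struct bnxt_sw_tx_bd follows the same pattern):

	#include <linux/types.h>
	#include <linux/dma-mapping.h>

	struct sw_tx_buf {
		DEFINE_DMA_UNMAP_ADDR(mapping);	/* empty unless the arch needs it */
		DEFINE_DMA_UNMAP_LEN(len);
	};

	static void sw_tx_buf_record(struct sw_tx_buf *buf, dma_addr_t addr,
				     u32 frame_len)
	{
		/* Both setters compile to nothing when unmap needs no state. */
		dma_unmap_addr_set(buf, mapping, addr);
		dma_unmap_len_set(buf, len, frame_len);
	}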
void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
{
	struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
	bool rx_doorbell_needed = false;
	struct bnxt_sw_tx_bd *tx_buf;
	u16 tx_cons = txr->tx_cons;
	u16 last_tx_cons = tx_cons;
	int i;

	for (i = 0; i < nr_pkts; i++) {
		tx_buf = &txr->tx_buf_ring[tx_cons];
		if (tx_buf->action == XDP_REDIRECT) {
			struct pci_dev *pdev = bp->pdev;

			/* Frame was mapped in bnxt_xdp_xmit(); unmap it and
			 * hand it back to the XDP core. */
			dma_unmap_single(&pdev->dev,
					 dma_unmap_addr(tx_buf, mapping),
					 dma_unmap_len(tx_buf, len),
					 DMA_TO_DEVICE);
			xdp_return_frame(tx_buf->xdpf);
			tx_buf->action = 0;
			tx_buf->xdpf = NULL;
		} else if (tx_buf->action == XDP_TX) {
			rx_doorbell_needed = true;
			last_tx_cons = tx_cons;
		}
		tx_cons = NEXT_TX(tx_cons);
	}
	txr->tx_cons = tx_cons;
	if (rx_doorbell_needed) {
		/* Recycle all completed XDP_TX buffers back to the RX ring
		 * with a single RX producer doorbell. */
		tx_buf = &txr->tx_buf_ring[last_tx_cons];
		bnxt_db_write(bp, &rxr->rx_db, tx_buf->rx_prod);
	}
}
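
The completion loop frees only what the driver owns: redirected frames were mapped in bnxt_xdp_xmit() and must be unmapped and returned to the XDP core, while XDP_TX buffers still belong to the RX ring and are recycled in one batch by the final doorbell write. A condensed sketch of that ownership split (the enum and complete_one() are hypothetical):

	#include <linux/dma-mapping.h>
	#include <net/xdp.h>

	enum buf_owner { OWNER_XDP_CORE, OWNER_RX_RING };

	static void complete_one(struct device *dev, enum buf_owner owner,
				 dma_addr_t addr, u32 len, struct xdp_frame *xdpf)
	{
		if (owner == OWNER_XDP_CORE) {	/* XDP_REDIRECT frame */
			dma_unmap_single(dev, addr, len, DMA_TO_DEVICE);
			xdp_return_frame(xdpf);
		}
		/* OWNER_RX_RING (XDP_TX): nothing to free here; bumping the
		 * RX producer doorbell recycles the page. */
	}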
/* returns the following:
 * true    - packet consumed by XDP and new buffer is allocated.
 * false   - packet should be passed to the stack.
 */
bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
		 struct page *page, u8 **data_ptr, unsigned int *len, u8 *event)
{
	struct bpf_prog *xdp_prog = READ_ONCE(rxr->xdp_prog);
	struct bnxt_tx_ring_info *txr;
	struct bnxt_sw_rx_bd *rx_buf;
	struct pci_dev *pdev;
	struct xdp_buff xdp;
	dma_addr_t mapping;
	void *orig_data;
	u32 tx_avail, offset, act;

	if (!xdp_prog)
		return false;

	pdev = bp->pdev;
	rx_buf = &rxr->rx_buf_ring[cons];
	offset = bp->rx_offset;

	/* Make the packet bytes CPU-visible before the program reads them. */
	mapping = rx_buf->mapping - bp->rx_dma_offset;
	dma_sync_single_for_cpu(&pdev->dev, mapping + offset, *len, bp->rx_dir);

	txr = rxr->bnapi->tx_ring;
	xdp.data_hard_start = *data_ptr - offset;
	xdp.data = *data_ptr;
	xdp_set_data_meta_invalid(&xdp);
	xdp.data_end = *data_ptr + *len;
	xdp.rxq = &rxr->xdp_rxq;
	orig_data = xdp.data;
	act = bpf_prog_run_xdp(xdp_prog, &xdp);

	tx_avail = bnxt_tx_avail(bp, txr);
	/* Hold off the RX producer update while TX BDs may be outstanding. */
	if (tx_avail != bp->tx_ring_size)
		*event &= ~BNXT_RX_EVENT;

	*len = xdp.data_end - xdp.data;
	if (orig_data != xdp.data) {	/* program moved the headroom boundary */
		offset = xdp.data - xdp.data_hard_start;
		*data_ptr = xdp.data_hard_start + offset;
	}
	switch (act) {
	case XDP_PASS:
		return false;
	case XDP_TX:
		if (tx_avail < 1) {	/* TX ring full: drop and reuse */
			trace_xdp_exception(bp->dev, xdp_prog, act);
			bnxt_reuse_rx_data(rxr, cons, page);
			return true;
		}
		*event = BNXT_TX_EVENT;
		dma_sync_single_for_device(&pdev->dev, mapping + offset, *len,
					   bp->rx_dir);
		__bnxt_xmit_xdp(bp, txr, mapping + offset, *len,
				NEXT_RX(rxr->rx_prod));
		bnxt_reuse_rx_data(rxr, cons, page);
		return true;
	case XDP_REDIRECT:
		/* The XDP core takes ownership of the page; unmap it here. */
		dma_unmap_page_attrs(&pdev->dev, mapping, PAGE_SIZE, bp->rx_dir,
				     DMA_ATTR_WEAK_ORDERING);
		if (bnxt_alloc_rx_data(bp, rxr, rxr->rx_prod, GFP_ATOMIC)) {
			trace_xdp_exception(bp->dev, xdp_prog, act);
			bnxt_reuse_rx_data(rxr, cons, page);
			return true;
		}
		if (xdp_do_redirect(bp->dev, &xdp, xdp_prog)) {
			trace_xdp_exception(bp->dev, xdp_prog, act);
			page_pool_recycle_direct(rxr->page_pool, page);
			return true;
		}
		*event |= BNXT_REDIRECT_EVENT;
		break;
	default:	/* XDP_ABORTED and unknown verdicts */
		trace_xdp_exception(bp->dev, xdp_prog, act);
		/* fall through */
	case XDP_DROP:
		bnxt_reuse_rx_data(rxr, cons, page);
		break;
	}
	return true;
}
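
For context, the verdicts handled above are produced by a BPF program attached to the device. A minimal program that exercises the XDP_TX path (standard libbpf conventions; not part of the driver):

	// SPDX-License-Identifier: GPL-2.0
	#include <linux/bpf.h>
	#include <bpf/bpf_helpers.h>

	SEC("xdp")
	int xdp_bounce(struct xdp_md *ctx)
	{
		return XDP_TX;	/* send every frame back out the same port */
	}

	char _license[] SEC("license") = "GPL";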
int bnxt_xdp_xmit(struct net_device *dev, int num_frames,
		  struct xdp_frame **frames, u32 flags)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bpf_prog *xdp_prog = READ_ONCE(bp->xdp_prog);
	struct pci_dev *pdev = bp->pdev;
	struct bnxt_tx_ring_info *txr;
	dma_addr_t mapping;
	int drops = 0;
	int ring, i;

	if (!test_bit(BNXT_STATE_OPEN, &bp->state) ||
	    !bp->tx_nr_rings_xdp || !xdp_prog)
		return -EINVAL;

	/* One XDP TX ring per core; pick this CPU's ring. */
	ring = smp_processor_id() % bp->tx_nr_rings_xdp;
	txr = &bp->tx_ring[ring];

	for (i = 0; i < num_frames; i++) {
		struct xdp_frame *xdp = frames[i];

		if (!txr || !bnxt_tx_avail(bp, txr) ||
		    !(bp->bnapi[ring]->flags & BNXT_NAPI_FLAG_XDP)) {
			xdp_return_frame_rx_napi(xdp);
			drops++;
			continue;
		}
		mapping = dma_map_single(&pdev->dev, xdp->data, xdp->len,
					 DMA_TO_DEVICE);
		if (dma_mapping_error(&pdev->dev, mapping)) {
			xdp_return_frame_rx_napi(xdp);
			drops++;
			continue;
		}
		__bnxt_xmit_xdp_redirect(bp, txr, mapping, xdp->len, xdp);
	}

	if (flags & XDP_XMIT_FLUSH) {
		wmb();	/* Sync BD data before updating doorbell */
		bnxt_db_write(bp, &txr->tx_db, txr->tx_prod);
	}
	return num_frames - drops;
}
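
bnxt_xdp_xmit() implements the device's ndo_xdp_xmit hook, reached when a program on some interface redirects frames toward this one, typically via bpf_redirect() in the source program. A minimal sender-side sketch (TARGET_IFINDEX is a hypothetical ifindex, not anything from the driver):

	// SPDX-License-Identifier: GPL-2.0
	#include <linux/bpf.h>
	#include <bpf/bpf_helpers.h>

	#define TARGET_IFINDEX 4	/* hypothetical: ifindex of the bnxt port */

	SEC("xdp")
	int xdp_fwd(struct xdp_md *ctx)
	{
		return bpf_redirect(TARGET_IFINDEX, 0);
	}

	char _license[] SEC("license") = "GPL";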
static int bnxt_xdp_set(struct bnxt *bp, struct bpf_prog *prog)
{
	struct net_device *dev = bp->dev;
	int tx_xdp = 0, rc, tc;
	struct bpf_prog *old;

	if (prog && bp->dev->mtu > BNXT_MAX_PAGE_MODE_MTU) {
		netdev_warn(dev, "MTU %d larger than largest XDP supported MTU %d.\n",
			    bp->dev->mtu, BNXT_MAX_PAGE_MODE_MTU);
		return -EOPNOTSUPP;
	}
	if (!(bp->flags & BNXT_FLAG_SHARED_RINGS)) {
		netdev_warn(dev, "ethtool rx/tx channels must be combined to support XDP.\n");
		return -EOPNOTSUPP;
	}
	if (prog)
		tx_xdp = bp->rx_nr_rings;	/* one XDP TX ring per RX ring */

	tc = netdev_get_num_tc(dev);
	if (!tc)
		tc = 1;
	rc = bnxt_check_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings,
			      true, tc, tx_xdp);
	if (rc) {
		netdev_warn(dev, "Unable to reserve enough TX rings to support XDP.\n");
		return rc;
	}
	/* ... close the NIC if it is running ... */
	old = xchg(&bp->xdp_prog, prog);
	if (old)
		bpf_prog_put(old);

	if (!prog) {
		/* ... back to SKB mode; if multiple RX rings remain: ... */
		bp->flags &= ~BNXT_FLAG_NO_AGG_RINGS;
		bp->dev->hw_features |= NETIF_F_LRO;
	}
	bp->tx_nr_rings_xdp = tx_xdp;
	bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc + tx_xdp;
	bp->cp_nr_rings = max_t(int, bp->tx_nr_rings, bp->rx_nr_rings);
	/* ... update ring params and reopen the NIC ... */
	return 0;
}
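
Worked example of the ring arithmetic, assuming 4 combined channels and a single traffic class: attaching a program sets tx_xdp = rx_nr_rings = 4, so tx_nr_rings = 4 * 1 + 4 = 8 and cp_nr_rings = max(8, 4) = 8; every RX ring gains a dedicated XDP TX ring alongside the regular one.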
int bnxt_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
	struct bnxt *bp = netdev_priv(dev);
	int rc;

	switch (xdp->command) {
	case XDP_SETUP_PROG:
		rc = bnxt_xdp_set(bp, xdp->prog);
		break;
	default:
		rc = -EINVAL;
		break;
	}
	return rc;
}
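
Both entry points are wired up through the driver's netdev ops; a sketch of the relevant fields as registered in bnxt.c:

	static const struct net_device_ops bnxt_netdev_ops = {
		/* ... */
		.ndo_bpf	= bnxt_xdp,
		.ndo_xdp_xmit	= bnxt_xdp_xmit,
	};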