// SPDX-License-Identifier: GPL-2.0+

#include <net/page_pool/helpers.h>

#include "fdma_api.h"	/* shared Microchip FDMA ring helpers */
#include "lan969x.h"	/* reused Sparx5 driver state; exact header set assumed */
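
/* lan969x frame DMA (FDMA): frames move between the switch and memory via
 * rings of DCBs (DMA control blocks), each owning one or more DBs (data
 * blocks). The lan969x driver reuses the Sparx5 driver's RX/TX state, so the
 * fdma library's opaque priv pointer holds a struct sparx5.
 */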
#define FDMA_PRIV(fdma) ((struct sparx5 *)((fdma)->priv))
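
/* Callback used by the fdma library to resolve the data pointer of a TX
 * block; the DMA address was set up when the skb was mapped in
 * lan969x_fdma_xmit().
 */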
static int lan969x_fdma_tx_dataptr_cb(struct fdma *fdma, int dcb, int db,
				      u64 *dataptr)
{
	*dataptr = FDMA_PRIV(fdma)->tx.dbs[dcb].dma_addr;

	return 0;
}
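
/* Callback used by the fdma library to attach a fresh page-pool page to an
 * RX data block and resolve its DMA address.
 */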
static int lan969x_fdma_rx_dataptr_cb(struct fdma *fdma, int dcb, int db,
				      u64 *dataptr)
{
	struct sparx5_rx *rx = &FDMA_PRIV(fdma)->rx;
	struct page *page;

	page = page_pool_dev_alloc_pages(rx->page_pool);
	if (unlikely(!page))
		return -ENOMEM;

	rx->page[dcb][db] = page;
	*dataptr = page_pool_get_dma_addr(page);

	return 0;
}
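
/* Pick the first TX DCB that is neither in flight nor the ring's current
 * last entry.
 */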
static int lan969x_fdma_get_next_dcb(struct sparx5_tx *tx)
{
	struct fdma *fdma = &tx->fdma;

	for (int i = 0; i < fdma->n_dcbs; ++i)
		if (!tx->dbs[i].used && !fdma_is_last(fdma, &fdma->dcbs[i]))
			return i;

	return -ENOSPC;
}
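
/* Reclaim TX buffers the hardware has completed: update stats, unmap the
 * DMA address and free the skb, unless it is held back for a two-step PTP
 * timestamp.
 */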
static void lan969x_fdma_tx_clear_buf(struct sparx5 *sparx5, int weight)
{
	struct fdma *fdma = &sparx5->tx.fdma;
	struct sparx5_tx_buf *db;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&sparx5->tx_lock, flags);

	for (i = 0; i < fdma->n_dcbs; ++i) {
		db = &sparx5->tx.dbs[i];

		if (!db->used)
			continue;

		if (!fdma_db_is_done(fdma_db_get(fdma, i, 0)))
			continue;

		db->dev->stats.tx_bytes += db->skb->len;
		db->dev->stats.tx_packets++;
		sparx5->tx.packets++;

		dma_unmap_single(sparx5->dev,
				 db->dma_addr,
				 db->skb->len,
				 DMA_TO_DEVICE);

		if (!db->ptp)
			napi_consume_skb(db->skb, weight);

		db->used = false;
	}

	spin_unlock_irqrestore(&sparx5->tx_lock, flags);
}
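
/* Return all RX pages to the page pool on teardown. */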
static void lan969x_fdma_free_pages(struct sparx5_rx *rx)
{
	struct fdma *fdma = &rx->fdma;

	for (int i = 0; i < fdma->n_dcbs; ++i) {
		for (int j = 0; j < fdma->n_dbs; ++j)
			page_pool_put_full_page(rx->page_pool,
						rx->page[i][j], false);
	}
}
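
/* Build an skb around the frame in the current RX data block. The IFH
 * (internal frame header) prepended by the switch identifies the source
 * port; on any error the page is recycled back into the pool.
 */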
static struct sk_buff *lan969x_fdma_rx_get_frame(struct sparx5 *sparx5,
						 struct sparx5_rx *rx)
{
	const struct sparx5_consts *consts = sparx5->data->consts;
	struct fdma *fdma = &rx->fdma;
	struct sparx5_port *port;
	struct frame_info fi;
	struct sk_buff *skb;
	struct fdma_db *db;
	struct page *page;

	db = &fdma->dcbs[fdma->dcb_index].db[fdma->db_index];
	page = rx->page[fdma->dcb_index][fdma->db_index];

	sparx5_ifh_parse(sparx5, page_address(page), &fi);
	port = fi.src_port < consts->n_ports ? sparx5->ports[fi.src_port] :
					       NULL;
	if (WARN_ON(!port))
		goto free_page;

	skb = build_skb(page_address(page), fdma->db_size);
	if (unlikely(!skb))
		goto free_page;

	skb_mark_for_recycle(skb);
	skb_put(skb, fdma_db_len_get(db));
	skb_pull(skb, IFH_LEN * sizeof(u32));

	skb->dev = port->ndev;

	if (likely(!(skb->dev->features & NETIF_F_RXFCS)))
		skb_trim(skb, skb->len - ETH_FCS_LEN);

	sparx5_ptp_rxtstamp(sparx5, skb, fi.timestamp);
	skb->protocol = eth_type_trans(skb, skb->dev);

	if (test_bit(port->portno, sparx5->bridge_mask))
		skb->offload_fwd_mark = 1;

	skb->dev->stats.rx_bytes += skb->len;
	skb->dev->stats.rx_packets++;

	return skb;

free_page:
	page_pool_recycle_direct(rx->page_pool, page);

	return NULL;
}
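
/* Allocate the RX page pool and the coherent DCB ring, then initialize
 * every DCB with the data-block size and an interrupt-on-done status word.
 */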
static int lan969x_fdma_rx_alloc(struct sparx5 *sparx5)
{
	struct sparx5_rx *rx = &sparx5->rx;
	struct fdma *fdma = &rx->fdma;
	int err;

	struct page_pool_params pp_params = {
		.order = 0,
		.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
		.pool_size = fdma->n_dcbs * fdma->n_dbs,
		.nid = NUMA_NO_NODE,
		.dev = sparx5->dev,
		.dma_dir = DMA_FROM_DEVICE,
		.offset = 0,
		.max_len = fdma->db_size -
			   SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
	};

	rx->page_pool = page_pool_create(&pp_params);
	if (IS_ERR(rx->page_pool))
		return PTR_ERR(rx->page_pool);

	err = fdma_alloc_coherent(sparx5->dev, fdma);
	if (err)
		return err;

	fdma_dcbs_init(fdma,
		       FDMA_DCB_INFO_DATAL(fdma->db_size),
		       FDMA_DCB_STATUS_INTR);

	return 0;
}
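
/* Allocate the TX bookkeeping array and the coherent DCB ring. TX DCBs
 * start out marked done, since nothing is in flight yet.
 */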
static int lan969x_fdma_tx_alloc(struct sparx5 *sparx5)
{
	struct sparx5_tx *tx = &sparx5->tx;
	struct fdma *fdma = &tx->fdma;
	int err;

	tx->dbs = kcalloc(fdma->n_dcbs,
			  sizeof(struct sparx5_tx_buf),
			  GFP_KERNEL);
	if (!tx->dbs)
		return -ENOMEM;

	err = fdma_alloc_coherent(sparx5->dev, fdma);
	if (err) {
		kfree(tx->dbs);
		return err;
	}

	fdma_dcbs_init(fdma,
		       FDMA_DCB_INFO_DATAL(fdma->db_size),
		       FDMA_DCB_STATUS_DONE);

	return 0;
}
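
/* Static setup of the RX (extraction) channel: one data block per DCB,
 * page-sized data blocks, and the callbacks the fdma library uses to
 * populate the ring.
 */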
static void lan969x_fdma_rx_init(struct sparx5 *sparx5)
{
	struct fdma *fdma = &sparx5->rx.fdma;

	fdma->channel_id = FDMA_XTR_CHANNEL;
	fdma->n_dcbs = FDMA_DCB_MAX;
	fdma->n_dbs = 1;
	fdma->priv = sparx5;
	fdma->size = fdma_get_size(fdma);
	fdma->db_size = PAGE_SIZE;
	fdma->ops.dataptr_cb = &lan969x_fdma_rx_dataptr_cb;
	fdma->ops.nextptr_cb = &fdma_nextptr_cb;

	/* Fetch a netdev for skb and NAPI use; any port will do */
	for (int idx = 0; idx < sparx5->data->consts->n_ports; ++idx) {
		struct sparx5_port *port = sparx5->ports[idx];

		if (port && port->ndev) {
			sparx5->rx.ndev = port->ndev;
			break;
		}
	}
}
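
/* Static setup of the TX (injection) channel, mirroring the RX setup. */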
static void lan969x_fdma_tx_init(struct sparx5 *sparx5)
{
	struct fdma *fdma = &sparx5->tx.fdma;

	fdma->channel_id = FDMA_INJ_CHANNEL;
	fdma->n_dcbs = FDMA_DCB_MAX;
	fdma->n_dbs = 1;
	fdma->priv = sparx5;
	fdma->size = fdma_get_size(fdma);
	fdma->db_size = PAGE_SIZE;
	fdma->ops.dataptr_cb = &lan969x_fdma_tx_dataptr_cb;
	fdma->ops.nextptr_cb = &fdma_nextptr_cb;
}
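
/* NAPI poll: reclaim completed TX buffers, pass up to 'weight' received
 * frames to the stack, then refill the consumed DCBs with fresh pages and
 * hand them back to the hardware.
 */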
static int lan969x_fdma_napi_poll(struct napi_struct *napi, int weight)
{
	struct sparx5_rx *rx = container_of(napi, struct sparx5_rx, napi);
	struct sparx5 *sparx5 = container_of(rx, struct sparx5, rx);
	int old_dcb, dcb_reload, counter = 0;
	struct fdma *fdma = &rx->fdma;
	struct sk_buff *skb;

	dcb_reload = fdma->dcb_index;

	lan969x_fdma_tx_clear_buf(sparx5, weight);

	/* Process RX data */
	while (counter < weight) {
		if (!fdma_has_frames(fdma))
			break;

		skb = lan969x_fdma_rx_get_frame(sparx5, rx);
		if (!skb)
			break;

		napi_gro_receive(&rx->napi, skb);

		/* RX runs a single DB per DCB, so move on to the next DCB */
		fdma_db_reset(fdma);
		fdma_dcb_advance(fdma);
		counter++;
	}

	/* Refill the consumed DCBs with fresh pages and hand them back */
	while (dcb_reload != fdma->dcb_index) {
		old_dcb = dcb_reload;
		dcb_reload++;
		dcb_reload &= fdma->n_dcbs - 1; /* n_dcbs is a power of two */

		fdma_dcb_add(fdma, old_dcb,
			     FDMA_DCB_INFO_DATAL(fdma->db_size),
			     FDMA_DCB_STATUS_INTR);

		sparx5_fdma_reload(sparx5, fdma);
	}

	if (counter < weight && napi_complete_done(napi, counter))
		spx5_rmw(BIT(fdma->channel_id),
			 BIT(fdma->channel_id) & FDMA_INTR_DB_ENA_INTR_DB_ENA,
			 sparx5, FDMA_INTR_DB_ENA);

	return counter;
}
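
/* Inject one frame: prepend the IFH, reserve room for the FCS, map the skb
 * for DMA and hand a DCB describing it to the hardware.
 */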
int lan969x_fdma_xmit(struct sparx5 *sparx5, u32 *ifh, struct sk_buff *skb,
		      struct net_device *dev)
{
	int next_dcb, needed_headroom, needed_tailroom, err;
	struct sparx5_tx *tx = &sparx5->tx;
	struct fdma *fdma = &tx->fdma;
	struct sparx5_tx_buf *db_buf;
	u64 status;

	next_dcb = lan969x_fdma_get_next_dcb(tx);
	if (next_dcb < 0)
		return -EBUSY;

	needed_headroom = max_t(int, IFH_LEN * 4 - skb_headroom(skb), 0);
	needed_tailroom = max_t(int, ETH_FCS_LEN - skb_tailroom(skb), 0);
	if (needed_headroom || needed_tailroom || skb_header_cloned(skb)) {
		err = pskb_expand_head(skb, needed_headroom, needed_tailroom,
				       GFP_ATOMIC);
		if (unlikely(err))
			return err;
	}

	skb_push(skb, IFH_LEN * 4);
	memcpy(skb->data, ifh, IFH_LEN * 4);
	skb_put(skb, ETH_FCS_LEN);

	db_buf = &tx->dbs[next_dcb];
	db_buf->dma_addr = dma_map_single(sparx5->dev,
					  skb->data,
					  skb->len,
					  DMA_TO_DEVICE);
	if (dma_mapping_error(sparx5->dev, db_buf->dma_addr))
		return -ENOMEM;

	db_buf->dev = dev;
	db_buf->skb = skb;
	db_buf->ptp = false;
	db_buf->used = true;

	if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
	    SPARX5_SKB_CB(skb)->rew_op == IFH_REW_OP_TWO_STEP_PTP)
		db_buf->ptp = true;

	status = FDMA_DCB_STATUS_SOF |
		 FDMA_DCB_STATUS_EOF |
		 FDMA_DCB_STATUS_BLOCKO(0) |
		 FDMA_DCB_STATUS_BLOCKL(skb->len) |
		 FDMA_DCB_STATUS_INTR;

	fdma_dcb_advance(fdma);
	fdma_dcb_add(fdma, next_dcb, 0, status);

	sparx5_fdma_reload(sparx5, fdma);

	return NETDEV_TX_OK;
}
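
/* One-time bring-up: configure both channels, set the DMA mask and allocate
 * the RX and TX rings, unwinding RX if TX allocation fails.
 */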
int lan969x_fdma_init(struct sparx5 *sparx5)
{
	struct sparx5_rx *rx = &sparx5->rx;
	int err;

	lan969x_fdma_rx_init(sparx5);
	lan969x_fdma_tx_init(sparx5);

	err = dma_set_mask_and_coherent(sparx5->dev, DMA_BIT_MASK(64));
	if (err) {
		dev_err(sparx5->dev, "Failed to set 64-bit FDMA mask");
		return err;
	}

	err = lan969x_fdma_rx_alloc(sparx5);
	if (err) {
		dev_err(sparx5->dev, "Failed to allocate RX buffers: %d\n",
			err);
		return err;
	}

	err = lan969x_fdma_tx_alloc(sparx5);
	if (err) {
		fdma_free_coherent(sparx5->dev, &rx->fdma);
		dev_err(sparx5->dev, "Failed to allocate TX buffers: %d\n",
			err);
		return err;
	}

	return 0;
}
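
/* Teardown in reverse order: stop the channels, free both DCB rings, then
 * release the RX pages and the pool itself.
 */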
int lan969x_fdma_deinit(struct sparx5 *sparx5)
{
	struct sparx5_rx *rx = &sparx5->rx;
	struct sparx5_tx *tx = &sparx5->tx;

	sparx5_fdma_stop(sparx5);
	fdma_free_coherent(sparx5->dev, &tx->fdma);
	fdma_free_coherent(sparx5->dev, &rx->fdma);
	lan969x_fdma_free_pages(rx);
	page_pool_destroy(rx->page_pool);

	return 0;
}