// SPDX-License-Identifier: GPL-2.0+
/* Microchip lan969x Switch driver
 *
 * Copyright (c) 2025 Microchip Technology Inc. and its subsidiaries.
 */
#include <net/page_pool/helpers.h>

#include "../sparx5_main.h"
#include "../sparx5_main_regs.h"
#include "../sparx5_port.h"

#include "fdma_api.h"
#include "lan969x.h"

#define FDMA_PRIV(fdma) ((struct sparx5 *)((fdma)->priv))

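/* FDMA dataptr callback for TX. The skb data was already DMA-mapped in
 * lan969x_fdma_xmit(), so just hand the stored DMA address of this DCB's
 * buffer back to the FDMA library.
 */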
static int lan969x_fdma_tx_dataptr_cb(struct fdma *fdma, int dcb, int db,
				      u64 *dataptr)
{
	*dataptr = FDMA_PRIV(fdma)->tx.dbs[dcb].dma_addr;

	return 0;
}

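/* FDMA dataptr callback for RX. Pull a fresh page from the page pool,
 * remember it for the later skb build and report its DMA address.
 */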
static int lan969x_fdma_rx_dataptr_cb(struct fdma *fdma, int dcb, int db,
				      u64 *dataptr)
{
	struct sparx5_rx *rx = &FDMA_PRIV(fdma)->rx;
	struct page *page;

	page = page_pool_dev_alloc_pages(rx->page_pool);
	if (unlikely(!page))
		return -ENOMEM;

	rx->page[dcb][db] = page;

	*dataptr = page_pool_get_dma_addr(page);

	return 0;
}

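/* Find a TX DCB that is neither in use nor the last DCB in the hardware
 * chain. Returns a DCB index, or -ENOSPC if all are busy.
 */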
static int lan969x_fdma_get_next_dcb(struct sparx5_tx *tx)
{
	struct fdma *fdma = &tx->fdma;

	for (int i = 0; i < fdma->n_dcbs; ++i)
		if (!tx->dbs[i].used && !fdma_is_last(fdma, &fdma->dcbs[i]))
			return i;

	return -ENOSPC;
}

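/* Reclaim TX buffers whose data blocks the hardware has marked done:
 * update stats, unmap the DMA buffer and drop the skb. skbs awaiting a
 * two-step PTP timestamp are not freed here.
 */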
static void lan969x_fdma_tx_clear_buf(struct sparx5 *sparx5, int weight)
{
	struct fdma *fdma = &sparx5->tx.fdma;
	struct sparx5_tx_buf *db;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&sparx5->tx_lock, flags);

	for (i = 0; i < fdma->n_dcbs; ++i) {
		db = &sparx5->tx.dbs[i];

		if (!db->used)
			continue;

		if (!fdma_db_is_done(fdma_db_get(fdma, i, 0)))
			continue;

		db->dev->stats.tx_bytes += db->skb->len;
		db->dev->stats.tx_packets++;
		sparx5->tx.packets++;

		dma_unmap_single(sparx5->dev,
				 db->dma_addr,
				 db->skb->len,
				 DMA_TO_DEVICE);

		if (!db->ptp)
			napi_consume_skb(db->skb, weight);

		db->used = false;
	}

	spin_unlock_irqrestore(&sparx5->tx_lock, flags);
}

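/* Hand every RX page still held by the driver back to the page pool. */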
static void lan969x_fdma_free_pages(struct sparx5_rx *rx)
{
	struct fdma *fdma = &rx->fdma;

	for (int i = 0; i < fdma->n_dcbs; ++i) {
		for (int j = 0; j < fdma->n_dbs; ++j)
			page_pool_put_full_page(rx->page_pool,
						rx->page[i][j], false);
	}
}

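/* Build an skb around the page backing the current RX data block. The IFH
 * at the start of the buffer identifies the source port and carries the RX
 * timestamp; it is parsed and then pulled off before the frame goes up the
 * stack.
 */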
static struct sk_buff *lan969x_fdma_rx_get_frame(struct sparx5 *sparx5,
						 struct sparx5_rx *rx)
{
	const struct sparx5_consts *consts = sparx5->data->consts;
	struct fdma *fdma = &rx->fdma;
	struct sparx5_port *port;
	struct frame_info fi;
	struct sk_buff *skb;
	struct fdma_db *db;
	struct page *page;

	db = &fdma->dcbs[fdma->dcb_index].db[fdma->db_index];
	page = rx->page[fdma->dcb_index][fdma->db_index];

	sparx5_ifh_parse(sparx5, page_address(page), &fi);
	port = fi.src_port < consts->n_ports ? sparx5->ports[fi.src_port] :
					       NULL;
	if (WARN_ON(!port))
		goto free_page;

	skb = build_skb(page_address(page), fdma->db_size);
	if (unlikely(!skb))
		goto free_page;

	skb_mark_for_recycle(skb);
	skb_put(skb, fdma_db_len_get(db));
	skb_pull(skb, IFH_LEN * sizeof(u32));

	skb->dev = port->ndev;

	if (likely(!(skb->dev->features & NETIF_F_RXFCS)))
		skb_trim(skb, skb->len - ETH_FCS_LEN);

	sparx5_ptp_rxtstamp(sparx5, skb, fi.timestamp);
	skb->protocol = eth_type_trans(skb, skb->dev);

	if (test_bit(port->portno, sparx5->bridge_mask))
		skb->offload_fwd_mark = 1;

	skb->dev->stats.rx_bytes += skb->len;
	skb->dev->stats.rx_packets++;

	return skb;

free_page:
	page_pool_recycle_direct(rx->page_pool, page);

	return NULL;
}

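/* Create the RX page pool and the DMA-coherent DCB memory. Each data block
 * is backed by a full page; max_len keeps the tail of the page free for the
 * skb_shared_info that build_skb() places there.
 */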
static int lan969x_fdma_rx_alloc(struct sparx5 *sparx5)
{
	struct sparx5_rx *rx = &sparx5->rx;
	struct fdma *fdma = &rx->fdma;
	int err;

	struct page_pool_params pp_params = {
		.order = 0,
		.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
		.pool_size = fdma->n_dcbs * fdma->n_dbs,
		.nid = NUMA_NO_NODE,
		.dev = sparx5->dev,
		.dma_dir = DMA_FROM_DEVICE,
		.offset = 0,
		.max_len = fdma->db_size -
			   SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
	};

	rx->page_pool = page_pool_create(&pp_params);
	if (IS_ERR(rx->page_pool))
		return PTR_ERR(rx->page_pool);

	err = fdma_alloc_coherent(sparx5->dev, fdma);
	if (err)
		return err;

	fdma_dcbs_init(fdma,
		       FDMA_DCB_INFO_DATAL(fdma->db_size),
		       FDMA_DCB_STATUS_INTR);

	return 0;
}

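/* Allocate the TX buffer bookkeeping array and the DMA-coherent DCB memory.
 * TX DCBs start out with the DONE status bit set, unlike RX DCBs which are
 * armed with interrupts enabled.
 */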
static int lan969x_fdma_tx_alloc(struct sparx5 *sparx5)
{
	struct sparx5_tx *tx = &sparx5->tx;
	struct fdma *fdma = &tx->fdma;
	int err;

	tx->dbs = kcalloc(fdma->n_dcbs, sizeof(struct sparx5_tx_buf),
			  GFP_KERNEL);
	if (!tx->dbs)
		return -ENOMEM;

	err = fdma_alloc_coherent(sparx5->dev, fdma);
	if (err) {
		kfree(tx->dbs);
		return err;
	}

	fdma_dcbs_init(fdma,
		       FDMA_DCB_INFO_DATAL(fdma->db_size),
		       FDMA_DCB_STATUS_DONE);

	return 0;
}

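/* Initialize the extraction (XTR) channel: FDMA_DCB_MAX DCBs with a single
 * page-sized data block each, refilled through the RX dataptr callback.
 */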
static void lan969x_fdma_rx_init(struct sparx5 *sparx5)
{
	struct fdma *fdma = &sparx5->rx.fdma;

	fdma->channel_id = FDMA_XTR_CHANNEL;
	fdma->n_dcbs = FDMA_DCB_MAX;
	fdma->n_dbs = 1;
	fdma->priv = sparx5;
	fdma->size = fdma_get_size(fdma);
	fdma->db_size = PAGE_SIZE;
	fdma->ops.dataptr_cb = &lan969x_fdma_rx_dataptr_cb;
	fdma->ops.nextptr_cb = &fdma_nextptr_cb;

	/* Fetch a netdev for SKB and NAPI use, any will do */
	for (int idx = 0; idx < sparx5->data->consts->n_ports; ++idx) {
		struct sparx5_port *port = sparx5->ports[idx];

		if (port && port->ndev) {
			sparx5->rx.ndev = port->ndev;
			break;
		}
	}
}

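/* Initialize the injection (INJ) channel with the same DCB layout as RX. */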
static void lan969x_fdma_tx_init(struct sparx5 *sparx5)
{
	struct fdma *fdma = &sparx5->tx.fdma;

	fdma->channel_id = FDMA_INJ_CHANNEL;
	fdma->n_dcbs = FDMA_DCB_MAX;
	fdma->n_dbs = 1;
	fdma->priv = sparx5;
	fdma->size = fdma_get_size(fdma);
	fdma->db_size = PAGE_SIZE;
	fdma->ops.dataptr_cb = &lan969x_fdma_tx_dataptr_cb;
	fdma->ops.nextptr_cb = &fdma_nextptr_cb;
}

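/* NAPI poll: reclaim completed TX buffers, receive up to the NAPI weight in
 * frames, refill the consumed DCBs with fresh pages and re-enable the FDMA
 * interrupts once the poll completes.
 */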
int lan969x_fdma_napi_poll(struct napi_struct *napi, int weight)
{
	struct sparx5_rx *rx = container_of(napi, struct sparx5_rx, napi);
	struct sparx5 *sparx5 = container_of(rx, struct sparx5, rx);
	int old_dcb, dcb_reload, counter = 0;
	struct fdma *fdma = &rx->fdma;
	struct sk_buff *skb;

	dcb_reload = fdma->dcb_index;

	lan969x_fdma_tx_clear_buf(sparx5, weight);

	/* Process RX data */
	while (counter < weight) {
		if (!fdma_has_frames(fdma))
			break;

		skb = lan969x_fdma_rx_get_frame(sparx5, rx);
		if (!skb)
			break;

		napi_gro_receive(&rx->napi, skb);

		fdma_db_advance(fdma);
		counter++;
		/* Check if the DCB can be reused */
		if (fdma_dcb_is_reusable(fdma))
			continue;

		fdma_db_reset(fdma);
		fdma_dcb_advance(fdma);
	}

	/* Allocate new pages and map them */
	while (dcb_reload != fdma->dcb_index) {
		old_dcb = dcb_reload;
		dcb_reload++;
		/* n_dcbs must be a power of 2 */
		dcb_reload &= fdma->n_dcbs - 1;

		fdma_dcb_add(fdma,
			     old_dcb,
			     FDMA_DCB_INFO_DATAL(fdma->db_size),
			     FDMA_DCB_STATUS_INTR);

		sparx5_fdma_reload(sparx5, fdma);
	}

	if (counter < weight && napi_complete_done(napi, counter))
		spx5_wr(0xff, sparx5, FDMA_INTR_DB_ENA);

	return counter;
}

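/* Inject a frame over FDMA: make room for the IFH in front of the payload
 * and the FCS behind it, DMA-map the buffer and hand a DCB describing the
 * frame to the hardware.
 */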
int lan969x_fdma_xmit(struct sparx5 *sparx5, u32 *ifh, struct sk_buff *skb,
		      struct net_device *dev)
{
	int next_dcb, needed_headroom, needed_tailroom, err;
	struct sparx5_tx *tx = &sparx5->tx;
	struct fdma *fdma = &tx->fdma;
	struct sparx5_tx_buf *db_buf;
	u64 status;

	next_dcb = lan969x_fdma_get_next_dcb(tx);
	if (next_dcb < 0)
		return -EBUSY;

	needed_headroom = max_t(int, IFH_LEN * 4 - skb_headroom(skb), 0);
	needed_tailroom = max_t(int, ETH_FCS_LEN - skb_tailroom(skb), 0);
	if (needed_headroom || needed_tailroom || skb_header_cloned(skb)) {
		err = pskb_expand_head(skb, needed_headroom, needed_tailroom,
				       GFP_ATOMIC);
		if (unlikely(err))
			return err;
	}

	skb_push(skb, IFH_LEN * 4);
	memcpy(skb->data, ifh, IFH_LEN * 4);
	skb_put(skb, ETH_FCS_LEN);

	db_buf = &tx->dbs[next_dcb];
	db_buf->dma_addr = dma_map_single(sparx5->dev,
					  skb->data,
					  skb->len,
					  DMA_TO_DEVICE);
	if (dma_mapping_error(sparx5->dev, db_buf->dma_addr))
		return -ENOMEM;

	db_buf->dev = dev;
	db_buf->skb = skb;
	db_buf->ptp = false;
	db_buf->used = true;

	if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
	    SPARX5_SKB_CB(skb)->rew_op == IFH_REW_OP_TWO_STEP_PTP)
		db_buf->ptp = true;

	status = FDMA_DCB_STATUS_SOF |
		 FDMA_DCB_STATUS_EOF |
		 FDMA_DCB_STATUS_BLOCKO(0) |
		 FDMA_DCB_STATUS_BLOCKL(skb->len) |
		 FDMA_DCB_STATUS_INTR;

	fdma_dcb_advance(fdma);
	fdma_dcb_add(fdma, next_dcb, 0, status);

	sparx5_fdma_reload(sparx5, fdma);

	return NETDEV_TX_OK;
}

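/* Bring up FDMA: configure the RX and TX channels, set the DMA mask,
 * allocate the buffers and pulse the FDMA reset.
 */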
int lan969x_fdma_init(struct sparx5 *sparx5)
{
	struct sparx5_rx *rx = &sparx5->rx;
	int err;

	lan969x_fdma_rx_init(sparx5);
	lan969x_fdma_tx_init(sparx5);
	sparx5_fdma_injection_mode(sparx5);

	err = dma_set_mask_and_coherent(sparx5->dev, DMA_BIT_MASK(64));
	if (err) {
		dev_err(sparx5->dev, "Failed to set 64-bit FDMA mask\n");
		return err;
	}

	err = lan969x_fdma_rx_alloc(sparx5);
	if (err) {
		dev_err(sparx5->dev, "Failed to allocate RX buffers: %d\n",
			err);
		return err;
	}

	err = lan969x_fdma_tx_alloc(sparx5);
	if (err) {
		fdma_free_coherent(sparx5->dev, &rx->fdma);
		dev_err(sparx5->dev, "Failed to allocate TX buffers: %d\n",
			err);
		return err;
	}

	/* Reset FDMA state */
	spx5_wr(FDMA_CTRL_NRESET_SET(0), sparx5, FDMA_CTRL);
	spx5_wr(FDMA_CTRL_NRESET_SET(1), sparx5, FDMA_CTRL);

	return err;
}

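/* Tear down FDMA: stop the hardware, free the coherent DCB memory, return
 * all RX pages and destroy the page pool.
 */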
int lan969x_fdma_deinit(struct sparx5 *sparx5)
{
	struct sparx5_rx *rx = &sparx5->rx;
	struct sparx5_tx *tx = &sparx5->tx;

	sparx5_fdma_stop(sparx5);
	fdma_free_coherent(sparx5->dev, &tx->fdma);
	fdma_free_coherent(sparx5->dev, &rx->fdma);
	lan969x_fdma_free_pages(rx);
	page_pool_destroy(rx->page_pool);

	return 0;
}