// SPDX-License-Identifier: GPL-2.0+

#include <linux/bpf.h>
#include <linux/filter.h>
#include <net/page_pool/helpers.h>

#include "lan966x_main.h"

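/* FDMA dataptr callback for the RX channel: allocate a fresh page from the
 * RX page pool for the given DCB/DB slot and return its DMA address, offset
 * by XDP_PACKET_HEADROOM.
 */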
static int lan966x_fdma_rx_dataptr_cb(struct fdma *fdma, int dcb, int db,
				      u64 *dataptr)
{
	struct lan966x *lan966x = (struct lan966x *)fdma->priv;
	struct lan966x_rx *rx = &lan966x->rx;
	struct page *page;

	page = page_pool_dev_alloc_pages(rx->page_pool);
	if (unlikely(!page))
		return -ENOMEM;

	rx->page[dcb][db] = page;
	*dataptr = page_pool_get_dma_addr(page) + XDP_PACKET_HEADROOM;

	return 0;
}

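/* FDMA dataptr callback for the TX channel: return the already mapped DMA
 * address of the buffer assigned to this DCB.
 */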
static int lan966x_fdma_tx_dataptr_cb(struct fdma *fdma, int dcb, int db,
				      u64 *dataptr)
{
	struct lan966x *lan966x = (struct lan966x *)fdma->priv;

	*dataptr = lan966x->tx.dcbs_buf[dcb].dma_addr;

	return 0;
}

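/* FDMA dataptr callback for XDP TX buffers: the frame data starts after the
 * XDP headroom in the buffer.
 */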
static int lan966x_fdma_xdp_tx_dataptr_cb(struct fdma *fdma, int dcb, int db,
					  u64 *dataptr)
{
	struct lan966x *lan966x = (struct lan966x *)fdma->priv;

	*dataptr = lan966x->tx.dcbs_buf[dcb].dma_addr + XDP_PACKET_HEADROOM;

	return 0;
}

static int lan966x_fdma_channel_active(struct lan966x *lan966x)
{
	return lan_rd(lan966x, FDMA_CH_ACTIVE);
}

static void lan966x_fdma_rx_free_pages(struct lan966x_rx *rx)
{
	struct fdma *fdma = &rx->fdma;
	int i, j;

	for (i = 0; i < fdma->n_dcbs; ++i) {
		for (j = 0; j < fdma->n_dbs; ++j)
			page_pool_put_full_page(rx->page_pool,
						rx->page[i][j], false);
	}
}

static void lan966x_fdma_rx_free_page(struct lan966x_rx *rx)
{
	struct fdma *fdma = &rx->fdma;
	struct page *page;

	page = rx->page[fdma->dcb_index][fdma->db_index];
	if (unlikely(!page))
		return;

	page_pool_recycle_direct(rx->page_pool, page);
}

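/* Create the RX page pool and register it as the memory model of every
 * port's XDP RX queue.
 */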
static int lan966x_fdma_rx_alloc_page_pool(struct lan966x_rx *rx)
{
	struct lan966x *lan966x = rx->lan966x;
	struct page_pool_params pp_params = {
		.order = rx->page_order,
		.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
		.pool_size = rx->fdma.n_dcbs,
		.nid = NUMA_NO_NODE,
		.dev = lan966x->dev,
		.dma_dir = DMA_FROM_DEVICE,
		.offset = XDP_PACKET_HEADROOM,
		.max_len = rx->max_mtu -
			   SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
	};

	if (lan966x_xdp_present(lan966x))
		pp_params.dma_dir = DMA_BIDIRECTIONAL;

	rx->page_pool = page_pool_create(&pp_params);
	if (unlikely(IS_ERR(rx->page_pool)))
		return PTR_ERR(rx->page_pool);

	for (int i = 0; i < lan966x->num_phys_ports; ++i) {
		struct lan966x_port *port;

		if (!lan966x->ports[i])
			continue;

		port = lan966x->ports[i];
		xdp_rxq_info_unreg_mem_model(&port->xdp_rxq);
		xdp_rxq_info_reg_mem_model(&port->xdp_rxq, MEM_TYPE_PAGE_POOL,
					   rx->page_pool);
	}

	return 0;
}

static int lan966x_fdma_rx_alloc(struct lan966x_rx *rx)
{
	struct lan966x *lan966x = rx->lan966x;
	struct fdma *fdma = &rx->fdma;
	int err;

	if (lan966x_fdma_rx_alloc_page_pool(rx))
		return PTR_ERR(rx->page_pool);

	err = fdma_alloc_coherent(lan966x->dev, fdma);
	if (err) {
		page_pool_destroy(rx->page_pool);
		return err;
	}

	fdma_dcbs_init(fdma, FDMA_DCB_INFO_DATAL(fdma->db_size),
		       FDMA_DCB_STATUS_INTR);

	return 0;
}

static void lan966x_fdma_rx_start(struct lan966x_rx *rx)
{
	struct lan966x *lan966x = rx->lan966x;
	struct fdma *fdma = &rx->fdma;
	u32 mask;

	/* When activating a channel, the address of the first DCB must be
	 * written before the channel is activated.
	 */
	lan_wr(lower_32_bits((u64)fdma->dma), lan966x,
	       FDMA_DCB_LLP(fdma->channel_id));
	lan_wr(upper_32_bits((u64)fdma->dma), lan966x,
	       FDMA_DCB_LLP1(fdma->channel_id));

	lan_wr(FDMA_CH_CFG_CH_DCB_DB_CNT_SET(fdma->n_dbs) |
	       FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY_SET(1) |
	       FDMA_CH_CFG_CH_INJ_PORT_SET(0) |
	       FDMA_CH_CFG_CH_MEM_SET(1),
	       lan966x, FDMA_CH_CFG(fdma->channel_id));

	/* Start fdma */
	lan_rmw(FDMA_PORT_CTRL_XTR_STOP_SET(0),
		FDMA_PORT_CTRL_XTR_STOP,
		lan966x, FDMA_PORT_CTRL(0));

	/* Enable interrupts */
	mask = lan_rd(lan966x, FDMA_INTR_DB_ENA);
	mask = FDMA_INTR_DB_ENA_INTR_DB_ENA_GET(mask);
	mask |= BIT(fdma->channel_id);
	lan_rmw(FDMA_INTR_DB_ENA_INTR_DB_ENA_SET(mask),
		FDMA_INTR_DB_ENA_INTR_DB_ENA,
		lan966x, FDMA_INTR_DB_ENA);

	/* Activate the channel */
	lan_rmw(FDMA_CH_ACTIVATE_CH_ACTIVATE_SET(BIT(fdma->channel_id)),
		FDMA_CH_ACTIVATE_CH_ACTIVATE,
		lan966x, FDMA_CH_ACTIVATE);
}

static void lan966x_fdma_rx_disable(struct lan966x_rx *rx)
{
	struct lan966x *lan966x = rx->lan966x;
	struct fdma *fdma = &rx->fdma;
	u32 val;

	/* Disable the channel */
	lan_rmw(FDMA_CH_DISABLE_CH_DISABLE_SET(BIT(fdma->channel_id)),
		FDMA_CH_DISABLE_CH_DISABLE,
		lan966x, FDMA_CH_DISABLE);

	readx_poll_timeout_atomic(lan966x_fdma_channel_active, lan966x,
				  val, !(val & BIT(fdma->channel_id)),
				  READL_SLEEP_US, READL_TIMEOUT_US);

	lan_rmw(FDMA_CH_DB_DISCARD_DB_DISCARD_SET(BIT(fdma->channel_id)),
		FDMA_CH_DB_DISCARD_DB_DISCARD,
		lan966x, FDMA_CH_DB_DISCARD);
}

static void lan966x_fdma_rx_reload(struct lan966x_rx *rx)
{
	struct lan966x *lan966x = rx->lan966x;

	lan_rmw(FDMA_CH_RELOAD_CH_RELOAD_SET(BIT(rx->fdma.channel_id)),
		FDMA_CH_RELOAD_CH_RELOAD,
		lan966x, FDMA_CH_RELOAD);
}

static int lan966x_fdma_tx_alloc(struct lan966x_tx *tx)
{
	struct lan966x *lan966x = tx->lan966x;
	struct fdma *fdma = &tx->fdma;
	int err;

	tx->dcbs_buf = kcalloc(fdma->n_dcbs, sizeof(struct lan966x_tx_dcb_buf),
			       GFP_KERNEL);
	if (!tx->dcbs_buf)
		return -ENOMEM;

	err = fdma_alloc_coherent(lan966x->dev, fdma);
	if (err)
		goto out;

	fdma_dcbs_init(fdma, 0, 0);

	return 0;

out:
	kfree(tx->dcbs_buf);
	return err;
}

static void lan966x_fdma_tx_free(struct lan966x_tx *tx)
{
	struct lan966x *lan966x = tx->lan966x;

	kfree(tx->dcbs_buf);
	fdma_free_coherent(lan966x->dev, &tx->fdma);
}

static void lan966x_fdma_tx_activate(struct lan966x_tx *tx)
{
	struct lan966x *lan966x = tx->lan966x;
	struct fdma *fdma = &tx->fdma;
	u32 mask;

	/* When activating a channel, the address of the first DCB must be
	 * written before the channel is activated.
	 */
	lan_wr(lower_32_bits((u64)fdma->dma), lan966x,
	       FDMA_DCB_LLP(fdma->channel_id));
	lan_wr(upper_32_bits((u64)fdma->dma), lan966x,
	       FDMA_DCB_LLP1(fdma->channel_id));

	lan_wr(FDMA_CH_CFG_CH_DCB_DB_CNT_SET(fdma->n_dbs) |
	       FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY_SET(1) |
	       FDMA_CH_CFG_CH_INJ_PORT_SET(0) |
	       FDMA_CH_CFG_CH_MEM_SET(1),
	       lan966x, FDMA_CH_CFG(fdma->channel_id));

	/* Start fdma */
	lan_rmw(FDMA_PORT_CTRL_INJ_STOP_SET(0),
		FDMA_PORT_CTRL_INJ_STOP,
		lan966x, FDMA_PORT_CTRL(0));

	/* Enable interrupts */
	mask = lan_rd(lan966x, FDMA_INTR_DB_ENA);
	mask = FDMA_INTR_DB_ENA_INTR_DB_ENA_GET(mask);
	mask |= BIT(fdma->channel_id);
	lan_rmw(FDMA_INTR_DB_ENA_INTR_DB_ENA_SET(mask),
		FDMA_INTR_DB_ENA_INTR_DB_ENA,
		lan966x, FDMA_INTR_DB_ENA);

	/* Activate the channel */
	lan_rmw(FDMA_CH_ACTIVATE_CH_ACTIVATE_SET(BIT(fdma->channel_id)),
		FDMA_CH_ACTIVATE_CH_ACTIVATE,
		lan966x, FDMA_CH_ACTIVATE);
}

static void lan966x_fdma_tx_disable(struct lan966x_tx *tx)
{
	struct lan966x *lan966x = tx->lan966x;
	struct fdma *fdma = &tx->fdma;
	u32 val;

	/* Disable the channel */
	lan_rmw(FDMA_CH_DISABLE_CH_DISABLE_SET(BIT(fdma->channel_id)),
		FDMA_CH_DISABLE_CH_DISABLE,
		lan966x, FDMA_CH_DISABLE);

	readx_poll_timeout_atomic(lan966x_fdma_channel_active, lan966x,
				  val, !(val & BIT(fdma->channel_id)),
				  READL_SLEEP_US, READL_TIMEOUT_US);

	lan_rmw(FDMA_CH_DB_DISCARD_DB_DISCARD_SET(BIT(fdma->channel_id)),
		FDMA_CH_DB_DISCARD_DB_DISCARD,
		lan966x, FDMA_CH_DB_DISCARD);

	tx->activated = false;
}

static void lan966x_fdma_tx_reload(struct lan966x_tx *tx)
{
	struct lan966x *lan966x = tx->lan966x;

	/* Write the registers to reload the channel */
	lan_rmw(FDMA_CH_RELOAD_CH_RELOAD_SET(BIT(tx->fdma.channel_id)),
		FDMA_CH_RELOAD_CH_RELOAD,
		lan966x, FDMA_CH_RELOAD);
}

static void lan966x_fdma_wakeup_netdev(struct lan966x *lan966x)
{
	struct lan966x_port *port;
	int i;

	for (i = 0; i < lan966x->num_phys_ports; ++i) {
		port = lan966x->ports[i];
		if (!port)
			continue;

		if (netif_queue_stopped(port->dev))
			netif_wake_queue(port->dev);
	}
}

static void lan966x_fdma_stop_netdev(struct lan966x *lan966x)
{
	struct lan966x_port *port;
	int i;

	for (i = 0; i < lan966x->num_phys_ports; ++i) {
		port = lan966x->ports[i];
		if (!port)
			continue;

		netif_stop_queue(port->dev);
	}
}

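/* Reclaim completed TX buffers: update the statistics, unmap or recycle the
 * underlying skb, XDP frame or page, and wake up the netdev queues if
 * anything was released.
 */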
static void lan966x_fdma_tx_clear_buf(struct lan966x *lan966x, int weight)
{
	struct lan966x_tx *tx = &lan966x->tx;
	struct lan966x_rx *rx = &lan966x->rx;
	struct lan966x_tx_dcb_buf *dcb_buf;
	struct fdma *fdma = &tx->fdma;
	struct xdp_frame_bulk bq;
	unsigned long flags;
	bool clear = false;
	struct fdma_db *db;
	int i;

	xdp_frame_bulk_init(&bq);

	spin_lock_irqsave(&lan966x->tx_lock, flags);
	for (i = 0; i < fdma->n_dcbs; ++i) {
		dcb_buf = &tx->dcbs_buf[i];

		if (!dcb_buf->used)
			continue;

		db = fdma_db_get(fdma, i, 0);
		if (!fdma_db_is_done(db))
			continue;

		dcb_buf->dev->stats.tx_packets++;
		dcb_buf->dev->stats.tx_bytes += dcb_buf->len;

		dcb_buf->used = false;
		if (dcb_buf->use_skb) {
			dma_unmap_single(lan966x->dev,
					 dcb_buf->dma_addr,
					 dcb_buf->len,
					 DMA_TO_DEVICE);

			if (!dcb_buf->ptp)
				napi_consume_skb(dcb_buf->data.skb, weight);
		} else {
			if (dcb_buf->xdp_ndo)
				dma_unmap_single(lan966x->dev,
						 dcb_buf->dma_addr,
						 dcb_buf->len,
						 DMA_TO_DEVICE);

			if (dcb_buf->xdp_ndo)
				xdp_return_frame_bulk(dcb_buf->data.xdpf, &bq);
			else
				page_pool_recycle_direct(rx->page_pool,
							 dcb_buf->data.page);
		}

		clear = true;
	}

	xdp_flush_frame_bulk(&bq);

	if (clear)
		lan966x_fdma_wakeup_netdev(lan966x);

	spin_unlock_irqrestore(&lan966x->tx_lock, flags);
}

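/* Inspect the next received frame: sync it for CPU access, extract and
 * validate the source port from the IFH and, if an XDP program is attached
 * to that port, run it on the frame.
 */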
static int lan966x_fdma_rx_check_frame(struct lan966x_rx *rx, u64 *src_port)
{
	struct lan966x *lan966x = rx->lan966x;
	struct fdma *fdma = &rx->fdma;
	struct lan966x_port *port;
	struct fdma_db *db;
	struct page *page;

	db = fdma_db_next_get(fdma);
	page = rx->page[fdma->dcb_index][fdma->db_index];
	if (unlikely(!page))
		return FDMA_ERROR;

	dma_sync_single_for_cpu(lan966x->dev,
				(dma_addr_t)db->dataptr + XDP_PACKET_HEADROOM,
				FDMA_DCB_STATUS_BLOCKL(db->status),
				DMA_FROM_DEVICE);

	lan966x_ifh_get_src_port(page_address(page) + XDP_PACKET_HEADROOM,
				 src_port);
	if (WARN_ON(*src_port >= lan966x->num_phys_ports))
		return FDMA_ERROR;

	port = lan966x->ports[*src_port];
	if (!lan966x_xdp_port_present(port))
		return FDMA_PASS;

	return lan966x_xdp_run(port, page, FDMA_DCB_STATUS_BLOCKL(db->status));
}

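/* Build an skb around the received page, strip the IFH and the FCS, and
 * fill in the timestamp, protocol and statistics.
 */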
static struct sk_buff *lan966x_fdma_rx_get_frame(struct lan966x_rx *rx,
						 u64 src_port)
{
	struct lan966x *lan966x = rx->lan966x;
	struct fdma *fdma = &rx->fdma;
	struct sk_buff *skb;
	struct fdma_db *db;
	struct page *page;
	u64 timestamp;

	/* Get the received frame and unmap it */
	db = fdma_db_next_get(fdma);
	page = rx->page[fdma->dcb_index][fdma->db_index];

	skb = build_skb(page_address(page), fdma->db_size);
	if (unlikely(!skb))
		goto free_page;

	skb_mark_for_recycle(skb);

	skb_reserve(skb, XDP_PACKET_HEADROOM);
	skb_put(skb, FDMA_DCB_STATUS_BLOCKL(db->status));

	lan966x_ifh_get_timestamp(skb->data, &timestamp);

	skb->dev = lan966x->ports[src_port]->dev;
	skb_pull(skb, IFH_LEN_BYTES);

	if (likely(!(skb->dev->features & NETIF_F_RXFCS)))
		skb_trim(skb, skb->len - ETH_FCS_LEN);

	lan966x_ptp_rxtstamp(lan966x, skb, src_port, timestamp);
	skb->protocol = eth_type_trans(skb, skb->dev);

	if (lan966x->bridge_mask & BIT(src_port)) {
		skb->offload_fwd_mark = 1;

		skb_reset_network_header(skb);
		if (!lan966x_hw_offload(lan966x, src_port, skb))
			skb->offload_fwd_mark = 0;
	}

	skb->dev->stats.rx_bytes += skb->len;
	skb->dev->stats.rx_packets++;

	return skb;

free_page:
	page_pool_recycle_direct(rx->page_pool, page);

	return NULL;
}

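/* NAPI poll: reclaim TX buffers, process up to @weight received frames,
 * refill the consumed DCBs with fresh pages and re-enable interrupts once
 * all pending frames are handled.
 */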
static int lan966x_fdma_napi_poll(struct napi_struct *napi, int weight)
{
	struct lan966x *lan966x = container_of(napi, struct lan966x, napi);
	struct lan966x_rx *rx = &lan966x->rx;
	int old_dcb, dcb_reload, counter = 0;
	struct fdma *fdma = &rx->fdma;
	bool redirect = false;
	struct sk_buff *skb;
	u64 src_port;

	dcb_reload = fdma->dcb_index;

	lan966x_fdma_tx_clear_buf(lan966x, weight);

	/* Get all received skb */
	while (counter < weight) {
		if (!fdma_has_frames(fdma))
			break;

		counter++;

		switch (lan966x_fdma_rx_check_frame(rx, &src_port)) {
		case FDMA_PASS:
			break;
		case FDMA_ERROR:
			lan966x_fdma_rx_free_page(rx);
			fdma_dcb_advance(fdma);
			goto allocate_new;
		case FDMA_REDIRECT:
			redirect = true;
			fallthrough;
		case FDMA_TX:
			fdma_dcb_advance(fdma);
			continue;
		case FDMA_DROP:
			lan966x_fdma_rx_free_page(rx);
			fdma_dcb_advance(fdma);
			continue;
		}

		skb = lan966x_fdma_rx_get_frame(rx, src_port);
		fdma_dcb_advance(fdma);
		if (!skb)
			goto allocate_new;

		napi_gro_receive(&lan966x->napi, skb);
	}

allocate_new:
	/* Allocate new pages and map them */
	while (dcb_reload != fdma->dcb_index) {
		old_dcb = dcb_reload;
		dcb_reload++;
		dcb_reload &= fdma->n_dcbs - 1;

		fdma_dcb_add(fdma, old_dcb, FDMA_DCB_INFO_DATAL(fdma->db_size),
			     FDMA_DCB_STATUS_INTR);

		lan966x_fdma_rx_reload(rx);
	}

	if (redirect)
		xdp_do_flush();

	if (counter < weight && napi_complete_done(napi, counter))
		lan_wr(0xff, lan966x, FDMA_INTR_DB_ENA);

	return counter;
}

irqreturn_t lan966x_fdma_irq_handler(int irq, void *args)
{
	struct lan966x *lan966x = args;
	u32 db, err, err_type;

	db = lan_rd(lan966x, FDMA_INTR_DB);
	err = lan_rd(lan966x, FDMA_INTR_ERR);

	if (db) {
		lan_wr(0, lan966x, FDMA_INTR_DB_ENA);
		lan_wr(db, lan966x, FDMA_INTR_DB);

		napi_schedule(&lan966x->napi);
	}

	if (err) {
		err_type = lan_rd(lan966x, FDMA_ERRORS);

		WARN(1, "Unexpected error: %d, error_type: %d\n", err, err_type);

		lan_wr(err, lan966x, FDMA_INTR_ERR);
		lan_wr(err_type, lan966x, FDMA_ERRORS);
	}

	return IRQ_HANDLED;
}

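/* Find a free TX DCB that is not the current last one in the hardware
 * chain.
 */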
static int lan966x_fdma_get_next_dcb(struct lan966x_tx *tx)
{
	struct lan966x_tx_dcb_buf *dcb_buf;
	struct fdma *fdma = &tx->fdma;
	int i;

	for (i = 0; i < fdma->n_dcbs; ++i) {
		dcb_buf = &tx->dcbs_buf[i];
		if (!dcb_buf->used &&
		    !fdma_is_last(&tx->fdma, &tx->fdma.dcbs[i]))
			return i;
	}

	return -1;
}

static void lan966x_fdma_tx_start(struct lan966x_tx *tx)
{
	struct lan966x *lan966x = tx->lan966x;

	if (likely(lan966x->tx.activated)) {
		lan966x_fdma_tx_reload(tx);
	} else {
		/* The channel is used for the first time, so just activate it */
		lan966x->tx.activated = true;
		lan966x_fdma_tx_activate(tx);
	}
}

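/* Transmit an XDP buffer. When @len is zero, @ptr is an xdp_frame coming
 * from ndo_xdp_xmit; otherwise @ptr is a page from the RX page pool (the
 * XDP_TX path) and @len is the frame length. In both cases an IFH is
 * generated in front of the frame data.
 */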
int lan966x_fdma_xmit_xdpf(struct lan966x_port *port, void *ptr, u32 len)
{
	struct lan966x *lan966x = port->lan966x;
	struct lan966x_tx_dcb_buf *next_dcb_buf;
	struct lan966x_tx *tx = &lan966x->tx;
	struct xdp_frame *xdpf;
	dma_addr_t dma_addr;
	struct page *page;
	int next_to_use;
	__be32 *ifh;
	int ret = 0;

	spin_lock(&lan966x->tx_lock);

	/* Get next index */
	next_to_use = lan966x_fdma_get_next_dcb(tx);
	if (next_to_use < 0) {
		netif_stop_queue(port->dev);
		ret = NETDEV_TX_BUSY;
		goto out;
	}

	/* Get the next buffer */
	next_dcb_buf = &tx->dcbs_buf[next_to_use];

	/* Generate new IFH */
	if (!len) {
		xdpf = ptr;

		if (xdpf->headroom < IFH_LEN_BYTES) {
			ret = NETDEV_TX_OK;
			goto out;
		}

		ifh = xdpf->data - IFH_LEN_BYTES;
		memset(ifh, 0x0, sizeof(__be32) * IFH_LEN);
		lan966x_ifh_set_bypass(ifh, 1);
		lan966x_ifh_set_port(ifh, BIT_ULL(port->chip_port));

		dma_addr = dma_map_single(lan966x->dev,
					  xdpf->data - IFH_LEN_BYTES,
					  xdpf->len + IFH_LEN_BYTES,
					  DMA_TO_DEVICE);
		if (dma_mapping_error(lan966x->dev, dma_addr)) {
			ret = NETDEV_TX_OK;
			goto out;
		}

		next_dcb_buf->data.xdpf = xdpf;
		next_dcb_buf->len = xdpf->len + IFH_LEN_BYTES;
	} else {
		page = ptr;

		ifh = page_address(page) + XDP_PACKET_HEADROOM;
		memset(ifh, 0x0, sizeof(__be32) * IFH_LEN);
		lan966x_ifh_set_bypass(ifh, 1);
		lan966x_ifh_set_port(ifh, BIT_ULL(port->chip_port));

		dma_addr = page_pool_get_dma_addr(page);
		dma_sync_single_for_device(lan966x->dev,
					   dma_addr + XDP_PACKET_HEADROOM,
					   len + IFH_LEN_BYTES,
					   DMA_TO_DEVICE);

		next_dcb_buf->data.page = page;
		next_dcb_buf->len = len + IFH_LEN_BYTES;
	}

	/* Fill up the buffer */
	next_dcb_buf->use_skb = false;
	next_dcb_buf->xdp_ndo = !len;
	next_dcb_buf->dma_addr = dma_addr;
	next_dcb_buf->used = true;
	next_dcb_buf->ptp = false;
	next_dcb_buf->dev = port->dev;

	__fdma_dcb_add(&tx->fdma,
		       next_to_use,
		       0,
		       FDMA_DCB_STATUS_INTR |
		       FDMA_DCB_STATUS_SOF |
		       FDMA_DCB_STATUS_EOF |
		       FDMA_DCB_STATUS_BLOCKO(0) |
		       FDMA_DCB_STATUS_BLOCKL(next_dcb_buf->len),
		       &fdma_nextptr_cb,
		       &lan966x_fdma_xdp_tx_dataptr_cb);

	/* Start the transmission */
	lan966x_fdma_tx_start(tx);

out:
	spin_unlock(&lan966x->tx_lock);

	return ret;
}

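/* Transmit an skb: make room for the IFH and the FCS, map the frame for DMA
 * and hand it to the FDMA injection channel.
 */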
int lan966x_fdma_xmit(struct sk_buff *skb, __be32 *ifh, struct net_device *dev)
{
	struct lan966x_port *port = netdev_priv(dev);
	struct lan966x *lan966x = port->lan966x;
	struct lan966x_tx_dcb_buf *next_dcb_buf;
	struct lan966x_tx *tx = &lan966x->tx;
	int needed_headroom;
	int needed_tailroom;
	dma_addr_t dma_addr;
	int next_to_use;
	int err;

	/* Get next index */
	next_to_use = lan966x_fdma_get_next_dcb(tx);
	if (next_to_use < 0) {
		netif_stop_queue(dev);
		return NETDEV_TX_BUSY;
	}

	if (skb_put_padto(skb, ETH_ZLEN)) {
		dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	/* skb processing */
	needed_headroom = max_t(int, IFH_LEN_BYTES - skb_headroom(skb), 0);
	needed_tailroom = max_t(int, ETH_FCS_LEN - skb_tailroom(skb), 0);
	if (needed_headroom || needed_tailroom || skb_header_cloned(skb)) {
		err = pskb_expand_head(skb, needed_headroom, needed_tailroom,
				       GFP_ATOMIC);
		if (unlikely(err)) {
			dev->stats.tx_dropped++;
			err = NETDEV_TX_OK;
			goto release;
		}
	}

	skb_tx_timestamp(skb);
	skb_push(skb, IFH_LEN_BYTES);
	memcpy(skb->data, ifh, IFH_LEN_BYTES);
	skb_put(skb, ETH_FCS_LEN);

	dma_addr = dma_map_single(lan966x->dev, skb->data, skb->len,
				  DMA_TO_DEVICE);
	if (dma_mapping_error(lan966x->dev, dma_addr)) {
		dev->stats.tx_dropped++;
		err = NETDEV_TX_OK;
		goto release;
	}

	/* Fill up the buffer */
	next_dcb_buf = &tx->dcbs_buf[next_to_use];
	next_dcb_buf->use_skb = true;
	next_dcb_buf->data.skb = skb;
	next_dcb_buf->xdp_ndo = false;
	next_dcb_buf->len = skb->len;
	next_dcb_buf->dma_addr = dma_addr;
	next_dcb_buf->used = true;
	next_dcb_buf->ptp = false;
	next_dcb_buf->dev = dev;

	fdma_dcb_add(&tx->fdma,
		     next_to_use,
		     0,
		     FDMA_DCB_STATUS_INTR |
		     FDMA_DCB_STATUS_SOF |
		     FDMA_DCB_STATUS_EOF |
		     FDMA_DCB_STATUS_BLOCKO(0) |
		     FDMA_DCB_STATUS_BLOCKL(skb->len));

	if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
	    LAN966X_SKB_CB(skb)->rew_op == IFH_REW_OP_TWO_STEP_PTP)
		next_dcb_buf->ptp = true;

	/* Start the transmission */
	lan966x_fdma_tx_start(tx);

	return NETDEV_TX_OK;

release:
	if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
	    LAN966X_SKB_CB(skb)->rew_op == IFH_REW_OP_TWO_STEP_PTP)
		lan966x_ptp_txtstamp_release(port, skb);

	dev_kfree_skb_any(skb);
	return err;
}

static int lan966x_fdma_get_max_mtu(struct lan966x *lan966x)
{
	int max_mtu = 0;
	int i;

	for (i = 0; i < lan966x->num_phys_ports; ++i) {
		struct lan966x_port *port;
		int mtu;

		port = lan966x->ports[i];
		if (!port)
			continue;

		mtu = lan_rd(lan966x, DEV_MAC_MAXLEN_CFG(port->chip_port));
		if (mtu > max_mtu)
			max_mtu = mtu;
	}

	return max_mtu;
}

static int lan966x_qsys_sw_status(struct lan966x *lan966x)
{
	return lan_rd(lan966x, QSYS_SW_STATUS(CPU_PORT));
}

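/* Re-create the RX resources (page pool, DCBs) for the new MTU while the
 * channel is stopped, then release the old resources; on failure, restore
 * the previous ones.
 */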
static int lan966x_fdma_reload(struct lan966x *lan966x, int new_mtu)
{
	struct page *(*old_pages)[FDMA_RX_DCB_MAX_DBS];
	struct page_pool *page_pool;
	struct fdma fdma_rx_old;
	int err, i, j;

	old_pages = kmemdup(lan966x->rx.page, sizeof(lan966x->rx.page),
			    GFP_KERNEL);
	if (!old_pages)
		return -ENOMEM;

	/* Store these for later to free them */
	memcpy(&fdma_rx_old, &lan966x->rx.fdma, sizeof(struct fdma));
	page_pool = lan966x->rx.page_pool;

	napi_synchronize(&lan966x->napi);
	napi_disable(&lan966x->napi);
	lan966x_fdma_stop_netdev(lan966x);

	lan966x_fdma_rx_disable(&lan966x->rx);
	lan966x->rx.page_order = round_up(new_mtu, PAGE_SIZE) / PAGE_SIZE - 1;
	lan966x->rx.max_mtu = new_mtu;
	err = lan966x_fdma_rx_alloc(&lan966x->rx);
	if (err)
		goto restore;
	lan966x_fdma_rx_start(&lan966x->rx);

	for (i = 0; i < fdma_rx_old.n_dcbs; ++i)
		for (j = 0; j < fdma_rx_old.n_dbs; ++j)
			page_pool_put_full_page(page_pool,
						old_pages[i][j], false);

	fdma_free_coherent(lan966x->dev, &fdma_rx_old);

	page_pool_destroy(page_pool);

	lan966x_fdma_wakeup_netdev(lan966x);
	napi_enable(&lan966x->napi);

	kfree(old_pages);
	return 0;

restore:
	lan966x->rx.page_pool = page_pool;
	memcpy(&lan966x->rx.fdma, &fdma_rx_old, sizeof(struct fdma));
	lan966x_fdma_rx_start(&lan966x->rx);

	lan966x_fdma_wakeup_netdev(lan966x);
	napi_enable(&lan966x->napi);

	kfree(old_pages);
	return err;
}

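/* Worst-case buffer size needed per frame: the largest port MTU plus the
 * IFH, the skb shared info, two VLAN tags and the XDP headroom.
 */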
static int lan966x_fdma_get_max_frame(struct lan966x *lan966x)
{
	return lan966x_fdma_get_max_mtu(lan966x) +
	       IFH_LEN_BYTES +
	       SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
	       VLAN_HLEN * 2 +
	       XDP_PACKET_HEADROOM;
}

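/* Reload the FDMA with the CPU port disabled and its queues flushed, so
 * that no new frames arrive while the RX resources are being replaced.
 */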
static int __lan966x_fdma_reload(struct lan966x *lan966x, int max_mtu)
{
	int err;
	u32 val;

	/* Disable the CPU port */
	lan_rmw(QSYS_SW_PORT_MODE_PORT_ENA_SET(0),
		QSYS_SW_PORT_MODE_PORT_ENA,
		lan966x, QSYS_SW_PORT_MODE(CPU_PORT));

	/* Flush the CPU queues */
	readx_poll_timeout(lan966x_qsys_sw_status, lan966x,
			   val, !(QSYS_SW_STATUS_EQ_AVAIL_GET(val)),
			   READL_SLEEP_US, READL_TIMEOUT_US);

	/* Add a sleep in case there are frames between the queues and the CPU
	 * port
	 */
	usleep_range(1000, 2000);

	err = lan966x_fdma_reload(lan966x, max_mtu);

	/* Enable back the CPU port */
	lan_rmw(QSYS_SW_PORT_MODE_PORT_ENA_SET(1),
		QSYS_SW_PORT_MODE_PORT_ENA,
		lan966x, QSYS_SW_PORT_MODE(CPU_PORT));

	return err;
}

int lan966x_fdma_change_mtu(struct lan966x *lan966x)
{
	int max_mtu;

	max_mtu = lan966x_fdma_get_max_frame(lan966x);
	if (max_mtu == lan966x->rx.max_mtu)
		return 0;

	return __lan966x_fdma_reload(lan966x, max_mtu);
}

int lan966x_fdma_reload_page_pool(struct lan966x *lan966x)
{
	int max_mtu;

	max_mtu = lan966x_fdma_get_max_frame(lan966x);
	return __lan966x_fdma_reload(lan966x, max_mtu);
}

void lan966x_fdma_netdev_init(struct lan966x *lan966x, struct net_device *dev)
{
	if (lan966x->fdma_ndev)
		return;

	lan966x->fdma_ndev = dev;
	netif_napi_add(dev, &lan966x->napi, lan966x_fdma_napi_poll);
	napi_enable(&lan966x->napi);
}

void lan966x_fdma_netdev_deinit(struct lan966x *lan966x, struct net_device *dev)
{
	if (lan966x->fdma_ndev == dev) {
		netif_napi_del(&lan966x->napi);
		lan966x->fdma_ndev = NULL;
	}
}

int lan966x_fdma_init(struct lan966x *lan966x)
{
	int err;

	if (!lan966x->fdma)
		return 0;

	lan966x->rx.lan966x = lan966x;
	lan966x->rx.fdma.channel_id = FDMA_XTR_CHANNEL;
	lan966x->rx.fdma.n_dcbs = FDMA_DCB_MAX;
	lan966x->rx.fdma.n_dbs = FDMA_RX_DCB_MAX_DBS;
	lan966x->rx.fdma.priv = lan966x;
	lan966x->rx.fdma.size = fdma_get_size(&lan966x->rx.fdma);
	lan966x->rx.fdma.db_size = PAGE_SIZE << lan966x->rx.page_order;
	lan966x->rx.fdma.ops.nextptr_cb = &fdma_nextptr_cb;
	lan966x->rx.fdma.ops.dataptr_cb = &lan966x_fdma_rx_dataptr_cb;
	lan966x->rx.max_mtu = lan966x_fdma_get_max_frame(lan966x);
	lan966x->tx.lan966x = lan966x;
	lan966x->tx.fdma.channel_id = FDMA_INJ_CHANNEL;
	lan966x->tx.fdma.n_dcbs = FDMA_DCB_MAX;
	lan966x->tx.fdma.n_dbs = FDMA_TX_DCB_MAX_DBS;
	lan966x->tx.fdma.priv = lan966x;
	lan966x->tx.fdma.size = fdma_get_size(&lan966x->tx.fdma);
	lan966x->tx.fdma.db_size = PAGE_SIZE << lan966x->rx.page_order;
	lan966x->tx.fdma.ops.nextptr_cb = &fdma_nextptr_cb;
	lan966x->tx.fdma.ops.dataptr_cb = &lan966x_fdma_tx_dataptr_cb;

	err = lan966x_fdma_rx_alloc(&lan966x->rx);
	if (err)
		return err;

	err = lan966x_fdma_tx_alloc(&lan966x->tx);
	if (err) {
		fdma_free_coherent(lan966x->dev, &lan966x->rx.fdma);
		page_pool_destroy(lan966x->rx.page_pool);
		return err;
	}

	lan966x_fdma_rx_start(&lan966x->rx);

	return 0;
}

void lan966x_fdma_deinit(struct lan966x *lan966x)
{
	if (!lan966x->fdma)
		return;

	lan966x_fdma_rx_disable(&lan966x->rx);
	lan966x_fdma_tx_disable(&lan966x->tx);

	napi_synchronize(&lan966x->napi);
	napi_disable(&lan966x->napi);

	lan966x_fdma_rx_free_pages(&lan966x->rx);
	fdma_free_coherent(lan966x->dev, &lan966x->rx.fdma);
	page_pool_destroy(lan966x->rx.page_pool);
	lan966x_fdma_tx_free(&lan966x->tx);
}