Lines matching "mac", "clk" and "tx" in drivers/net/ethernet/lantiq_xrx200.c
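All fragments below come from the Lantiq xrx200 PMAC Ethernet driver: one net_device backed by a single RX and a single TX DMA channel, each with its own NAPI instance. Matches are grouped by enclosing function (the /* in ...() */ labels), and "..." marks lines the search did not return.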
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2017 - 2018 Hauke Mehrtens <hauke@hauke-m.de> */

#include <linux/clk.h>
/* cpu port mac */
...
/* Remove Layer-2 header from packets from PMAC to DMA */
...

/* in struct xrx200_priv */
struct clk *clk;
/* in xrx200_pmac_r32() */
return __raw_readl(priv->pmac_reg + offset);

/* in xrx200_pmac_w32() */
__raw_writel(val, priv->pmac_reg + offset);
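xrx200_flush_dma() walks the RX ring and re-arms every descriptor the hardware has already completed; the driver uses it from xrx200_open() to discard frames that queued up before the interface was brought up: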
/* in xrx200_flush_dma() */
struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];

if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) != LTQ_DMA_C)
        break;

desc->ctl = LTQ_DMA_OWN | LTQ_DMA_RX_OFFSET(NET_IP_ALIGN) |
            XRX200_DMA_DATA_LEN;
ch->dma.desc++;
ch->dma.desc %= LTQ_DESC_NUM;
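xrx200_open() brings both channels up, flushing the RX ring before enabling its interrupt; xrx200_close() simply reverses the sequence: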
/* in xrx200_open() */
napi_enable(&priv->chan_tx.napi);
ltq_dma_open(&priv->chan_tx.dma);
ltq_dma_enable_irq(&priv->chan_tx.dma);

napi_enable(&priv->chan_rx.napi);
ltq_dma_open(&priv->chan_rx.dma);
...
xrx200_flush_dma(&priv->chan_rx);
ltq_dma_enable_irq(&priv->chan_rx.dma);
/* in xrx200_close() */
napi_disable(&priv->chan_rx.napi);
ltq_dma_close(&priv->chan_rx.dma);

napi_disable(&priv->chan_tx.napi);
ltq_dma_close(&priv->chan_tx.dma);
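xrx200_alloc_skb() attaches a fresh IP-aligned skb to the current RX descriptor and DMA-maps it for the device; note that the descriptor's control word is rewritten even on the error path, so the hardware never sees a half-initialized slot: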
/* in xrx200_alloc_skb() */
ch->skb[ch->dma.desc] = netdev_alloc_skb_ip_align(ch->priv->net_dev,
                                                  XRX200_DMA_DATA_LEN);
if (!ch->skb[ch->dma.desc]) {
        ret = -ENOMEM;
        goto skip;
}

ch->dma.desc_base[ch->dma.desc].addr = dma_map_single(ch->priv->dev,
                ch->skb[ch->dma.desc]->data, XRX200_DMA_DATA_LEN,
                DMA_FROM_DEVICE);
if (unlikely(dma_mapping_error(ch->priv->dev,
                               ch->dma.desc_base[ch->dma.desc].addr))) {
        dev_kfree_skb_any(ch->skb[ch->dma.desc]);
        ret = -ENOMEM;
        goto skip;
}

skip:
ch->dma.desc_base[ch->dma.desc].ctl =
        LTQ_DMA_OWN | LTQ_DMA_RX_OFFSET(NET_IP_ALIGN) | XRX200_DMA_DATA_LEN;
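On a completed descriptor, xrx200_hw_receive() pulls the skb out of the ring, refills the slot through xrx200_alloc_skb(), advances the index, and hands the frame to the stack (the FCS is excluded from the byte count):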
/* in xrx200_hw_receive() */
struct xrx200_priv *priv = ch->priv;
struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];
struct sk_buff *skb = ch->skb[ch->dma.desc];
int len = (desc->ctl & LTQ_DMA_SIZE_MASK);
struct net_device *net_dev = priv->net_dev;
...
ch->dma.desc++;
ch->dma.desc %= LTQ_DESC_NUM;
...
skb->protocol = eth_type_trans(skb, net_dev);
netif_receive_skb(skb);
net_dev->stats.rx_packets++;
net_dev->stats.rx_bytes += len - ETH_FCS_LEN;
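Every poll loop in this driver gates on the same two descriptor bits: the CPU may reclaim a descriptor only when the complete bit (LTQ_DMA_C) is set and the ownership bit (LTQ_DMA_OWN) is clear. A minimal standalone model of that predicate (bit positions are hypothetical stand-ins, not taken from xway_dma.h):

#include <stdint.h>
#include <stdio.h>

/* hypothetical stand-ins for LTQ_DMA_OWN / LTQ_DMA_C */
#define DMA_OWN (1u << 31)      /* descriptor belongs to the DMA engine */
#define DMA_C   (1u << 30)      /* DMA engine finished this descriptor */

/* mirrors the driver's (ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) == LTQ_DMA_C test */
static int cpu_may_reclaim(uint32_t ctl)
{
        return (ctl & (DMA_OWN | DMA_C)) == DMA_C;
}

int main(void)
{
        printf("%d\n", cpu_may_reclaim(DMA_OWN));       /* 0: still queued to hardware */
        printf("%d\n", cpu_may_reclaim(DMA_C));         /* 1: completed, CPU's turn */
        return 0;
}

xrx200_poll_rx() applies exactly that test per descriptor and re-enables the RX interrupt once it finishes under budget: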
/* in xrx200_poll_rx() */
struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];

if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) == LTQ_DMA_C) {
        ...
}
...
if (napi_complete_done(&ch->napi, rx))
        ltq_dma_enable_irq(&ch->dma);
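On the TX side, xrx200_tx_housekeeping() reclaims completed descriptors, frees the transmitted skbs, and reports the totals to BQL through netdev_completed_queue():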
/* in xrx200_tx_housekeeping() */
struct net_device *net_dev = ch->priv->net_dev;
...
struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->tx_free];

if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) == LTQ_DMA_C) {
        struct sk_buff *skb = ch->skb[ch->tx_free];

        pkts++;
        bytes += skb->len;
        ch->skb[ch->tx_free] = NULL;
        consume_skb(skb);
        memset(&ch->dma.desc_base[ch->tx_free], 0,
               sizeof(struct ltq_dma_desc));
        ch->tx_free++;
        ch->tx_free %= LTQ_DESC_NUM;
}
...
net_dev->stats.tx_packets += pkts;
net_dev->stats.tx_bytes += bytes;
netdev_completed_queue(ch->priv->net_dev, pkts, bytes);
...
if (napi_complete_done(&ch->napi, pkts))
        ltq_dma_enable_irq(&ch->dma);
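xrx200_start_xmit() pads short frames to ETH_ZLEN, maps the buffer, and hands the descriptor to the hardware; the queue is stopped once the producer index catches up with tx_free: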
/* in xrx200_start_xmit() */
struct xrx200_chan *ch = &priv->chan_tx;
struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];
...
skb->dev = net_dev;
if (skb_put_padto(skb, ETH_ZLEN)) {
        net_dev->stats.tx_dropped++;
        return NETDEV_TX_OK;
}

len = skb->len;

if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) || ch->skb[ch->dma.desc]) {
        netdev_err(net_dev, "tx ring full\n");
        ...
}

ch->skb[ch->dma.desc] = skb;

mapping = dma_map_single(priv->dev, skb->data, len, DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(priv->dev, mapping)))
        goto err_drop;
...
desc->addr = mapping - byte_offset;
...
desc->ctl = LTQ_DMA_OWN | LTQ_DMA_SOP | LTQ_DMA_EOP |
            LTQ_DMA_TX_OFFSET(byte_offset) | (len & LTQ_DMA_SIZE_MASK);
ch->dma.desc++;
ch->dma.desc %= LTQ_DESC_NUM;
if (ch->dma.desc == ch->tx_free)
        netif_stop_queue(net_dev);
...
err_drop:
dev_kfree_skb(skb);
net_dev->stats.tx_dropped++;
net_dev->stats.tx_errors++;
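The queue-stop condition above deserves a note: the ring counts as full when the producer index, after wrapping modulo LTQ_DESC_NUM, lands on tx_free. A self-contained sketch of that bookkeeping (struct and names are hypothetical, not driver code):

#include <stdbool.h>
#include <stdio.h>

#define DESC_NUM 64     /* stands in for LTQ_DESC_NUM */

/* hypothetical model of the driver's two ring indices */
struct ring {
        unsigned int head;      /* ch->dma.desc: next slot start_xmit claims */
        unsigned int tail;      /* ch->tx_free: next slot housekeeping frees */
};

/* mirrors the driver's post-increment test: desc++, wrap, compare */
static bool ring_full_after_push(struct ring *r)
{
        r->head = (r->head + 1) % DESC_NUM;
        return r->head == r->tail;
}

int main(void)
{
        struct ring r = { 0, 0 };
        int in_flight = 0;

        /* queue frames until the driver would call netif_stop_queue() */
        do {
                in_flight++;
        } while (!ring_full_after_push(&r));

        printf("queue stops with %d frames in flight\n", in_flight);
        return 0;
}

The real transmit path additionally refuses any slot whose skb pointer is still set, a safety net against overwriting an unreclaimed descriptor. xrx200_dma_irq() below serves both channels: it masks the channel's interrupt and defers all work to NAPI: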
/* in xrx200_dma_irq() */
if (napi_schedule_prep(&ch->napi)) {
        __napi_schedule(&ch->napi);
        ltq_dma_disable_irq(&ch->dma);
}

ltq_dma_ack_irq(&ch->dma);
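xrx200_dma_init() wires up both channels: the RX ring is pre-filled with one skb per descriptor before its IRQ is requested, and the error path unwinds in reverse order: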
/* in xrx200_dma_init() */
struct xrx200_chan *ch_rx = &priv->chan_rx;
struct xrx200_chan *ch_tx = &priv->chan_tx;
...
ch_rx->dma.nr = XRX200_DMA_RX;
ch_rx->dma.dev = priv->dev;
ch_rx->priv = priv;

ltq_dma_alloc_rx(&ch_rx->dma);
for (ch_rx->dma.desc = 0; ch_rx->dma.desc < LTQ_DESC_NUM;
     ch_rx->dma.desc++) {
        ret = xrx200_alloc_skb(ch_rx);
        if (ret)
                goto rx_free;
}
ch_rx->dma.desc = 0;
ret = devm_request_irq(priv->dev, ch_rx->dma.irq, xrx200_dma_irq, 0,
                       "xrx200_net_rx", &priv->chan_rx);
if (ret) {
        dev_err(priv->dev, "failed to request RX irq %d\n",
                ch_rx->dma.irq);
        goto rx_ring_free;
}

ch_tx->dma.nr = XRX200_DMA_TX;
ch_tx->dma.dev = priv->dev;
ch_tx->priv = priv;

ltq_dma_alloc_tx(&ch_tx->dma);
ret = devm_request_irq(priv->dev, ch_tx->dma.irq, xrx200_dma_irq, 0,
                       "xrx200_net_tx", &priv->chan_tx);
if (ret) {
        dev_err(priv->dev, "failed to request TX irq %d\n",
                ch_tx->dma.irq);
        goto tx_free;
}

return ret;

tx_free:
ltq_dma_free(&ch_tx->dma);

rx_ring_free:
for (i = 0; i < LTQ_DESC_NUM; i++) {
        if (priv->chan_rx.skb[i])
                dev_kfree_skb_any(priv->chan_rx.skb[i]);
}

rx_free:
ltq_dma_free(&ch_rx->dma);
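xrx200_hw_cleanup() is the remove-time counterpart, freeing both channels and every RX buffer: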
/* in xrx200_hw_cleanup() */
ltq_dma_free(&priv->chan_tx.dma);
ltq_dma_free(&priv->chan_rx.dma);

/* free the allocated RX ring */
for (i = 0; i < LTQ_DESC_NUM; i++)
        dev_kfree_skb_any(priv->chan_rx.skb[i]);
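xrx200_probe() collects every resource up front: the PMAC register window, the "rx" and "tx" DMA interrupts, a single unnamed clock, and, when the device tree provides one, the MAC address: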
/* in xrx200_probe() */
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
...
const u8 *mac;

net_dev = devm_alloc_etherdev(dev, sizeof(struct xrx200_priv));
if (!net_dev)
        return -ENOMEM;

priv = netdev_priv(net_dev);
priv->net_dev = net_dev;
priv->dev = dev;

net_dev->netdev_ops = &xrx200_netdev_ops;
net_dev->min_mtu = ETH_ZLEN;
net_dev->max_mtu = XRX200_DMA_DATA_LEN;

/* load the memory ranges */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
        ...
        return -ENOENT;
}

priv->pmac_reg = devm_ioremap_resource(dev, res);
if (IS_ERR(priv->pmac_reg)) {
        ...
        return PTR_ERR(priv->pmac_reg);
}

priv->chan_rx.dma.irq = platform_get_irq_byname(pdev, "rx");
if (priv->chan_rx.dma.irq < 0)
        return -ENOENT;
priv->chan_tx.dma.irq = platform_get_irq_byname(pdev, "tx");
if (priv->chan_tx.dma.irq < 0)
        return -ENOENT;

/* get the clock */
priv->clk = devm_clk_get(dev, NULL);
if (IS_ERR(priv->clk)) {
        ...
        return PTR_ERR(priv->clk);
}

mac = of_get_mac_address(np);
if (!IS_ERR(mac))
        ether_addr_copy(net_dev->dev_addr, mac);
...
/* enable clock gate */
err = clk_prepare_enable(priv->clk);
...
/* setup NAPI */
netif_napi_add(net_dev, &priv->chan_rx.napi, xrx200_poll_rx, 32);
netif_tx_napi_add(net_dev, &priv->chan_tx.napi, xrx200_tx_housekeeping, 32);
...
/* error unwind */
clk_disable_unprepare(priv->clk);
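xrx200_remove() tears everything down in reverse probe order: the queue is stopped, the NAPI instances deleted, then the clock and DMA resources are released: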
/* in xrx200_remove() */
struct net_device *net_dev = priv->net_dev;

/* free stack related instances */
netif_stop_queue(net_dev);
netif_napi_del(&priv->chan_tx.napi);
netif_napi_del(&priv->chan_rx.napi);
...
/* release the clock */
clk_disable_unprepare(priv->clk);
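Finally, the OF match table and the platform driver declaration carry the same "lantiq,xrx200-net" string; the compatible entry is what binds the device, while .name becomes the driver name visible in sysfs: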
{ .compatible = "lantiq,xrx200-net" },          /* in xrx200_match[] */
.name = "lantiq,xrx200-net",                    /* in xrx200_driver.driver */