Lines Matching +full:fu540 +full:- +full:c000 +full:- +full:gpio

1 // SPDX-License-Identifier: GPL-2.0-only
5 * Copyright (C) 2004-2006 Atmel Corporation
10 #include <linux/clk-provider.h>
20 #include <linux/gpio.h>
21 #include <linux/gpio/consumer.h>
25 #include <linux/dma-mapping.h>
40 /* This structure is only used for MACB on SiFive FU540 devices */
54 * (bp)->rx_ring_size)
60 * (bp)->tx_ring_size)
63 #define MACB_TX_WAKEUP_THRESH(bp) (3 * (bp)->tx_ring_size / 4)
74 #define MACB_MAX_TX_LEN ((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1) & ~((unsigned int)(MACB_TX_LEN_ALIGN - 1)))
88 * 1 frame time (10 Mbits/s, full-duplex, ignoring collisions)
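Editor's note: the two TX defines above encode simple arithmetic. MACB_TX_WAKEUP_THRESH is three quarters of the TX ring, and the MAX_TX_LEN define takes the largest value the hardware frame-length field can hold and rounds it down to the controller's TX length alignment with an "x & ~(align - 1)" mask. A minimal standalone sketch of that arithmetic; the field width, alignment and ring size below are illustrative stand-ins, not the values from the driver headers:

    #include <stdio.h>

    /* Illustrative stand-ins for the real driver constants (assumed values). */
    #define TX_FRMLEN_SIZE  11              /* width of the TX frame-length field */
    #define TX_LEN_ALIGN    8               /* required TX length alignment */
    #define TX_RING_SIZE    512             /* TX ring entries */

    /* Largest frame length the field can hold, rounded down to the alignment. */
    #define MAX_TX_LEN      (((1u << TX_FRMLEN_SIZE) - 1) & ~(TX_LEN_ALIGN - 1u))

    /* Wake the transmit queue once no more than 3/4 of the ring is in flight. */
    #define TX_WAKEUP_THRESH(ring)  (3 * (ring) / 4)

    int main(void)
    {
            printf("max tx len  = %u\n", MAX_TX_LEN);                      /* 2040 here */
            printf("wake thresh = %u\n", TX_WAKEUP_THRESH(TX_RING_SIZE));  /* 384 here */
            return 0;
    }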
128 switch (bp->hw_dma_cap) { in macb_dma_desc_get_size()
153 switch (bp->hw_dma_cap) { in macb_adj_dma_desc_idx()
179 return index & (bp->tx_ring_size - 1); in macb_tx_ring_wrap()
185 index = macb_tx_ring_wrap(queue->bp, index); in macb_tx_desc()
186 index = macb_adj_dma_desc_idx(queue->bp, index); in macb_tx_desc()
187 return &queue->tx_ring[index]; in macb_tx_desc()
193 return &queue->tx_skb[macb_tx_ring_wrap(queue->bp, index)]; in macb_tx_skb()
200 offset = macb_tx_ring_wrap(queue->bp, index) * in macb_tx_dma()
201 macb_dma_desc_get_size(queue->bp); in macb_tx_dma()
203 return queue->tx_ring_dma + offset; in macb_tx_dma()
208 return index & (bp->rx_ring_size - 1); in macb_rx_ring_wrap()
213 index = macb_rx_ring_wrap(queue->bp, index); in macb_rx_desc()
214 index = macb_adj_dma_desc_idx(queue->bp, index); in macb_rx_desc()
215 return &queue->rx_ring[index]; in macb_rx_desc()
220 return queue->rx_buffers + queue->bp->rx_buffer_size * in macb_rx_buffer()
221 macb_rx_ring_wrap(queue->bp, index); in macb_rx_buffer()
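Editor's note: the ring-wrap helpers above rely on tx_ring_size and rx_ring_size being powers of two, so a free-running index is reduced to a ring slot with a cheap AND instead of a modulo, and the slot is then turned into a byte offset by multiplying with the per-descriptor size, as macb_tx_dma() does. A standalone sketch of that index handling, with an assumed descriptor size:

    #include <assert.h>
    #include <stdio.h>

    #define RING_SIZE  512u          /* must be a power of two */
    #define DESC_SIZE  16u           /* assumed descriptor size in bytes */

    /* Free-running producer/consumer index -> slot inside the ring. */
    static unsigned int ring_wrap(unsigned int index)
    {
            return index & (RING_SIZE - 1);
    }

    /* Slot -> byte offset from the start of the descriptor area. */
    static unsigned int desc_offset(unsigned int index)
    {
            return ring_wrap(index) * DESC_SIZE;
    }

    int main(void)
    {
            unsigned int head = 1000;        /* indices are not wrapped at update time */

            assert(ring_wrap(head) == head % RING_SIZE);
            printf("slot %u, byte offset %u\n", ring_wrap(head), desc_offset(head));
            return 0;
    }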
227 return __raw_readl(bp->regs + offset); in hw_readl_native()
232 __raw_writel(value, bp->regs + offset); in hw_writel_native()
237 return readl_relaxed(bp->regs + offset); in hw_readl()
242 writel_relaxed(value, bp->regs + offset); in hw_writel()
279 bottom = cpu_to_le32(*((u32 *)bp->dev->dev_addr)); in macb_set_hwaddr()
281 top = cpu_to_le16(*((u16 *)(bp->dev->dev_addr + 4))); in macb_set_hwaddr()
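Editor's note: macb_set_hwaddr() programs the six-byte MAC address as a 32-bit "bottom" word (bytes 0..3) followed by a 16-bit "top" word (bytes 4..5), both little-endian, which is what the cpu_to_le32()/cpu_to_le16() casts above produce. A small userspace sketch of the same packing, with the register writes replaced by prints:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            const uint8_t mac[6] = { 0x02, 0x11, 0x22, 0x33, 0x44, 0x55 };

            /* Bytes 0..3 -> low address register, bytes 4..5 -> high register,
             * least-significant byte first in each word. */
            uint32_t bottom = (uint32_t)mac[0]       | (uint32_t)mac[1] << 8 |
                              (uint32_t)mac[2] << 16 | (uint32_t)mac[3] << 24;
            uint16_t top    = (uint16_t)(mac[4] | mac[5] << 8);

            printf("bottom = 0x%08x, top = 0x%04x\n",
                   (unsigned int)bottom, (unsigned int)top);
            return 0;
    }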
313 memcpy(bp->dev->dev_addr, addr, sizeof(addr)); in macb_get_hwaddr()
318 dev_info(&bp->pdev->dev, "invalid hw address, using random\n"); in macb_get_hwaddr()
319 eth_hw_addr_random(bp->dev); in macb_get_hwaddr()
332 struct macb *bp = bus->priv; in macb_mdio_read()
335 status = pm_runtime_get_sync(&bp->pdev->dev); in macb_mdio_read()
337 pm_runtime_put_noidle(&bp->pdev->dev); in macb_mdio_read()
377 pm_runtime_mark_last_busy(&bp->pdev->dev); in macb_mdio_read()
378 pm_runtime_put_autosuspend(&bp->pdev->dev); in macb_mdio_read()
386 struct macb *bp = bus->priv; in macb_mdio_write()
389 status = pm_runtime_get_sync(&bp->pdev->dev); in macb_mdio_write()
391 pm_runtime_put_noidle(&bp->pdev->dev); in macb_mdio_write()
431 pm_runtime_mark_last_busy(&bp->pdev->dev); in macb_mdio_write()
432 pm_runtime_put_autosuspend(&bp->pdev->dev); in macb_mdio_write()
442 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { in macb_init_buffers()
443 queue_writel(queue, RBQP, lower_32_bits(queue->rx_ring_dma)); in macb_init_buffers()
445 if (bp->hw_dma_cap & HW_DMA_CAP_64B) in macb_init_buffers()
447 upper_32_bits(queue->rx_ring_dma)); in macb_init_buffers()
449 queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma)); in macb_init_buffers()
451 if (bp->hw_dma_cap & HW_DMA_CAP_64B) in macb_init_buffers()
453 upper_32_bits(queue->tx_ring_dma)); in macb_init_buffers()
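Editor's note: on controllers with the 64-bit DMA capability, macb_init_buffers() programs each ring base address in two halves: the low 32 bits into the normal queue-pointer register and the high 32 bits into the corresponding ...H register. A minimal sketch of the lower_32_bits()/upper_32_bits() split used above:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    static uint32_t lower_32_bits(uint64_t v) { return (uint32_t)v; }
    static uint32_t upper_32_bits(uint64_t v) { return (uint32_t)(v >> 32); }

    int main(void)
    {
            uint64_t ring_dma = 0x0000000123456000ULL;      /* example bus address */

            printf("low  word = 0x%08" PRIx32 "\n", lower_32_bits(ring_dma));
            printf("high word = 0x%08" PRIx32 "\n", upper_32_bits(ring_dma));
            return 0;
    }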
459 * macb_set_tx_clk() - Set a clock to a new frequency
492 ferr = abs(rate_rounded - rate); in macb_set_tx_clk()
506 struct net_device *ndev = to_net_dev(config->dev); in macb_validate()
511 if (state->interface != PHY_INTERFACE_MODE_NA && in macb_validate()
512 state->interface != PHY_INTERFACE_MODE_MII && in macb_validate()
513 state->interface != PHY_INTERFACE_MODE_RMII && in macb_validate()
514 state->interface != PHY_INTERFACE_MODE_GMII && in macb_validate()
515 state->interface != PHY_INTERFACE_MODE_SGMII && in macb_validate()
516 !phy_interface_mode_is_rgmii(state->interface)) { in macb_validate()
522 (state->interface == PHY_INTERFACE_MODE_GMII || in macb_validate()
523 phy_interface_mode_is_rgmii(state->interface))) { in macb_validate()
537 if (bp->caps & MACB_CAPS_GIGABIT_MODE_AVAILABLE && in macb_validate()
538 (state->interface == PHY_INTERFACE_MODE_NA || in macb_validate()
539 state->interface == PHY_INTERFACE_MODE_GMII || in macb_validate()
540 state->interface == PHY_INTERFACE_MODE_SGMII || in macb_validate()
541 phy_interface_mode_is_rgmii(state->interface))) { in macb_validate()
545 if (!(bp->caps & MACB_CAPS_NO_GIGABIT_HALF)) in macb_validate()
550 bitmap_and(state->advertising, state->advertising, mask, in macb_validate()
557 state->link = 0; in macb_mac_pcs_get_state()
568 struct net_device *ndev = to_net_dev(config->dev); in macb_mac_config()
573 spin_lock_irqsave(&bp->lock, flags); in macb_mac_config()
577 if (bp->caps & MACB_CAPS_MACB_IS_EMAC) { in macb_mac_config()
578 if (state->interface == PHY_INTERFACE_MODE_RMII) in macb_mac_config()
583 if (state->interface == PHY_INTERFACE_MODE_SGMII) in macb_mac_config()
591 spin_unlock_irqrestore(&bp->lock, flags); in macb_mac_config()
597 struct net_device *ndev = to_net_dev(config->dev); in macb_mac_link_down()
603 if (!(bp->caps & MACB_CAPS_MACB_IS_EMAC)) in macb_mac_link_down()
604 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) in macb_mac_link_down()
606 bp->rx_intr_mask | MACB_TX_INT_FLAGS | MACB_BIT(HRESP)); in macb_mac_link_down()
621 struct net_device *ndev = to_net_dev(config->dev); in macb_mac_link_up()
628 spin_lock_irqsave(&bp->lock, flags); in macb_mac_link_up()
640 if (!(bp->caps & MACB_CAPS_MACB_IS_EMAC)) { in macb_mac_link_up()
652 macb_set_tx_clk(bp->tx_clk, speed, ndev); in macb_mac_link_up()
657 bp->macbgem_ops.mog_init_rings(bp); in macb_mac_link_up()
660 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) in macb_mac_link_up()
662 bp->rx_intr_mask | MACB_TX_INT_FLAGS | MACB_BIT(HRESP)); in macb_mac_link_up()
667 spin_unlock_irqrestore(&bp->lock, flags); in macb_mac_link_up()
686 dn = of_parse_phandle(dn, "phy-handle", 0); in macb_phy_handle_exists()
693 struct device_node *dn = bp->pdev->dev.of_node; in macb_phylink_connect()
694 struct net_device *dev = bp->dev; in macb_phylink_connect()
699 ret = phylink_of_phy_connect(bp->phylink, dn, 0); in macb_phylink_connect()
702 phydev = phy_find_first(bp->mii_bus); in macb_phylink_connect()
705 return -ENXIO; in macb_phylink_connect()
709 ret = phylink_connect_phy(bp->phylink, phydev); in macb_phylink_connect()
717 phylink_start(bp->phylink); in macb_phylink_connect()
727 bp->phylink_config.dev = &dev->dev; in macb_mii_probe()
728 bp->phylink_config.type = PHYLINK_NETDEV; in macb_mii_probe()
730 bp->phylink = phylink_create(&bp->phylink_config, bp->pdev->dev.fwnode, in macb_mii_probe()
731 bp->phy_interface, &macb_phylink_ops); in macb_mii_probe()
732 if (IS_ERR(bp->phylink)) { in macb_mii_probe()
734 PTR_ERR(bp->phylink)); in macb_mii_probe()
735 return PTR_ERR(bp->phylink); in macb_mii_probe()
743 struct device_node *child, *np = bp->pdev->dev.of_node; in macb_mdiobus_register()
746 return mdiobus_register(bp->mii_bus); in macb_mdiobus_register()
760 return of_mdiobus_register(bp->mii_bus, np); in macb_mdiobus_register()
763 return mdiobus_register(bp->mii_bus); in macb_mdiobus_register()
768 int err = -ENXIO; in macb_mii_init()
773 bp->mii_bus = mdiobus_alloc(); in macb_mii_init()
774 if (!bp->mii_bus) { in macb_mii_init()
775 err = -ENOMEM; in macb_mii_init()
779 bp->mii_bus->name = "MACB_mii_bus"; in macb_mii_init()
780 bp->mii_bus->read = &macb_mdio_read; in macb_mii_init()
781 bp->mii_bus->write = &macb_mdio_write; in macb_mii_init()
782 snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x", in macb_mii_init()
783 bp->pdev->name, bp->pdev->id); in macb_mii_init()
784 bp->mii_bus->priv = bp; in macb_mii_init()
785 bp->mii_bus->parent = &bp->pdev->dev; in macb_mii_init()
787 dev_set_drvdata(&bp->dev->dev, bp->mii_bus); in macb_mii_init()
793 err = macb_mii_probe(bp->dev); in macb_mii_init()
800 mdiobus_unregister(bp->mii_bus); in macb_mii_init()
802 mdiobus_free(bp->mii_bus); in macb_mii_init()
809 u32 *p = &bp->hw_stats.macb.rx_pause_frames; in macb_update_stats()
810 u32 *end = &bp->hw_stats.macb.tx_pause_frames + 1; in macb_update_stats()
813 WARN_ON((unsigned long)(end - p - 1) != (MACB_TPF - MACB_PFR) / 4); in macb_update_stats()
816 *p += bp->macb_reg_readl(bp, offset); in macb_update_stats()
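Editor's note: macb_update_stats() works because the driver's hw_stats structure is laid out in the same order as the hardware statistics registers, one 32-bit member per register, so a single pointer walk with the register offset advancing by four accumulates the whole block; the WARN_ON above is a run-time sanity check that the two layouts still line up. A hedged standalone sketch of that pattern with a fake register read and an illustrative three-counter struct (not the real macb_stats layout):

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative counter block, laid out in register order. */
    struct hw_stats {
            uint32_t rx_pause_frames;       /* first counter register */
            uint32_t tx_ok;
            uint32_t tx_pause_frames;       /* last counter register */
    };

    /* Stand-in for the MMIO read of one statistics register. */
    static uint32_t fake_reg_readl(unsigned int offset)
    {
            return offset / 4 + 1;          /* arbitrary test data */
    }

    static void update_stats(struct hw_stats *s, unsigned int first_offset)
    {
            uint32_t *p = &s->rx_pause_frames;
            uint32_t *end = &s->tx_pause_frames + 1;
            unsigned int offset = first_offset;

            /* Accumulate each register into the matching struct member. */
            for (; p < end; p++, offset += 4)
                    *p += fake_reg_readl(offset);
    }

    int main(void)
    {
            struct hw_stats s = { 0 };

            update_stats(&s, 0x100);
            printf("%u %u %u\n", (unsigned)s.rx_pause_frames,
                   (unsigned)s.tx_ok, (unsigned)s.tx_pause_frames);
            return 0;
    }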
836 return -ETIMEDOUT; in macb_halt_tx()
841 if (tx_skb->mapping) { in macb_tx_unmap()
842 if (tx_skb->mapped_as_page) in macb_tx_unmap()
843 dma_unmap_page(&bp->pdev->dev, tx_skb->mapping, in macb_tx_unmap()
844 tx_skb->size, DMA_TO_DEVICE); in macb_tx_unmap()
846 dma_unmap_single(&bp->pdev->dev, tx_skb->mapping, in macb_tx_unmap()
847 tx_skb->size, DMA_TO_DEVICE); in macb_tx_unmap()
848 tx_skb->mapping = 0; in macb_tx_unmap()
851 if (tx_skb->skb) { in macb_tx_unmap()
852 dev_kfree_skb_any(tx_skb->skb); in macb_tx_unmap()
853 tx_skb->skb = NULL; in macb_tx_unmap()
862 if (bp->hw_dma_cap & HW_DMA_CAP_64B) { in macb_set_addr()
864 desc_64->addrh = upper_32_bits(addr); in macb_set_addr()
872 desc->addr = lower_32_bits(addr); in macb_set_addr()
881 if (bp->hw_dma_cap & HW_DMA_CAP_64B) { in macb_get_addr()
883 addr = ((u64)(desc_64->addrh) << 32); in macb_get_addr()
886 addr |= MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, desc->addr)); in macb_get_addr()
894 struct macb *bp = queue->bp; in macb_tx_error_task()
901 netdev_vdbg(bp->dev, "macb_tx_error_task: q = %u, t = %u, h = %u\n", in macb_tx_error_task()
902 (unsigned int)(queue - bp->queues), in macb_tx_error_task()
903 queue->tx_tail, queue->tx_head); in macb_tx_error_task()
911 spin_lock_irqsave(&bp->lock, flags); in macb_tx_error_task()
914 netif_tx_stop_all_queues(bp->dev); in macb_tx_error_task()
922 netdev_err(bp->dev, "BUG: halt tx timed out\n"); in macb_tx_error_task()
927 for (tail = queue->tx_tail; tail != queue->tx_head; tail++) { in macb_tx_error_task()
931 ctrl = desc->ctrl; in macb_tx_error_task()
933 skb = tx_skb->skb; in macb_tx_error_task()
941 skb = tx_skb->skb; in macb_tx_error_task()
948 netdev_vdbg(bp->dev, "txerr skb %u (data %p) TX complete\n", in macb_tx_error_task()
950 skb->data); in macb_tx_error_task()
951 bp->dev->stats.tx_packets++; in macb_tx_error_task()
952 queue->stats.tx_packets++; in macb_tx_error_task()
953 bp->dev->stats.tx_bytes += skb->len; in macb_tx_error_task()
954 queue->stats.tx_bytes += skb->len; in macb_tx_error_task()
957 /* "Buffers exhausted mid-frame" errors may only happen in macb_tx_error_task()
962 netdev_err(bp->dev, in macb_tx_error_task()
963 "BUG: TX buffers exhausted mid-frame\n"); in macb_tx_error_task()
965 desc->ctrl = ctrl | MACB_BIT(TX_USED); in macb_tx_error_task()
974 desc->ctrl = MACB_BIT(TX_USED); in macb_tx_error_task()
980 queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma)); in macb_tx_error_task()
982 if (bp->hw_dma_cap & HW_DMA_CAP_64B) in macb_tx_error_task()
983 queue_writel(queue, TBQPH, upper_32_bits(queue->tx_ring_dma)); in macb_tx_error_task()
986 queue->tx_head = 0; in macb_tx_error_task()
987 queue->tx_tail = 0; in macb_tx_error_task()
994 netif_tx_start_all_queues(bp->dev); in macb_tx_error_task()
997 spin_unlock_irqrestore(&bp->lock, flags); in macb_tx_error_task()
1005 struct macb *bp = queue->bp; in macb_tx_interrupt()
1006 u16 queue_index = queue - bp->queues; in macb_tx_interrupt()
1011 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) in macb_tx_interrupt()
1014 netdev_vdbg(bp->dev, "macb_tx_interrupt status = 0x%03lx\n", in macb_tx_interrupt()
1017 head = queue->tx_head; in macb_tx_interrupt()
1018 for (tail = queue->tx_tail; tail != head; tail++) { in macb_tx_interrupt()
1029 ctrl = desc->ctrl; in macb_tx_interrupt()
1040 skb = tx_skb->skb; in macb_tx_interrupt()
1044 if (unlikely(skb_shinfo(skb)->tx_flags & in macb_tx_interrupt()
1050 tx_skb->skb = NULL; in macb_tx_interrupt()
1052 netdev_vdbg(bp->dev, "skb %u (data %p) TX complete\n", in macb_tx_interrupt()
1054 skb->data); in macb_tx_interrupt()
1055 bp->dev->stats.tx_packets++; in macb_tx_interrupt()
1056 queue->stats.tx_packets++; in macb_tx_interrupt()
1057 bp->dev->stats.tx_bytes += skb->len; in macb_tx_interrupt()
1058 queue->stats.tx_bytes += skb->len; in macb_tx_interrupt()
1073 queue->tx_tail = tail; in macb_tx_interrupt()
1074 if (__netif_subqueue_stopped(bp->dev, queue_index) && in macb_tx_interrupt()
1075 CIRC_CNT(queue->tx_head, queue->tx_tail, in macb_tx_interrupt()
1076 bp->tx_ring_size) <= MACB_TX_WAKEUP_THRESH(bp)) in macb_tx_interrupt()
1077 netif_wake_subqueue(bp->dev, queue_index); in macb_tx_interrupt()
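Editor's note: the TX completion path above uses the linux/circ_buf.h helpers on free-running head/tail indices: CIRC_CNT() is the number of descriptors still in flight, CIRC_SPACE() the number that can still be queued, and the subqueue is only re-woken once the in-flight count has dropped to MACB_TX_WAKEUP_THRESH or below. A standalone sketch with the two macros spelled out (same definitions as circ_buf.h):

    #include <stdio.h>

    #define CIRC_CNT(head, tail, size)   (((head) - (tail)) & ((size) - 1))
    #define CIRC_SPACE(head, tail, size) CIRC_CNT((tail), ((head) + 1), (size))

    #define TX_RING_SIZE      512u
    #define TX_WAKEUP_THRESH  (3 * TX_RING_SIZE / 4)

    int main(void)
    {
            unsigned int head = 700, tail = 300;    /* free-running indices */
            unsigned int inflight = CIRC_CNT(head, tail, TX_RING_SIZE);

            printf("in flight %u, free %u\n",
                   inflight, CIRC_SPACE(head, tail, TX_RING_SIZE));

            /* Mirror of the wake-up condition checked in the TX interrupt. */
            if (inflight <= TX_WAKEUP_THRESH)
                    printf("would wake the subqueue\n");
            return 0;
    }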
1085 struct macb *bp = queue->bp; in gem_rx_refill()
1088 while (CIRC_SPACE(queue->rx_prepared_head, queue->rx_tail, in gem_rx_refill()
1089 bp->rx_ring_size) > 0) { in gem_rx_refill()
1090 entry = macb_rx_ring_wrap(bp, queue->rx_prepared_head); in gem_rx_refill()
1095 queue->rx_prepared_head++; in gem_rx_refill()
1098 if (!queue->rx_skbuff[entry]) { in gem_rx_refill()
1100 skb = netdev_alloc_skb(bp->dev, bp->rx_buffer_size); in gem_rx_refill()
1102 netdev_err(bp->dev, in gem_rx_refill()
1108 paddr = dma_map_single(&bp->pdev->dev, skb->data, in gem_rx_refill()
1109 bp->rx_buffer_size, in gem_rx_refill()
1111 if (dma_mapping_error(&bp->pdev->dev, paddr)) { in gem_rx_refill()
1116 queue->rx_skbuff[entry] = skb; in gem_rx_refill()
1118 if (entry == bp->rx_ring_size - 1) in gem_rx_refill()
1120 desc->ctrl = 0; in gem_rx_refill()
1130 desc->ctrl = 0; in gem_rx_refill()
1132 desc->addr &= ~MACB_BIT(RX_USED); in gem_rx_refill()
1139 netdev_vdbg(bp->dev, "rx ring: queue: %p, prepared head %d, tail %d\n", in gem_rx_refill()
1140 queue, queue->rx_prepared_head, queue->rx_tail); in gem_rx_refill()
1152 desc->addr &= ~MACB_BIT(RX_USED); in discard_partial_frame()
1167 struct macb *bp = queue->bp; in gem_rx()
1179 entry = macb_rx_ring_wrap(bp, queue->rx_tail); in gem_rx()
1185 rxused = (desc->addr & MACB_BIT(RX_USED)) ? true : false; in gem_rx()
1191 /* Ensure ctrl is at least as up-to-date as rxused */ in gem_rx()
1194 ctrl = desc->ctrl; in gem_rx()
1196 queue->rx_tail++; in gem_rx()
1200 netdev_err(bp->dev, in gem_rx()
1202 bp->dev->stats.rx_dropped++; in gem_rx()
1203 queue->stats.rx_dropped++; in gem_rx()
1206 skb = queue->rx_skbuff[entry]; in gem_rx()
1208 netdev_err(bp->dev, in gem_rx()
1210 bp->dev->stats.rx_dropped++; in gem_rx()
1211 queue->stats.rx_dropped++; in gem_rx()
1215 queue->rx_skbuff[entry] = NULL; in gem_rx()
1216 len = ctrl & bp->rx_frm_len_mask; in gem_rx()
1218 netdev_vdbg(bp->dev, "gem_rx %u (len %u)\n", entry, len); in gem_rx()
1221 dma_unmap_single(&bp->pdev->dev, addr, in gem_rx()
1222 bp->rx_buffer_size, DMA_FROM_DEVICE); in gem_rx()
1224 skb->protocol = eth_type_trans(skb, bp->dev); in gem_rx()
1226 if (bp->dev->features & NETIF_F_RXCSUM && in gem_rx()
1227 !(bp->dev->flags & IFF_PROMISC) && in gem_rx()
1229 skb->ip_summed = CHECKSUM_UNNECESSARY; in gem_rx()
1231 bp->dev->stats.rx_packets++; in gem_rx()
1232 queue->stats.rx_packets++; in gem_rx()
1233 bp->dev->stats.rx_bytes += skb->len; in gem_rx()
1234 queue->stats.rx_bytes += skb->len; in gem_rx()
1239 netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n", in gem_rx()
1240 skb->len, skb->csum); in gem_rx()
1244 skb->data, 32, true); in gem_rx()
1263 struct macb *bp = queue->bp; in macb_rx_frame()
1266 len = desc->ctrl & bp->rx_frm_len_mask; in macb_rx_frame()
1268 netdev_vdbg(bp->dev, "macb_rx_frame frags %u - %u (len %u)\n", in macb_rx_frame()
1274 * payload word-aligned. in macb_rx_frame()
1280 skb = netdev_alloc_skb(bp->dev, len + NET_IP_ALIGN); in macb_rx_frame()
1282 bp->dev->stats.rx_dropped++; in macb_rx_frame()
1285 desc->addr &= ~MACB_BIT(RX_USED); in macb_rx_frame()
1302 unsigned int frag_len = bp->rx_buffer_size; in macb_rx_frame()
1307 return -1; in macb_rx_frame()
1309 frag_len = len - offset; in macb_rx_frame()
1314 offset += bp->rx_buffer_size; in macb_rx_frame()
1316 desc->addr &= ~MACB_BIT(RX_USED); in macb_rx_frame()
1326 skb->protocol = eth_type_trans(skb, bp->dev); in macb_rx_frame()
1328 bp->dev->stats.rx_packets++; in macb_rx_frame()
1329 bp->dev->stats.rx_bytes += skb->len; in macb_rx_frame()
1330 netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n", in macb_rx_frame()
1331 skb->len, skb->csum); in macb_rx_frame()
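Editor's note: macb_rx_frame() linearizes a frame that spans several fixed-size RX buffers; every fragment except the last contributes rx_buffer_size bytes, and the last contributes whatever remains of the frame length taken from the descriptor. A standalone sketch of that copy loop; the buffer size and frame length are arbitrary example values:

    #include <stdio.h>
    #include <string.h>

    #define RX_BUFFER_SIZE 128u     /* example per-descriptor buffer size */

    /* Copy 'len' bytes spread over consecutive fixed-size RX buffers
     * into one linear destination buffer. */
    static void copy_frame(unsigned char *dst,
                           unsigned char bufs[][RX_BUFFER_SIZE], unsigned int len)
    {
            unsigned int offset = 0, frag = 0;

            while (offset < len) {
                    unsigned int frag_len = RX_BUFFER_SIZE;

                    if (offset + frag_len > len)
                            frag_len = len - offset;        /* last, partial fragment */

                    memcpy(dst + offset, bufs[frag], frag_len);
                    offset += RX_BUFFER_SIZE;
                    frag++;
            }
    }

    int main(void)
    {
            unsigned char bufs[3][RX_BUFFER_SIZE], frame[300];

            memset(bufs, 0xab, sizeof(bufs));
            copy_frame(frame, bufs, sizeof(frame));         /* 128 + 128 + 44 bytes */
            printf("copied %zu bytes from 3 buffers\n", sizeof(frame));
            return 0;
    }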
1339 struct macb *bp = queue->bp; in macb_init_rx_ring()
1344 addr = queue->rx_buffers_dma; in macb_init_rx_ring()
1345 for (i = 0; i < bp->rx_ring_size; i++) { in macb_init_rx_ring()
1348 desc->ctrl = 0; in macb_init_rx_ring()
1349 addr += bp->rx_buffer_size; in macb_init_rx_ring()
1351 desc->addr |= MACB_BIT(RX_WRAP); in macb_init_rx_ring()
1352 queue->rx_tail = 0; in macb_init_rx_ring()
1358 struct macb *bp = queue->bp; in macb_rx()
1362 int first_frag = -1; in macb_rx()
1364 for (tail = queue->rx_tail; budget > 0; tail++) { in macb_rx()
1371 if (!(desc->addr & MACB_BIT(RX_USED))) in macb_rx()
1374 /* Ensure ctrl is at least as up-to-date as addr */ in macb_rx()
1377 ctrl = desc->ctrl; in macb_rx()
1380 if (first_frag != -1) in macb_rx()
1388 if (unlikely(first_frag == -1)) { in macb_rx()
1394 first_frag = -1; in macb_rx()
1401 budget--; in macb_rx()
1410 netdev_err(bp->dev, "RX queue corruption: reset it\n"); in macb_rx()
1412 spin_lock_irqsave(&bp->lock, flags); in macb_rx()
1418 queue_writel(queue, RBQP, queue->rx_ring_dma); in macb_rx()
1422 spin_unlock_irqrestore(&bp->lock, flags); in macb_rx()
1426 if (first_frag != -1) in macb_rx()
1427 queue->rx_tail = first_frag; in macb_rx()
1429 queue->rx_tail = tail; in macb_rx()
1437 struct macb *bp = queue->bp; in macb_poll()
1444 netdev_vdbg(bp->dev, "poll: status = %08lx, budget = %d\n", in macb_poll()
1447 work_done = bp->macbgem_ops.mog_rx(queue, napi, budget); in macb_poll()
1454 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) in macb_poll()
1458 queue_writel(queue, IER, bp->rx_intr_mask); in macb_poll()
1470 struct net_device *dev = bp->dev; in macb_hresp_error_task()
1475 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { in macb_hresp_error_task()
1476 queue_writel(queue, IDR, bp->rx_intr_mask | in macb_hresp_error_task()
1487 bp->macbgem_ops.mog_init_rings(bp); in macb_hresp_error_task()
1493 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) in macb_hresp_error_task()
1495 bp->rx_intr_mask | in macb_hresp_error_task()
1508 unsigned int head = queue->tx_head; in macb_tx_restart()
1509 unsigned int tail = queue->tx_tail; in macb_tx_restart()
1510 struct macb *bp = queue->bp; in macb_tx_restart()
1512 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) in macb_tx_restart()
1524 struct macb *bp = queue->bp; in macb_wol_interrupt()
1532 spin_lock(&bp->lock); in macb_wol_interrupt()
1537 netdev_vdbg(bp->dev, "MACB WoL: queue = %u, isr = 0x%08lx\n", in macb_wol_interrupt()
1538 (unsigned int)(queue - bp->queues), in macb_wol_interrupt()
1540 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) in macb_wol_interrupt()
1542 pm_wakeup_event(&bp->pdev->dev, 0); in macb_wol_interrupt()
1545 spin_unlock(&bp->lock); in macb_wol_interrupt()
1553 struct macb *bp = queue->bp; in gem_wol_interrupt()
1561 spin_lock(&bp->lock); in gem_wol_interrupt()
1566 netdev_vdbg(bp->dev, "GEM WoL: queue = %u, isr = 0x%08lx\n", in gem_wol_interrupt()
1567 (unsigned int)(queue - bp->queues), in gem_wol_interrupt()
1569 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) in gem_wol_interrupt()
1571 pm_wakeup_event(&bp->pdev->dev, 0); in gem_wol_interrupt()
1574 spin_unlock(&bp->lock); in gem_wol_interrupt()
1582 struct macb *bp = queue->bp; in macb_interrupt()
1583 struct net_device *dev = bp->dev; in macb_interrupt()
1591 spin_lock(&bp->lock); in macb_interrupt()
1596 queue_writel(queue, IDR, -1); in macb_interrupt()
1597 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) in macb_interrupt()
1598 queue_writel(queue, ISR, -1); in macb_interrupt()
1602 netdev_vdbg(bp->dev, "queue = %u, isr = 0x%08lx\n", in macb_interrupt()
1603 (unsigned int)(queue - bp->queues), in macb_interrupt()
1606 if (status & bp->rx_intr_mask) { in macb_interrupt()
1613 queue_writel(queue, IDR, bp->rx_intr_mask); in macb_interrupt()
1614 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) in macb_interrupt()
1617 if (napi_schedule_prep(&queue->napi)) { in macb_interrupt()
1618 netdev_vdbg(bp->dev, "scheduling RX softirq\n"); in macb_interrupt()
1619 __napi_schedule(&queue->napi); in macb_interrupt()
1625 schedule_work(&queue->tx_error_task); in macb_interrupt()
1627 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) in macb_interrupt()
1640 * add that if/when we get our hands on a full-blown MII PHY. in macb_interrupt()
1645 * interrupts but it can be cleared by re-enabling RX. See in macb_interrupt()
1656 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) in macb_interrupt()
1663 bp->hw_stats.gem.rx_overruns++; in macb_interrupt()
1665 bp->hw_stats.macb.rx_overruns++; in macb_interrupt()
1667 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) in macb_interrupt()
1672 tasklet_schedule(&bp->hresp_err_tasklet); in macb_interrupt()
1675 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) in macb_interrupt()
1681 spin_unlock(&bp->lock); in macb_interrupt()
1687 /* Polling receive - used by netconsole and other diagnostic tools
1698 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) in macb_poll_controller()
1699 macb_interrupt(dev->irq, queue); in macb_poll_controller()
1710 unsigned int len, entry, i, tx_head = queue->tx_head; in macb_tx_map()
1714 unsigned int f, nr_frags = skb_shinfo(skb)->nr_frags; in macb_tx_map()
1719 if (skb_shinfo(skb)->gso_size != 0) { in macb_tx_map()
1720 if (ip_hdr(skb)->protocol == IPPROTO_UDP) in macb_tx_map()
1721 /* UDP - UFO */ in macb_tx_map()
1724 /* TCP - TSO */ in macb_tx_map()
1728 /* First, map non-paged data */ in macb_tx_map()
1737 tx_skb = &queue->tx_skb[entry]; in macb_tx_map()
1739 mapping = dma_map_single(&bp->pdev->dev, in macb_tx_map()
1740 skb->data + offset, in macb_tx_map()
1742 if (dma_mapping_error(&bp->pdev->dev, mapping)) in macb_tx_map()
1746 tx_skb->skb = NULL; in macb_tx_map()
1747 tx_skb->mapping = mapping; in macb_tx_map()
1748 tx_skb->size = size; in macb_tx_map()
1749 tx_skb->mapped_as_page = false; in macb_tx_map()
1751 len -= size; in macb_tx_map()
1756 size = min(len, bp->max_tx_length); in macb_tx_map()
1761 const skb_frag_t *frag = &skb_shinfo(skb)->frags[f]; in macb_tx_map()
1766 size = min(len, bp->max_tx_length); in macb_tx_map()
1768 tx_skb = &queue->tx_skb[entry]; in macb_tx_map()
1770 mapping = skb_frag_dma_map(&bp->pdev->dev, frag, in macb_tx_map()
1772 if (dma_mapping_error(&bp->pdev->dev, mapping)) in macb_tx_map()
1776 tx_skb->skb = NULL; in macb_tx_map()
1777 tx_skb->mapping = mapping; in macb_tx_map()
1778 tx_skb->size = size; in macb_tx_map()
1779 tx_skb->mapped_as_page = true; in macb_tx_map()
1781 len -= size; in macb_tx_map()
1790 netdev_err(bp->dev, "BUG! empty skb!\n"); in macb_tx_map()
1795 tx_skb->skb = skb; in macb_tx_map()
1808 desc->ctrl = ctrl; in macb_tx_map()
1813 mss_mfs = skb_shinfo(skb)->gso_size + in macb_tx_map()
1817 mss_mfs = skb_shinfo(skb)->gso_size; in macb_tx_map()
1826 i--; in macb_tx_map()
1828 tx_skb = &queue->tx_skb[entry]; in macb_tx_map()
1831 ctrl = (u32)tx_skb->size; in macb_tx_map()
1836 if (unlikely(entry == (bp->tx_ring_size - 1))) in macb_tx_map()
1840 if (i == queue->tx_head) { in macb_tx_map()
1843 if ((bp->dev->features & NETIF_F_HW_CSUM) && in macb_tx_map()
1844 skb->ip_summed != CHECKSUM_PARTIAL && !lso_ctrl) in macb_tx_map()
1853 macb_set_addr(bp, desc, tx_skb->mapping); in macb_tx_map()
1854 /* desc->addr must be visible to hardware before clearing in macb_tx_map()
1855 * 'TX_USED' bit in desc->ctrl. in macb_tx_map()
1858 desc->ctrl = ctrl; in macb_tx_map()
1859 } while (i != queue->tx_head); in macb_tx_map()
1861 queue->tx_head = tx_head; in macb_tx_map()
1866 netdev_err(bp->dev, "TX DMA map failed\n"); in macb_tx_map()
1868 for (i = queue->tx_head; i != tx_head; i++) { in macb_tx_map()
1887 if (!skb_is_nonlinear(skb) || (ip_hdr(skb)->protocol != IPPROTO_UDP)) in macb_features_check()
1897 if (!IS_ALIGNED(skb_headlen(skb) - hdrlen, MACB_TX_LEN_ALIGN)) in macb_features_check()
1900 nr_frags = skb_shinfo(skb)->nr_frags; in macb_features_check()
1902 nr_frags--; in macb_features_check()
1904 const skb_frag_t *frag = &skb_shinfo(skb)->frags[f]; in macb_features_check()
1915 if (skb->ip_summed != CHECKSUM_PARTIAL) in macb_clear_csum()
1920 return -1; in macb_clear_csum()
1923 * This is required - at least for Zynq, which otherwise calculates in macb_clear_csum()
1926 *(__sum16 *)(skb_checksum_start(skb) + skb->csum_offset) = 0; in macb_clear_csum()
1934 int padlen = ETH_ZLEN - (*skb)->len; in macb_pad_and_fcs()
1940 if (!(ndev->features & NETIF_F_HW_CSUM) || in macb_pad_and_fcs()
1941 !((*skb)->ip_summed != CHECKSUM_PARTIAL) || in macb_pad_and_fcs()
1942 skb_shinfo(*skb)->gso_size) /* Not available for GSO */ in macb_pad_and_fcs()
1961 (*skb)->data = memmove((*skb)->head, (*skb)->data, (*skb)->len); in macb_pad_and_fcs()
1962 skb_set_tail_pointer(*skb, (*skb)->len); in macb_pad_and_fcs()
1966 return -ENOMEM; in macb_pad_and_fcs()
1973 skb_put_zero(*skb, padlen - ETH_FCS_LEN); in macb_pad_and_fcs()
1977 fcs = crc32_le(~0, (*skb)->data, (*skb)->len); in macb_pad_and_fcs()
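Editor's note: when the driver pads a short frame itself, it also has to supply the FCS: a standard Ethernet CRC-32 over the whole padded frame, appended least-significant byte first. The kernel's crc32_le() is the usual reflected CRC-32 with no final inversion, which is why the driver inverts the result before appending it. A self-contained sketch of that computation with a bitwise CRC (polynomial 0xEDB88320):

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Reflected CRC-32, same convention as the kernel's crc32_le():
     * caller supplies the initial value, no final inversion. */
    static uint32_t crc32_le(uint32_t crc, const uint8_t *p, size_t len)
    {
            while (len--) {
                    crc ^= *p++;
                    for (int i = 0; i < 8; i++)
                            crc = (crc >> 1) ^ ((crc & 1) ? 0xEDB88320u : 0);
            }
            return crc;
    }

    int main(void)
    {
            uint8_t frame[64] = { 0 };      /* frame already padded to minimum size */
            size_t len = 60;                /* payload + padding; FCS goes after it */
            uint32_t fcs = ~crc32_le(~0u, frame, len);

            /* Append least-significant byte first, as on the wire. */
            for (int i = 0; i < 4; i++)
                    frame[len + i] = (fcs >> (8 * i)) & 0xff;

            printf("FCS = 0x%08x\n", (unsigned int)fcs);
            return 0;
    }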
1992 struct macb_queue *queue = &bp->queues[queue_index]; in macb_start_xmit()
2009 is_lso = (skb_shinfo(skb)->gso_size != 0); in macb_start_xmit()
2013 if (ip_hdr(skb)->protocol == IPPROTO_UDP) in macb_start_xmit()
2019 netdev_err(bp->dev, "Error - LSO headers fragmented!!!\n"); in macb_start_xmit()
2024 hdrlen = min(skb_headlen(skb), bp->max_tx_length); in macb_start_xmit()
2027 netdev_vdbg(bp->dev, in macb_start_xmit()
2029 queue_index, skb->len, skb->head, skb->data, in macb_start_xmit()
2032 skb->data, 16, true); in macb_start_xmit()
2041 desc_cnt = DIV_ROUND_UP((skb_headlen(skb) - hdrlen), bp->max_tx_length) + 1; in macb_start_xmit()
2043 desc_cnt = DIV_ROUND_UP(skb_headlen(skb), bp->max_tx_length); in macb_start_xmit()
2044 nr_frags = skb_shinfo(skb)->nr_frags; in macb_start_xmit()
2046 frag_size = skb_frag_size(&skb_shinfo(skb)->frags[f]); in macb_start_xmit()
2047 desc_cnt += DIV_ROUND_UP(frag_size, bp->max_tx_length); in macb_start_xmit()
2050 spin_lock_irqsave(&bp->lock, flags); in macb_start_xmit()
2053 if (CIRC_SPACE(queue->tx_head, queue->tx_tail, in macb_start_xmit()
2054 bp->tx_ring_size) < desc_cnt) { in macb_start_xmit()
2056 spin_unlock_irqrestore(&bp->lock, flags); in macb_start_xmit()
2057 netdev_dbg(bp->dev, "tx_head = %u, tx_tail = %u\n", in macb_start_xmit()
2058 queue->tx_head, queue->tx_tail); in macb_start_xmit()
2074 if (CIRC_SPACE(queue->tx_head, queue->tx_tail, bp->tx_ring_size) < 1) in macb_start_xmit()
2078 spin_unlock_irqrestore(&bp->lock, flags); in macb_start_xmit()
2086 bp->rx_buffer_size = MACB_RX_BUFFER_SIZE; in macb_init_rx_buffer_size()
2088 bp->rx_buffer_size = size; in macb_init_rx_buffer_size()
2090 if (bp->rx_buffer_size % RX_BUFFER_MULTIPLE) { in macb_init_rx_buffer_size()
2091 netdev_dbg(bp->dev, in macb_init_rx_buffer_size()
2094 bp->rx_buffer_size = in macb_init_rx_buffer_size()
2095 roundup(bp->rx_buffer_size, RX_BUFFER_MULTIPLE); in macb_init_rx_buffer_size()
2099 netdev_dbg(bp->dev, "mtu [%u] rx_buffer_size [%zu]\n", in macb_init_rx_buffer_size()
2100 bp->dev->mtu, bp->rx_buffer_size); in macb_init_rx_buffer_size()
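Editor's note: macb_init_rx_buffer_size() rounds an MTU-derived size up to a multiple of RX_BUFFER_MULTIPLE because the DMA configuration further down expresses the receive buffer size in units of that multiple. The roundup() arithmetic as a tiny standalone example (the multiple here is an assumed illustration):

    #include <stdio.h>

    #define RX_BUFFER_MULTIPLE 64u          /* assumed value for illustration */

    /* Round x up to the next multiple of m. */
    static unsigned int roundup_to(unsigned int x, unsigned int m)
    {
            return ((x + m - 1) / m) * m;
    }

    int main(void)
    {
            unsigned int bufsz = 1536 + 2;  /* example: not a multiple of 64 */

            printf("%u -> %u\n", bufsz, roundup_to(bufsz, RX_BUFFER_MULTIPLE));
            return 0;
    }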
2112 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { in gem_free_rx_buffers()
2113 if (!queue->rx_skbuff) in gem_free_rx_buffers()
2116 for (i = 0; i < bp->rx_ring_size; i++) { in gem_free_rx_buffers()
2117 skb = queue->rx_skbuff[i]; in gem_free_rx_buffers()
2125 dma_unmap_single(&bp->pdev->dev, addr, bp->rx_buffer_size, in gem_free_rx_buffers()
2131 kfree(queue->rx_skbuff); in gem_free_rx_buffers()
2132 queue->rx_skbuff = NULL; in gem_free_rx_buffers()
2138 struct macb_queue *queue = &bp->queues[0]; in macb_free_rx_buffers()
2140 if (queue->rx_buffers) { in macb_free_rx_buffers()
2141 dma_free_coherent(&bp->pdev->dev, in macb_free_rx_buffers()
2142 bp->rx_ring_size * bp->rx_buffer_size, in macb_free_rx_buffers()
2143 queue->rx_buffers, queue->rx_buffers_dma); in macb_free_rx_buffers()
2144 queue->rx_buffers = NULL; in macb_free_rx_buffers()
2154 bp->macbgem_ops.mog_free_rx_buffers(bp); in macb_free_consistent()
2156 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { in macb_free_consistent()
2157 kfree(queue->tx_skb); in macb_free_consistent()
2158 queue->tx_skb = NULL; in macb_free_consistent()
2159 if (queue->tx_ring) { in macb_free_consistent()
2160 size = TX_RING_BYTES(bp) + bp->tx_bd_rd_prefetch; in macb_free_consistent()
2161 dma_free_coherent(&bp->pdev->dev, size, in macb_free_consistent()
2162 queue->tx_ring, queue->tx_ring_dma); in macb_free_consistent()
2163 queue->tx_ring = NULL; in macb_free_consistent()
2165 if (queue->rx_ring) { in macb_free_consistent()
2166 size = RX_RING_BYTES(bp) + bp->rx_bd_rd_prefetch; in macb_free_consistent()
2167 dma_free_coherent(&bp->pdev->dev, size, in macb_free_consistent()
2168 queue->rx_ring, queue->rx_ring_dma); in macb_free_consistent()
2169 queue->rx_ring = NULL; in macb_free_consistent()
2180 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { in gem_alloc_rx_buffers()
2181 size = bp->rx_ring_size * sizeof(struct sk_buff *); in gem_alloc_rx_buffers()
2182 queue->rx_skbuff = kzalloc(size, GFP_KERNEL); in gem_alloc_rx_buffers()
2183 if (!queue->rx_skbuff) in gem_alloc_rx_buffers()
2184 return -ENOMEM; in gem_alloc_rx_buffers()
2186 netdev_dbg(bp->dev, in gem_alloc_rx_buffers()
2188 bp->rx_ring_size, queue->rx_skbuff); in gem_alloc_rx_buffers()
2195 struct macb_queue *queue = &bp->queues[0]; in macb_alloc_rx_buffers()
2198 size = bp->rx_ring_size * bp->rx_buffer_size; in macb_alloc_rx_buffers()
2199 queue->rx_buffers = dma_alloc_coherent(&bp->pdev->dev, size, in macb_alloc_rx_buffers()
2200 &queue->rx_buffers_dma, GFP_KERNEL); in macb_alloc_rx_buffers()
2201 if (!queue->rx_buffers) in macb_alloc_rx_buffers()
2202 return -ENOMEM; in macb_alloc_rx_buffers()
2204 netdev_dbg(bp->dev, in macb_alloc_rx_buffers()
2206 size, (unsigned long)queue->rx_buffers_dma, queue->rx_buffers); in macb_alloc_rx_buffers()
2216 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { in macb_alloc_consistent()
2217 size = TX_RING_BYTES(bp) + bp->tx_bd_rd_prefetch; in macb_alloc_consistent()
2218 queue->tx_ring = dma_alloc_coherent(&bp->pdev->dev, size, in macb_alloc_consistent()
2219 &queue->tx_ring_dma, in macb_alloc_consistent()
2221 if (!queue->tx_ring) in macb_alloc_consistent()
2223 netdev_dbg(bp->dev, in macb_alloc_consistent()
2225 q, size, (unsigned long)queue->tx_ring_dma, in macb_alloc_consistent()
2226 queue->tx_ring); in macb_alloc_consistent()
2228 size = bp->tx_ring_size * sizeof(struct macb_tx_skb); in macb_alloc_consistent()
2229 queue->tx_skb = kmalloc(size, GFP_KERNEL); in macb_alloc_consistent()
2230 if (!queue->tx_skb) in macb_alloc_consistent()
2233 size = RX_RING_BYTES(bp) + bp->rx_bd_rd_prefetch; in macb_alloc_consistent()
2234 queue->rx_ring = dma_alloc_coherent(&bp->pdev->dev, size, in macb_alloc_consistent()
2235 &queue->rx_ring_dma, GFP_KERNEL); in macb_alloc_consistent()
2236 if (!queue->rx_ring) in macb_alloc_consistent()
2238 netdev_dbg(bp->dev, in macb_alloc_consistent()
2240 size, (unsigned long)queue->rx_ring_dma, queue->rx_ring); in macb_alloc_consistent()
2242 if (bp->macbgem_ops.mog_alloc_rx_buffers(bp)) in macb_alloc_consistent()
2249 return -ENOMEM; in macb_alloc_consistent()
2259 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { in gem_init_rings()
2260 for (i = 0; i < bp->tx_ring_size; i++) { in gem_init_rings()
2263 desc->ctrl = MACB_BIT(TX_USED); in gem_init_rings()
2265 desc->ctrl |= MACB_BIT(TX_WRAP); in gem_init_rings()
2266 queue->tx_head = 0; in gem_init_rings()
2267 queue->tx_tail = 0; in gem_init_rings()
2269 queue->rx_tail = 0; in gem_init_rings()
2270 queue->rx_prepared_head = 0; in gem_init_rings()
2282 macb_init_rx_ring(&bp->queues[0]); in macb_init_rings()
2284 for (i = 0; i < bp->tx_ring_size; i++) { in macb_init_rings()
2285 desc = macb_tx_desc(&bp->queues[0], i); in macb_init_rings()
2287 desc->ctrl = MACB_BIT(TX_USED); in macb_init_rings()
2289 bp->queues[0].tx_head = 0; in macb_init_rings()
2290 bp->queues[0].tx_tail = 0; in macb_init_rings()
2291 desc->ctrl |= MACB_BIT(TX_WRAP); in macb_init_rings()
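Editor's note: both ring-initialization paths mark every TX descriptor as used, so the controller owns nothing until a frame is actually queued, and set the wrap bit on the last descriptor so the DMA engine loops back to slot 0. A standalone sketch with a toy descriptor layout; the bit positions are illustrative, not the hardware ones:

    #include <stdint.h>
    #include <stdio.h>

    #define TX_RING_SIZE 8u
    #define TX_USED      (1u << 31)         /* illustrative bit positions */
    #define TX_WRAP      (1u << 30)

    struct tx_desc {
            uint32_t addr;
            uint32_t ctrl;
    };

    int main(void)
    {
            struct tx_desc ring[TX_RING_SIZE];

            for (unsigned int i = 0; i < TX_RING_SIZE; i++) {
                    ring[i].addr = 0;
                    ring[i].ctrl = TX_USED;                 /* software owns every slot */
            }
            ring[TX_RING_SIZE - 1].ctrl |= TX_WRAP;         /* DMA wraps back to slot 0 */

            printf("last ctrl = 0x%08x\n", (unsigned int)ring[TX_RING_SIZE - 1].ctrl);
            return 0;
    }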
2311 macb_writel(bp, TSR, -1); in macb_reset_hw()
2312 macb_writel(bp, RSR, -1); in macb_reset_hw()
2315 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { in macb_reset_hw()
2316 queue_writel(queue, IDR, -1); in macb_reset_hw()
2318 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) in macb_reset_hw()
2319 queue_writel(queue, ISR, -1); in macb_reset_hw()
2326 unsigned long pclk_hz = clk_get_rate(bp->pclk); in gem_mdc_clk_div()
2352 pclk_hz = clk_get_rate(bp->pclk); in macb_mdc_clk_div()
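Editor's note: gem_mdc_clk_div() and macb_mdc_clk_div() both read the pclk rate and pick a divider that keeps the MDIO management clock within its limit (roughly 2.5 MHz per IEEE 802.3), encoding the result into the network-configuration register's clock-divider field. A hedged sketch of that selection; the divider table and limit below are illustrative, not the exact register encodings:

    #include <stdio.h>

    #define MDC_MAX_HZ 2500000UL            /* ~2.5 MHz MDIO clock limit */

    /* Pick the smallest divider from a fixed table so that pclk/div <= limit.
     * The table is illustrative; the controller supports a specific set. */
    static unsigned int pick_mdc_div(unsigned long pclk_hz)
    {
            static const unsigned int divs[] = { 8, 16, 32, 48, 64, 96, 128, 224 };
            unsigned int i;

            for (i = 0; i < sizeof(divs) / sizeof(divs[0]); i++)
                    if (pclk_hz / divs[i] <= MDC_MAX_HZ)
                            return divs[i];

            return divs[sizeof(divs) / sizeof(divs[0]) - 1];    /* largest as fallback */
    }

    int main(void)
    {
            unsigned long pclk = 150000000UL;       /* example 150 MHz pclk */

            printf("pclk %lu Hz -> MDC divider %u\n", pclk, pick_mdc_div(pclk));
            return 0;
    }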
2386 * - use the correct receive buffer size
2387 * - set best burst length for DMA operations
2389 * - set both rx/tx packet buffers to full memory size
2399 buffer_size = bp->rx_buffer_size / RX_BUFFER_MULTIPLE; in macb_configure_dma()
2401 dmacfg = gem_readl(bp, DMACFG) & ~GEM_BF(RXBS, -1L); in macb_configure_dma()
2402 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { in macb_configure_dma()
2408 if (bp->dma_burst_length) in macb_configure_dma()
2409 dmacfg = GEM_BFINS(FBLDO, bp->dma_burst_length, dmacfg); in macb_configure_dma()
2410 dmacfg |= GEM_BIT(TXPBMS) | GEM_BF(RXBMS, -1L); in macb_configure_dma()
2413 if (bp->native_io) in macb_configure_dma()
2418 if (bp->dev->features & NETIF_F_HW_CSUM) in macb_configure_dma()
2425 if (bp->hw_dma_cap & HW_DMA_CAP_64B) in macb_configure_dma()
2429 if (bp->hw_dma_cap & HW_DMA_CAP_PTP) in macb_configure_dma()
2432 netdev_dbg(bp->dev, "Cadence configure DMA with 0x%08x\n", in macb_configure_dma()
2448 if (bp->caps & MACB_CAPS_JUMBO) in macb_init_hw()
2452 if (bp->dev->flags & IFF_PROMISC) in macb_init_hw()
2454 else if (macb_is_gem(bp) && bp->dev->features & NETIF_F_RXCSUM) in macb_init_hw()
2456 if (!(bp->dev->flags & IFF_BROADCAST)) in macb_init_hw()
2460 if ((bp->caps & MACB_CAPS_JUMBO) && bp->jumbo_max_len) in macb_init_hw()
2461 gem_writel(bp, JML, bp->jumbo_max_len); in macb_init_hw()
2462 bp->rx_frm_len_mask = MACB_RX_FRMLEN_MASK; in macb_init_hw()
2463 if (bp->caps & MACB_CAPS_JUMBO) in macb_init_hw()
2464 bp->rx_frm_len_mask = MACB_RX_JFRMLEN_MASK; in macb_init_hw()
2525 /* Add multicast addresses to the internal multicast-hash table. */
2537 bitnr = hash_get_index(ha->addr); in macb_sethashtable()
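Editor's note: macb_sethashtable() maps each multicast address through hash_get_index(), which folds the 48 destination-address bits into a 6-bit index (bit j of the index is the XOR of address bits j, j+6, ..., j+42) and then sets that bit in the 64-bit hash filter formed by the HRB/HRT register pair. A standalone sketch of that fold, along the lines of the driver's helper:

    #include <stdint.h>
    #include <stdio.h>

    /* Bit 'bitnr' (0..47) of a 6-byte MAC address, LSB of addr[0] first. */
    static int addr_bit(const uint8_t *addr, int bitnr)
    {
            return (addr[bitnr / 8] >> (bitnr % 8)) & 1;
    }

    /* Fold 48 address bits into a 6-bit index:
     * index bit j = XOR of address bits j, j+6, ..., j+42. */
    static unsigned int hash_get_index(const uint8_t *addr)
    {
            unsigned int index = 0;

            for (int j = 0; j < 6; j++) {
                    int bit = 0;

                    for (int i = 0; i < 8; i++)
                            bit ^= addr_bit(addr, i * 6 + j);
                    index |= (unsigned int)bit << j;
            }
            return index;
    }

    int main(void)
    {
            const uint8_t mc[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0xfb };
            unsigned int bitnr = hash_get_index(mc);
            uint64_t hash = (uint64_t)1 << bitnr;   /* low half -> HRB, high half -> HRT */

            printf("hash bit %u, HRB=0x%08x HRT=0x%08x\n", bitnr,
                   (unsigned int)hash, (unsigned int)(hash >> 32));
            return 0;
    }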
2553 if (dev->flags & IFF_PROMISC) { in macb_set_rx_mode()
2565 if (macb_is_gem(bp) && dev->features & NETIF_F_RXCSUM) in macb_set_rx_mode()
2569 if (dev->flags & IFF_ALLMULTI) { in macb_set_rx_mode()
2571 macb_or_gem_writel(bp, HRB, -1); in macb_set_rx_mode()
2572 macb_or_gem_writel(bp, HRT, -1); in macb_set_rx_mode()
2578 } else if (dev->flags & (~IFF_ALLMULTI)) { in macb_set_rx_mode()
2590 size_t bufsz = dev->mtu + ETH_HLEN + ETH_FCS_LEN + NET_IP_ALIGN; in macb_open()
2596 netdev_dbg(bp->dev, "open\n"); in macb_open()
2598 err = pm_runtime_get_sync(&bp->pdev->dev); in macb_open()
2612 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) in macb_open()
2613 napi_enable(&queue->napi); in macb_open()
2623 if (bp->ptp_info) in macb_open()
2624 bp->ptp_info->ptp_init(dev); in macb_open()
2630 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) in macb_open()
2631 napi_disable(&queue->napi); in macb_open()
2634 pm_runtime_put_sync(&bp->pdev->dev); in macb_open()
2647 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) in macb_close()
2648 napi_disable(&queue->napi); in macb_close()
2650 phylink_stop(bp->phylink); in macb_close()
2651 phylink_disconnect_phy(bp->phylink); in macb_close()
2653 spin_lock_irqsave(&bp->lock, flags); in macb_close()
2656 spin_unlock_irqrestore(&bp->lock, flags); in macb_close()
2660 if (bp->ptp_info) in macb_close()
2661 bp->ptp_info->ptp_remove(dev); in macb_close()
2663 pm_runtime_put(&bp->pdev->dev); in macb_close()
2671 return -EBUSY; in macb_change_mtu()
2673 dev->mtu = new_mtu; in macb_change_mtu()
2684 u32 *p = &bp->hw_stats.gem.tx_octets_31_0; in gem_update_stats()
2688 u64 val = bp->macb_reg_readl(bp, offset); in gem_update_stats()
2690 bp->ethtool_stats[i] += val; in gem_update_stats()
2695 val = bp->macb_reg_readl(bp, offset + 4); in gem_update_stats()
2696 bp->ethtool_stats[i] += ((u64)val) << 32; in gem_update_stats()
2702 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) in gem_update_stats()
2703 for (i = 0, stat = &queue->stats.first; i < QUEUE_STATS_LEN; ++i, ++stat) in gem_update_stats()
2704 bp->ethtool_stats[idx++] = *stat; in gem_update_stats()
2709 struct gem_stats *hwstat = &bp->hw_stats.gem; in gem_get_stats()
2710 struct net_device_stats *nstat = &bp->dev->stats; in gem_get_stats()
2714 nstat->rx_errors = (hwstat->rx_frame_check_sequence_errors + in gem_get_stats()
2715 hwstat->rx_alignment_errors + in gem_get_stats()
2716 hwstat->rx_resource_errors + in gem_get_stats()
2717 hwstat->rx_overruns + in gem_get_stats()
2718 hwstat->rx_oversize_frames + in gem_get_stats()
2719 hwstat->rx_jabbers + in gem_get_stats()
2720 hwstat->rx_undersized_frames + in gem_get_stats()
2721 hwstat->rx_length_field_frame_errors); in gem_get_stats()
2722 nstat->tx_errors = (hwstat->tx_late_collisions + in gem_get_stats()
2723 hwstat->tx_excessive_collisions + in gem_get_stats()
2724 hwstat->tx_underrun + in gem_get_stats()
2725 hwstat->tx_carrier_sense_errors); in gem_get_stats()
2726 nstat->multicast = hwstat->rx_multicast_frames; in gem_get_stats()
2727 nstat->collisions = (hwstat->tx_single_collision_frames + in gem_get_stats()
2728 hwstat->tx_multiple_collision_frames + in gem_get_stats()
2729 hwstat->tx_excessive_collisions); in gem_get_stats()
2730 nstat->rx_length_errors = (hwstat->rx_oversize_frames + in gem_get_stats()
2731 hwstat->rx_jabbers + in gem_get_stats()
2732 hwstat->rx_undersized_frames + in gem_get_stats()
2733 hwstat->rx_length_field_frame_errors); in gem_get_stats()
2734 nstat->rx_over_errors = hwstat->rx_resource_errors; in gem_get_stats()
2735 nstat->rx_crc_errors = hwstat->rx_frame_check_sequence_errors; in gem_get_stats()
2736 nstat->rx_frame_errors = hwstat->rx_alignment_errors; in gem_get_stats()
2737 nstat->rx_fifo_errors = hwstat->rx_overruns; in gem_get_stats()
2738 nstat->tx_aborted_errors = hwstat->tx_excessive_collisions; in gem_get_stats()
2739 nstat->tx_carrier_errors = hwstat->tx_carrier_sense_errors; in gem_get_stats()
2740 nstat->tx_fifo_errors = hwstat->tx_underrun; in gem_get_stats()
2752 memcpy(data, &bp->ethtool_stats, sizeof(u64) in gem_get_ethtool_stats()
2762 return GEM_STATS_LEN + bp->num_queues * QUEUE_STATS_LEN; in gem_get_sset_count()
2764 return -EOPNOTSUPP; in gem_get_sset_count()
2782 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { in gem_get_ethtool_strings()
2796 struct net_device_stats *nstat = &bp->dev->stats; in macb_get_stats()
2797 struct macb_stats *hwstat = &bp->hw_stats.macb; in macb_get_stats()
2806 nstat->rx_errors = (hwstat->rx_fcs_errors + in macb_get_stats()
2807 hwstat->rx_align_errors + in macb_get_stats()
2808 hwstat->rx_resource_errors + in macb_get_stats()
2809 hwstat->rx_overruns + in macb_get_stats()
2810 hwstat->rx_oversize_pkts + in macb_get_stats()
2811 hwstat->rx_jabbers + in macb_get_stats()
2812 hwstat->rx_undersize_pkts + in macb_get_stats()
2813 hwstat->rx_length_mismatch); in macb_get_stats()
2814 nstat->tx_errors = (hwstat->tx_late_cols + in macb_get_stats()
2815 hwstat->tx_excessive_cols + in macb_get_stats()
2816 hwstat->tx_underruns + in macb_get_stats()
2817 hwstat->tx_carrier_errors + in macb_get_stats()
2818 hwstat->sqe_test_errors); in macb_get_stats()
2819 nstat->collisions = (hwstat->tx_single_cols + in macb_get_stats()
2820 hwstat->tx_multiple_cols + in macb_get_stats()
2821 hwstat->tx_excessive_cols); in macb_get_stats()
2822 nstat->rx_length_errors = (hwstat->rx_oversize_pkts + in macb_get_stats()
2823 hwstat->rx_jabbers + in macb_get_stats()
2824 hwstat->rx_undersize_pkts + in macb_get_stats()
2825 hwstat->rx_length_mismatch); in macb_get_stats()
2826 nstat->rx_over_errors = hwstat->rx_resource_errors + in macb_get_stats()
2827 hwstat->rx_overruns; in macb_get_stats()
2828 nstat->rx_crc_errors = hwstat->rx_fcs_errors; in macb_get_stats()
2829 nstat->rx_frame_errors = hwstat->rx_align_errors; in macb_get_stats()
2830 nstat->rx_fifo_errors = hwstat->rx_overruns; in macb_get_stats()
2832 nstat->tx_aborted_errors = hwstat->tx_excessive_cols; in macb_get_stats()
2833 nstat->tx_carrier_errors = hwstat->tx_carrier_errors; in macb_get_stats()
2834 nstat->tx_fifo_errors = hwstat->tx_underruns; in macb_get_stats()
2852 regs->version = (macb_readl(bp, MID) & ((1 << MACB_REV_SIZE) - 1)) in macb_get_regs()
2855 tail = macb_tx_ring_wrap(bp, bp->queues[0].tx_tail); in macb_get_regs()
2856 head = macb_tx_ring_wrap(bp, bp->queues[0].tx_head); in macb_get_regs()
2869 regs_buff[10] = macb_tx_dma(&bp->queues[0], tail); in macb_get_regs()
2870 regs_buff[11] = macb_tx_dma(&bp->queues[0], head); in macb_get_regs()
2872 if (!(bp->caps & MACB_CAPS_USRIO_DISABLED)) in macb_get_regs()
2882 if (bp->wol & MACB_WOL_HAS_MAGIC_PACKET) { in macb_get_wol()
2883 phylink_ethtool_get_wol(bp->phylink, wol); in macb_get_wol()
2884 wol->supported |= WAKE_MAGIC; in macb_get_wol()
2886 if (bp->wol & MACB_WOL_ENABLED) in macb_get_wol()
2887 wol->wolopts |= WAKE_MAGIC; in macb_get_wol()
2897 ret = phylink_ethtool_set_wol(bp->phylink, wol); in macb_set_wol()
2901 if (!ret || ret != -EOPNOTSUPP) in macb_set_wol()
2904 if (!(bp->wol & MACB_WOL_HAS_MAGIC_PACKET) || in macb_set_wol()
2905 (wol->wolopts & ~WAKE_MAGIC)) in macb_set_wol()
2906 return -EOPNOTSUPP; in macb_set_wol()
2908 if (wol->wolopts & WAKE_MAGIC) in macb_set_wol()
2909 bp->wol |= MACB_WOL_ENABLED; in macb_set_wol()
2911 bp->wol &= ~MACB_WOL_ENABLED; in macb_set_wol()
2913 device_set_wakeup_enable(&bp->pdev->dev, bp->wol & MACB_WOL_ENABLED); in macb_set_wol()
2923 return phylink_ethtool_ksettings_get(bp->phylink, kset); in macb_get_link_ksettings()
2931 return phylink_ethtool_ksettings_set(bp->phylink, kset); in macb_set_link_ksettings()
2939 ring->rx_max_pending = MAX_RX_RING_SIZE; in macb_get_ringparam()
2940 ring->tx_max_pending = MAX_TX_RING_SIZE; in macb_get_ringparam()
2942 ring->rx_pending = bp->rx_ring_size; in macb_get_ringparam()
2943 ring->tx_pending = bp->tx_ring_size; in macb_get_ringparam()
2953 if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) in macb_set_ringparam()
2954 return -EINVAL; in macb_set_ringparam()
2956 new_rx_size = clamp_t(u32, ring->rx_pending, in macb_set_ringparam()
2960 new_tx_size = clamp_t(u32, ring->tx_pending, in macb_set_ringparam()
2964 if ((new_tx_size == bp->tx_ring_size) && in macb_set_ringparam()
2965 (new_rx_size == bp->rx_ring_size)) { in macb_set_ringparam()
2970 if (netif_running(bp->dev)) { in macb_set_ringparam()
2972 macb_close(bp->dev); in macb_set_ringparam()
2975 bp->rx_ring_size = new_rx_size; in macb_set_ringparam()
2976 bp->tx_ring_size = new_tx_size; in macb_set_ringparam()
2979 macb_open(bp->dev); in macb_set_ringparam()
2990 tsu_clk = devm_clk_get(&bp->pdev->dev, "tsu_clk"); in gem_get_tsu_rate()
2994 else if (!IS_ERR(bp->pclk)) { in gem_get_tsu_rate()
2995 tsu_clk = bp->pclk; in gem_get_tsu_rate()
2998 return -ENOTSUPP; in gem_get_tsu_rate()
3012 if ((bp->hw_dma_cap & HW_DMA_CAP_PTP) == 0) { in gem_get_ts_info()
3017 info->so_timestamping = in gem_get_ts_info()
3024 info->tx_types = in gem_get_ts_info()
3028 info->rx_filters = in gem_get_ts_info()
3032 info->phc_index = bp->ptp_clock ? ptp_clock_index(bp->ptp_clock) : -1; in gem_get_ts_info()
3053 if (bp->ptp_info) in macb_get_ts_info()
3054 return bp->ptp_info->get_ts_info(netdev, info); in macb_get_ts_info()
3061 struct net_device *netdev = bp->dev; in gem_enable_flow_filters()
3066 if (!(netdev->features & NETIF_F_NTUPLE)) in gem_enable_flow_filters()
3071 list_for_each_entry(item, &bp->rx_fs_list.list, list) { in gem_enable_flow_filters()
3072 struct ethtool_rx_flow_spec *fs = &item->fs; in gem_enable_flow_filters()
3075 if (fs->location >= num_t2_scr) in gem_enable_flow_filters()
3078 t2_scr = gem_readl_n(bp, SCRT2, fs->location); in gem_enable_flow_filters()
3084 tp4sp_m = &(fs->m_u.tcp_ip4_spec); in gem_enable_flow_filters()
3086 if (enable && (tp4sp_m->ip4src == 0xFFFFFFFF)) in gem_enable_flow_filters()
3091 if (enable && (tp4sp_m->ip4dst == 0xFFFFFFFF)) in gem_enable_flow_filters()
3096 if (enable && ((tp4sp_m->psrc == 0xFFFF) || (tp4sp_m->pdst == 0xFFFF))) in gem_enable_flow_filters()
3101 gem_writel_n(bp, SCRT2, fs->location, t2_scr); in gem_enable_flow_filters()
3108 uint16_t index = fs->location; in gem_prog_cmp_regs()
3114 tp4sp_v = &(fs->h_u.tcp_ip4_spec); in gem_prog_cmp_regs()
3115 tp4sp_m = &(fs->m_u.tcp_ip4_spec); in gem_prog_cmp_regs()
3118 if (tp4sp_m->ip4src == 0xFFFFFFFF) { in gem_prog_cmp_regs()
3119 /* 1st compare reg - IP source address */ in gem_prog_cmp_regs()
3122 w0 = tp4sp_v->ip4src; in gem_prog_cmp_regs()
3123 w1 = GEM_BFINS(T2DISMSK, 1, w1); /* 32-bit compare */ in gem_prog_cmp_regs()
3132 if (tp4sp_m->ip4dst == 0xFFFFFFFF) { in gem_prog_cmp_regs()
3133 /* 2nd compare reg - IP destination address */ in gem_prog_cmp_regs()
3136 w0 = tp4sp_v->ip4dst; in gem_prog_cmp_regs()
3137 w1 = GEM_BFINS(T2DISMSK, 1, w1); /* 32-bit compare */ in gem_prog_cmp_regs()
3146 if ((tp4sp_m->psrc == 0xFFFF) || (tp4sp_m->pdst == 0xFFFF)) { in gem_prog_cmp_regs()
3147 /* 3rd compare reg - source port, destination port */ in gem_prog_cmp_regs()
3151 if (tp4sp_m->psrc == tp4sp_m->pdst) { in gem_prog_cmp_regs()
3152 w0 = GEM_BFINS(T2MASK, tp4sp_v->psrc, w0); in gem_prog_cmp_regs()
3153 w0 = GEM_BFINS(T2CMP, tp4sp_v->pdst, w0); in gem_prog_cmp_regs()
3154 w1 = GEM_BFINS(T2DISMSK, 1, w1); /* 32-bit compare */ in gem_prog_cmp_regs()
3158 w1 = GEM_BFINS(T2DISMSK, 0, w1); /* 16-bit compare */ in gem_prog_cmp_regs()
3160 if (tp4sp_m->psrc == 0xFFFF) { /* src port */ in gem_prog_cmp_regs()
3161 w0 = GEM_BFINS(T2CMP, tp4sp_v->psrc, w0); in gem_prog_cmp_regs()
3164 w0 = GEM_BFINS(T2CMP, tp4sp_v->pdst, w0); in gem_prog_cmp_regs()
3174 t2_scr = GEM_BFINS(QUEUE, (fs->ring_cookie) & 0xFF, t2_scr); in gem_prog_cmp_regs()
3189 struct ethtool_rx_flow_spec *fs = &cmd->fs; in gem_add_flow_filter()
3192 int ret = -EINVAL; in gem_add_flow_filter()
3197 return -ENOMEM; in gem_add_flow_filter()
3198 memcpy(&newfs->fs, fs, sizeof(newfs->fs)); in gem_add_flow_filter()
3202 fs->flow_type, (int)fs->ring_cookie, fs->location, in gem_add_flow_filter()
3203 htonl(fs->h_u.tcp_ip4_spec.ip4src), in gem_add_flow_filter()
3204 htonl(fs->h_u.tcp_ip4_spec.ip4dst), in gem_add_flow_filter()
3205 htons(fs->h_u.tcp_ip4_spec.psrc), htons(fs->h_u.tcp_ip4_spec.pdst)); in gem_add_flow_filter()
3207 spin_lock_irqsave(&bp->rx_fs_lock, flags); in gem_add_flow_filter()
3210 list_for_each_entry(item, &bp->rx_fs_list.list, list) { in gem_add_flow_filter()
3211 if (item->fs.location > newfs->fs.location) { in gem_add_flow_filter()
3212 list_add_tail(&newfs->list, &item->list); in gem_add_flow_filter()
3215 } else if (item->fs.location == fs->location) { in gem_add_flow_filter()
3217 fs->location); in gem_add_flow_filter()
3218 ret = -EBUSY; in gem_add_flow_filter()
3223 list_add_tail(&newfs->list, &bp->rx_fs_list.list); in gem_add_flow_filter()
3226 bp->rx_fs_list.count++; in gem_add_flow_filter()
3230 spin_unlock_irqrestore(&bp->rx_fs_lock, flags); in gem_add_flow_filter()
3234 spin_unlock_irqrestore(&bp->rx_fs_lock, flags); in gem_add_flow_filter()
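Editor's note: gem_add_flow_filter() keeps the software list of ntuple rules ordered by location and refuses a location that is already taken, which is what the list walk above implements with list_add_tail() in front of the first higher entry. A self-contained sketch of the same ordered insert on a plain singly linked list (the real code uses list_head and struct ethtool_rx_flow_spec):

    #include <stdio.h>
    #include <stdlib.h>

    struct rule {
            unsigned int location;   /* stands in for ethtool_rx_flow_spec.location */
            struct rule *next;
    };

    /* Insert 'loc' keeping the list sorted; return -1 if the slot is taken. */
    static int add_rule(struct rule **head, unsigned int loc)
    {
            struct rule **pos = head, *n;

            for (; *pos; pos = &(*pos)->next) {
                    if ((*pos)->location == loc)
                            return -1;              /* duplicate location: rejected */
                    if ((*pos)->location > loc)
                            break;                  /* insert before this entry */
            }

            n = malloc(sizeof(*n));
            if (!n)
                    return -1;
            n->location = loc;
            n->next = *pos;
            *pos = n;
            return 0;
    }

    int main(void)
    {
            struct rule *rules = NULL;

            add_rule(&rules, 3);
            add_rule(&rules, 1);
            add_rule(&rules, 2);
            for (struct rule *r = rules; r; r = r->next)
                    printf("rule @%u\n", r->location);
            return 0;
    }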
3247 spin_lock_irqsave(&bp->rx_fs_lock, flags); in gem_del_flow_filter()
3249 list_for_each_entry(item, &bp->rx_fs_list.list, list) { in gem_del_flow_filter()
3250 if (item->fs.location == cmd->fs.location) { in gem_del_flow_filter()
3252 fs = &(item->fs); in gem_del_flow_filter()
3255 fs->flow_type, (int)fs->ring_cookie, fs->location, in gem_del_flow_filter()
3256 htonl(fs->h_u.tcp_ip4_spec.ip4src), in gem_del_flow_filter()
3257 htonl(fs->h_u.tcp_ip4_spec.ip4dst), in gem_del_flow_filter()
3258 htons(fs->h_u.tcp_ip4_spec.psrc), in gem_del_flow_filter()
3259 htons(fs->h_u.tcp_ip4_spec.pdst)); in gem_del_flow_filter()
3261 gem_writel_n(bp, SCRT2, fs->location, 0); in gem_del_flow_filter()
3263 list_del(&item->list); in gem_del_flow_filter()
3264 bp->rx_fs_list.count--; in gem_del_flow_filter()
3265 spin_unlock_irqrestore(&bp->rx_fs_lock, flags); in gem_del_flow_filter()
3271 spin_unlock_irqrestore(&bp->rx_fs_lock, flags); in gem_del_flow_filter()
3272 return -EINVAL; in gem_del_flow_filter()
3281 list_for_each_entry(item, &bp->rx_fs_list.list, list) { in gem_get_flow_entry()
3282 if (item->fs.location == cmd->fs.location) { in gem_get_flow_entry()
3283 memcpy(&cmd->fs, &item->fs, sizeof(cmd->fs)); in gem_get_flow_entry()
3287 return -EINVAL; in gem_get_flow_entry()
3297 list_for_each_entry(item, &bp->rx_fs_list.list, list) { in gem_get_all_flow_entries()
3298 if (cnt == cmd->rule_cnt) in gem_get_all_flow_entries()
3299 return -EMSGSIZE; in gem_get_all_flow_entries()
3300 rule_locs[cnt] = item->fs.location; in gem_get_all_flow_entries()
3303 cmd->data = bp->max_tuples; in gem_get_all_flow_entries()
3304 cmd->rule_cnt = cnt; in gem_get_all_flow_entries()
3315 switch (cmd->cmd) { in gem_get_rxnfc()
3317 cmd->data = bp->num_queues; in gem_get_rxnfc()
3320 cmd->rule_cnt = bp->rx_fs_list.count; in gem_get_rxnfc()
3330 "Command parameter %d is not supported\n", cmd->cmd); in gem_get_rxnfc()
3331 ret = -EOPNOTSUPP; in gem_get_rxnfc()
3342 switch (cmd->cmd) { in gem_set_rxnfc()
3344 if ((cmd->fs.location >= bp->max_tuples) in gem_set_rxnfc()
3345 || (cmd->fs.ring_cookie >= bp->num_queues)) { in gem_set_rxnfc()
3346 ret = -EINVAL; in gem_set_rxnfc()
3356 "Command parameter %d is not supported\n", cmd->cmd); in gem_set_rxnfc()
3357 ret = -EOPNOTSUPP; in gem_set_rxnfc()
3399 return -EINVAL; in macb_ioctl()
3401 if (bp->ptp_info) { in macb_ioctl()
3404 return bp->ptp_info->set_hwtst(dev, rq, cmd); in macb_ioctl()
3406 return bp->ptp_info->get_hwtst(dev, rq); in macb_ioctl()
3410 return phylink_mii_ioctl(bp->phylink, rq, cmd); in macb_ioctl()
3433 struct net_device *netdev = bp->dev; in macb_set_rxcsum_feature()
3440 if ((features & NETIF_F_RXCSUM) && !(netdev->flags & IFF_PROMISC)) in macb_set_rxcsum_feature()
3461 netdev_features_t changed = features ^ netdev->features; in macb_set_features()
3480 struct net_device *netdev = bp->dev; in macb_restore_features()
3481 netdev_features_t features = netdev->features; in macb_restore_features()
3519 bp->caps = dt_conf->caps; in macb_configure_caps()
3521 if (hw_is_gem(bp->regs, bp->native_io)) { in macb_configure_caps()
3522 bp->caps |= MACB_CAPS_MACB_IS_GEM; in macb_configure_caps()
3526 bp->caps |= MACB_CAPS_ISR_CLEAR_ON_WRITE; in macb_configure_caps()
3529 bp->caps |= MACB_CAPS_FIFO_MODE; in macb_configure_caps()
3533 dev_err(&bp->pdev->dev, in macb_configure_caps()
3536 bp->hw_dma_cap |= HW_DMA_CAP_PTP; in macb_configure_caps()
3537 bp->ptp_info = &gem_ptp_info; in macb_configure_caps()
3543 dev_dbg(&bp->pdev->dev, "Cadence caps 0x%08x\n", bp->caps); in macb_configure_caps()
3575 pdata = dev_get_platdata(&pdev->dev); in macb_clk_init()
3577 *pclk = pdata->pclk; in macb_clk_init()
3578 *hclk = pdata->hclk; in macb_clk_init()
3580 *pclk = devm_clk_get(&pdev->dev, "pclk"); in macb_clk_init()
3581 *hclk = devm_clk_get(&pdev->dev, "hclk"); in macb_clk_init()
3587 err = -ENODEV; in macb_clk_init()
3589 dev_err(&pdev->dev, "failed to get macb_clk (%d)\n", err); in macb_clk_init()
3596 err = -ENODEV; in macb_clk_init()
3598 dev_err(&pdev->dev, "failed to get hclk (%d)\n", err); in macb_clk_init()
3602 *tx_clk = devm_clk_get_optional(&pdev->dev, "tx_clk"); in macb_clk_init()
3606 *rx_clk = devm_clk_get_optional(&pdev->dev, "rx_clk"); in macb_clk_init()
3610 *tsu_clk = devm_clk_get_optional(&pdev->dev, "tsu_clk"); in macb_clk_init()
3616 dev_err(&pdev->dev, "failed to enable pclk (%d)\n", err); in macb_clk_init()
3622 dev_err(&pdev->dev, "failed to enable hclk (%d)\n", err); in macb_clk_init()
3628 dev_err(&pdev->dev, "failed to enable tx_clk (%d)\n", err); in macb_clk_init()
3634 dev_err(&pdev->dev, "failed to enable rx_clk (%d)\n", err); in macb_clk_init()
3640 dev_err(&pdev->dev, "failed to enable tsu_clk (%d)\n", err); in macb_clk_init()
3670 bp->tx_ring_size = DEFAULT_TX_RING_SIZE; in macb_init()
3671 bp->rx_ring_size = DEFAULT_RX_RING_SIZE; in macb_init()
3678 if (!(bp->queue_mask & (1 << hw_q))) in macb_init()
3681 queue = &bp->queues[q]; in macb_init()
3682 queue->bp = bp; in macb_init()
3683 netif_napi_add(dev, &queue->napi, macb_poll, NAPI_POLL_WEIGHT); in macb_init()
3685 queue->ISR = GEM_ISR(hw_q - 1); in macb_init()
3686 queue->IER = GEM_IER(hw_q - 1); in macb_init()
3687 queue->IDR = GEM_IDR(hw_q - 1); in macb_init()
3688 queue->IMR = GEM_IMR(hw_q - 1); in macb_init()
3689 queue->TBQP = GEM_TBQP(hw_q - 1); in macb_init()
3690 queue->RBQP = GEM_RBQP(hw_q - 1); in macb_init()
3691 queue->RBQS = GEM_RBQS(hw_q - 1); in macb_init()
3693 if (bp->hw_dma_cap & HW_DMA_CAP_64B) { in macb_init()
3694 queue->TBQPH = GEM_TBQPH(hw_q - 1); in macb_init()
3695 queue->RBQPH = GEM_RBQPH(hw_q - 1); in macb_init()
3700 queue->ISR = MACB_ISR; in macb_init()
3701 queue->IER = MACB_IER; in macb_init()
3702 queue->IDR = MACB_IDR; in macb_init()
3703 queue->IMR = MACB_IMR; in macb_init()
3704 queue->TBQP = MACB_TBQP; in macb_init()
3705 queue->RBQP = MACB_RBQP; in macb_init()
3707 if (bp->hw_dma_cap & HW_DMA_CAP_64B) { in macb_init()
3708 queue->TBQPH = MACB_TBQPH; in macb_init()
3709 queue->RBQPH = MACB_RBQPH; in macb_init()
3719 queue->irq = platform_get_irq(pdev, q); in macb_init()
3720 err = devm_request_irq(&pdev->dev, queue->irq, macb_interrupt, in macb_init()
3721 IRQF_SHARED, dev->name, queue); in macb_init()
3723 dev_err(&pdev->dev, in macb_init()
3725 queue->irq, err); in macb_init()
3729 INIT_WORK(&queue->tx_error_task, macb_tx_error_task); in macb_init()
3733 dev->netdev_ops = &macb_netdev_ops; in macb_init()
3737 bp->max_tx_length = GEM_MAX_TX_LEN; in macb_init()
3738 bp->macbgem_ops.mog_alloc_rx_buffers = gem_alloc_rx_buffers; in macb_init()
3739 bp->macbgem_ops.mog_free_rx_buffers = gem_free_rx_buffers; in macb_init()
3740 bp->macbgem_ops.mog_init_rings = gem_init_rings; in macb_init()
3741 bp->macbgem_ops.mog_rx = gem_rx; in macb_init()
3742 dev->ethtool_ops = &gem_ethtool_ops; in macb_init()
3744 bp->max_tx_length = MACB_MAX_TX_LEN; in macb_init()
3745 bp->macbgem_ops.mog_alloc_rx_buffers = macb_alloc_rx_buffers; in macb_init()
3746 bp->macbgem_ops.mog_free_rx_buffers = macb_free_rx_buffers; in macb_init()
3747 bp->macbgem_ops.mog_init_rings = macb_init_rings; in macb_init()
3748 bp->macbgem_ops.mog_rx = macb_rx; in macb_init()
3749 dev->ethtool_ops = &macb_ethtool_ops; in macb_init()
3753 dev->hw_features = NETIF_F_SG; in macb_init()
3757 dev->hw_features |= MACB_NETIF_LSO; in macb_init()
3760 if (macb_is_gem(bp) && !(bp->caps & MACB_CAPS_FIFO_MODE)) in macb_init()
3761 dev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_RXCSUM; in macb_init()
3762 if (bp->caps & MACB_CAPS_SG_DISABLED) in macb_init()
3763 dev->hw_features &= ~NETIF_F_SG; in macb_init()
3764 dev->features = dev->hw_features; in macb_init()
3768 * each 4-tuple define requires 1 T2 screener reg + 3 compare regs in macb_init()
3771 bp->max_tuples = min((GEM_BFEXT(SCR2CMP, reg) / 3), in macb_init()
3773 if (bp->max_tuples > 0) { in macb_init()
3781 dev->hw_features |= NETIF_F_NTUPLE; in macb_init()
3783 INIT_LIST_HEAD(&bp->rx_fs_list.list); in macb_init()
3784 bp->rx_fs_list.count = 0; in macb_init()
3785 spin_lock_init(&bp->rx_fs_lock); in macb_init()
3787 bp->max_tuples = 0; in macb_init()
3790 if (!(bp->caps & MACB_CAPS_USRIO_DISABLED)) { in macb_init()
3792 if (phy_interface_mode_is_rgmii(bp->phy_interface)) in macb_init()
3794 else if (bp->phy_interface == PHY_INTERFACE_MODE_RMII && in macb_init()
3795 (bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII)) in macb_init()
3797 else if (!(bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII)) in macb_init()
3800 if (bp->caps & MACB_CAPS_USRIO_HAS_CLKEN) in macb_init()
3809 if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII) in macb_init()
3826 struct macb_queue *q = &lp->queues[0]; in at91ether_alloc_coherent()
3828 q->rx_ring = dma_alloc_coherent(&lp->pdev->dev, in at91ether_alloc_coherent()
3831 &q->rx_ring_dma, GFP_KERNEL); in at91ether_alloc_coherent()
3832 if (!q->rx_ring) in at91ether_alloc_coherent()
3833 return -ENOMEM; in at91ether_alloc_coherent()
3835 q->rx_buffers = dma_alloc_coherent(&lp->pdev->dev, in at91ether_alloc_coherent()
3838 &q->rx_buffers_dma, GFP_KERNEL); in at91ether_alloc_coherent()
3839 if (!q->rx_buffers) { in at91ether_alloc_coherent()
3840 dma_free_coherent(&lp->pdev->dev, in at91ether_alloc_coherent()
3843 q->rx_ring, q->rx_ring_dma); in at91ether_alloc_coherent()
3844 q->rx_ring = NULL; in at91ether_alloc_coherent()
3845 return -ENOMEM; in at91ether_alloc_coherent()
3853 struct macb_queue *q = &lp->queues[0]; in at91ether_free_coherent()
3855 if (q->rx_ring) { in at91ether_free_coherent()
3856 dma_free_coherent(&lp->pdev->dev, in at91ether_free_coherent()
3859 q->rx_ring, q->rx_ring_dma); in at91ether_free_coherent()
3860 q->rx_ring = NULL; in at91ether_free_coherent()
3863 if (q->rx_buffers) { in at91ether_free_coherent()
3864 dma_free_coherent(&lp->pdev->dev, in at91ether_free_coherent()
3867 q->rx_buffers, q->rx_buffers_dma); in at91ether_free_coherent()
3868 q->rx_buffers = NULL; in at91ether_free_coherent()
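/*
 * Editorial note: the two helpers above follow the standard
 * dma_alloc_coherent()/dma_free_coherent() pairing - every free must pass
 * back the same device, size and DMA handle the allocation returned, and a
 * mid-sequence failure unwinds only what was already allocated.  A minimal
 * sketch of that unwinding shape (hypothetical sizes and variables):
 *
 *	ring = dma_alloc_coherent(dev, ring_sz, &ring_dma, GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *	bufs = dma_alloc_coherent(dev, bufs_sz, &bufs_dma, GFP_KERNEL);
 *	if (!bufs) {
 *		dma_free_coherent(dev, ring_sz, ring, ring_dma);
 *		return -ENOMEM;
 *	}
 */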
3875 struct macb_queue *q = &lp->queues[0]; in at91ether_start()
3885 addr = q->rx_buffers_dma; in at91ether_start()
3889 desc->ctrl = 0; in at91ether_start()
3894 desc->addr |= MACB_BIT(RX_WRAP); in at91ether_start()
3897 q->rx_tail = 0; in at91ether_start()
3900 macb_writel(lp, RBQP, q->rx_ring_dma); in at91ether_start()
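/*
 * Editorial sketch: RX_WRAP in the final descriptor's address word is what
 * turns the flat descriptor array into a ring - once the DMA engine consumes
 * the descriptor carrying RX_WRAP it returns to the base address written to
 * RBQP above.  With a hypothetical 4-entry ring:
 *
 *	desc[0].addr = buf_dma + 0 * AT91ETHER_MAX_RBUFF_SZ;
 *	desc[1].addr = buf_dma + 1 * AT91ETHER_MAX_RBUFF_SZ;
 *	desc[2].addr = buf_dma + 2 * AT91ETHER_MAX_RBUFF_SZ;
 *	desc[3].addr = (buf_dma + 3 * AT91ETHER_MAX_RBUFF_SZ) | MACB_BIT(RX_WRAP);
 */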
3948 ret = pm_runtime_get_sync(&lp->pdev->dev); in at91ether_open()
3950 pm_runtime_put_noidle(&lp->pdev->dev); in at91ether_open()
3975 pm_runtime_put_sync(&lp->pdev->dev); in at91ether_open()
3986 phylink_stop(lp->phylink); in at91ether_close()
3987 phylink_disconnect_phy(lp->phylink); in at91ether_close()
3991 return pm_runtime_put(&lp->pdev->dev); in at91ether_close()
4001 if (lp->rm9200_tx_len < 2) { in at91ether_start_xmit()
4002 int desc = lp->rm9200_tx_tail; in at91ether_start_xmit()
4005 lp->rm9200_txq[desc].skb = skb; in at91ether_start_xmit()
4006 lp->rm9200_txq[desc].size = skb->len; in at91ether_start_xmit()
4007 lp->rm9200_txq[desc].mapping = dma_map_single(&lp->pdev->dev, skb->data, in at91ether_start_xmit()
4008 skb->len, DMA_TO_DEVICE); in at91ether_start_xmit()
4009 if (dma_mapping_error(&lp->pdev->dev, lp->rm9200_txq[desc].mapping)) { in at91ether_start_xmit()
4011 dev->stats.tx_dropped++; in at91ether_start_xmit()
4016 spin_lock_irqsave(&lp->lock, flags); in at91ether_start_xmit()
4018 lp->rm9200_tx_tail = (desc + 1) & 1; in at91ether_start_xmit()
4019 lp->rm9200_tx_len++; in at91ether_start_xmit()
4020 if (lp->rm9200_tx_len > 1) in at91ether_start_xmit()
4023 spin_unlock_irqrestore(&lp->lock, flags); in at91ether_start_xmit()
4026 macb_writel(lp, TAR, lp->rm9200_txq[desc].mapping); in at91ether_start_xmit()
4028 macb_writel(lp, TCR, skb->len); in at91ether_start_xmit()
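/*
 * Editorial sketch: this MAC variant has no TX descriptor ring, only room
 * for two frames in flight, so the driver keeps a two-slot software queue.
 * "(desc + 1) & 1" simply alternates between slot 0 and slot 1, and the
 * stack is throttled once both slots are busy (the interrupt handler below
 * restarts the queue when a slot frees up):
 *
 *	slot = tx_tail;			// 0 or 1
 *	tx_tail = (slot + 1) & 1;	// flip to the other slot
 *	if (++tx_len > 1)
 *		netif_stop_queue(dev);	// both slots in flight
 */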
4044 struct macb_queue *q = &lp->queues[0]; in at91ether_rx()
4050 desc = macb_rx_desc(q, q->rx_tail); in at91ether_rx()
4051 while (desc->addr & MACB_BIT(RX_USED)) { in at91ether_rx()
4052 p_recv = q->rx_buffers + q->rx_tail * AT91ETHER_MAX_RBUFF_SZ; in at91ether_rx()
4053 pktlen = MACB_BF(RX_FRMLEN, desc->ctrl); in at91ether_rx()
4059 skb->protocol = eth_type_trans(skb, dev); in at91ether_rx()
4060 dev->stats.rx_packets++; in at91ether_rx()
4061 dev->stats.rx_bytes += pktlen; in at91ether_rx()
4064 dev->stats.rx_dropped++; in at91ether_rx()
4067 if (desc->ctrl & MACB_BIT(RX_MHASH_MATCH)) in at91ether_rx()
4068 dev->stats.multicast++; in at91ether_rx()
4071 desc->addr &= ~MACB_BIT(RX_USED); in at91ether_rx()
4074 if (q->rx_tail == AT91ETHER_MAX_RX_DESCR - 1) in at91ether_rx()
4075 q->rx_tail = 0; in at91ether_rx()
4077 q->rx_tail++; in at91ether_rx()
4079 desc = macb_rx_desc(q, q->rx_tail); in at91ether_rx()
4106 dev->stats.tx_errors++; in at91ether_interrupt()
4108 spin_lock(&lp->lock); in at91ether_interrupt()
4113 * - all pending packets transmitted (TGO, implies BNQ) in at91ether_interrupt()
4114 * - only first packet transmitted (!TGO && BNQ) in at91ether_interrupt()
4115 * - two frames pending (!TGO && !BNQ) in at91ether_interrupt()
4121 while (lp->rm9200_tx_len > qlen) { in at91ether_interrupt()
4122 desc = (lp->rm9200_tx_tail - lp->rm9200_tx_len) & 1; in at91ether_interrupt()
4123 dev_consume_skb_irq(lp->rm9200_txq[desc].skb); in at91ether_interrupt()
4124 lp->rm9200_txq[desc].skb = NULL; in at91ether_interrupt()
4125 dma_unmap_single(&lp->pdev->dev, lp->rm9200_txq[desc].mapping, in at91ether_interrupt()
4126 lp->rm9200_txq[desc].size, DMA_TO_DEVICE); in at91ether_interrupt()
4127 dev->stats.tx_packets++; in at91ether_interrupt()
4128 dev->stats.tx_bytes += lp->rm9200_txq[desc].size; in at91ether_interrupt()
4129 lp->rm9200_tx_len--; in at91ether_interrupt()
4132 if (lp->rm9200_tx_len < 2 && netif_queue_stopped(dev)) in at91ether_interrupt()
4135 spin_unlock(&lp->lock); in at91ether_interrupt()
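/*
 * Editorial sketch of the completion accounting implied by the three-state
 * comment above (flag names paraphrased from TSR, not the exact macros):
 * the number of frames still queued in hardware is 0, 1 or 2, and every
 * software slot beyond that count can be reclaimed:
 *
 *	qlen = tx_go ? 0 : (buf_not_queued ? 1 : 2);
 *	while (tx_len > qlen)
 *		reclaim_oldest_slot();	// unmap, bump tx_packets/tx_bytes, free skb
 */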
4138 /* Work-around for EMAC Errata section 41.3.1 */ in at91ether_interrupt()
4158 at91ether_interrupt(dev->irq, dev); in at91ether_poll_controller()
4188 *pclk = devm_clk_get(&pdev->dev, "ether_clk"); in at91ether_clk_init()
4194 dev_err(&pdev->dev, "failed to enable pclk (%d)\n", err); in at91ether_clk_init()
4207 bp->queues[0].bp = bp; in at91ether_init()
4209 dev->netdev_ops = &at91ether_netdev_ops; in at91ether_init()
4210 dev->ethtool_ops = &macb_ethtool_ops; in at91ether_init()
4212 err = devm_request_irq(&pdev->dev, dev->irq, at91ether_interrupt, in at91ether_init()
4213 0, dev->name, dev); in at91ether_init()
4227 return mgmt->rate; in fu540_macb_tx_recalc_rate()
4260 iowrite32(1, mgmt->reg); in fu540_macb_tx_set_rate()
4262 iowrite32(0, mgmt->reg); in fu540_macb_tx_set_rate()
4263 mgmt->rate = rate; in fu540_macb_tx_set_rate()
4285 mgmt = devm_kzalloc(&pdev->dev, sizeof(*mgmt), GFP_KERNEL); in fu540_c000_clk_init()
4287 return -ENOMEM; in fu540_c000_clk_init()
4289 init.name = "sifive-gemgxl-mgmt"; in fu540_c000_clk_init()
4294 mgmt->rate = 0; in fu540_c000_clk_init()
4295 mgmt->hw.init = &init; in fu540_c000_clk_init()
4297 *tx_clk = devm_clk_register(&pdev->dev, &mgmt->hw); in fu540_c000_clk_init()
4303 dev_err(&pdev->dev, "failed to enable tx_clk (%d)\n", err); in fu540_c000_clk_init()
4305 dev_info(&pdev->dev, "Registered clk switch '%s'\n", init.name); in fu540_c000_clk_init()
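/*
 * Editorial sketch (structure assumed from the fragments above): the GEMGXL
 * "clock" is a one-bit speed switch exposed through the common clock
 * framework, so registration follows the usual clk_hw pattern - a
 * clk_init_data naming the clock and pointing at a clk_ops whose callbacks
 * poke mgmt->reg, handed to devm_clk_register():
 *
 *	static const struct clk_ops gemgxl_ops_sketch = {
 *		.recalc_rate = fu540_macb_tx_recalc_rate,  // returns mgmt->rate
 *		.set_rate    = fu540_macb_tx_set_rate,     // writes 0/1 to mgmt->reg
 *		// the clock core also needs a round_rate/determine_rate hook
 *		// (not visible in this excerpt)
 *	};
 *
 *	init.ops = &gemgxl_ops_sketch;
 *	init.num_parents = 0;
 *	mgmt->hw.init = &init;
 *	*tx_clk = devm_clk_register(&pdev->dev, &mgmt->hw);
 */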
4312 mgmt->reg = devm_platform_ioremap_resource(pdev, 1); in fu540_c000_init()
4313 if (IS_ERR(mgmt->reg)) in fu540_c000_init()
4314 return PTR_ERR(mgmt->reg); in fu540_c000_init()
4402 { .compatible = "cdns,at32ap7000-macb" },
4403 { .compatible = "cdns,at91sam9260-macb", .data = &at91sam9260_config },
4405 { .compatible = "cdns,np4-macb", .data = &np4_config },
4406 { .compatible = "cdns,pc302-gem", .data = &pc302gem_config },
4408 { .compatible = "cdns,sam9x60-macb", .data = &at91sam9260_config },
4409 { .compatible = "atmel,sama5d2-gem", .data = &sama5d2_config },
4410 { .compatible = "atmel,sama5d3-gem", .data = &sama5d3_config },
4411 { .compatible = "atmel,sama5d3-macb", .data = &sama5d3macb_config },
4412 { .compatible = "atmel,sama5d4-gem", .data = &sama5d4_config },
4413 { .compatible = "cdns,at91rm9200-emac", .data = &emac_config },
4415 { .compatible = "cdns,zynqmp-gem", .data = &zynqmp_config},
4416 { .compatible = "cdns,zynq-gem", .data = &zynq_config },
4417 { .compatible = "sifive,fu540-c000-gem", .data = &fu540_c000_config },
4438 struct clk **) = macb_config->clk_init; in macb_probe()
4439 int (*init)(struct platform_device *) = macb_config->init; in macb_probe()
4440 struct device_node *np = pdev->dev.of_node; in macb_probe()
4454 mem = devm_ioremap_resource(&pdev->dev, regs); in macb_probe()
4462 if (match && match->data) { in macb_probe()
4463 macb_config = match->data; in macb_probe()
4464 clk_init = macb_config->clk_init; in macb_probe()
4465 init = macb_config->init; in macb_probe()
4473 pm_runtime_set_autosuspend_delay(&pdev->dev, MACB_PM_TIMEOUT); in macb_probe()
4474 pm_runtime_use_autosuspend(&pdev->dev); in macb_probe()
4475 pm_runtime_get_noresume(&pdev->dev); in macb_probe()
4476 pm_runtime_set_active(&pdev->dev); in macb_probe()
4477 pm_runtime_enable(&pdev->dev); in macb_probe()
4483 err = -ENOMEM; in macb_probe()
4487 dev->base_addr = regs->start; in macb_probe()
4489 SET_NETDEV_DEV(dev, &pdev->dev); in macb_probe()
4492 bp->pdev = pdev; in macb_probe()
4493 bp->dev = dev; in macb_probe()
4494 bp->regs = mem; in macb_probe()
4495 bp->native_io = native_io; in macb_probe()
4497 bp->macb_reg_readl = hw_readl_native; in macb_probe()
4498 bp->macb_reg_writel = hw_writel_native; in macb_probe()
4500 bp->macb_reg_readl = hw_readl; in macb_probe()
4501 bp->macb_reg_writel = hw_writel; in macb_probe()
4503 bp->num_queues = num_queues; in macb_probe()
4504 bp->queue_mask = queue_mask; in macb_probe()
4506 bp->dma_burst_length = macb_config->dma_burst_length; in macb_probe()
4507 bp->pclk = pclk; in macb_probe()
4508 bp->hclk = hclk; in macb_probe()
4509 bp->tx_clk = tx_clk; in macb_probe()
4510 bp->rx_clk = rx_clk; in macb_probe()
4511 bp->tsu_clk = tsu_clk; in macb_probe()
4513 bp->jumbo_max_len = macb_config->jumbo_max_len; in macb_probe()
4515 bp->wol = 0; in macb_probe()
4516 if (of_get_property(np, "magic-packet", NULL)) in macb_probe()
4517 bp->wol |= MACB_WOL_HAS_MAGIC_PACKET; in macb_probe()
4518 device_set_wakeup_capable(&pdev->dev, bp->wol & MACB_WOL_HAS_MAGIC_PACKET); in macb_probe()
4520 spin_lock_init(&bp->lock); in macb_probe()
4527 dma_set_mask(&pdev->dev, DMA_BIT_MASK(44)); in macb_probe()
4528 bp->hw_dma_cap |= HW_DMA_CAP_64B; in macb_probe()
4533 dev->irq = platform_get_irq(pdev, 0); in macb_probe()
4534 if (dev->irq < 0) { in macb_probe()
4535 err = dev->irq; in macb_probe()
4539 /* MTU range: 68 - 1500 or 10240 */ in macb_probe()
4540 dev->min_mtu = GEM_MTU_MIN_SIZE; in macb_probe()
4541 if (bp->caps & MACB_CAPS_JUMBO) in macb_probe()
4542 dev->max_mtu = gem_readl(bp, JML) - ETH_HLEN - ETH_FCS_LEN; in macb_probe()
4544 dev->max_mtu = ETH_DATA_LEN; in macb_probe()
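/*
 * Worked example (editorial): on a MACB_CAPS_JUMBO part whose JML register
 * reports 10240 bytes, the advertised ceiling becomes
 *
 *	max_mtu = 10240 - ETH_HLEN (14) - ETH_FCS_LEN (4) = 10222
 *
 * while non-jumbo parts keep the standard ETH_DATA_LEN of 1500; min_mtu
 * stays at GEM_MTU_MIN_SIZE in both cases, matching the "68 - 1500 or
 * 10240" comment above.
 */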
4546 if (bp->caps & MACB_CAPS_BD_RD_PREFETCH) { in macb_probe()
4549 bp->rx_bd_rd_prefetch = (2 << (val - 1)) * in macb_probe()
4554 bp->tx_bd_rd_prefetch = (2 << (val - 1)) * in macb_probe()
4558 bp->rx_intr_mask = MACB_RX_INT_FLAGS; in macb_probe()
4559 if (bp->caps & MACB_CAPS_NEEDS_RSTONUBR) in macb_probe()
4560 bp->rx_intr_mask |= MACB_BIT(RXUBR); in macb_probe()
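/*
 * Worked example (editorial; the elided scaling factor a few lines up is
 * assumed to be the per-descriptor size): the buffer-descriptor prefetch
 * fields decode as (2 << (val - 1)) descriptors, so
 *
 *	val = 1  ->  2 descriptors prefetched
 *	val = 2  ->  4 descriptors
 *	val = 3  ->  8 descriptors
 *
 * and the byte counts stored in rx/tx_bd_rd_prefetch are those figures
 * multiplied by the size of one DMA descriptor.
 */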
4563 if (PTR_ERR(mac) == -EPROBE_DEFER) { in macb_probe()
4564 err = -EPROBE_DEFER; in macb_probe()
4567 ether_addr_copy(bp->dev->dev_addr, mac); in macb_probe()
4575 bp->phy_interface = PHY_INTERFACE_MODE_MII; in macb_probe()
4577 bp->phy_interface = interface; in macb_probe()
4592 dev_err(&pdev->dev, "Cannot register net device, aborting.\n"); in macb_probe()
4596 tasklet_setup(&bp->hresp_err_tasklet, macb_hresp_error_task); in macb_probe()
4600 dev->base_addr, dev->irq, dev->dev_addr); in macb_probe()
4602 pm_runtime_mark_last_busy(&bp->pdev->dev); in macb_probe()
4603 pm_runtime_put_autosuspend(&bp->pdev->dev); in macb_probe()
4608 mdiobus_unregister(bp->mii_bus); in macb_probe()
4609 mdiobus_free(bp->mii_bus); in macb_probe()
4620 pm_runtime_disable(&pdev->dev); in macb_probe()
4621 pm_runtime_set_suspended(&pdev->dev); in macb_probe()
4622 pm_runtime_dont_use_autosuspend(&pdev->dev); in macb_probe()
4636 mdiobus_unregister(bp->mii_bus); in macb_remove()
4637 mdiobus_free(bp->mii_bus); in macb_remove()
4640 tasklet_kill(&bp->hresp_err_tasklet); in macb_remove()
4641 pm_runtime_disable(&pdev->dev); in macb_remove()
4642 pm_runtime_dont_use_autosuspend(&pdev->dev); in macb_remove()
4643 if (!pm_runtime_suspended(&pdev->dev)) { in macb_remove()
4644 clk_disable_unprepare(bp->tx_clk); in macb_remove()
4645 clk_disable_unprepare(bp->hclk); in macb_remove()
4646 clk_disable_unprepare(bp->pclk); in macb_remove()
4647 clk_disable_unprepare(bp->rx_clk); in macb_remove()
4648 clk_disable_unprepare(bp->tsu_clk); in macb_remove()
4649 pm_runtime_set_suspended(&pdev->dev); in macb_remove()
4651 phylink_destroy(bp->phylink); in macb_remove()
4662 struct macb_queue *queue = bp->queues; in macb_suspend()
4670 if (bp->wol & MACB_WOL_ENABLED) { in macb_suspend()
4671 spin_lock_irqsave(&bp->lock, flags); in macb_suspend()
4673 macb_writel(bp, TSR, -1); in macb_suspend()
4674 macb_writel(bp, RSR, -1); in macb_suspend()
4675 for (q = 0, queue = bp->queues; q < bp->num_queues; in macb_suspend()
4678 queue_writel(queue, IDR, -1); in macb_suspend()
4680 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) in macb_suspend()
4681 queue_writel(queue, ISR, -1); in macb_suspend()
4686 devm_free_irq(dev, bp->queues[0].irq, bp->queues); in macb_suspend()
4688 err = devm_request_irq(dev, bp->queues[0].irq, gem_wol_interrupt, in macb_suspend()
4689 IRQF_SHARED, netdev->name, bp->queues); in macb_suspend()
4693 bp->queues[0].irq, err); in macb_suspend()
4694 spin_unlock_irqrestore(&bp->lock, flags); in macb_suspend()
4697 queue_writel(bp->queues, IER, GEM_BIT(WOL)); in macb_suspend()
4700 err = devm_request_irq(dev, bp->queues[0].irq, macb_wol_interrupt, in macb_suspend()
4701 IRQF_SHARED, netdev->name, bp->queues); in macb_suspend()
4705 bp->queues[0].irq, err); in macb_suspend()
4706 spin_unlock_irqrestore(&bp->lock, flags); in macb_suspend()
4709 queue_writel(bp->queues, IER, MACB_BIT(WOL)); in macb_suspend()
4712 spin_unlock_irqrestore(&bp->lock, flags); in macb_suspend()
4714 enable_irq_wake(bp->queues[0].irq); in macb_suspend()
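/*
 * Editorial summary of the WoL path above: rather than leaving the normal
 * macb_interrupt() armed across suspend, queue 0's IRQ is rebound to a
 * minimal wake handler and only the WoL event is left unmasked:
 *
 *	devm_free_irq(dev, irq0, queues);                  // drop fast-path handler
 *	devm_request_irq(dev, irq0, *_wol_interrupt, ...); // GEM or MACB variant
 *	queue_writel(queue0, IER, <WOL bit>);              // unmask WoL only
 *	enable_irq_wake(irq0);                             // allow system wakeup
 *
 * macb_resume() below undoes each step in reverse before normal RX/TX
 * interrupts are re-enabled.
 */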
4718 for (q = 0, queue = bp->queues; q < bp->num_queues; in macb_suspend()
4720 napi_disable(&queue->napi); in macb_suspend()
4722 if (!(bp->wol & MACB_WOL_ENABLED)) { in macb_suspend()
4724 phylink_stop(bp->phylink); in macb_suspend()
4726 spin_lock_irqsave(&bp->lock, flags); in macb_suspend()
4728 spin_unlock_irqrestore(&bp->lock, flags); in macb_suspend()
4731 if (!(bp->caps & MACB_CAPS_USRIO_DISABLED)) in macb_suspend()
4732 bp->pm_data.usrio = macb_or_gem_readl(bp, USRIO); in macb_suspend()
4734 if (netdev->hw_features & NETIF_F_NTUPLE) in macb_suspend()
4735 bp->pm_data.scrt2 = gem_readl_n(bp, ETHT, SCRT2_ETHT); in macb_suspend()
4737 if (bp->ptp_info) in macb_suspend()
4738 bp->ptp_info->ptp_remove(netdev); in macb_suspend()
4749 struct macb_queue *queue = bp->queues; in macb_resume()
4760 if (bp->wol & MACB_WOL_ENABLED) { in macb_resume()
4761 spin_lock_irqsave(&bp->lock, flags); in macb_resume()
4764 queue_writel(bp->queues, IDR, GEM_BIT(WOL)); in macb_resume()
4767 queue_writel(bp->queues, IDR, MACB_BIT(WOL)); in macb_resume()
4771 queue_readl(bp->queues, ISR); in macb_resume()
4772 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) in macb_resume()
4773 queue_writel(bp->queues, ISR, -1); in macb_resume()
4775 devm_free_irq(dev, bp->queues[0].irq, bp->queues); in macb_resume()
4776 err = devm_request_irq(dev, bp->queues[0].irq, macb_interrupt, in macb_resume()
4777 IRQF_SHARED, netdev->name, bp->queues); in macb_resume()
4781 bp->queues[0].irq, err); in macb_resume()
4782 spin_unlock_irqrestore(&bp->lock, flags); in macb_resume()
4785 spin_unlock_irqrestore(&bp->lock, flags); in macb_resume()
4787 disable_irq_wake(bp->queues[0].irq); in macb_resume()
4793 phylink_stop(bp->phylink); in macb_resume()
4797 for (q = 0, queue = bp->queues; q < bp->num_queues; in macb_resume()
4799 napi_enable(&queue->napi); in macb_resume()
4801 if (netdev->hw_features & NETIF_F_NTUPLE) in macb_resume()
4802 gem_writel_n(bp, ETHT, SCRT2_ETHT, bp->pm_data.scrt2); in macb_resume()
4804 if (!(bp->caps & MACB_CAPS_USRIO_DISABLED)) in macb_resume()
4805 macb_or_gem_writel(bp, USRIO, bp->pm_data.usrio); in macb_resume()
4812 phylink_start(bp->phylink); in macb_resume()
4816 if (bp->ptp_info) in macb_resume()
4817 bp->ptp_info->ptp_init(netdev); in macb_resume()
4828 clk_disable_unprepare(bp->tx_clk); in macb_runtime_suspend()
4829 clk_disable_unprepare(bp->hclk); in macb_runtime_suspend()
4830 clk_disable_unprepare(bp->pclk); in macb_runtime_suspend()
4831 clk_disable_unprepare(bp->rx_clk); in macb_runtime_suspend()
4833 clk_disable_unprepare(bp->tsu_clk); in macb_runtime_suspend()
4844 clk_prepare_enable(bp->pclk); in macb_runtime_resume()
4845 clk_prepare_enable(bp->hclk); in macb_runtime_resume()
4846 clk_prepare_enable(bp->tx_clk); in macb_runtime_resume()
4847 clk_prepare_enable(bp->rx_clk); in macb_runtime_resume()
4849 clk_prepare_enable(bp->tsu_clk); in macb_runtime_resume()