Lines matching "mac", "clk", "tx" (Atheros ag71xx built-in ethernet MAC driver)

1 // SPDX-License-Identifier: GPL-2.0
2 /* Atheros AR71xx built-in ethernet mac driver
11 * David Bauer <mail@david-bauer.net>
14 * Hauke Mehrtens <hauke@hauke-m.de>
15 * Johann Neuhauser <johann@it-neuhauser.de>
17 * Jo-Philipp Wich <jo@mein.io>
38 #include <linux/clk.h>
41 /* For our NAPI weight bigger does *NOT* mean better - it means more
42 * D-cache misses and lots more wasted cycles than we'll ever
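A minimal sketch of how a weight like this feeds the NAPI API; only netif_napi_add() and napi_complete_done() are real kernel calls here, example_poll() and enable_rx_irq() are hypothetical stand-ins for illustration.

    /* Hedged sketch: the weight becomes the per-poll packet budget. */
    static int example_poll(struct napi_struct *napi, int budget)
    {
            int done = 0;

            /* ... receive up to `budget` packets, incrementing done ... */

            if (done < budget && napi_complete_done(napi, done))
                    enable_rx_irq();        /* hypothetical helper */

            return done;
    }

    /* registration, as this driver also does further down: */
    netif_napi_add(ndev, &ag->napi, example_poll, AG71XX_NAPI_WEIGHT);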
69 #define MAC_CFG1_TXE BIT(0) /* Tx Enable */
70 #define MAC_CFG1_STX BIT(1) /* Synchronize Tx Enable */
73 #define MAC_CFG1_TFC BIT(4) /* Tx Flow Control Enable */
127 #define FIFO_CFG0_TXS BIT(3) /* Tx System Module */
128 #define FIFO_CFG0_TXF BIT(4) /* Tx Fabric Module */
193 #define TX_CTRL_TXE BIT(0) /* Tx Enable */
198 #define TX_STATUS_UR BIT(1) /* Tx Underrun */
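As one hedged illustration of how these masks are consumed, enabling the MAC Tx path is presumably a read-modify-write via the ag71xx_sb() helper that appears further down; AG71XX_REG_MAC_CFG1 is assumed here to be the register backing the MAC_CFG1_* bits.

    /* assumed usage: set Tx enable plus Tx flow control in MAC CFG1 */
    ag71xx_sb(ag, AG71XX_REG_MAC_CFG1, MAC_CFG1_TXE | MAC_CFG1_TFC);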
245 { 0x0080, GENMASK(17, 0), "Tx/Rx 64 Byte", },
246 { 0x0084, GENMASK(17, 0), "Tx/Rx 65-127 Byte", },
247 { 0x0088, GENMASK(17, 0), "Tx/Rx 128-255 Byte", },
248 { 0x008C, GENMASK(17, 0), "Tx/Rx 256-511 Byte", },
249 { 0x0090, GENMASK(17, 0), "Tx/Rx 512-1023 Byte", },
250 { 0x0094, GENMASK(17, 0), "Tx/Rx 1024-1518 Byte", },
251 { 0x0098, GENMASK(17, 0), "Tx/Rx 1519-1522 Byte VLAN", },
269 { 0x00E0, GENMASK(23, 0), "Tx Byte", },
270 { 0x00E4, GENMASK(17, 0), "Tx Packet", },
271 { 0x00E8, GENMASK(17, 0), "Tx Multicast Packet", },
272 { 0x00EC, GENMASK(17, 0), "Tx Broadcast Packet", },
273 { 0x00F0, GENMASK(11, 0), "Tx Pause Control Frame", },
274 { 0x00F4, GENMASK(11, 0), "Tx Deferral Packet", },
275 { 0x00F8, GENMASK(11, 0), "Tx Excessive Deferral Packet", },
276 { 0x00FC, GENMASK(11, 0), "Tx Single Collision Packet", },
277 { 0x0100, GENMASK(11, 0), "Tx Multiple Collision", },
278 { 0x0104, GENMASK(11, 0), "Tx Late Collision Packet", },
279 { 0x0108, GENMASK(11, 0), "Tx Excessive Collision Packet", },
280 { 0x010C, GENMASK(12, 0), "Tx Total Collision", },
281 { 0x0110, GENMASK(11, 0), "Tx Pause Frames Honored", },
282 { 0x0114, GENMASK(11, 0), "Tx Drop Frame", },
283 { 0x0118, GENMASK(11, 0), "Tx Jabber Frame", },
284 { 0x011C, GENMASK(11, 0), "Tx FCS Error", },
285 { 0x0120, GENMASK(11, 0), "Tx Control Frame", },
286 { 0x0124, GENMASK(11, 0), "Tx Oversize Frame", },
287 { 0x0128, GENMASK(11, 0), "Tx Undersize Frame", },
288 { 0x012C, GENMASK(11, 0), "Tx Fragment", },
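The { offset, mask, name } rows above read like an ethtool statistics descriptor table: each entry names one hardware counter register and the valid bit width of its value. A hedged sketch of the reader such a table implies; the array name ag71xx_statistics and the struct layout are assumptions consistent with the rows.

    struct ag71xx_statistic {
            unsigned short offset;  /* counter register offset */
            u32 mask;               /* valid counter bits */
            const char *name;
    };

    /* hedged sketch: dump each hardware counter, masked to its width */
    static void example_get_stats(struct ag71xx *ag, u64 *data)
    {
            int i;

            for (i = 0; i < ARRAY_SIZE(ag71xx_statistics); i++)
                    *data++ = ag71xx_rr(ag, ag71xx_statistics[i].offset)
                            & ag71xx_statistics[i].mask;
    }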
309 } tx; member
322 /* "Cold" fields - not used in the data path. */
349 /* Critical data related to the per-packet data path are clustered
350 * early in this structure to help improve the D-cache footprint.
364 /* From this point onwards we're not looking at per-packet fields. */
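The comments above describe a common layout trick: fields touched on every packet are clustered at the front of the private struct so they share cache lines, and rarely-used "cold" fields follow. A generic hedged sketch of the idea, not the driver's actual field order:

    struct example_priv {
            /* hot: touched on every packet, kept on the first cache lines */
            struct ag71xx_ring rx_ring ____cacheline_aligned;
            struct ag71xx_ring tx_ring;

            /* cold: configuration and management, not in the data path */
            struct mii_bus *mii_bus;
            struct reset_control *mac_reset;
    };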
384 struct clk *clk_mdio;
385 struct clk *clk_eth;
390 return (desc->ctrl & DESC_EMPTY) != 0; in ag71xx_desc_empty()
395 return (struct ag71xx_desc *)&ring->descs_cpu[idx * AG71XX_DESC_SIZE]; in ag71xx_ring_desc()
400 return fls(size - 1); in ag71xx_ring_size_order()
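ag71xx_ring_size_order() relies on the kernel's fls() ("find last set", 1-based): for a power-of-two size, fls(size - 1) equals log2(size). A standalone worked check, with fls() rebuilt from __builtin_clz() for illustration:

    #include <assert.h>

    static int fls32(unsigned int x)    /* kernel-style fls(): fls(0) == 0 */
    {
            return x ? 32 - __builtin_clz(x) : 0;
    }

    int main(void)
    {
            assert(fls32(256 - 1) == 8);    /* BIT(8) == 256 descriptors */
            assert(fls32(128 - 1) == 7);    /* BIT(7) == 128 */
            return 0;
    }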
405 return ag->dcfg->type == type; in ag71xx_is()
410 iowrite32(value, ag->mac_base + reg); in ag71xx_wr()
412 (void)ioread32(ag->mac_base + reg); in ag71xx_wr()
417 return ioread32(ag->mac_base + reg); in ag71xx_rr()
424 r = ag->mac_base + reg; in ag71xx_sb()
434 r = ag->mac_base + reg; in ag71xx_cb()
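ag71xx_sb()/ag71xx_cb() appear to be set-bits/clear-bits helpers over the MMIO window; a hedged reconstruction consistent with the fragments above, where the trailing readback mirrors the posted-write flush visible in ag71xx_wr():

    static void ag71xx_sb(struct ag71xx *ag, unsigned int reg, u32 mask)
    {
            void __iomem *r = ag->mac_base + reg;

            iowrite32(ioread32(r) | mask, r);   /* set the masked bits */
            (void)ioread32(r);                  /* flush the posted write */
    }

    /* ag71xx_cb() is presumably identical with `& ~mask` in place of `| mask` */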
455 strlcpy(info->driver, "ag71xx", sizeof(info->driver)); in ag71xx_get_drvinfo()
456 strlcpy(info->bus_info, of_node_full_name(ag->pdev->dev.of_node), in ag71xx_get_drvinfo()
457 sizeof(info->bus_info)); in ag71xx_get_drvinfo()
465 return phylink_ethtool_ksettings_get(ag->phylink, kset); in ag71xx_get_link_ksettings()
473 return phylink_ethtool_ksettings_set(ag->phylink, kset); in ag71xx_set_link_ksettings()
480 return phylink_ethtool_nway_reset(ag->phylink); in ag71xx_ethtool_nway_reset()
488 phylink_ethtool_get_pauseparam(ag->phylink, pause); in ag71xx_ethtool_get_pauseparam()
496 return phylink_ethtool_set_pauseparam(ag->phylink, pause); in ag71xx_ethtool_set_pauseparam()
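These thin wrappers forward everything to phylink; they are presumably collected into the ethtool_ops table that the probe path later assigns to ndev->ethtool_ops. A hedged reconstruction of that table:

    static const struct ethtool_ops ag71xx_ethtool_ops = {
            .get_drvinfo            = ag71xx_get_drvinfo,
            .get_link               = ethtool_op_get_link,
            .get_link_ksettings     = ag71xx_get_link_ksettings,
            .set_link_ksettings     = ag71xx_set_link_ksettings,
            .nway_reset             = ag71xx_ethtool_nway_reset,
            .get_pauseparam         = ag71xx_ethtool_get_pauseparam,
            .set_pauseparam         = ag71xx_ethtool_set_pauseparam,
    };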
526 return -EOPNOTSUPP; in ag71xx_ethtool_get_sset_count()
545 struct net_device *ndev = ag->ndev; in ag71xx_mdio_wait_busy()
562 return -ETIMEDOUT; in ag71xx_mdio_wait_busy()
567 struct ag71xx *ag = bus->priv; in ag71xx_mdio_mii_read()
587 netif_dbg(ag, link, ag->ndev, "mii_read: addr=%04x, reg=%04x, value=%04x\n", in ag71xx_mdio_mii_read()
596 struct ag71xx *ag = bus->priv; in ag71xx_mdio_mii_write()
598 netif_dbg(ag, link, ag->ndev, "mii_write: addr=%04x, reg=%04x, value=%04x\n", in ag71xx_mdio_mii_write()
626 ref_clock = clk_get_rate(ag->clk_mdio); in ag71xx_mdio_get_divider()
628 return -EINVAL; in ag71xx_mdio_get_divider()
651 return -ENOENT; in ag71xx_mdio_get_divider()
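From these fragments, ag71xx_mdio_get_divider() derives an MDC divider from the MDIO reference clock rate and returns -ENOENT when nothing fits. A hedged sketch of that shape; the divider values and the 2.5 MHz MDC ceiling are assumptions, not read from the source:

    /* hedged sketch: pick the first divider that keeps MDC in spec */
    static int example_get_divider(unsigned long ref_clock, u32 *out)
    {
            static const u32 table[] = { 4, 6, 8, 10, 14, 20, 28 };
            int i;

            if (!ref_clock)
                    return -EINVAL;

            for (i = 0; i < ARRAY_SIZE(table); i++) {
                    if (ref_clock / table[i] <= 2500000) {  /* assumed MDC max */
                            *out = table[i];
                            return 0;
                    }
            }

            return -ENOENT;
    }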
656 struct ag71xx *ag = bus->priv; in ag71xx_mdio_reset()
675 struct device *dev = &ag->pdev->dev; in ag71xx_mdio_probe()
676 struct net_device *ndev = ag->ndev; in ag71xx_mdio_probe()
681 np = dev->of_node; in ag71xx_mdio_probe()
682 ag->mii_bus = NULL; in ag71xx_mdio_probe()
684 ag->clk_mdio = devm_clk_get(dev, "mdio"); in ag71xx_mdio_probe()
685 if (IS_ERR(ag->clk_mdio)) { in ag71xx_mdio_probe()
686 netif_err(ag, probe, ndev, "Failed to get mdio clk.\n"); in ag71xx_mdio_probe()
687 return PTR_ERR(ag->clk_mdio); in ag71xx_mdio_probe()
690 err = clk_prepare_enable(ag->clk_mdio); in ag71xx_mdio_probe()
692 netif_err(ag, probe, ndev, "Failed to enable mdio clk.\n"); in ag71xx_mdio_probe()
698 err = -ENOMEM; in ag71xx_mdio_probe()
702 ag->mdio_reset = of_reset_control_get_exclusive(np, "mdio"); in ag71xx_mdio_probe()
703 if (IS_ERR(ag->mdio_reset)) { in ag71xx_mdio_probe()
705 err = PTR_ERR(ag->mdio_reset); in ag71xx_mdio_probe()
709 mii_bus->name = "ag71xx_mdio"; in ag71xx_mdio_probe()
710 mii_bus->read = ag71xx_mdio_mii_read; in ag71xx_mdio_probe()
711 mii_bus->write = ag71xx_mdio_mii_write; in ag71xx_mdio_probe()
712 mii_bus->reset = ag71xx_mdio_reset; in ag71xx_mdio_probe()
713 mii_bus->priv = ag; in ag71xx_mdio_probe()
714 mii_bus->parent = dev; in ag71xx_mdio_probe()
715 snprintf(mii_bus->id, MII_BUS_ID_SIZE, "%s.%d", np->name, ag->mac_idx); in ag71xx_mdio_probe()
717 if (!IS_ERR(ag->mdio_reset)) { in ag71xx_mdio_probe()
718 reset_control_assert(ag->mdio_reset); in ag71xx_mdio_probe()
720 reset_control_deassert(ag->mdio_reset); in ag71xx_mdio_probe()
730 ag->mii_bus = mii_bus; in ag71xx_mdio_probe()
735 clk_disable_unprepare(ag->clk_mdio); in ag71xx_mdio_probe()
741 if (ag->mii_bus) in ag71xx_mdio_remove()
742 mdiobus_unregister(ag->mii_bus); in ag71xx_mdio_remove()
743 clk_disable_unprepare(ag->clk_mdio); in ag71xx_mdio_remove()
748 /* disable all interrupts and stop the rx/tx engine */ in ag71xx_hw_stop()
759 timestamp = netdev_get_tx_queue(ag->ndev, 0)->trans_start; in ag71xx_check_dma_stuck()
763 if (!netif_carrier_ok(ag->ndev)) in ag71xx_check_dma_stuck()
781 struct ag71xx_ring *ring = &ag->tx_ring; in ag71xx_tx_packets()
783 struct net_device *ndev = ag->ndev; in ag71xx_tx_packets()
787 ring_mask = BIT(ring->order) - 1; in ag71xx_tx_packets()
788 ring_size = BIT(ring->order); in ag71xx_tx_packets()
790 netif_dbg(ag, tx_queued, ndev, "processing TX ring\n"); in ag71xx_tx_packets()
792 while (ring->dirty + n != ring->curr) { in ag71xx_tx_packets()
797 i = (ring->dirty + n) & ring_mask; in ag71xx_tx_packets()
799 skb = ring->buf[i].tx.skb; in ag71xx_tx_packets()
802 if (ag->dcfg->tx_hang_workaround && in ag71xx_tx_packets()
804 schedule_delayed_work(&ag->restart_work, in ag71xx_tx_packets()
812 desc->ctrl |= DESC_EMPTY; in ag71xx_tx_packets()
819 ring->buf[i].tx.skb = NULL; in ag71xx_tx_packets()
821 bytes_compl += ring->buf[i].tx.len; in ag71xx_tx_packets()
824 ring->dirty += n; in ag71xx_tx_packets()
828 n--; in ag71xx_tx_packets()
837 ag->ndev->stats.tx_bytes += bytes_compl; in ag71xx_tx_packets()
838 ag->ndev->stats.tx_packets += sent; in ag71xx_tx_packets()
840 netdev_completed_queue(ag->ndev, sent, bytes_compl); in ag71xx_tx_packets()
841 if ((ring->curr - ring->dirty) < (ring_size * 3) / 4) in ag71xx_tx_packets()
842 netif_wake_queue(ag->ndev); in ag71xx_tx_packets()
845 cancel_delayed_work(&ag->restart_work); in ag71xx_tx_packets()
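ring->curr and ring->dirty are free-running u32 counters: the slot index is recovered with `& ring_mask`, and the in-flight count with plain subtraction, which stays correct across 32-bit wraparound. A standalone check of the wrap case:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            uint32_t curr = 5, dirty = 0xfffffffdu;     /* curr has wrapped */
            uint32_t ring_mask = 256 - 1;

            assert((uint32_t)(curr - dirty) == 8);      /* 8 in flight */
            assert((dirty & ring_mask) == 0xfd);        /* slot index 253 */
            return 0;
    }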
852 struct net_device *ndev = ag->ndev; in ag71xx_dma_wait_stop()
856 u32 rx, tx; in ag71xx_dma_wait_stop() local
861 tx = ag71xx_rr(ag, AG71XX_REG_TX_CTRL) & TX_CTRL_TXE; in ag71xx_dma_wait_stop()
862 if (!rx && !tx) in ag71xx_dma_wait_stop()
871 struct net_device *ndev = ag->ndev; in ag71xx_dma_reset()
875 /* stop RX and TX */ in ag71xx_dma_reset()
879 /* give the hardware some time to really stop all rx/tx activity in ag71xx_dma_reset()
885 ag71xx_wr(ag, AG71XX_REG_TX_DESC, ag->stop_desc_dma); in ag71xx_dma_reset()
886 ag71xx_wr(ag, AG71XX_REG_RX_DESC, ag->stop_desc_dma); in ag71xx_dma_reset()
888 /* clear pending RX/TX interrupts */ in ag71xx_dma_reset()
909 netif_err(ag, hw, ndev, "unable to clear DMA Tx status: %08x\n", in ag71xx_dma_reset()
917 /* setup MAC configuration registers */ in ag71xx_hw_setup()
928 ag71xx_wr(ag, AG71XX_REG_FIFO_CFG1, ag->fifodata[0]); in ag71xx_hw_setup()
929 ag71xx_wr(ag, AG71XX_REG_FIFO_CFG2, ag->fifodata[1]); in ag71xx_hw_setup()
939 static void ag71xx_hw_set_macaddr(struct ag71xx *ag, unsigned char *mac) in ag71xx_hw_set_macaddr() argument
943 t = (((u32)mac[5]) << 24) | (((u32)mac[4]) << 16) in ag71xx_hw_set_macaddr()
944 | (((u32)mac[3]) << 8) | ((u32)mac[2]); in ag71xx_hw_set_macaddr()
948 t = (((u32)mac[1]) << 24) | (((u32)mac[0]) << 16); in ag71xx_hw_set_macaddr()
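A standalone worked example of the byte packing done by ag71xx_hw_set_macaddr() above, for the address 00:11:22:33:44:55:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            const uint8_t mac[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
            uint32_t t1, t2;

            t1 = ((uint32_t)mac[5] << 24) | ((uint32_t)mac[4] << 16)
               | ((uint32_t)mac[3] << 8) | mac[2];
            t2 = ((uint32_t)mac[1] << 24) | ((uint32_t)mac[0] << 16);

            assert(t1 == 0x55443322);   /* first register word */
            assert(t2 == 0x11000000);   /* second register word */
            return 0;
    }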
954 struct net_device *dev = ag->ndev; in ag71xx_fast_reset()
965 reset_control_assert(ag->mac_reset); in ag71xx_fast_reset()
967 reset_control_deassert(ag->mac_reset); in ag71xx_fast_reset()
972 ag->tx_ring.curr = 0; in ag71xx_fast_reset()
973 ag->tx_ring.dirty = 0; in ag71xx_fast_reset()
974 netdev_reset_queue(ag->ndev); in ag71xx_fast_reset()
978 ag71xx_max_frame_len(ag->ndev->mtu)); in ag71xx_fast_reset()
981 ag71xx_wr(ag, AG71XX_REG_TX_DESC, ag->tx_ring.descs_dma); in ag71xx_fast_reset()
984 ag71xx_hw_set_macaddr(ag, dev->dev_addr); in ag71xx_fast_reset()
995 netif_wake_queue(ag->ndev); in ag71xx_hw_start()
1001 struct ag71xx *ag = netdev_priv(to_net_dev(config->dev)); in ag71xx_mac_config()
1009 if (ag->tx_ring.desc_split) { in ag71xx_mac_config()
1010 ag->fifodata[2] &= 0xffff; in ag71xx_mac_config()
1011 ag->fifodata[2] |= ((2048 - ag->tx_ring.desc_split) / 4) << 16; in ag71xx_mac_config()
1014 ag71xx_wr(ag, AG71XX_REG_FIFO_CFG3, ag->fifodata[2]); in ag71xx_mac_config()
1021 struct ag71xx *ag = netdev_priv(to_net_dev(config->dev)); in ag71xx_mac_validate()
1024 switch (state->interface) { in ag71xx_mac_validate()
1028 if ((ag71xx_is(ag, AR9330) && ag->mac_idx == 0) || in ag71xx_mac_validate()
1031 (ag71xx_is(ag, QCA9550) && ag->mac_idx == 1)) in ag71xx_mac_validate()
1035 if ((ag71xx_is(ag, AR9330) && ag->mac_idx == 1) || in ag71xx_mac_validate()
1036 (ag71xx_is(ag, AR9340) && ag->mac_idx == 1) || in ag71xx_mac_validate()
1037 (ag71xx_is(ag, QCA9530) && ag->mac_idx == 1)) in ag71xx_mac_validate()
1041 if (ag71xx_is(ag, QCA9550) && ag->mac_idx == 0) in ag71xx_mac_validate()
1045 if (ag71xx_is(ag, AR9340) && ag->mac_idx == 0) in ag71xx_mac_validate()
1049 if ((ag71xx_is(ag, AR9340) && ag->mac_idx == 0) || in ag71xx_mac_validate()
1050 (ag71xx_is(ag, QCA9550) && ag->mac_idx == 1)) in ag71xx_mac_validate()
1067 if (state->interface == PHY_INTERFACE_MODE_NA || in ag71xx_mac_validate()
1068 state->interface == PHY_INTERFACE_MODE_SGMII || in ag71xx_mac_validate()
1069 state->interface == PHY_INTERFACE_MODE_RGMII || in ag71xx_mac_validate()
1070 state->interface == PHY_INTERFACE_MODE_GMII) { in ag71xx_mac_validate()
1077 bitmap_and(state->advertising, state->advertising, mask, in ag71xx_mac_validate()
1088 state->link = 0; in ag71xx_mac_pcs_get_state()
1099 struct ag71xx *ag = netdev_priv(to_net_dev(config->dev)); in ag71xx_mac_link_down()
1110 struct ag71xx *ag = netdev_priv(to_net_dev(config->dev)); in ag71xx_mac_link_up()
1170 ag->phylink_config.dev = &ag->ndev->dev; in ag71xx_phylink_setup()
1171 ag->phylink_config.type = PHYLINK_NETDEV; in ag71xx_phylink_setup()
1173 phylink = phylink_create(&ag->phylink_config, ag->pdev->dev.fwnode, in ag71xx_phylink_setup()
1174 ag->phy_if_mode, &ag71xx_phylink_mac_ops); in ag71xx_phylink_setup()
1178 ag->phylink = phylink; in ag71xx_phylink_setup()
1184 struct ag71xx_ring *ring = &ag->tx_ring; in ag71xx_ring_tx_clean()
1185 int ring_mask = BIT(ring->order) - 1; in ag71xx_ring_tx_clean()
1187 struct net_device *ndev = ag->ndev; in ag71xx_ring_tx_clean()
1189 while (ring->curr != ring->dirty) { in ag71xx_ring_tx_clean()
1191 u32 i = ring->dirty & ring_mask; in ag71xx_ring_tx_clean()
1195 desc->ctrl = 0; in ag71xx_ring_tx_clean()
1196 ndev->stats.tx_errors++; in ag71xx_ring_tx_clean()
1199 if (ring->buf[i].tx.skb) { in ag71xx_ring_tx_clean()
1200 bytes_compl += ring->buf[i].tx.len; in ag71xx_ring_tx_clean()
1202 dev_kfree_skb_any(ring->buf[i].tx.skb); in ag71xx_ring_tx_clean()
1204 ring->buf[i].tx.skb = NULL; in ag71xx_ring_tx_clean()
1205 ring->dirty++; in ag71xx_ring_tx_clean()
1216 struct ag71xx_ring *ring = &ag->tx_ring; in ag71xx_ring_tx_init()
1217 int ring_size = BIT(ring->order); in ag71xx_ring_tx_init()
1218 int ring_mask = ring_size - 1; in ag71xx_ring_tx_init()
1224 desc->next = (u32)(ring->descs_dma + in ag71xx_ring_tx_init()
1227 desc->ctrl = DESC_EMPTY; in ag71xx_ring_tx_init()
1228 ring->buf[i].tx.skb = NULL; in ag71xx_ring_tx_init()
1234 ring->curr = 0; in ag71xx_ring_tx_init()
1235 ring->dirty = 0; in ag71xx_ring_tx_init()
1236 netdev_reset_queue(ag->ndev); in ag71xx_ring_tx_init()
1241 struct ag71xx_ring *ring = &ag->rx_ring; in ag71xx_ring_rx_clean()
1242 int ring_size = BIT(ring->order); in ag71xx_ring_rx_clean()
1245 if (!ring->buf) in ag71xx_ring_rx_clean()
1249 if (ring->buf[i].rx.rx_buf) { in ag71xx_ring_rx_clean()
1250 dma_unmap_single(&ag->pdev->dev, in ag71xx_ring_rx_clean()
1251 ring->buf[i].rx.dma_addr, in ag71xx_ring_rx_clean()
1252 ag->rx_buf_size, DMA_FROM_DEVICE); in ag71xx_ring_rx_clean()
1253 skb_free_frag(ring->buf[i].rx.rx_buf); in ag71xx_ring_rx_clean()
1259 return ag->rx_buf_size + in ag71xx_buffer_size()
1267 struct ag71xx_ring *ring = &ag->rx_ring; in ag71xx_fill_rx_buf()
1271 desc = ag71xx_ring_desc(ring, buf - &ring->buf[0]); in ag71xx_fill_rx_buf()
1277 buf->rx.rx_buf = data; in ag71xx_fill_rx_buf()
1278 buf->rx.dma_addr = dma_map_single(&ag->pdev->dev, data, ag->rx_buf_size, in ag71xx_fill_rx_buf()
1280 desc->data = (u32)buf->rx.dma_addr + offset; in ag71xx_fill_rx_buf()
1286 struct ag71xx_ring *ring = &ag->rx_ring; in ag71xx_ring_rx_init()
1287 struct net_device *ndev = ag->ndev; in ag71xx_ring_rx_init()
1288 int ring_mask = BIT(ring->order) - 1; in ag71xx_ring_rx_init()
1289 int ring_size = BIT(ring->order); in ag71xx_ring_rx_init()
1297 desc->next = (u32)(ring->descs_dma + in ag71xx_ring_rx_init()
1301 desc, desc->next); in ag71xx_ring_rx_init()
1307 if (!ag71xx_fill_rx_buf(ag, &ring->buf[i], ag->rx_buf_offset, in ag71xx_ring_rx_init()
1309 ret = -ENOMEM; in ag71xx_ring_rx_init()
1313 desc->ctrl = DESC_EMPTY; in ag71xx_ring_rx_init()
1319 ring->curr = 0; in ag71xx_ring_rx_init()
1320 ring->dirty = 0; in ag71xx_ring_rx_init()
1327 struct ag71xx_ring *ring = &ag->rx_ring; in ag71xx_ring_rx_refill()
1328 int ring_mask = BIT(ring->order) - 1; in ag71xx_ring_rx_refill()
1329 int offset = ag->rx_buf_offset; in ag71xx_ring_rx_refill()
1333 for (; ring->curr - ring->dirty > 0; ring->dirty++) { in ag71xx_ring_rx_refill()
1337 i = ring->dirty & ring_mask; in ag71xx_ring_rx_refill()
1340 if (!ring->buf[i].rx.rx_buf && in ag71xx_ring_rx_refill()
1341 !ag71xx_fill_rx_buf(ag, &ring->buf[i], offset, in ag71xx_ring_rx_refill()
1345 desc->ctrl = DESC_EMPTY; in ag71xx_ring_rx_refill()
1352 netif_dbg(ag, rx_status, ag->ndev, "%u rx descriptors refilled\n", in ag71xx_ring_rx_refill()
1360 struct ag71xx_ring *tx = &ag->tx_ring; in ag71xx_rings_init() local
1361 struct ag71xx_ring *rx = &ag->rx_ring; in ag71xx_rings_init()
1364 ring_size = BIT(tx->order) + BIT(rx->order); in ag71xx_rings_init()
1365 tx_size = BIT(tx->order); in ag71xx_rings_init()
1367 tx->buf = kcalloc(ring_size, sizeof(*tx->buf), GFP_KERNEL); in ag71xx_rings_init()
1368 if (!tx->buf) in ag71xx_rings_init()
1369 return -ENOMEM; in ag71xx_rings_init()
1371 tx->descs_cpu = dma_alloc_coherent(&ag->pdev->dev, in ag71xx_rings_init()
1373 &tx->descs_dma, GFP_KERNEL); in ag71xx_rings_init()
1374 if (!tx->descs_cpu) { in ag71xx_rings_init()
1375 kfree(tx->buf); in ag71xx_rings_init()
1376 tx->buf = NULL; in ag71xx_rings_init()
1377 return -ENOMEM; in ag71xx_rings_init()
1380 rx->buf = &tx->buf[tx_size]; in ag71xx_rings_init()
1381 rx->descs_cpu = ((void *)tx->descs_cpu) + tx_size * AG71XX_DESC_SIZE; in ag71xx_rings_init()
1382 rx->descs_dma = tx->descs_dma + tx_size * AG71XX_DESC_SIZE; in ag71xx_rings_init()
1390 struct ag71xx_ring *tx = &ag->tx_ring; in ag71xx_rings_free() local
1391 struct ag71xx_ring *rx = &ag->rx_ring; in ag71xx_rings_free()
1394 ring_size = BIT(tx->order) + BIT(rx->order); in ag71xx_rings_free()
1396 if (tx->descs_cpu) in ag71xx_rings_free()
1397 dma_free_coherent(&ag->pdev->dev, ring_size * AG71XX_DESC_SIZE, in ag71xx_rings_free()
1398 tx->descs_cpu, tx->descs_dma); in ag71xx_rings_free()
1400 kfree(tx->buf); in ag71xx_rings_free()
1402 tx->descs_cpu = NULL; in ag71xx_rings_free()
1403 rx->descs_cpu = NULL; in ag71xx_rings_free()
1404 tx->buf = NULL; in ag71xx_rings_free()
1405 rx->buf = NULL; in ag71xx_rings_free()
1414 netdev_reset_queue(ag->ndev); in ag71xx_rings_cleanup()
1424 reset_control_assert(ag->mac_reset); in ag71xx_hw_init()
1426 reset_control_deassert(ag->mac_reset); in ag71xx_hw_init()
1442 napi_enable(&ag->napi); in ag71xx_hw_enable()
1443 ag71xx_wr(ag, AG71XX_REG_TX_DESC, ag->tx_ring.descs_dma); in ag71xx_hw_enable()
1444 ag71xx_wr(ag, AG71XX_REG_RX_DESC, ag->rx_ring.descs_dma); in ag71xx_hw_enable()
1445 netif_start_queue(ag->ndev); in ag71xx_hw_enable()
1452 netif_stop_queue(ag->ndev); in ag71xx_hw_disable()
1457 napi_disable(&ag->napi); in ag71xx_hw_disable()
1458 del_timer_sync(&ag->oom_timer); in ag71xx_hw_disable()
1469 ret = phylink_of_phy_connect(ag->phylink, ag->pdev->dev.of_node, 0); in ag71xx_open()
1476 max_frame_len = ag71xx_max_frame_len(ndev->mtu); in ag71xx_open()
1477 ag->rx_buf_size = in ag71xx_open()
1482 ag71xx_hw_set_macaddr(ag, ndev->dev_addr); in ag71xx_open()
1488 phylink_start(ag->phylink); in ag71xx_open()
1501 phylink_stop(ag->phylink); in ag71xx_stop()
1502 phylink_disconnect_phy(ag->phylink); in ag71xx_stop()
1513 ring_mask = BIT(ring->order) - 1; in ag71xx_fill_dma_desc()
1515 split = ring->desc_split; in ag71xx_fill_dma_desc()
1523 i = (ring->curr + ndesc) & ring_mask; in ag71xx_fill_dma_desc()
1527 return -1; in ag71xx_fill_dma_desc()
1532 /* TX will hang if DMA transfers <= 4 bytes, in ag71xx_fill_dma_desc()
1536 cur_len -= 4; in ag71xx_fill_dma_desc()
1539 desc->data = addr; in ag71xx_fill_dma_desc()
1541 len -= cur_len; in ag71xx_fill_dma_desc()
1546 /* prevent early tx attempt of this descriptor */ in ag71xx_fill_dma_desc()
1550 desc->ctrl = cur_len; in ag71xx_fill_dma_desc()
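The split logic guards the documented hardware quirk that DMA transfers of 4 bytes or less hang the Tx engine: when splitting would leave a tiny tail, 4 bytes are moved from the current chunk into the next one (the `cur_len -= 4` above). A standalone worked check of that arithmetic; the guard condition and the split value 2048 (from the fifodata math in ag71xx_mac_config()) are reconstructions:

    #include <assert.h>

    int main(void)
    {
            int split = 2048, len = 2052, cur_len = len;

            if (cur_len > split) {
                    cur_len = split;
                    if (len <= split + 4)
                            cur_len -= 4;   /* keep the tail segment > 4 bytes */
            }

            assert(cur_len == 2044);
            assert(len - cur_len == 8);     /* tail is 8 bytes, not a fatal 4 */
            return 0;
    }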
1566 ring = &ag->tx_ring; in ag71xx_hard_start_xmit()
1567 ring_mask = BIT(ring->order) - 1; in ag71xx_hard_start_xmit()
1568 ring_size = BIT(ring->order); in ag71xx_hard_start_xmit()
1570 if (skb->len <= 4) { in ag71xx_hard_start_xmit()
1575 dma_addr = dma_map_single(&ag->pdev->dev, skb->data, skb->len, in ag71xx_hard_start_xmit()
1578 i = ring->curr & ring_mask; in ag71xx_hard_start_xmit()
1583 skb->len & ag->dcfg->desc_pktlen_mask); in ag71xx_hard_start_xmit()
1587 i = (ring->curr + n - 1) & ring_mask; in ag71xx_hard_start_xmit()
1588 ring->buf[i].tx.len = skb->len; in ag71xx_hard_start_xmit()
1589 ring->buf[i].tx.skb = skb; in ag71xx_hard_start_xmit()
1591 netdev_sent_queue(ndev, skb->len); in ag71xx_hard_start_xmit()
1595 desc->ctrl &= ~DESC_EMPTY; in ag71xx_hard_start_xmit()
1596 ring->curr += n; in ag71xx_hard_start_xmit()
1602 if (ring->desc_split) in ag71xx_hard_start_xmit()
1605 if (ring->curr - ring->dirty >= ring_size - ring_min) { in ag71xx_hard_start_xmit()
1606 netif_dbg(ag, tx_err, ndev, "tx queue full\n"); in ag71xx_hard_start_xmit()
1610 netif_dbg(ag, tx_queued, ndev, "packet injected into TX queue\n"); in ag71xx_hard_start_xmit()
1612 /* enable TX engine */ in ag71xx_hard_start_xmit()
1618 dma_unmap_single(&ag->pdev->dev, dma_addr, skb->len, DMA_TO_DEVICE); in ag71xx_hard_start_xmit()
1621 ndev->stats.tx_dropped++; in ag71xx_hard_start_xmit()
1631 napi_schedule(&ag->napi); in ag71xx_oom_timer_handler()
1638 netif_err(ag, tx_err, ndev, "tx timeout\n"); in ag71xx_tx_timeout()
1640 schedule_delayed_work(&ag->restart_work, 1); in ag71xx_tx_timeout()
1652 phylink_stop(ag->phylink); in ag71xx_restart_work_func()
1653 phylink_start(ag->phylink); in ag71xx_restart_work_func()
1660 struct net_device *ndev = ag->ndev; in ag71xx_rx_packets()
1667 ring = &ag->rx_ring; in ag71xx_rx_packets()
1668 pktlen_mask = ag->dcfg->desc_pktlen_mask; in ag71xx_rx_packets()
1669 offset = ag->rx_buf_offset; in ag71xx_rx_packets()
1670 ring_mask = BIT(ring->order) - 1; in ag71xx_rx_packets()
1671 ring_size = BIT(ring->order); in ag71xx_rx_packets()
1674 limit, ring->curr, ring->dirty); in ag71xx_rx_packets()
1679 unsigned int i = ring->curr & ring_mask; in ag71xx_rx_packets()
1687 if ((ring->dirty + ring_size) == ring->curr) { in ag71xx_rx_packets()
1694 pktlen = desc->ctrl & pktlen_mask; in ag71xx_rx_packets()
1695 pktlen -= ETH_FCS_LEN; in ag71xx_rx_packets()
1697 dma_unmap_single(&ag->pdev->dev, ring->buf[i].rx.dma_addr, in ag71xx_rx_packets()
1698 ag->rx_buf_size, DMA_FROM_DEVICE); in ag71xx_rx_packets()
1700 ndev->stats.rx_packets++; in ag71xx_rx_packets()
1701 ndev->stats.rx_bytes += pktlen; in ag71xx_rx_packets()
1703 skb = build_skb(ring->buf[i].rx.rx_buf, ag71xx_buffer_size(ag)); in ag71xx_rx_packets()
1705 skb_free_frag(ring->buf[i].rx.rx_buf); in ag71xx_rx_packets()
1713 ndev->stats.rx_dropped++; in ag71xx_rx_packets()
1716 skb->dev = ndev; in ag71xx_rx_packets()
1717 skb->ip_summed = CHECKSUM_NONE; in ag71xx_rx_packets()
1718 list_add_tail(&skb->list, &rx_list); in ag71xx_rx_packets()
1722 ring->buf[i].rx.rx_buf = NULL; in ag71xx_rx_packets()
1725 ring->curr++; in ag71xx_rx_packets()
1731 skb->protocol = eth_type_trans(skb, ndev); in ag71xx_rx_packets()
1735 ring->curr, ring->dirty, done); in ag71xx_rx_packets()
1743 struct ag71xx_ring *rx_ring = &ag->rx_ring; in ag71xx_poll()
1744 int rx_ring_size = BIT(rx_ring->order); in ag71xx_poll()
1745 struct net_device *ndev = ag->ndev; in ag71xx_poll()
1754 if (!rx_ring->buf[rx_ring->dirty % rx_ring_size].rx.rx_buf) in ag71xx_poll()
1760 ndev->stats.rx_fifo_errors++; in ag71xx_poll()
1774 netif_dbg(ag, rx_status, ndev, "disable polling mode, rx=%d, tx=%d, limit=%d\n", in ag71xx_poll()
1785 netif_dbg(ag, rx_status, ndev, "stay in polling mode, rx=%d, tx=%d, limit=%d\n", in ag71xx_poll()
1792 mod_timer(&ag->oom_timer, jiffies + AG71XX_OOM_REFILL); in ag71xx_poll()
1812 netif_err(ag, intr, ndev, "TX BUS error\n"); in ag71xx_interrupt()
1823 napi_schedule(&ag->napi); in ag71xx_interrupt()
1833 ndev->mtu = new_mtu; in ag71xx_change_mtu()
1835 ag71xx_max_frame_len(ndev->mtu)); in ag71xx_change_mtu()
1857 struct device_node *np = pdev->dev.of_node; in ag71xx_probe()
1866 return -ENODEV; in ag71xx_probe()
1868 ndev = devm_alloc_etherdev(&pdev->dev, sizeof(*ag)); in ag71xx_probe()
1870 return -ENOMEM; in ag71xx_probe()
1874 return -EINVAL; in ag71xx_probe()
1876 dcfg = of_device_get_match_data(&pdev->dev); in ag71xx_probe()
1878 return -EINVAL; in ag71xx_probe()
1881 ag->mac_idx = -1; in ag71xx_probe()
1883 if (ar71xx_addr_ar7100[i] == res->start) in ag71xx_probe()
1884 ag->mac_idx = i; in ag71xx_probe()
1887 if (ag->mac_idx < 0) { in ag71xx_probe()
1888 netif_err(ag, probe, ndev, "unknown mac idx\n"); in ag71xx_probe()
1889 return -EINVAL; in ag71xx_probe()
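The probe code maps the MMIO base address to a MAC index through a small lookup table; a hedged reconstruction (the two AR7100-era base addresses are an assumption):

    static const u32 ar71xx_addr_ar7100[] = {
            0x19000000, 0x1a000000,
    };

    /* in probe: res->start is matched against these entries, and the
     * matching slot becomes ag->mac_idx (i.e. eth0 or eth1).
     */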
1892 ag->clk_eth = devm_clk_get(&pdev->dev, "eth"); in ag71xx_probe()
1893 if (IS_ERR(ag->clk_eth)) { in ag71xx_probe()
1894 netif_err(ag, probe, ndev, "Failed to get eth clk.\n"); in ag71xx_probe()
1895 return PTR_ERR(ag->clk_eth); in ag71xx_probe()
1898 SET_NETDEV_DEV(ndev, &pdev->dev); in ag71xx_probe()
1900 ag->pdev = pdev; in ag71xx_probe()
1901 ag->ndev = ndev; in ag71xx_probe()
1902 ag->dcfg = dcfg; in ag71xx_probe()
1903 ag->msg_enable = netif_msg_init(-1, AG71XX_DEFAULT_MSG_ENABLE); in ag71xx_probe()
1904 memcpy(ag->fifodata, dcfg->fifodata, sizeof(ag->fifodata)); in ag71xx_probe()
1906 ag->mac_reset = devm_reset_control_get(&pdev->dev, "mac"); in ag71xx_probe()
1907 if (IS_ERR(ag->mac_reset)) { in ag71xx_probe()
1908 netif_err(ag, probe, ndev, "missing mac reset\n"); in ag71xx_probe()
1909 err = PTR_ERR(ag->mac_reset); in ag71xx_probe()
1913 ag->mac_base = devm_ioremap(&pdev->dev, res->start, resource_size(res)); in ag71xx_probe()
1914 if (!ag->mac_base) { in ag71xx_probe()
1915 err = -ENOMEM; in ag71xx_probe()
1919 ndev->irq = platform_get_irq(pdev, 0); in ag71xx_probe()
1920 err = devm_request_irq(&pdev->dev, ndev->irq, ag71xx_interrupt, in ag71xx_probe()
1921 0x0, dev_name(&pdev->dev), ndev); in ag71xx_probe()
1924 ndev->irq); in ag71xx_probe()
1928 ndev->netdev_ops = &ag71xx_netdev_ops; in ag71xx_probe()
1929 ndev->ethtool_ops = &ag71xx_ethtool_ops; in ag71xx_probe()
1931 INIT_DELAYED_WORK(&ag->restart_work, ag71xx_restart_work_func); in ag71xx_probe()
1932 timer_setup(&ag->oom_timer, ag71xx_oom_timer_handler, 0); in ag71xx_probe()
1935 ag->rx_ring.order = ag71xx_ring_size_order(AG71XX_RX_RING_SIZE_DEFAULT); in ag71xx_probe()
1937 ndev->min_mtu = 68; in ag71xx_probe()
1938 ndev->max_mtu = dcfg->max_frame_len - ag71xx_max_frame_len(0); in ag71xx_probe()
1940 ag->rx_buf_offset = NET_SKB_PAD; in ag71xx_probe()
1942 ag->rx_buf_offset += NET_IP_ALIGN; in ag71xx_probe()
1945 ag->tx_ring.desc_split = AG71XX_TX_RING_SPLIT; in ag71xx_probe()
1948 ag->tx_ring.order = ag71xx_ring_size_order(tx_size); in ag71xx_probe()
1950 ag->stop_desc = dmam_alloc_coherent(&pdev->dev, in ag71xx_probe()
1952 &ag->stop_desc_dma, GFP_KERNEL); in ag71xx_probe()
1953 if (!ag->stop_desc) { in ag71xx_probe()
1954 err = -ENOMEM; in ag71xx_probe()
1958 ag->stop_desc->data = 0; in ag71xx_probe()
1959 ag->stop_desc->ctrl = 0; in ag71xx_probe()
1960 ag->stop_desc->next = (u32)ag->stop_desc_dma; in ag71xx_probe()
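    /* Hedged reading: the stop descriptor is self-linked (its `next` is its
     * own DMA address), so when ag71xx_dma_reset() points TX_DESC/RX_DESC
     * at it, the DMA engine idles on one empty descriptor instead of
     * walking stale ring memory.
     */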
1964 memcpy(ndev->dev_addr, mac_addr, ETH_ALEN); in ag71xx_probe()
1965 if (IS_ERR(mac_addr) || !is_valid_ether_addr(ndev->dev_addr)) { in ag71xx_probe()
1966 netif_err(ag, probe, ndev, "invalid MAC address, using random address\n"); in ag71xx_probe()
1967 eth_random_addr(ndev->dev_addr); in ag71xx_probe()
1970 err = of_get_phy_mode(np, &ag->phy_if_mode); in ag71xx_probe()
1972 netif_err(ag, probe, ndev, "missing phy-mode property in DT\n"); in ag71xx_probe()
1976 netif_napi_add(ndev, &ag->napi, ag71xx_poll, AG71XX_NAPI_WEIGHT); in ag71xx_probe()
1978 err = clk_prepare_enable(ag->clk_eth); in ag71xx_probe()
1980 netif_err(ag, probe, ndev, "Failed to enable eth clk.\n"); in ag71xx_probe()
2008 (unsigned long)ag->mac_base, ndev->irq, in ag71xx_probe()
2009 phy_modes(ag->phy_if_mode)); in ag71xx_probe()
2016 clk_disable_unprepare(ag->clk_eth); in ag71xx_probe()
2033 clk_disable_unprepare(ag->clk_eth); in ag71xx_remove()
2055 .desc_pktlen_mask = SZ_4K - 1,
2063 .desc_pktlen_mask = SZ_4K - 1,
2071 .desc_pktlen_mask = SZ_4K - 1,
2079 .desc_pktlen_mask = SZ_4K - 1,
2086 .max_frame_len = SZ_16K - 1,
2087 .desc_pktlen_mask = SZ_16K - 1,
2094 .max_frame_len = SZ_16K - 1,
2095 .desc_pktlen_mask = SZ_16K - 1,
2103 .desc_pktlen_mask = SZ_16K - 1,
2108 { .compatible = "qca,ar7100-eth", .data = &ag71xx_dcfg_ar7100 },
2109 { .compatible = "qca,ar7240-eth", .data = &ag71xx_dcfg_ar7240 },
2110 { .compatible = "qca,ar7241-eth", .data = &ag71xx_dcfg_ar7240 },
2111 { .compatible = "qca,ar7242-eth", .data = &ag71xx_dcfg_ar7240 },
2112 { .compatible = "qca,ar9130-eth", .data = &ag71xx_dcfg_ar9130 },
2113 { .compatible = "qca,ar9330-eth", .data = &ag71xx_dcfg_ar9330 },
2114 { .compatible = "qca,ar9340-eth", .data = &ag71xx_dcfg_ar9340 },
2115 { .compatible = "qca,qca9530-eth", .data = &ag71xx_dcfg_qca9530 },
2116 { .compatible = "qca,qca9550-eth", .data = &ag71xx_dcfg_qca9550 },
2117 { .compatible = "qca,qca9560-eth", .data = &ag71xx_dcfg_qca9550 },
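The match table presumably feeds the standard platform-driver boilerplate that closes the file; a hedged reconstruction of that tail, assuming the table is named ag71xx_match:

    MODULE_DEVICE_TABLE(of, ag71xx_match);

    static struct platform_driver ag71xx_driver = {
            .probe  = ag71xx_probe,
            .remove = ag71xx_remove,
            .driver = {
                    .name           = "ag71xx",
                    .of_match_table = ag71xx_match,
            },
    };

    module_platform_driver(ag71xx_driver);
    MODULE_LICENSE("GPL v2");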