Lines matching defs:priv (identifier search results: each entry is the source line number followed by the matching line of code)
25 static u32 owl_emac_reg_read(struct owl_emac_priv *priv, u32 reg)
27 return readl(priv->base + reg);
30 static void owl_emac_reg_write(struct owl_emac_priv *priv, u32 reg, u32 data)
32 writel(data, priv->base + reg);
35 static u32 owl_emac_reg_update(struct owl_emac_priv *priv,
40 data = owl_emac_reg_read(priv, reg);
46 owl_emac_reg_write(priv, reg, data);
51 static void owl_emac_reg_set(struct owl_emac_priv *priv, u32 reg, u32 bits)
53 owl_emac_reg_update(priv, reg, bits, bits);
56 static void owl_emac_reg_clear(struct owl_emac_priv *priv, u32 reg, u32 bits)
58 owl_emac_reg_update(priv, reg, bits, 0);
61 static struct device *owl_emac_get_dev(struct owl_emac_priv *priv)
63 return priv->netdev->dev.parent;
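These first hits are the driver's thin MMIO layer: readl()/writel() wrappers, a read-modify-write primitive, set/clear conveniences built on it, and a shortcut to the parent struct device for DMA calls. A minimal sketch of the update helper, reconstructed from the fragments at lines 35-46; the mask/set parameter names are assumptions, and the returned old-bits convention is inferred from the dma_cmd_stop()/dma_cmd_set() save-and-restore usage further down:

	static u32 owl_emac_reg_update(struct owl_emac_priv *priv,
				       u32 reg, u32 mask, u32 set)
	{
		u32 data, old;

		data = owl_emac_reg_read(priv, reg);
		old = data & mask;	/* previous state of the field */

		data &= ~mask;
		data |= set & mask;

		owl_emac_reg_write(priv, reg, data);

		return old;
	}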
66 static void owl_emac_irq_enable(struct owl_emac_priv *priv)
73 owl_emac_reg_write(priv, OWL_EMAC_REG_MAC_CSR7,
79 static void owl_emac_irq_disable(struct owl_emac_priv *priv)
88 owl_emac_reg_write(priv, OWL_EMAC_REG_MAC_CSR7,
92 static u32 owl_emac_irq_status(struct owl_emac_priv *priv)
94 return owl_emac_reg_read(priv, OWL_EMAC_REG_MAC_CSR5);
97 static u32 owl_emac_irq_clear(struct owl_emac_priv *priv)
99 u32 val = owl_emac_irq_status(priv);
101 owl_emac_reg_write(priv, OWL_EMAC_REG_MAC_CSR5, val);
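CSR7 holds the interrupt-enable mask and CSR5 the status; the clear helper works because CSR5 is write-one-to-clear, so writing the freshly read status back acknowledges exactly the events that were sampled, and the returned value tells the caller what fired. A hedged sketch of the enable/disable pair, with OWL_EMAC_IRQ_MASK standing in for the real enable-bit macros written at line 73:

	static void owl_emac_irq_enable(struct owl_emac_priv *priv)
	{
		/* OWL_EMAC_IRQ_MASK is a placeholder, not the driver's macro. */
		owl_emac_reg_write(priv, OWL_EMAC_REG_MAC_CSR7, OWL_EMAC_IRQ_MASK);
	}

	static void owl_emac_irq_disable(struct owl_emac_priv *priv)
	{
		/* Simplified: 0 masks every source; the real write at line 88
		 * may keep reserved bits set. */
		owl_emac_reg_write(priv, OWL_EMAC_REG_MAC_CSR7, 0);
	}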
106 static dma_addr_t owl_emac_dma_map_rx(struct owl_emac_priv *priv,
109 struct device *dev = owl_emac_get_dev(priv);
116 static void owl_emac_dma_unmap_rx(struct owl_emac_priv *priv,
119 struct device *dev = owl_emac_get_dev(priv);
124 static dma_addr_t owl_emac_dma_map_tx(struct owl_emac_priv *priv,
127 struct device *dev = owl_emac_get_dev(priv);
132 static void owl_emac_dma_unmap_tx(struct owl_emac_priv *priv,
135 struct device *dev = owl_emac_get_dev(priv);
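The four DMA helpers wrap streaming mappings of socket buffers, with the direction fixed per path: device-writable for RX fills, device-readable for TX. A sketch assuming dma_map_single() over the skb data area; the exact length arguments are assumptions:

	static dma_addr_t owl_emac_dma_map_rx(struct owl_emac_priv *priv,
					      struct sk_buff *skb)
	{
		struct device *dev = owl_emac_get_dev(priv);

		/* Map the writable tail of the skb for the MAC to fill. */
		return dma_map_single(dev, skb_tail_pointer(skb),
				      skb_tailroom(skb), DMA_FROM_DEVICE);
	}

	static void owl_emac_dma_unmap_tx(struct owl_emac_priv *priv,
					  struct sk_buff *skb, dma_addr_t dma_addr)
	{
		struct device *dev = owl_emac_get_dev(priv);

		dma_unmap_single(dev, dma_addr, skb->len, DMA_TO_DEVICE);
	}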
179 static int owl_emac_ring_prepare_rx(struct owl_emac_priv *priv)
181 struct owl_emac_ring *ring = &priv->rx_ring;
182 struct device *dev = owl_emac_get_dev(priv);
183 struct net_device *netdev = priv->netdev;
194 dma_addr = owl_emac_dma_map_rx(priv, skb);
218 static void owl_emac_ring_prepare_tx(struct owl_emac_priv *priv)
220 struct owl_emac_ring *ring = &priv->tx_ring;
241 static void owl_emac_ring_unprepare_rx(struct owl_emac_priv *priv)
243 struct owl_emac_ring *ring = &priv->rx_ring;
252 owl_emac_dma_unmap_rx(priv, ring->skbs[i], ring->skbs_dma[i]);
260 static void owl_emac_ring_unprepare_tx(struct owl_emac_priv *priv)
262 struct owl_emac_ring *ring = &priv->tx_ring;
271 owl_emac_dma_unmap_tx(priv, ring->skbs[i], ring->skbs_dma[i]);
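Ring prepare allocates one skb per RX descriptor and maps it (the call at line 194); unprepare walks the skbs[]/skbs_dma[] arrays and releases whatever is still held. A sketch of the RX refill loop, under assumed ring->size and descriptor-ownership details, with the alloc helper name a guess:

	for (i = 0; i < ring->size; i++) {
		skb = owl_emac_alloc_skb(netdev);	/* helper name assumed */
		if (!skb)
			return -ENOMEM;

		dma_addr = owl_emac_dma_map_rx(priv, skb);
		if (dma_mapping_error(dev, dma_addr)) {
			dev_kfree_skb(skb);
			return -ENOMEM;
		}

		ring->skbs[i] = skb;
		ring->skbs_dma[i] = dma_addr;
		/* ...then point descs[i] at dma_addr and hand the descriptor
		 * to the MAC by setting its OWN bit. */
	}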
303 static void owl_emac_dma_cmd_resume_rx(struct owl_emac_priv *priv)
305 owl_emac_reg_write(priv, OWL_EMAC_REG_MAC_CSR2,
309 static void owl_emac_dma_cmd_resume_tx(struct owl_emac_priv *priv)
311 owl_emac_reg_write(priv, OWL_EMAC_REG_MAC_CSR1,
315 static u32 owl_emac_dma_cmd_set_tx(struct owl_emac_priv *priv, u32 status)
317 return owl_emac_reg_update(priv, OWL_EMAC_REG_MAC_CSR6,
321 static u32 owl_emac_dma_cmd_start_tx(struct owl_emac_priv *priv)
323 return owl_emac_dma_cmd_set_tx(priv, ~0);
326 static u32 owl_emac_dma_cmd_set(struct owl_emac_priv *priv, u32 status)
328 return owl_emac_reg_update(priv, OWL_EMAC_REG_MAC_CSR6,
332 static u32 owl_emac_dma_cmd_start(struct owl_emac_priv *priv)
334 return owl_emac_dma_cmd_set(priv, ~0);
337 static u32 owl_emac_dma_cmd_stop(struct owl_emac_priv *priv)
339 return owl_emac_dma_cmd_set(priv, 0);
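The CSR1/CSR2 writes act as resume doorbells that prompt a suspended DMA engine to re-fetch descriptors, while the CSR6 helpers gate the RX/TX start bits. Because set() goes through reg_update(), it hands back the previous state, which is what makes the stop-then-restore idiom used throughout the driver possible. Sketch, with the ST/SR bit macro names assumed:

	static u32 owl_emac_dma_cmd_set(struct owl_emac_priv *priv, u32 status)
	{
		/* Assumed names for the TX/RX start bits updated at line 328. */
		return owl_emac_reg_update(priv, OWL_EMAC_REG_MAC_CSR6,
					   OWL_EMAC_BIT_MAC_CSR6_ST |
					   OWL_EMAC_BIT_MAC_CSR6_SR,
					   status);
	}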
344 struct owl_emac_priv *priv = netdev_priv(netdev);
352 owl_emac_reg_write(priv, OWL_EMAC_REG_MAC_CSR17, addr_high);
353 owl_emac_reg_write(priv, OWL_EMAC_REG_MAC_CSR16, addr_low);
356 static void owl_emac_update_link_state(struct owl_emac_priv *priv)
360 if (priv->pause) {
369 owl_emac_reg_write(priv, OWL_EMAC_REG_MAC_CSR20, val);
371 val = (priv->speed == SPEED_100) ? OWL_EMAC_VAL_MAC_CSR6_SPEED_100M :
375 if (priv->duplex == DUPLEX_FULL)
378 spin_lock_bh(&priv->lock);
381 status = owl_emac_dma_cmd_stop(priv);
384 owl_emac_reg_update(priv, OWL_EMAC_REG_MAC_CSR6,
389 owl_emac_dma_cmd_set(priv, status);
391 spin_unlock_bh(&priv->lock);
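Lines 378-391 show the driver's recurring pattern for rewriting CSR6 mode bits (speed, duplex, flow control) while traffic may be live: take priv->lock, stop both DMA engines while capturing their state, update the register, then restore only what was running. Condensed, with mode_mask/mode_val as stand-ins for the real macros:

	spin_lock_bh(&priv->lock);

	/* Pause RX/TX DMA; 'status' records which engines were running. */
	status = owl_emac_dma_cmd_stop(priv);

	/* Safe to rewrite the speed/duplex bits now (line 384). */
	owl_emac_reg_update(priv, OWL_EMAC_REG_MAC_CSR6, mode_mask, mode_val);

	/* Restart exactly the engines that were running before. */
	owl_emac_dma_cmd_set(priv, status);

	spin_unlock_bh(&priv->lock);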
396 struct owl_emac_priv *priv = netdev_priv(netdev);
401 if (!priv->link) {
402 priv->link = phydev->link;
406 if (priv->speed != phydev->speed) {
407 priv->speed = phydev->speed;
411 if (priv->duplex != phydev->duplex) {
412 priv->duplex = phydev->duplex;
416 if (priv->pause != phydev->pause) {
417 priv->pause = phydev->pause;
421 if (priv->link) {
422 priv->link = phydev->link;
429 owl_emac_update_link_state(priv);
431 if (netif_msg_link(priv))
439 struct owl_emac_priv *priv = netdev_priv(netdev);
442 owl_emac_irq_disable(priv);
443 napi_schedule(&priv->napi);
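Lines 439-443 are the hard IRQ handler reduced to the standard NAPI hand-off: mask the device's sources, then schedule the softirq poll, which re-enables them when done. A sketch; the handler name and the netif_running() guard are assumptions:

	static irqreturn_t owl_emac_handle_irq(int irq, void *data)
	{
		struct net_device *netdev = data;
		struct owl_emac_priv *priv = netdev_priv(netdev);

		if (netif_running(netdev)) {
			owl_emac_irq_disable(priv);
			napi_schedule(&priv->napi);
		}

		return IRQ_HANDLED;
	}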
462 owl_emac_setup_frame_prepare(struct owl_emac_priv *priv, struct sk_buff *skb)
465 const u8 *mac_addr = priv->netdev->dev_addr;
478 WARN_ON(priv->mcaddr_list.count >= OWL_EMAC_MAX_MULTICAST_ADDRS);
479 for (i = 0; i < priv->mcaddr_list.count; i++) {
480 mac_addr = priv->mcaddr_list.addrs[i];
490 static int owl_emac_setup_frame_xmit(struct owl_emac_priv *priv)
492 struct owl_emac_ring *ring = &priv->tx_ring;
493 struct net_device *netdev = priv->netdev;
505 owl_emac_setup_frame_prepare(priv, skb);
507 dma_addr = owl_emac_dma_map_tx(priv, skb);
508 if (dma_mapping_error(owl_emac_get_dev(priv), dma_addr)) {
513 spin_lock_bh(&priv->lock);
524 spin_unlock_bh(&priv->lock);
525 owl_emac_dma_unmap_tx(priv, skb, dma_addr);
545 status = owl_emac_dma_cmd_start_tx(priv);
548 owl_emac_dma_cmd_resume_tx(priv);
551 owl_emac_dma_cmd_set_tx(priv, status);
556 spin_unlock_bh(&priv->lock);
568 struct owl_emac_priv *priv = netdev_priv(netdev);
569 struct device *dev = owl_emac_get_dev(priv);
570 struct owl_emac_ring *ring = &priv->tx_ring;
576 dma_addr = owl_emac_dma_map_tx(priv, skb);
584 spin_lock_bh(&priv->lock);
596 spin_unlock_bh(&priv->lock);
599 owl_emac_irq_status(priv));
600 owl_emac_dma_unmap_tx(priv, skb, dma_addr);
617 owl_emac_dma_cmd_resume_tx(priv);
626 spin_unlock_bh(&priv->lock);
631 static bool owl_emac_tx_complete_tail(struct owl_emac_priv *priv)
633 struct owl_emac_ring *ring = &priv->tx_ring;
634 struct net_device *netdev = priv->netdev;
681 owl_emac_dma_unmap_tx(priv, skb, ring->skbs_dma[tx_tail]);
695 static void owl_emac_tx_complete(struct owl_emac_priv *priv)
697 struct owl_emac_ring *ring = &priv->tx_ring;
698 struct net_device *netdev = priv->netdev;
702 spin_lock(&priv->lock);
705 if (!owl_emac_tx_complete_tail(priv))
738 owl_emac_tx_complete_tail(priv);
743 spin_unlock(&priv->lock);
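TX completion (lines 631-743) reclaims from the ring tail under priv->lock: stop at the first descriptor the hardware still owns, otherwise unmap, free the skb, and advance. One reclaim step, sketched with an assumed descriptor layout and OWN-bit macro:

	desc = &ring->descs[tx_tail];
	if (le32_to_cpu(desc->status) & OWL_EMAC_BIT_TDES0_OWN)	/* name assumed */
		return false;			/* still owned by the MAC */

	skb = ring->skbs[tx_tail];
	owl_emac_dma_unmap_tx(priv, skb, ring->skbs_dma[tx_tail]);
	dev_kfree_skb_any(skb);
	ring->skbs[tx_tail] = NULL;

	tx_tail = (tx_tail + 1) % ring->size;	/* advance, wrapping */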
746 static int owl_emac_rx_process(struct owl_emac_priv *priv, int budget)
748 struct owl_emac_ring *ring = &priv->rx_ring;
749 struct device *dev = owl_emac_get_dev(priv);
750 struct net_device *netdev = priv->netdev;
759 spin_lock(&priv->lock);
768 spin_unlock(&priv->lock);
776 spin_unlock(&priv->lock);
819 new_dma = owl_emac_dma_map_rx(priv, new_skb);
826 owl_emac_dma_unmap_rx(priv, curr_skb, curr_dma);
848 spin_lock(&priv->lock);
859 spin_unlock(&priv->lock);
869 struct owl_emac_priv *priv;
872 priv = container_of(napi, struct owl_emac_priv, napi);
874 while ((status = owl_emac_irq_clear(priv)) &
880 owl_emac_tx_complete(priv);
893 recv = owl_emac_rx_process(priv, budget - work_done);
909 owl_emac_dma_cmd_resume_rx(priv);
911 recv = owl_emac_rx_process(priv, budget - work_done);
925 owl_emac_irq_enable(priv);
930 netdev_dbg(priv->netdev, "%s error status: 0x%08x\n",
934 schedule_work(&priv->mac_reset_task);
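The poll function loops while irq_clear() keeps reporting pending work, draining TX completions and up to the remaining budget of RX frames per pass; a fatal status bails out to the MAC reset worker (line 934). The normal exit is the canonical NAPI completion:

	if (work_done < budget) {
		napi_complete_done(napi, work_done);
		owl_emac_irq_enable(priv);	/* re-arm what the ISR masked */
	}

	return work_done;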
940 static void owl_emac_mdio_clock_enable(struct owl_emac_priv *priv)
947 val = owl_emac_reg_read(priv, OWL_EMAC_REG_MAC_CSR10);
953 owl_emac_reg_write(priv, OWL_EMAC_REG_MAC_CSR10, val);
956 static void owl_emac_core_hw_reset(struct owl_emac_priv *priv)
959 reset_control_assert(priv->reset);
961 reset_control_deassert(priv->reset);
965 static int owl_emac_core_sw_reset(struct owl_emac_priv *priv)
971 owl_emac_reg_set(priv, OWL_EMAC_REG_MAC_CSR0, OWL_EMAC_BIT_MAC_CSR0_SWR);
972 ret = readl_poll_timeout(priv->base + OWL_EMAC_REG_MAC_CSR0,
979 if (priv->phy_mode == PHY_INTERFACE_MODE_RMII) {
989 owl_emac_reg_write(priv, OWL_EMAC_REG_MAC_CTRL, val);
992 owl_emac_mdio_clock_enable(priv);
997 owl_emac_reg_write(priv, OWL_EMAC_REG_MAC_CSR19, val);
1001 owl_emac_reg_write(priv, OWL_EMAC_REG_MAC_CSR18, val);
1006 owl_emac_reg_write(priv, OWL_EMAC_REG_MAC_CSR11, val);
1009 owl_emac_reg_write(priv, OWL_EMAC_REG_MAC_CSR3,
1010 (u32)(priv->rx_ring.descs_dma));
1011 owl_emac_reg_write(priv, OWL_EMAC_REG_MAC_CSR4,
1012 (u32)(priv->tx_ring.descs_dma));
1017 owl_emac_reg_update(priv, OWL_EMAC_REG_MAC_CSR6,
1020 owl_emac_reg_clear(priv, OWL_EMAC_REG_MAC_CSR6,
1023 priv->link = 0;
1024 priv->speed = SPEED_UNKNOWN;
1025 priv->duplex = DUPLEX_UNKNOWN;
1026 priv->pause = 0;
1027 priv->mcaddr_list.count = 0;
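The software reset (lines 965-976) sets the CSR0 SWR bit and polls until the core clears it, then reprograms the PHY-interface glue, the descriptor base addresses (CSR3/CSR4) and the CSR6 mode bits, and finally invalidates the cached link state so the next PHY callback reapplies everything. The wait, sketched with assumed delay/timeout values:

	owl_emac_reg_set(priv, OWL_EMAC_REG_MAC_CSR0, OWL_EMAC_BIT_MAC_CSR0_SWR);

	ret = readl_poll_timeout(priv->base + OWL_EMAC_REG_MAC_CSR0, val,
				 !(val & OWL_EMAC_BIT_MAC_CSR0_SWR),
				 10, 10000);	/* poll/timeout in us, assumed */
	if (ret)
		return ret;	/* core never came out of reset */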
1034 struct owl_emac_priv *priv = netdev_priv(netdev);
1037 owl_emac_dma_cmd_stop(priv);
1038 owl_emac_irq_disable(priv);
1039 owl_emac_irq_clear(priv);
1041 owl_emac_ring_prepare_tx(priv);
1042 ret = owl_emac_ring_prepare_rx(priv);
1046 ret = owl_emac_core_sw_reset(priv);
1053 owl_emac_setup_frame_xmit(priv);
1056 napi_enable(&priv->napi);
1058 owl_emac_irq_enable(priv);
1059 owl_emac_dma_cmd_start(priv);
1069 owl_emac_ring_unprepare_rx(priv);
1070 owl_emac_ring_unprepare_tx(priv);
1077 struct owl_emac_priv *priv = netdev_priv(netdev);
1079 owl_emac_dma_cmd_stop(priv);
1080 owl_emac_irq_disable(priv);
1083 napi_disable(&priv->napi);
1088 owl_emac_ring_unprepare_rx(priv);
1089 owl_emac_ring_unprepare_tx(priv);
1106 struct owl_emac_priv *priv = netdev_priv(netdev);
1111 priv->mcaddr_list.count = 0;
1120 ether_addr_copy(priv->mcaddr_list.addrs[index++], ha->addr);
1123 priv->mcaddr_list.count = index;
1125 owl_emac_setup_frame_xmit(priv);
1130 struct owl_emac_priv *priv = netdev_priv(netdev);
1147 spin_lock_bh(&priv->lock);
1150 status = owl_emac_dma_cmd_stop(priv);
1153 owl_emac_reg_update(priv, OWL_EMAC_REG_MAC_CSR6,
1158 owl_emac_dma_cmd_set(priv, status);
1160 spin_unlock_bh(&priv->lock);
1194 struct owl_emac_priv *priv = netdev_priv(netdev);
1196 schedule_work(&priv->mac_reset_task);
1201 struct owl_emac_priv *priv;
1203 priv = container_of(work, struct owl_emac_priv, mac_reset_task);
1205 netdev_dbg(priv->netdev, "resetting MAC\n");
1206 owl_emac_disable(priv->netdev, false);
1207 owl_emac_enable(priv->netdev, false);
1240 struct owl_emac_priv *priv = netdev_priv(netdev);
1242 return priv->msg_enable;
1247 struct owl_emac_priv *priv = netdev_priv(ndev);
1249 priv->msg_enable = val;
1261 static int owl_emac_mdio_wait(struct owl_emac_priv *priv)
1266 return readl_poll_timeout(priv->base + OWL_EMAC_REG_MAC_CSR10,
1274 struct owl_emac_priv *priv = bus->priv;
1287 owl_emac_reg_write(priv, OWL_EMAC_REG_MAC_CSR10, data);
1289 ret = owl_emac_mdio_wait(priv);
1293 data = owl_emac_reg_read(priv, OWL_EMAC_REG_MAC_CSR10);
1302 struct owl_emac_priv *priv = bus->priv;
1316 owl_emac_reg_write(priv, OWL_EMAC_REG_MAC_CSR10, data);
1318 return owl_emac_mdio_wait(priv);
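The MDIO accessors drive the management interface through CSR10: compose a word with the opcode, PHY address, register number and start/busy bit, write it, then poll until the controller clears the busy flag (for reads, the data field is then pulled out of the same register, line 1293). Sketch of the wait, with the busy-bit and timeout macro names assumed:

	static int owl_emac_mdio_wait(struct owl_emac_priv *priv)
	{
		u32 val;

		/* Bit and timeout names assumed; the poll itself mirrors
		 * line 1266. */
		return readl_poll_timeout(priv->base + OWL_EMAC_REG_MAC_CSR10,
					  val,
					  !(val & OWL_EMAC_BIT_MAC_CSR10_SB),
					  OWL_EMAC_POLL_DELAY_USEC,
					  OWL_EMAC_MDIO_POLL_TIMEOUT_USEC);
	}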
1323 struct owl_emac_priv *priv = netdev_priv(netdev);
1324 struct device *dev = owl_emac_get_dev(priv);
1332 priv->mii = devm_mdiobus_alloc(dev);
1333 if (!priv->mii) {
1338 snprintf(priv->mii->id, MII_BUS_ID_SIZE, "%s", dev_name(dev));
1339 priv->mii->name = "owl-emac-mdio";
1340 priv->mii->parent = dev;
1341 priv->mii->read = owl_emac_mdio_read;
1342 priv->mii->write = owl_emac_mdio_write;
1343 priv->mii->phy_mask = ~0; /* Mask out all PHYs from auto probing. */
1344 priv->mii->priv = priv;
1346 ret = devm_of_mdiobus_register(dev, priv->mii, mdio_node);
1355 struct owl_emac_priv *priv = netdev_priv(netdev);
1356 struct device *dev = owl_emac_get_dev(priv);
1366 if (netif_msg_link(priv))
1388 struct owl_emac_priv *priv = netdev_priv(netdev);
1397 clk_bulk_disable_unprepare(OWL_EMAC_NCLKS, priv->clks);
1405 struct owl_emac_priv *priv = netdev_priv(netdev);
1408 ret = clk_bulk_prepare_enable(OWL_EMAC_NCLKS, priv->clks);
1413 owl_emac_core_hw_reset(priv);
1414 owl_emac_core_sw_reset(priv);
1418 clk_bulk_disable_unprepare(OWL_EMAC_NCLKS, priv->clks);
1432 struct owl_emac_priv *priv = data;
1434 clk_bulk_disable_unprepare(OWL_EMAC_NCLKS, priv->clks);
1437 static int owl_emac_clk_set_rate(struct owl_emac_priv *priv)
1439 struct device *dev = owl_emac_get_dev(priv);
1443 switch (priv->phy_mode) {
1454 priv->phy_mode);
1458 ret = clk_set_rate(priv->clks[OWL_EMAC_CLK_RMII].clk, rate);
1469 struct owl_emac_priv *priv;
1472 netdev = devm_alloc_etherdev(dev, sizeof(*priv));
1479 priv = netdev_priv(netdev);
1480 priv->netdev = netdev;
1481 priv->msg_enable = netif_msg_init(-1, OWL_EMAC_DEFAULT_MSG_ENABLE);
1483 ret = of_get_phy_mode(dev->of_node, &priv->phy_mode);
1489 spin_lock_init(&priv->lock);
1497 ret = owl_emac_ring_alloc(dev, &priv->rx_ring, OWL_EMAC_RX_RING_SIZE);
1501 ret = owl_emac_ring_alloc(dev, &priv->tx_ring, OWL_EMAC_TX_RING_SIZE);
1505 priv->base = devm_platform_ioremap_resource(pdev, 0);
1506 if (IS_ERR(priv->base))
1507 return PTR_ERR(priv->base);
1521 priv->clks[i].id = owl_emac_clk_names[i];
1523 ret = devm_clk_bulk_get(dev, OWL_EMAC_NCLKS, priv->clks);
1527 ret = clk_bulk_prepare_enable(OWL_EMAC_NCLKS, priv->clks);
1531 ret = devm_add_action_or_reset(dev, owl_emac_clk_disable_unprepare, priv);
1535 ret = owl_emac_clk_set_rate(priv);
1539 priv->reset = devm_reset_control_get_exclusive(dev, NULL);
1540 if (IS_ERR(priv->reset))
1541 return dev_err_probe(dev, PTR_ERR(priv->reset),
1546 owl_emac_core_hw_reset(priv);
1547 owl_emac_mdio_clock_enable(priv);
1561 INIT_WORK(&priv->mac_reset_task, owl_emac_reset_task);
1568 netif_napi_add(netdev, &priv->napi, owl_emac_poll);
1572 netif_napi_del(&priv->napi);
1582 struct owl_emac_priv *priv = platform_get_drvdata(pdev);
1584 netif_napi_del(&priv->napi);
1585 phy_disconnect(priv->netdev->phydev);
1586 cancel_work_sync(&priv->mac_reset_task);
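The probe fragments lean on devm_* for allocation, clocks, reset and MDIO registration, so unwinding is mostly automatic; remove only undoes the manual steps, in the order the listing shows. A sketch (void-returning platform remove as on newer kernels; netdev unregistration is not visible in this listing and is presumably device-managed):

	static void owl_emac_remove(struct platform_device *pdev)
	{
		struct owl_emac_priv *priv = platform_get_drvdata(pdev);

		netif_napi_del(&priv->napi);
		phy_disconnect(priv->netdev->phydev);
		cancel_work_sync(&priv->mac_reset_task);
	}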