Lines Matching +full:mac +full:-clk +full:-tx (hits from drivers/net/ethernet/aurora/nb8800.c)

1 // SPDX-License-Identifier: GPL-2.0-or-later
8 * Driver for tangox SMP864x/SMP865x/SMP867x/SMP868x builtin Ethernet Mac.
22 #include <linux/dma-mapping.h>
37 return readb_relaxed(priv->base + reg); in nb8800_readb()
42 return readl_relaxed(priv->base + reg); in nb8800_readl()
47 writeb_relaxed(val, priv->base + reg); in nb8800_writeb()
52 writew_relaxed(val, priv->base + reg); in nb8800_writew()
57 writel_relaxed(val, priv->base + reg); in nb8800_writel()
114 struct nb8800_priv *priv = bus->priv; in nb8800_mdio_wait()
117 return readl_poll_timeout_atomic(priv->base + NB8800_MDIO_CMD, in nb8800_mdio_wait()
123 struct nb8800_priv *priv = bus->priv; in nb8800_mdio_cmd()
139 struct nb8800_priv *priv = bus->priv; in nb8800_mdio_read()
190 struct nb8800_rx_desc *rxd = &priv->rx_descs[i]; in nb8800_alloc_rx()
191 struct nb8800_rx_buf *rxb = &priv->rx_bufs[i]; in nb8800_alloc_rx()
200 return -ENOMEM; in nb8800_alloc_rx()
203 offset = data - page_address(page); in nb8800_alloc_rx()
205 dma_addr = dma_map_page(&dev->dev, page, offset, RX_BUF_SIZE, in nb8800_alloc_rx()
208 if (dma_mapping_error(&dev->dev, dma_addr)) { in nb8800_alloc_rx()
210 return -ENOMEM; in nb8800_alloc_rx()
213 rxb->page = page; in nb8800_alloc_rx()
214 rxb->offset = offset; in nb8800_alloc_rx()
215 rxd->desc.s_addr = dma_addr; in nb8800_alloc_rx()
224 struct nb8800_rx_desc *rxd = &priv->rx_descs[i]; in nb8800_receive()
225 struct page *page = priv->rx_bufs[i].page; in nb8800_receive()
226 int offset = priv->rx_bufs[i].offset; in nb8800_receive()
228 dma_addr_t dma = rxd->desc.s_addr; in nb8800_receive()
235 skb = napi_alloc_skb(&priv->napi, size); in nb8800_receive()
238 dev->stats.rx_dropped++; in nb8800_receive()
243 dma_sync_single_for_cpu(&dev->dev, dma, len, DMA_FROM_DEVICE); in nb8800_receive()
245 dma_sync_single_for_device(&dev->dev, dma, len, in nb8800_receive()
251 dev->stats.rx_dropped++; in nb8800_receive()
256 dma_unmap_page(&dev->dev, dma, RX_BUF_SIZE, DMA_FROM_DEVICE); in nb8800_receive()
258 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, in nb8800_receive()
259 offset + RX_COPYHDR, len - RX_COPYHDR, in nb8800_receive()
263 skb->protocol = eth_type_trans(skb, dev); in nb8800_receive()
264 napi_gro_receive(&priv->napi, skb); in nb8800_receive()
270 dev->stats.rx_length_errors++; in nb8800_rx_error()
273 dev->stats.rx_crc_errors++; in nb8800_rx_error()
276 dev->stats.rx_fifo_errors++; in nb8800_rx_error()
279 dev->stats.rx_frame_errors++; in nb8800_rx_error()
281 dev->stats.rx_errors++; in nb8800_rx_error()
286 struct net_device *dev = napi->dev; in nb8800_poll()
289 unsigned int last = priv->rx_eoc; in nb8800_poll()
301 rxd = &priv->rx_descs[next]; in nb8800_poll()
303 if (!rxd->report) in nb8800_poll()
306 len = RX_BYTES_TRANSFERRED(rxd->report); in nb8800_poll()
308 if (IS_RX_ERROR(rxd->report)) in nb8800_poll()
309 nb8800_rx_error(dev, rxd->report); in nb8800_poll()
313 dev->stats.rx_packets++; in nb8800_poll()
314 dev->stats.rx_bytes += len; in nb8800_poll()
316 if (rxd->report & RX_MULTICAST_PKT) in nb8800_poll()
317 dev->stats.multicast++; in nb8800_poll()
319 rxd->report = 0; in nb8800_poll()
325 priv->rx_descs[last].desc.config |= DESC_EOC; in nb8800_poll()
327 priv->rx_descs[priv->rx_eoc].desc.config &= ~DESC_EOC; in nb8800_poll()
328 priv->rx_eoc = last; in nb8800_poll()
333 nb8800_writel(priv, NB8800_RX_ITR, priv->rx_itr_irq); in nb8800_poll()
339 if (priv->rx_descs[next].report) in nb8800_poll()
354 txb = &priv->tx_bufs[priv->tx_queue]; in __nb8800_tx_dma_start()
355 if (!txb->ready) in __nb8800_tx_dma_start()
362 nb8800_writel(priv, NB8800_TX_DESC_ADDR, txb->dma_desc); in __nb8800_tx_dma_start()
366 priv->tx_queue = (priv->tx_queue + txb->chain_len) % TX_DESC_COUNT; in __nb8800_tx_dma_start()
373 spin_lock_irq(&priv->tx_lock); in nb8800_tx_dma_start()
375 spin_unlock_irq(&priv->tx_lock); in nb8800_tx_dma_start()
382 spin_lock(&priv->tx_lock); in nb8800_tx_dma_start_irq()
384 spin_unlock(&priv->tx_lock); in nb8800_tx_dma_start_irq()
399 if (atomic_read(&priv->tx_free) <= NB8800_DESC_LOW) { in nb8800_xmit()
404 align = (8 - (uintptr_t)skb->data) & 7; in nb8800_xmit()
406 dma_len = skb->len - align; in nb8800_xmit()
407 dma_addr = dma_map_single(&dev->dev, skb->data + align, in nb8800_xmit()
410 if (dma_mapping_error(&dev->dev, dma_addr)) { in nb8800_xmit()
411 netdev_err(dev, "tx dma mapping error\n"); in nb8800_xmit()
413 dev->stats.tx_dropped++; in nb8800_xmit()
418 if (atomic_dec_return(&priv->tx_free) <= NB8800_DESC_LOW) { in nb8800_xmit()
423 next = priv->tx_next; in nb8800_xmit()
424 txb = &priv->tx_bufs[next]; in nb8800_xmit()
425 txd = &priv->tx_descs[next]; in nb8800_xmit()
426 desc = &txd->desc[0]; in nb8800_xmit()
431 memcpy(txd->buf, skb->data, align); in nb8800_xmit()
433 desc->s_addr = in nb8800_xmit()
434 txb->dma_desc + offsetof(struct nb8800_tx_desc, buf); in nb8800_xmit()
435 desc->n_addr = txb->dma_desc + sizeof(txd->desc[0]); in nb8800_xmit()
436 desc->config = DESC_BTS(2) | DESC_DS | align; in nb8800_xmit()
441 desc->s_addr = dma_addr; in nb8800_xmit()
442 desc->n_addr = priv->tx_bufs[next].dma_desc; in nb8800_xmit()
443 desc->config = DESC_BTS(2) | DESC_DS | DESC_EOF | dma_len; in nb8800_xmit()
446 desc->config |= DESC_EOC; in nb8800_xmit()
448 txb->skb = skb; in nb8800_xmit()
449 txb->dma_addr = dma_addr; in nb8800_xmit()
450 txb->dma_len = dma_len; in nb8800_xmit()
452 if (!priv->tx_chain) { in nb8800_xmit()
453 txb->chain_len = 1; in nb8800_xmit()
454 priv->tx_chain = txb; in nb8800_xmit()
456 priv->tx_chain->chain_len++; in nb8800_xmit()
459 netdev_sent_queue(dev, skb->len); in nb8800_xmit()
461 priv->tx_next = next; in nb8800_xmit()
465 priv->tx_chain->ready = true; in nb8800_xmit()
466 priv->tx_chain = NULL; in nb8800_xmit()
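
The nb8800_xmit() fragments above rely on an 8-byte alignment trick: up to seven leading bytes of the skb are copied into a small bounce buffer embedded in the TX descriptor so that the DMA-mapped remainder starts on an 8-byte boundary. A minimal, hypothetical user-space sketch of the same arithmetic (the helper name and the printf harness are illustrative, not part of the driver):

#include <stdint.h>
#include <stdio.h>

/* Bytes to peel off so that (data + align) is 8-byte aligned, 0..7. */
static unsigned int head_align(const void *data)
{
	return (8 - (uintptr_t)data) & 7;
}

int main(void)
{
	char frame[64];
	const char *data = frame + 3;		/* deliberately misaligned */
	unsigned int align = head_align(data);

	/* The driver memcpy()s 'align' bytes into txd->buf and DMA-maps
	 * the rest starting at data + align. */
	printf("copy %u byte(s) to bounce buffer, map %p onwards\n",
	       align, (const void *)(data + align));
	return 0;
}
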
476 dev->stats.collisions++; in nb8800_tx_error()
479 dev->stats.tx_dropped++; in nb8800_tx_error()
482 dev->stats.tx_fifo_errors++; in nb8800_tx_error()
484 dev->stats.tx_errors++; in nb8800_tx_error()
490 unsigned int limit = priv->tx_next; in nb8800_tx_done()
491 unsigned int done = priv->tx_done; in nb8800_tx_done()
496 struct nb8800_tx_desc *txd = &priv->tx_descs[done]; in nb8800_tx_done()
497 struct nb8800_tx_buf *txb = &priv->tx_bufs[done]; in nb8800_tx_done()
500 if (!txd->report) in nb8800_tx_done()
503 skb = txb->skb; in nb8800_tx_done()
504 len += skb->len; in nb8800_tx_done()
506 dma_unmap_single(&dev->dev, txb->dma_addr, txb->dma_len, in nb8800_tx_done()
509 if (IS_TX_ERROR(txd->report)) { in nb8800_tx_done()
510 nb8800_tx_error(dev, txd->report); in nb8800_tx_done()
516 dev->stats.tx_packets++; in nb8800_tx_done()
517 dev->stats.tx_bytes += TX_BYTES_TRANSFERRED(txd->report); in nb8800_tx_done()
518 dev->stats.collisions += TX_EARLY_COLLISIONS(txd->report); in nb8800_tx_done()
520 txb->skb = NULL; in nb8800_tx_done()
521 txb->ready = false; in nb8800_tx_done()
522 txd->report = 0; in nb8800_tx_done()
530 atomic_add(packets, &priv->tx_free); in nb8800_tx_done()
533 priv->tx_done = done; in nb8800_tx_done()
544 /* tx interrupt */ in nb8800_irq()
553 napi_schedule_irqoff(&priv->napi); in nb8800_irq()
556 netdev_err(dev, "TX DMA error\n"); in nb8800_irq()
560 netdev_err(dev, "TX Status FIFO overflow\n"); in nb8800_irq()
571 nb8800_writel(priv, NB8800_RX_ITR, priv->rx_itr_poll); in nb8800_irq()
572 napi_schedule_irqoff(&priv->napi); in nb8800_irq()
591 bool gigabit = priv->speed == SPEED_1000; in nb8800_mac_config()
598 if (!priv->duplex) in nb8800_mac_config()
602 if (phy_interface_is_rgmii(dev->phydev)) in nb8800_mac_config()
615 ict = DIV_ROUND_UP(phy_clk, clk_get_rate(priv->clk)); in nb8800_mac_config()
625 struct phy_device *phydev = dev->phydev; in nb8800_pause_config()
628 if (priv->pause_aneg) { in nb8800_pause_config()
629 if (!phydev || !phydev->link) in nb8800_pause_config()
632 priv->pause_rx = phydev->pause; in nb8800_pause_config()
633 priv->pause_tx = phydev->pause ^ phydev->asym_pause; in nb8800_pause_config()
636 nb8800_modb(priv, NB8800_RX_CTL, RX_PAUSE_EN, priv->pause_rx); in nb8800_pause_config()
639 if (!!(rxcr & RCR_FL) == priv->pause_tx) in nb8800_pause_config()
643 napi_disable(&priv->napi); in nb8800_pause_config()
646 nb8800_modl(priv, NB8800_RXC_CR, RCR_FL, priv->pause_tx); in nb8800_pause_config()
649 napi_enable(&priv->napi); in nb8800_pause_config()
651 nb8800_modl(priv, NB8800_RXC_CR, RCR_FL, priv->pause_tx); in nb8800_pause_config()
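
In nb8800_pause_config() the resolved flow-control settings follow the two PHY bits shown in the fragments: RX pause tracks phydev->pause, and TX pause is phydev->pause XOR phydev->asym_pause. A small stand-alone table of that combination, a sketch using only those two bits as inputs:

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	/* Enumerate the (pause, asym_pause) combinations the driver sees. */
	for (int pause = 0; pause <= 1; pause++) {
		for (int asym = 0; asym <= 1; asym++) {
			bool rx = pause;		/* priv->pause_rx */
			bool tx = pause ^ asym;		/* priv->pause_tx */

			printf("pause=%d asym=%d -> rx_pause=%d tx_pause=%d\n",
			       pause, asym, rx, tx);
		}
	}
	return 0;
}
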
658 struct phy_device *phydev = dev->phydev; in nb8800_link_reconfigure()
661 if (phydev->link) { in nb8800_link_reconfigure()
662 if (phydev->speed != priv->speed) { in nb8800_link_reconfigure()
663 priv->speed = phydev->speed; in nb8800_link_reconfigure()
667 if (phydev->duplex != priv->duplex) { in nb8800_link_reconfigure()
668 priv->duplex = phydev->duplex; in nb8800_link_reconfigure()
678 if (phydev->link != priv->link) { in nb8800_link_reconfigure()
679 priv->link = phydev->link; in nb8800_link_reconfigure()
693 nb8800_writeb(priv, NB8800_SRC_ADDR(i), dev->dev_addr[i]); in nb8800_update_mac_addr()
696 nb8800_writeb(priv, NB8800_UC_ADDR(i), dev->dev_addr[i]); in nb8800_update_mac_addr()
704 return -EBUSY; in nb8800_set_mac_address()
706 ether_addr_copy(dev->dev_addr, sock->sa_data); in nb8800_set_mac_address()
717 readb_poll_timeout_atomic(priv->base + NB8800_MC_INIT, val, !val, in nb8800_mc_init()
727 if (dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) { in nb8800_set_rx_mode()
737 nb8800_writeb(priv, NB8800_MC_ADDR(i), ha->addr[i]); in nb8800_set_rx_mode()
751 if (priv->rx_bufs) { in nb8800_dma_free()
753 if (priv->rx_bufs[i].page) in nb8800_dma_free()
754 put_page(priv->rx_bufs[i].page); in nb8800_dma_free()
756 kfree(priv->rx_bufs); in nb8800_dma_free()
757 priv->rx_bufs = NULL; in nb8800_dma_free()
760 if (priv->tx_bufs) { in nb8800_dma_free()
762 kfree_skb(priv->tx_bufs[i].skb); in nb8800_dma_free()
764 kfree(priv->tx_bufs); in nb8800_dma_free()
765 priv->tx_bufs = NULL; in nb8800_dma_free()
768 if (priv->rx_descs) { in nb8800_dma_free()
769 dma_free_coherent(dev->dev.parent, RX_DESC_SIZE, priv->rx_descs, in nb8800_dma_free()
770 priv->rx_desc_dma); in nb8800_dma_free()
771 priv->rx_descs = NULL; in nb8800_dma_free()
774 if (priv->tx_descs) { in nb8800_dma_free()
775 dma_free_coherent(dev->dev.parent, TX_DESC_SIZE, priv->tx_descs, in nb8800_dma_free()
776 priv->tx_desc_dma); in nb8800_dma_free()
777 priv->tx_descs = NULL; in nb8800_dma_free()
789 dma_addr_t rx_dma = priv->rx_desc_dma + i * sizeof(*rxd); in nb8800_dma_reset()
791 rxd = &priv->rx_descs[i]; in nb8800_dma_reset()
792 rxd->desc.n_addr = rx_dma + sizeof(*rxd); in nb8800_dma_reset()
793 rxd->desc.r_addr = in nb8800_dma_reset()
795 rxd->desc.config = priv->rx_dma_config; in nb8800_dma_reset()
796 rxd->report = 0; in nb8800_dma_reset()
799 rxd->desc.n_addr = priv->rx_desc_dma; in nb8800_dma_reset()
800 rxd->desc.config |= DESC_EOC; in nb8800_dma_reset()
802 priv->rx_eoc = RX_DESC_COUNT - 1; in nb8800_dma_reset()
805 struct nb8800_tx_buf *txb = &priv->tx_bufs[i]; in nb8800_dma_reset()
806 dma_addr_t r_dma = txb->dma_desc + in nb8800_dma_reset()
809 txd = &priv->tx_descs[i]; in nb8800_dma_reset()
810 txd->desc[0].r_addr = r_dma; in nb8800_dma_reset()
811 txd->desc[1].r_addr = r_dma; in nb8800_dma_reset()
812 txd->report = 0; in nb8800_dma_reset()
815 priv->tx_next = 0; in nb8800_dma_reset()
816 priv->tx_queue = 0; in nb8800_dma_reset()
817 priv->tx_done = 0; in nb8800_dma_reset()
818 atomic_set(&priv->tx_free, TX_DESC_COUNT); in nb8800_dma_reset()
820 nb8800_writel(priv, NB8800_RX_DESC_ADDR, priv->rx_desc_dma); in nb8800_dma_reset()
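
nb8800_dma_reset() links the RX descriptors into a circular chain: each descriptor's n_addr holds the DMA address of the next descriptor, the last one points back at the base, and DESC_EOC marks the current end of chain. A minimal host-side sketch of that ring construction, using made-up types, a fake base address, and a stand-in flag rather than the driver's real descriptor layout:

#include <stdio.h>
#include <stdint.h>

#define RING_SIZE 4		/* the driver uses RX_DESC_COUNT */
#define DESC_EOC  0x1		/* stand-in for the real end-of-chain bit */

struct fake_desc {
	uint32_t n_addr;	/* DMA address of the next descriptor */
	uint32_t config;
};

int main(void)
{
	struct fake_desc ring[RING_SIZE];
	uint32_t base = 0x10000000;	/* pretend DMA base address */

	for (int i = 0; i < RING_SIZE; i++) {
		ring[i].n_addr = base + (uint32_t)((i + 1) * sizeof(struct fake_desc));
		ring[i].config = 0;
	}

	/* Close the loop and mark the last descriptor end-of-chain. */
	ring[RING_SIZE - 1].n_addr = base;
	ring[RING_SIZE - 1].config |= DESC_EOC;

	for (int i = 0; i < RING_SIZE; i++)
		printf("desc %d: next=0x%08lx%s\n", i,
		       (unsigned long)ring[i].n_addr,
		       (ring[i].config & DESC_EOC) ? " (EOC)" : "");
	return 0;
}
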
833 priv->rx_descs = dma_alloc_coherent(dev->dev.parent, RX_DESC_SIZE, in nb8800_dma_init()
834 &priv->rx_desc_dma, GFP_KERNEL); in nb8800_dma_init()
835 if (!priv->rx_descs) in nb8800_dma_init()
838 priv->rx_bufs = kcalloc(n_rx, sizeof(*priv->rx_bufs), GFP_KERNEL); in nb8800_dma_init()
839 if (!priv->rx_bufs) in nb8800_dma_init()
848 priv->tx_descs = dma_alloc_coherent(dev->dev.parent, TX_DESC_SIZE, in nb8800_dma_init()
849 &priv->tx_desc_dma, GFP_KERNEL); in nb8800_dma_init()
850 if (!priv->tx_descs) in nb8800_dma_init()
853 priv->tx_bufs = kcalloc(n_tx, sizeof(*priv->tx_bufs), GFP_KERNEL); in nb8800_dma_init()
854 if (!priv->tx_bufs) in nb8800_dma_init()
858 priv->tx_bufs[i].dma_desc = in nb8800_dma_init()
859 priv->tx_desc_dma + i * sizeof(struct nb8800_tx_desc); in nb8800_dma_init()
868 return -ENOMEM; in nb8800_dma_init()
874 struct nb8800_tx_buf *txb = &priv->tx_bufs[0]; in nb8800_dma_stop()
875 struct nb8800_tx_desc *txd = &priv->tx_descs[0]; in nb8800_dma_stop()
882 /* wait for tx to finish */ in nb8800_dma_stop()
883 err = readl_poll_timeout_atomic(priv->base + NB8800_TXC_CR, txcr, in nb8800_dma_stop()
885 priv->tx_done == priv->tx_next, in nb8800_dma_stop()
895 * the tx queue. in nb8800_dma_stop()
899 priv->rx_descs[i].desc.config |= DESC_EOC; in nb8800_dma_stop()
901 txd->desc[0].s_addr = in nb8800_dma_stop()
902 txb->dma_desc + offsetof(struct nb8800_tx_desc, buf); in nb8800_dma_stop()
903 txd->desc[0].config = DESC_BTS(2) | DESC_DS | DESC_EOF | DESC_EOC | 8; in nb8800_dma_stop()
904 memset(txd->buf, 0, sizeof(txd->buf)); in nb8800_dma_stop()
910 nb8800_writel(priv, NB8800_TX_DESC_ADDR, txb->dma_desc); in nb8800_dma_stop()
914 err = readl_poll_timeout_atomic(priv->base + NB8800_RXC_CR, in nb8800_dma_stop()
917 } while (err && --retry); in nb8800_dma_stop()
923 return retry ? 0 : -ETIMEDOUT; in nb8800_dma_stop()
929 struct phy_device *phydev = dev->phydev; in nb8800_pause_adv()
934 phy_set_asym_pause(phydev, priv->pause_rx, priv->pause_tx); in nb8800_pause_adv()
951 err = request_irq(dev->irq, nb8800_irq, 0, dev_name(&dev->dev), dev); in nb8800_open()
958 phydev = of_phy_connect(dev, priv->phy_node, in nb8800_open()
960 priv->phy_mode); in nb8800_open()
962 err = -ENODEV; in nb8800_open()
969 napi_enable(&priv->napi); in nb8800_open()
978 free_irq(dev->irq, dev); in nb8800_open()
988 struct phy_device *phydev = dev->phydev; in nb8800_stop()
993 napi_disable(&priv->napi); in nb8800_stop()
1001 free_irq(dev->irq, dev); in nb8800_stop()
1023 pp->autoneg = priv->pause_aneg; in nb8800_get_pauseparam()
1024 pp->rx_pause = priv->pause_rx; in nb8800_get_pauseparam()
1025 pp->tx_pause = priv->pause_tx; in nb8800_get_pauseparam()
1032 struct phy_device *phydev = dev->phydev; in nb8800_set_pauseparam()
1034 priv->pause_aneg = pp->autoneg; in nb8800_set_pauseparam()
1035 priv->pause_rx = pp->rx_pause; in nb8800_set_pauseparam()
1036 priv->pause_tx = pp->tx_pause; in nb8800_set_pauseparam()
1040 if (!priv->pause_aneg) in nb8800_set_pauseparam()
1103 return -EOPNOTSUPP; in nb8800_get_sset_count()
1125 u32 rx, tx; in nb8800_get_ethtool_stats() local
1129 tx = nb8800_read_stat(dev, i | 0x80); in nb8800_get_ethtool_stats()
1131 st[i + NB8800_NUM_STATS / 2] = tx; in nb8800_get_ethtool_stats()
1164 /* TX cycles per deferral period */ in nb8800_hw_init()
1171 /* RX/TX FIFO threshold for partial empty (64-bit entries) */ in nb8800_hw_init()
1174 /* RX/TX FIFO threshold for partial full (64-bit entries) */ in nb8800_hw_init()
1177 /* Buffer size for transmit (64-bit entries) */ in nb8800_hw_init()
1180 /* Configure tx DMA */ in nb8800_hw_init()
1185 val |= TCR_RS; /* automatically store tx status */ in nb8800_hw_init()
1188 val |= TCR_BTS(2); /* 32-byte bus transaction size */ in nb8800_hw_init()
1191 /* TX complete interrupt after 10 ms or 7 frames (see above) */ in nb8800_hw_init()
1192 val = clk_get_rate(priv->clk) / 100; in nb8800_hw_init()
1203 val |= RCR_BTS(2); /* 32-byte bus transaction size */ in nb8800_hw_init()
1209 priv->rx_itr_irq = clk_get_rate(priv->clk) / 20000; in nb8800_hw_init()
1214 priv->rx_itr_poll = clk_get_rate(priv->clk) / 100; in nb8800_hw_init()
1216 nb8800_writel(priv, NB8800_RX_ITR, priv->rx_itr_irq); in nb8800_hw_init()
1218 priv->rx_dma_config = RX_BUF_SIZE | DESC_BTS(2) | DESC_DS | DESC_EOF; in nb8800_hw_init()
1227 /* Auto-negotiate by default */ in nb8800_hw_init()
1228 priv->pause_aneg = true; in nb8800_hw_init()
1229 priv->pause_rx = true; in nb8800_hw_init()
1230 priv->pause_tx = true; in nb8800_hw_init()
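
The hw_init fragments program the interrupt timers in units of MAC clock cycles: clk/100 for the 10 ms TX completion interrupt, clk/20000 (50 us) for the RX throttle while interrupts are armed, and clk/100 again while polling. A quick sanity check of those divisors, assuming a hypothetical 100 MHz clock (the real rate comes from clk_get_rate(priv->clk)):

#include <stdio.h>

int main(void)
{
	const unsigned long clk_hz = 100000000UL;	/* assumed 100 MHz */

	/* cycles = rate / N corresponds to a period of 1/N seconds */
	printf("TX ITR       : %lu cycles (1/100 s   = 10 ms)\n", clk_hz / 100);
	printf("RX ITR (irq) : %lu cycles (1/20000 s = 50 us)\n", clk_hz / 20000);
	printf("RX ITR (poll): %lu cycles (1/100 s   = 10 ms)\n", clk_hz / 100);
	return 0;
}
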
1242 switch (priv->phy_mode) { in nb8800_tangox_init()
1256 dev_err(dev->dev.parent, "unsupported phy mode %s\n", in nb8800_tangox_init()
1257 phy_modes(priv->phy_mode)); in nb8800_tangox_init()
1258 return -EINVAL; in nb8800_tangox_init()
1277 clk_div = DIV_ROUND_UP(clk_get_rate(priv->clk), 2 * MAX_MDC_CLOCK); in nb8800_tangox_reset()
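
The tangox reset path derives the MDIO clock divider as DIV_ROUND_UP(bus_clk, 2 * MAX_MDC_CLOCK), which keeps bus_clk / (2 * clk_div) at or below the MDC limit. A worked sketch assuming, purely for illustration, a 100 MHz bus clock and a 2.5 MHz MDC ceiling (the actual MAX_MDC_CLOCK value is defined in the driver's header):

#include <stdio.h>

/* Same rounding the kernel's DIV_ROUND_UP() macro performs. */
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	const unsigned long clk_hz = 100000000UL;	/* assumed bus clock */
	const unsigned long max_mdc = 2500000UL;	/* assumed MDC limit */
	unsigned long clk_div = DIV_ROUND_UP(clk_hz, 2 * max_mdc);

	/* 100 MHz / (2 * 20) = 2.5 MHz, i.e. at or below the limit. */
	printf("clk_div = %lu -> MDC = %lu Hz\n",
	       clk_div, clk_hz / (2 * clk_div));
	return 0;
}
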
1305 priv->rx_dma_config |= DESC_ID; in nb8800_tango4_init()
1320 .compatible = "sigma,smp8642-ethernet",
1324 .compatible = "sigma,smp8734-ethernet",
1339 const unsigned char *mac; in nb8800_probe() local
1344 match = of_match_device(nb8800_dt_ids, &pdev->dev); in nb8800_probe()
1346 ops = match->data; in nb8800_probe()
1350 return -EINVAL; in nb8800_probe()
1353 base = devm_ioremap_resource(&pdev->dev, res); in nb8800_probe()
1357 dev_dbg(&pdev->dev, "AU-NB8800 Ethernet at %pa\n", &res->start); in nb8800_probe()
1361 return -ENOMEM; in nb8800_probe()
1364 SET_NETDEV_DEV(dev, &pdev->dev); in nb8800_probe()
1367 priv->base = base; in nb8800_probe()
1369 ret = of_get_phy_mode(pdev->dev.of_node, &priv->phy_mode); in nb8800_probe()
1371 priv->phy_mode = PHY_INTERFACE_MODE_RGMII; in nb8800_probe()
1373 priv->clk = devm_clk_get(&pdev->dev, NULL); in nb8800_probe()
1374 if (IS_ERR(priv->clk)) { in nb8800_probe()
1375 dev_err(&pdev->dev, "failed to get clock\n"); in nb8800_probe()
1376 ret = PTR_ERR(priv->clk); in nb8800_probe()
1380 ret = clk_prepare_enable(priv->clk); in nb8800_probe()
1384 spin_lock_init(&priv->tx_lock); in nb8800_probe()
1386 if (ops && ops->reset) { in nb8800_probe()
1387 ret = ops->reset(dev); in nb8800_probe()
1392 bus = devm_mdiobus_alloc(&pdev->dev); in nb8800_probe()
1394 ret = -ENOMEM; in nb8800_probe()
1398 bus->name = "nb8800-mii"; in nb8800_probe()
1399 bus->read = nb8800_mdio_read; in nb8800_probe()
1400 bus->write = nb8800_mdio_write; in nb8800_probe()
1401 bus->parent = &pdev->dev; in nb8800_probe()
1402 snprintf(bus->id, MII_BUS_ID_SIZE, "%lx.nb8800-mii", in nb8800_probe()
1403 (unsigned long)res->start); in nb8800_probe()
1404 bus->priv = priv; in nb8800_probe()
1406 ret = of_mdiobus_register(bus, pdev->dev.of_node); in nb8800_probe()
1408 dev_err(&pdev->dev, "failed to register MII bus\n"); in nb8800_probe()
1412 if (of_phy_is_fixed_link(pdev->dev.of_node)) { in nb8800_probe()
1413 ret = of_phy_register_fixed_link(pdev->dev.of_node); in nb8800_probe()
1415 dev_err(&pdev->dev, "bad fixed-link spec\n"); in nb8800_probe()
1418 priv->phy_node = of_node_get(pdev->dev.of_node); in nb8800_probe()
1421 if (!priv->phy_node) in nb8800_probe()
1422 priv->phy_node = of_parse_phandle(pdev->dev.of_node, in nb8800_probe()
1423 "phy-handle", 0); in nb8800_probe()
1425 if (!priv->phy_node) { in nb8800_probe()
1426 dev_err(&pdev->dev, "no PHY specified\n"); in nb8800_probe()
1427 ret = -ENODEV; in nb8800_probe()
1431 priv->mii_bus = bus; in nb8800_probe()
1437 if (ops && ops->init) { in nb8800_probe()
1438 ret = ops->init(dev); in nb8800_probe()
1443 dev->netdev_ops = &nb8800_netdev_ops; in nb8800_probe()
1444 dev->ethtool_ops = &nb8800_ethtool_ops; in nb8800_probe()
1445 dev->flags |= IFF_MULTICAST; in nb8800_probe()
1446 dev->irq = irq; in nb8800_probe()
1448 mac = of_get_mac_address(pdev->dev.of_node); in nb8800_probe()
1449 if (!IS_ERR(mac)) in nb8800_probe()
1450 ether_addr_copy(dev->dev_addr, mac); in nb8800_probe()
1452 if (!is_valid_ether_addr(dev->dev_addr)) in nb8800_probe()
1465 netif_napi_add(dev, &priv->napi, nb8800_poll, NAPI_POLL_WEIGHT); in nb8800_probe()
1467 netdev_info(dev, "MAC address %pM\n", dev->dev_addr); in nb8800_probe()
1474 if (of_phy_is_fixed_link(pdev->dev.of_node)) in nb8800_probe()
1475 of_phy_deregister_fixed_link(pdev->dev.of_node); in nb8800_probe()
1477 of_node_put(priv->phy_node); in nb8800_probe()
1480 clk_disable_unprepare(priv->clk); in nb8800_probe()
1493 if (of_phy_is_fixed_link(pdev->dev.of_node)) in nb8800_remove()
1494 of_phy_deregister_fixed_link(pdev->dev.of_node); in nb8800_remove()
1495 of_node_put(priv->phy_node); in nb8800_remove()
1497 mdiobus_unregister(priv->mii_bus); in nb8800_remove()
1499 clk_disable_unprepare(priv->clk); in nb8800_remove()
1518 MODULE_DESCRIPTION("Aurora AU-NB8800 Ethernet driver");