Lines Matching full:rp
521 static void rhine_wait_bit(struct rhine_private *rp, u8 reg, u8 mask, bool low)
523 void __iomem *ioaddr = rp->base;
534 netif_dbg(rp, hw, rp->dev, "%s bit wait (%02x/%02x) cycle "
539 static void rhine_wait_bit_high(struct rhine_private *rp, u8 reg, u8 mask)
541 rhine_wait_bit(rp, reg, mask, false);
544 static void rhine_wait_bit_low(struct rhine_private *rp, u8 reg, u8 mask)
546 rhine_wait_bit(rp, reg, mask, true);
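
The rhine_wait_bit_high()/rhine_wait_bit_low() wrappers above suggest a bounded poll on a status bit, with a debug message reporting the cycle count. A minimal user-space sketch of that polling pattern, assuming a fixed cycle limit (the real limit is in the elided body) and with read_reg() standing in for the driver's MMIO read:

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	static uint8_t fake_reg;                 /* simulated hardware register */

	static uint8_t read_reg(void)            /* stand-in for ioread8() */
	{
		return fake_reg;
	}

	/* Poll until the masked bit reads high (low == false) or low (low == true);
	 * return the number of cycles spent, or -1 on timeout. */
	static int wait_bit(uint8_t mask, bool low, int max_cycles)
	{
		int i;

		for (i = 0; i < max_cycles; i++) {
			bool set = read_reg() & mask;

			if (set != low)          /* bit reached the wanted state */
				return i;
		}
		return -1;                       /* timed out */
	}

	int main(void)
	{
		fake_reg = 0x40;
		printf("wait high: %d cycles\n", wait_bit(0x40, false, 1024));
		printf("wait low:  %d cycles\n", wait_bit(0x40, true, 1024));
		return 0;
	}
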
549 static u32 rhine_get_events(struct rhine_private *rp)
551 void __iomem *ioaddr = rp->base;
556 if (rp->quirks & rqStatusWBRace)
561 static void rhine_ack_events(struct rhine_private *rp, u32 mask)
563 void __iomem *ioaddr = rp->base;
565 if (rp->quirks & rqStatusWBRace)
576 struct rhine_private *rp = netdev_priv(dev);
577 void __iomem *ioaddr = rp->base;
580 if (rp->quirks & rqWOL) {
590 if (rp->quirks & rq6patterns)
595 if (rp->quirks & rq6patterns)
600 if (rp->quirks & rq6patterns)
632 struct rhine_private *rp = netdev_priv(dev);
633 void __iomem *ioaddr = rp->base;
643 if (rp->quirks & rqForceReset)
647 rhine_wait_bit_low(rp, ChipCmd1, Cmd1Reset);
651 netif_info(rp, hw, dev, "Reset %s\n", (cmd1 & Cmd1Reset) ?
702 struct rhine_private *rp = netdev_priv(dev);
703 void __iomem *ioaddr = rp->base;
719 enable_mmio(pioaddr, rp->quirks);
722 if (rp->quirks & rqWOL)
730 struct rhine_private *rp = netdev_priv(dev);
731 const int irq = rp->irq;
739 static void rhine_kick_tx_threshold(struct rhine_private *rp)
741 if (rp->tx_thresh < 0xe0) {
742 void __iomem *ioaddr = rp->base;
744 rp->tx_thresh += 0x20;
745 BYTE_REG_BITS_SET(rp->tx_thresh, 0x80, ioaddr + TxConfig);
749 static void rhine_tx_err(struct rhine_private *rp, u32 status)
751 struct net_device *dev = rp->dev;
754 netif_info(rp, tx_err, dev,
759 rhine_kick_tx_threshold(rp);
760 netif_info(rp, tx_err, dev, "Transmitter underrun, "
761 "Tx threshold now %02x\n", rp->tx_thresh);
765 netif_info(rp, tx_err, dev, "Tx descriptor write-back race\n");
769 rhine_kick_tx_threshold(rp);
770 netif_info(rp, tx_err, dev, "Unspecified error. "
771 "Tx threshold now %02x\n", rp->tx_thresh);
777 static void rhine_update_rx_crc_and_missed_errord(struct rhine_private *rp)
779 void __iomem *ioaddr = rp->base;
780 struct net_device_stats *stats = &rp->dev->stats;
818 struct rhine_private *rp = container_of(napi, struct rhine_private, napi);
819 struct net_device *dev = rp->dev;
820 void __iomem *ioaddr = rp->base;
825 status = rhine_get_events(rp);
826 rhine_ack_events(rp, status & ~RHINE_EVENT_SLOW);
834 rhine_wait_bit_low(rp, ChipCmd, CmdTxOn);
836 netif_warn(rp, tx_err, dev, "Tx still on\n");
842 rhine_tx_err(rp, status);
846 spin_lock(&rp->lock);
847 rhine_update_rx_crc_and_missed_errord(rp);
848 spin_unlock(&rp->lock);
853 schedule_work(&rp->slow_event_task);
865 struct rhine_private *rp = netdev_priv(dev);
871 if (rp->quirks & rqRhineI)
900 struct rhine_private *rp;
919 rp = netdev_priv(dev);
920 rp->dev = dev;
921 rp->quirks = quirks;
922 rp->pioaddr = pioaddr;
923 rp->base = ioaddr;
924 rp->irq = irq;
925 rp->msg_enable = netif_msg_init(debug, RHINE_MSG_DEFAULT);
927 phy_id = rp->quirks & rqIntPHY ? 1 : 0;
929 u64_stats_init(&rp->tx_stats.syncp);
930 u64_stats_init(&rp->rx_stats.syncp);
952 spin_lock_init(&rp->lock);
953 mutex_init(&rp->task_lock);
954 INIT_WORK(&rp->reset_task, rhine_reset_task);
955 INIT_WORK(&rp->slow_event_task, rhine_slow_event_task);
957 rp->mii_if.dev = dev;
958 rp->mii_if.mdio_read = mdio_read;
959 rp->mii_if.mdio_write = mdio_write;
960 rp->mii_if.phy_id_mask = 0x1f;
961 rp->mii_if.reg_num_mask = 0x1f;
968 netif_napi_add(dev, &rp->napi, rhine_napipoll);
970 if (rp->quirks & rqRhineI)
973 if (rp->quirks & rqMgmt)
983 if (rp->quirks & rqRhineI)
985 else if (rp->quirks & rqStatusWBRace)
987 else if (rp->quirks & rqMgmt)
993 name, ioaddr, dev->dev_addr, rp->irq);
1003 rp->mii_if.advertising = mdio_read(dev, phy_id, 4);
1007 mii_status, rp->mii_if.advertising,
1018 rp->mii_if.phy_id = phy_id;
1020 netif_info(rp, probe, dev, "No D3 power state at shutdown\n");
1140 struct rhine_private *rp = netdev_priv(dev);
1154 if (rp->quirks & rqRhineI) {
1155 rp->tx_bufs = dma_alloc_coherent(hwdev,
1157 &rp->tx_bufs_dma,
1159 if (rp->tx_bufs == NULL) {
1168 rp->rx_ring = ring;
1169 rp->tx_ring = ring + RX_RING_SIZE * sizeof(struct rx_desc);
1170 rp->rx_ring_dma = ring_dma;
1171 rp->tx_ring_dma = ring_dma + RX_RING_SIZE * sizeof(struct rx_desc);
1178 struct rhine_private *rp = netdev_priv(dev);
1184 rp->rx_ring, rp->rx_ring_dma);
1185 rp->tx_ring = NULL;
1187 if (rp->tx_bufs)
1189 rp->tx_bufs, rp->tx_bufs_dma);
1191 rp->tx_bufs = NULL;
1203 struct rhine_private *rp = netdev_priv(dev);
1205 const int size = rp->rx_buf_sz;
1213 netif_err(rp, drv, dev, "Rx DMA mapping failure\n");
1221 static void rhine_reset_rbufs(struct rhine_private *rp)
1225 rp->cur_rx = 0;
1228 rp->rx_ring[i].rx_status = cpu_to_le32(DescOwn);
1231 static inline void rhine_skb_dma_nic_store(struct rhine_private *rp,
1234 rp->rx_skbuff_dma[entry] = sd->dma;
1235 rp->rx_skbuff[entry] = sd->skb;
1237 rp->rx_ring[entry].addr = cpu_to_le32(sd->dma);
1245 struct rhine_private *rp = netdev_priv(dev);
1249 rp->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
1250 next = rp->rx_ring_dma;
1254 rp->rx_ring[i].rx_status = 0;
1255 rp->rx_ring[i].desc_length = cpu_to_le32(rp->rx_buf_sz);
1257 rp->rx_ring[i].next_desc = cpu_to_le32(next);
1258 rp->rx_skbuff[i] = NULL;
1261 rp->rx_ring[i-1].next_desc = cpu_to_le32(rp->rx_ring_dma);
1273 rhine_skb_dma_nic_store(rp, &sd, i);
1276 rhine_reset_rbufs(rp);
1283 struct rhine_private *rp = netdev_priv(dev);
1289 rp->rx_ring[i].rx_status = 0;
1290 rp->rx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
1291 if (rp->rx_skbuff[i]) {
1293 rp->rx_skbuff_dma[i],
1294 rp->rx_buf_sz, DMA_FROM_DEVICE);
1295 dev_kfree_skb(rp->rx_skbuff[i]);
1297 rp->rx_skbuff[i] = NULL;
1303 struct rhine_private *rp = netdev_priv(dev);
1307 rp->dirty_tx = rp->cur_tx = 0;
1308 next = rp->tx_ring_dma;
1310 rp->tx_skbuff[i] = NULL;
1311 rp->tx_ring[i].tx_status = 0;
1312 rp->tx_ring[i].desc_length = cpu_to_le32(TXDESC);
1314 rp->tx_ring[i].next_desc = cpu_to_le32(next);
1315 if (rp->quirks & rqRhineI)
1316 rp->tx_buf[i] = &rp->tx_bufs[i * PKT_BUF_SZ];
1318 rp->tx_ring[i-1].next_desc = cpu_to_le32(rp->tx_ring_dma);
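
The Rx and Tx init loops chain each descriptor to the next through next_desc and then point the last entry back at the ring base, giving the NIC a circular list it can walk indefinitely. A self-contained sketch of that linkage, using plain array offsets in place of DMA bus addresses and an illustrative ring size:

	#include <stdint.h>
	#include <stdio.h>

	#define RING_SIZE 8

	struct desc {
		uint32_t status;
		uint32_t length;
		uint32_t addr;
		uint32_t next_desc;              /* bus address of the next descriptor */
	};

	int main(void)
	{
		struct desc ring[RING_SIZE];
		uint32_t ring_dma = 0x10000;     /* pretend base bus address */
		uint32_t next = ring_dma;
		int i;

		for (i = 0; i < RING_SIZE; i++) {
			ring[i].status = 0;
			ring[i].length = 0;
			ring[i].addr = 0xBADF00D0;   /* invalid until a buffer is attached */
			next += sizeof(struct desc);
			ring[i].next_desc = next;    /* points at the following slot */
		}
		/* close the loop: last descriptor points back at the first */
		ring[RING_SIZE - 1].next_desc = ring_dma;

		for (i = 0; i < RING_SIZE; i++)
			printf("desc %d -> %08x\n", i, (unsigned int)ring[i].next_desc);
		return 0;
	}
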
1325 struct rhine_private *rp = netdev_priv(dev);
1330 rp->tx_ring[i].tx_status = 0;
1331 rp->tx_ring[i].desc_length = cpu_to_le32(TXDESC);
1332 rp->tx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
1333 if (rp->tx_skbuff[i]) {
1334 if (rp->tx_skbuff_dma[i]) {
1336 rp->tx_skbuff_dma[i],
1337 rp->tx_skbuff[i]->len,
1340 dev_kfree_skb(rp->tx_skbuff[i]);
1342 rp->tx_skbuff[i] = NULL;
1343 rp->tx_buf[i] = NULL;
1349 struct rhine_private *rp = netdev_priv(dev);
1350 void __iomem *ioaddr = rp->base;
1352 if (!rp->mii_if.force_media)
1353 mii_check_media(&rp->mii_if, netif_msg_link(rp), init_media);
1355 if (rp->mii_if.full_duplex)
1362 netif_info(rp, link, dev, "force_media %d, carrier %d\n",
1363 rp->mii_if.force_media, netif_carrier_ok(dev));
1370 struct rhine_private *rp = netdev_priv(dev);
1380 netif_info(rp, link, dev, "force_media %d, carrier %d\n",
1490 struct rhine_private *rp = netdev_priv(dev);
1491 void __iomem *ioaddr = rp->base;
1510 struct rhine_private *rp = netdev_priv(dev);
1511 void __iomem *ioaddr = rp->base;
1516 for_each_set_bit(vid, rp->active_vlans, VLAN_N_VID) {
1527 struct rhine_private *rp = netdev_priv(dev);
1529 spin_lock_bh(&rp->lock);
1530 set_bit(vid, rp->active_vlans);
1532 spin_unlock_bh(&rp->lock);
1538 struct rhine_private *rp = netdev_priv(dev);
1540 spin_lock_bh(&rp->lock);
1541 clear_bit(vid, rp->active_vlans);
1543 spin_unlock_bh(&rp->lock);
1549 struct rhine_private *rp = netdev_priv(dev);
1550 void __iomem *ioaddr = rp->base;
1560 rp->tx_thresh = 0x20;
1561 rp->rx_thresh = 0x60; /* Written in rhine_set_rx_mode(). */
1563 iowrite32(rp->rx_ring_dma, ioaddr + RxRingPtr);
1564 iowrite32(rp->tx_ring_dma, ioaddr + TxRingPtr);
1568 if (rp->quirks & rqMgmt)
1571 napi_enable_locked(&rp->napi);
1581 static void rhine_enable_linkmon(struct rhine_private *rp)
1583 void __iomem *ioaddr = rp->base;
1589 rhine_wait_bit_high(rp, MIIRegAddr, 0x20);
1595 static void rhine_disable_linkmon(struct rhine_private *rp)
1597 void __iomem *ioaddr = rp->base;
1601 if (rp->quirks & rqRhineI) {
1610 rhine_wait_bit_high(rp, MIIRegAddr, 0x20);
1616 rhine_wait_bit_high(rp, MIIRegAddr, 0x80);
1623 struct rhine_private *rp = netdev_priv(dev);
1624 void __iomem *ioaddr = rp->base;
1627 rhine_disable_linkmon(rp);
1633 rhine_wait_bit_low(rp, MIICmd, 0x40);
1636 rhine_enable_linkmon(rp);
1642 struct rhine_private *rp = netdev_priv(dev);
1643 void __iomem *ioaddr = rp->base;
1645 rhine_disable_linkmon(rp);
1652 rhine_wait_bit_low(rp, MIICmd, 0x20);
1654 rhine_enable_linkmon(rp);
1657 static void rhine_task_disable(struct rhine_private *rp)
1659 mutex_lock(&rp->task_lock);
1660 rp->task_enable = false;
1661 mutex_unlock(&rp->task_lock);
1663 cancel_work_sync(&rp->slow_event_task);
1664 cancel_work_sync(&rp->reset_task);
1667 static void rhine_task_enable(struct rhine_private *rp)
1669 mutex_lock(&rp->task_lock);
1670 rp->task_enable = true;
1671 mutex_unlock(&rp->task_lock);
1676 struct rhine_private *rp = netdev_priv(dev);
1677 void __iomem *ioaddr = rp->base;
1680 rc = request_irq(rp->irq, rhine_interrupt, IRQF_SHARED, dev->name, dev);
1684 netif_dbg(rp, ifup, dev, "%s() irq %d\n", __func__, rp->irq);
1695 enable_mmio(rp->pioaddr, rp->quirks);
1698 rhine_task_enable(rp);
1704 netif_dbg(rp, ifup, dev, "%s() Done - status %04x MII status: %04x\n",
1706 mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));
1716 free_irq(rp->irq, dev);
1722 struct rhine_private *rp = container_of(work, struct rhine_private,
1724 struct net_device *dev = rp->dev;
1726 mutex_lock(&rp->task_lock);
1728 if (!rp->task_enable)
1731 napi_disable(&rp->napi);
1735 spin_lock_bh(&rp->lock);
1741 rhine_reset_rbufs(rp);
1747 spin_unlock_bh(&rp->lock);
1755 mutex_unlock(&rp->task_lock);
1760 struct rhine_private *rp = netdev_priv(dev);
1761 void __iomem *ioaddr = rp->base;
1765 mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));
1767 schedule_work(&rp->reset_task);
1770 static inline bool rhine_tx_queue_full(struct rhine_private *rp)
1772 return (rp->cur_tx - rp->dirty_tx) >= TX_QUEUE_LEN;
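
rhine_tx_queue_full() compares two free-running counters: cur_tx only grows when a frame is queued, dirty_tx only when one is reclaimed, so their difference is the in-flight count and the ring slot is the counter modulo the ring size. A sketch of that producer/consumer bookkeeping; the QUEUE_LEN and RING_SIZE values here are illustrative, not the driver's constants:

	#include <stdbool.h>
	#include <stdio.h>

	#define RING_SIZE 64
	#define QUEUE_LEN 60                     /* stop a little short of full */

	struct ring {
		unsigned int cur_tx;             /* producer: frames queued */
		unsigned int dirty_tx;           /* consumer: frames reclaimed */
	};

	static bool queue_full(const struct ring *r)
	{
		/* unsigned subtraction stays correct even after the counters wrap */
		return (r->cur_tx - r->dirty_tx) >= QUEUE_LEN;
	}

	int main(void)
	{
		struct ring r = { 0, 0 };

		while (!queue_full(&r)) {
			unsigned int entry = r.cur_tx % RING_SIZE;

			printf("queue frame %u in slot %u\n", r.cur_tx, entry);
			r.cur_tx++;
		}
		/* reclaim a couple of frames and there is room again */
		r.dirty_tx += 2;
		printf("full after reclaim? %s\n", queue_full(&r) ? "yes" : "no");
		return 0;
	}
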
1778 struct rhine_private *rp = netdev_priv(dev);
1780 void __iomem *ioaddr = rp->base;
1787 entry = rp->cur_tx % TX_RING_SIZE;
1792 rp->tx_skbuff[entry] = skb;
1794 if ((rp->quirks & rqRhineI) &&
1800 rp->tx_skbuff[entry] = NULL;
1806 skb_copy_and_csum_dev(skb, rp->tx_buf[entry]);
1808 memset(rp->tx_buf[entry] + skb->len, 0,
1810 rp->tx_skbuff_dma[entry] = 0;
1811 rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_bufs_dma +
1812 (rp->tx_buf[entry] -
1813 rp->tx_bufs));
1815 rp->tx_skbuff_dma[entry] =
1818 if (dma_mapping_error(hwdev, rp->tx_skbuff_dma[entry])) {
1820 rp->tx_skbuff_dma[entry] = 0;
1824 rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_skbuff_dma[entry]);
1827 rp->tx_ring[entry].desc_length =
1836 rp->tx_ring[entry].tx_status = cpu_to_le32((vid_pcp) << 16);
1838 rp->tx_ring[entry].desc_length |= cpu_to_le32(0x020000);
1841 rp->tx_ring[entry].tx_status = 0;
1846 rp->tx_ring[entry].tx_status |= cpu_to_le32(DescOwn);
1849 rp->cur_tx++;
1869 if (rhine_tx_queue_full(rp)) {
1873 if (!rhine_tx_queue_full(rp))
1877 netif_dbg(rp, tx_queued, dev, "Transmit frame #%d queued in slot %d\n",
1878 rp->cur_tx - 1, entry);
1883 static void rhine_irq_disable(struct rhine_private *rp)
1885 iowrite16(0x0000, rp->base + IntrEnable);
1893 struct rhine_private *rp = netdev_priv(dev);
1897 status = rhine_get_events(rp);
1899 netif_dbg(rp, intr, dev, "Interrupt, status %08x\n", status);
1904 rhine_irq_disable(rp);
1905 napi_schedule(&rp->napi);
1909 netif_err(rp, intr, dev, "Something Wicked happened! %08x\n",
1920 struct rhine_private *rp = netdev_priv(dev);
1923 unsigned int dirty_tx = rp->dirty_tx;
1934 cur_tx = rp->cur_tx;
1938 u32 txstatus = le32_to_cpu(rp->tx_ring[entry].tx_status);
1940 netif_dbg(rp, tx_done, dev, "Tx scavenge %d status %08x\n",
1944 skb = rp->tx_skbuff[entry];
1946 netif_dbg(rp, tx_done, dev,
1957 if (((rp->quirks & rqRhineI) && txstatus & 0x0002) ||
1960 rp->tx_ring[entry].tx_status = cpu_to_le32(DescOwn);
1965 if (rp->quirks & rqRhineI)
1969 netif_dbg(rp, tx_done, dev, "collisions: %1.1x:%1.1x\n",
1972 u64_stats_update_begin(&rp->tx_stats.syncp);
1973 rp->tx_stats.bytes += skb->len;
1974 rp->tx_stats.packets++;
1975 u64_stats_update_end(&rp->tx_stats.syncp);
1978 if (rp->tx_skbuff_dma[entry]) {
1980 rp->tx_skbuff_dma[entry],
1987 rp->tx_skbuff[entry] = NULL;
1991 rp->dirty_tx = dirty_tx;
1998 if (!rhine_tx_queue_full(rp) && netif_queue_stopped(dev)) {
2002 if (rhine_tx_queue_full(rp))
2037 struct rhine_private *rp = netdev_priv(dev);
2039 int entry = rp->cur_rx % RX_RING_SIZE;
2042 netif_dbg(rp, rx_status, dev, "%s(), entry %d status %08x\n", __func__,
2043 entry, le32_to_cpu(rp->rx_ring[entry].rx_status));
2047 struct rx_desc *desc = rp->rx_ring + entry;
2054 netif_dbg(rp, rx_status, dev, "%s() status %08x\n", __func__,
2067 netif_dbg(rp, rx_err, dev,
2079 spin_lock(&rp->lock);
2081 spin_unlock(&rp->lock);
2097 rp->rx_skbuff_dma[entry],
2098 rp->rx_buf_sz,
2102 rp->rx_skbuff[entry]->data,
2106 rp->rx_skbuff_dma[entry],
2107 rp->rx_buf_sz,
2115 skb = rp->rx_skbuff[entry];
2118 rp->rx_skbuff_dma[entry],
2119 rp->rx_buf_sz,
2121 rhine_skb_dma_nic_store(rp, &sd, entry);
2132 u64_stats_update_begin(&rp->rx_stats.syncp);
2133 rp->rx_stats.bytes += pkt_len;
2134 rp->rx_stats.packets++;
2135 u64_stats_update_end(&rp->rx_stats.syncp);
2139 entry = (++rp->cur_rx) % RX_RING_SIZE;
2150 struct rhine_private *rp = netdev_priv(dev);
2151 void __iomem *ioaddr = rp->base;
2152 int entry = rp->dirty_tx % TX_RING_SIZE;
2159 intr_status = rhine_get_events(rp);
2164 iowrite32(rp->tx_ring_dma + entry * sizeof(struct tx_desc),
2170 if (rp->tx_ring[entry].desc_length & cpu_to_le32(0x020000))
2180 netif_warn(rp, tx_err, dev, "another error occurred %08x\n",
2188 struct rhine_private *rp =
2190 struct net_device *dev = rp->dev;
2193 mutex_lock(&rp->task_lock);
2195 if (!rp->task_enable)
2198 intr_status = rhine_get_events(rp);
2199 rhine_ack_events(rp, intr_status & RHINE_EVENT_SLOW);
2205 netif_warn(rp, hw, dev, "PCI error\n");
2207 iowrite16(RHINE_EVENT & 0xffff, rp->base + IntrEnable);
2210 mutex_unlock(&rp->task_lock);
2216 struct rhine_private *rp = netdev_priv(dev);
2219 spin_lock_bh(&rp->lock);
2220 rhine_update_rx_crc_and_missed_errord(rp);
2221 spin_unlock_bh(&rp->lock);
2226 start = u64_stats_fetch_begin(&rp->rx_stats.syncp);
2227 stats->rx_packets = rp->rx_stats.packets;
2228 stats->rx_bytes = rp->rx_stats.bytes;
2229 } while (u64_stats_fetch_retry(&rp->rx_stats.syncp, start));
2232 start = u64_stats_fetch_begin(&rp->tx_stats.syncp);
2233 stats->tx_packets = rp->tx_stats.packets;
2234 stats->tx_bytes = rp->tx_stats.bytes;
2235 } while (u64_stats_fetch_retry(&rp->tx_stats.syncp, start));
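
The two do/while pairs above take a consistent snapshot of the 64-bit Rx and Tx counters. A plain-C sketch of the sequence-counter retry idea behind the u64_stats_fetch_begin()/_retry() pairs; this is an illustration of the pattern, not the kernel's implementation:

	#include <stdint.h>
	#include <stdio.h>

	struct stats {
		unsigned int seq;                /* even = stable, odd = write in progress */
		uint64_t packets;
		uint64_t bytes;
	};

	static void writer_update(struct stats *s, uint64_t len)
	{
		s->seq++;                        /* mark update in progress */
		s->packets++;
		s->bytes += len;
		s->seq++;                        /* mark update complete */
	}

	static void reader_snapshot(const struct stats *s,
				    uint64_t *packets, uint64_t *bytes)
	{
		unsigned int start;

		do {
			start = s->seq;          /* fetch_begin */
			*packets = s->packets;
			*bytes = s->bytes;
		} while (start != s->seq || (start & 1));   /* fetch_retry */
	}

	int main(void)
	{
		struct stats s = { 0, 0, 0 };
		uint64_t p, b;

		writer_update(&s, 1514);
		writer_update(&s, 60);
		reader_snapshot(&s, &p, &b);
		printf("%llu packets, %llu bytes\n",
		       (unsigned long long)p, (unsigned long long)b);
		return 0;
	}
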
2240 struct rhine_private *rp = netdev_priv(dev);
2241 void __iomem *ioaddr = rp->base;
2255 } else if (rp->quirks & rqMgmt) {
2277 if (rp->quirks & rqMgmt) {
2297 struct rhine_private *rp = netdev_priv(dev);
2299 mutex_lock(&rp->task_lock);
2300 mii_ethtool_get_link_ksettings(&rp->mii_if, cmd);
2301 mutex_unlock(&rp->task_lock);
2309 struct rhine_private *rp = netdev_priv(dev);
2312 mutex_lock(&rp->task_lock);
2313 rc = mii_ethtool_set_link_ksettings(&rp->mii_if, cmd);
2314 rhine_set_carrier(&rp->mii_if);
2315 mutex_unlock(&rp->task_lock);
2322 struct rhine_private *rp = netdev_priv(dev);
2324 return mii_nway_restart(&rp->mii_if);
2329 struct rhine_private *rp = netdev_priv(dev);
2331 return mii_link_ok(&rp->mii_if);
2336 struct rhine_private *rp = netdev_priv(dev);
2338 return rp->msg_enable;
2343 struct rhine_private *rp = netdev_priv(dev);
2345 rp->msg_enable = value;
2350 struct rhine_private *rp = netdev_priv(dev);
2352 if (!(rp->quirks & rqWOL))
2355 spin_lock_irq(&rp->lock);
2358 wol->wolopts = rp->wolopts;
2359 spin_unlock_irq(&rp->lock);
2364 struct rhine_private *rp = netdev_priv(dev);
2368 if (!(rp->quirks & rqWOL))
2374 spin_lock_irq(&rp->lock);
2375 rp->wolopts = wol->wolopts;
2376 spin_unlock_irq(&rp->lock);
2395 struct rhine_private *rp = netdev_priv(dev);
2401 mutex_lock(&rp->task_lock);
2402 rc = generic_mii_ioctl(&rp->mii_if, if_mii(rq), cmd, NULL);
2403 rhine_set_carrier(&rp->mii_if);
2404 mutex_unlock(&rp->task_lock);
2411 struct rhine_private *rp = netdev_priv(dev);
2412 void __iomem *ioaddr = rp->base;
2414 rhine_task_disable(rp);
2415 napi_disable(&rp->napi);
2418 netif_dbg(rp, ifdown, dev, "Shutting down ethercard, status was %04x\n",
2422 iowrite8(rp->tx_thresh | 0x02, ioaddr + TxConfig);
2424 rhine_irq_disable(rp);
2429 free_irq(rp->irq, dev);
2441 struct rhine_private *rp = netdev_priv(dev);
2445 pci_iounmap(pdev, rp->base);
2455 struct rhine_private *rp = netdev_priv(dev);
2459 iounmap(rp->base);
2467 struct rhine_private *rp = netdev_priv(dev);
2468 void __iomem *ioaddr = rp->base;
2470 if (!(rp->quirks & rqWOL))
2476 if (rp->quirks & rq6patterns)
2479 spin_lock(&rp->lock);
2481 if (rp->wolopts & WAKE_MAGIC) {
2490 if (rp->wolopts & (WAKE_BCAST|WAKE_MCAST))
2493 if (rp->wolopts & WAKE_PHY)
2496 if (rp->wolopts & WAKE_UCAST)
2499 if (rp->wolopts) {
2505 spin_unlock(&rp->lock);
2519 struct rhine_private *rp = netdev_priv(dev);
2524 rhine_task_disable(rp);
2525 rhine_irq_disable(rp);
2526 napi_disable(&rp->napi);
2539 struct rhine_private *rp = netdev_priv(dev);
2544 enable_mmio(rp->pioaddr, rp->quirks);
2548 rhine_reset_rbufs(rp);
2549 rhine_task_enable(rp);
2552 spin_lock_bh(&rp->lock);
2554 spin_unlock_bh(&rp->lock);