Lines Matching +full:reg +full:- +full:names

3 	Written/copyright 1999-2001 by Donald Becker.
23 [link no longer provides useful info -jgarzik]
62 /* Updated to recommendations in pci-skeleton v2.03. */
64 /* The user-configurable values.
72 static int debug = -1;
76 /* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
80 /* Set the copy breakpoint for the copy-only-tiny-frames scheme.
101 There are no ill effects from too-large receive rings. */
121 * The nic writes 32-bit values, even if the upper bytes of
122 * a 32-bit value are beyond the end of the buffer.
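
Because the chip always completes full 32-bit stores, the receive allocation leaves a few spare bytes past the nominal buffer length (the driver's NATSEMI_PADDING). A minimal host-side sketch of that sizing rule; the pad constant and helper name are assumptions for illustration, not the driver's:

#include <stdint.h>
#include <stdlib.h>

/* Sketch only: the last 32-bit DMA store may spill up to 3 bytes past the
 * logical end of the frame, so allocate a little slack beyond rx_buf_sz. */
#define RX_PAD_SKETCH 4

uint8_t *alloc_rx_buf_sketch(size_t rx_buf_sz)
{
        return malloc(rx_buf_sz + RX_PAD_SKETCH);
}
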
149 "DP8381x copy breakpoint for copy-only-tiny-frames");
152 "DP8381x: Bits 0-3: media type, bit 17: full duplex");
163 II. Board-specific settings
166 It honors the EEPROM-set values.
172 This driver uses two statically allocated fixed-size descriptor lists
180 This driver uses a zero-copy receive and transmit scheme.
182 open() time and passes the skb->data field to the chip as receive data
189 The RX_COPYBREAK value is chosen to trade off the memory wasted by
190 using a full-sized skbuff for small frames vs. the copying costs of larger
201 skbuff at an offset of "+2", 16-byte aligning the IP header.
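
The copy-break scheme described above can be sketched in plain C, with malloc/memcpy standing in for skb allocation and DMA synchronization; the threshold, struct, and function names below are illustrative assumptions, not the driver's identifiers:

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#define COPYBREAK_SKETCH 256    /* assumed threshold; tunable in the driver */
#define IP_ALIGN_SKETCH  2      /* the "+2" that 16-byte aligns the IP header */

struct rx_result_sketch {
        uint8_t *buf;            /* buffer handed up the stack */
        size_t   data_off;       /* payload starts here (0 or 2) */
        int      reused_dma_buf; /* 1: the full-sized buffer stays in the ring */
};

struct rx_result_sketch rx_copybreak_sketch(uint8_t *dma_buf, size_t pkt_len)
{
        struct rx_result_sketch r = { dma_buf, 0, 1 };

        if (pkt_len < COPYBREAK_SKETCH) {
                uint8_t *small = malloc(pkt_len + IP_ALIGN_SKETCH);
                if (small) {
                        /* mirrors reserving 2 bytes, then copying the frame */
                        memcpy(small + IP_ALIGN_SKETCH, dma_buf, pkt_len);
                        r.buf = small;
                        r.data_off = IP_ALIGN_SKETCH;
                        return r;        /* big DMA buffer stays mapped in the ring */
                }
        }
        r.reused_dma_buf = 0;            /* full-sized buffer goes up; slot refilled later */
        return r;
}

Small frames cost one copy but keep the full-sized DMA buffer in place; large frames are passed up unchanged and the ring slot is refilled afterwards.
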
205 Most operations are synchronized on the np->lock irq spinlock, except the
232 MII_FX_SEL = 0x0001, /* 100BASE-FX (fiber) */
258 Unlike software-only systems, device drivers interact with complex hardware.
259 It's not useful to define symbolic names for every register bit in the
413 * - 256 byte DMA burst length
414 * - fill threshold 512 bytes (i.e. restart DMA when 512 bytes are free)
415 * - 64 bytes initial drain threshold (i.e. begin actual transmission
417 * - on tx underruns, increase drain threshold by 64.
418 * - at most use a drain threshold of 1472 bytes: The sum of the fill
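
The underrun policy listed above (begin draining at 64 bytes, add 64 bytes per Tx underrun, cap near 1472) is applied later in netdev_error(); a standalone sketch of that adjustment, with the field encoding and constants assumed for illustration rather than taken from the driver's TxDrthMask/TX_DRTH_VAL_* definitions:

#include <stdint.h>

#define DRTH_MASK_SKETCH   0x3f         /* assumed: drain threshold field, 32-byte units */
#define DRTH_INC_SKETCH    2            /* +64 bytes == +2 units */
#define DRTH_LIMIT_SKETCH  (1472 / 32)  /* cap at 1472 bytes */

uint32_t bump_tx_drain_threshold_sketch(uint32_t tx_config)
{
        if ((tx_config & DRTH_MASK_SKETCH) < DRTH_LIMIT_SKETCH)
                tx_config += DRTH_INC_SKETCH;   /* buffer more data before (re)starting transmission */
        return tx_config;                       /* caller writes this back to TxConfig */
}
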
511 /* Note that using only 32 bit fields simplifies conversion to big-endian
543 /* The addresses of receive-in-place skbuffs */
546 /* address of a sent-in-place packet/buffer, for later free() */
568 /* external phy that is used: only valid if dev->if_port != PORT_TP */
599 static int mdio_read(struct net_device *dev, int reg);
600 static void mdio_write(struct net_device *dev, int reg, u16 data);
602 static int miiport_read(struct net_device *dev, int phy_id, int reg);
603 static void miiport_write(struct net_device *dev, int phy_id, int reg, u16 data);
659 device_create_file(&_dev->dev, &dev_attr_##_name)
661 device_remove_file(&_dev->dev, &dev_attr_##_name)
671 return sprintf(buf, "%s\n", np->dspcfg_workaround ? "on" : "off"); in natsemi_show_dspcfg_workaround()
683 if (!strncmp("on", buf, count - 1) || !strncmp("1", buf, count - 1)) in natsemi_set_dspcfg_workaround()
685 else if (!strncmp("off", buf, count - 1) || in natsemi_set_dspcfg_workaround()
686 !strncmp("0", buf, count - 1)) in natsemi_set_dspcfg_workaround()
691 spin_lock_irqsave(&np->lock, flags); in natsemi_set_dspcfg_workaround()
693 np->dspcfg_workaround = new_setting; in natsemi_set_dspcfg_workaround()
695 spin_unlock_irqrestore(&np->lock, flags); in natsemi_set_dspcfg_workaround()
702 return (void __iomem *) dev->base_addr; in ns_ioaddr()
727 * - the address on the external phy that is used for transmission. in move_int_phy()
728 * - the address that we want to access. User space can access phys in move_int_phy()
734 target--; in move_int_phy()
735 if (target == np->phy_addr_external) in move_int_phy()
736 target--; in move_int_phy()
747 if (np->ignore_phy) in natsemi_init_media()
754 np->speed = (tmp & BMCR_SPEED100)? SPEED_100 : SPEED_10; in natsemi_init_media()
755 np->duplex = (tmp & BMCR_FULLDPLX)? DUPLEX_FULL : DUPLEX_HALF; in natsemi_init_media()
756 np->autoneg = (tmp & BMCR_ANENABLE)? AUTONEG_ENABLE: AUTONEG_DISABLE; in natsemi_init_media()
757 np->advertising = mdio_read(dev, MII_ADVERTISE); in natsemi_init_media()
759 if ((np->advertising & ADVERTISE_ALL) != ADVERTISE_ALL && in natsemi_init_media()
763 pci_name(np->pci_dev), in natsemi_init_media()
766 (np->advertising & in natsemi_init_media()
769 (np->advertising & in natsemi_init_media()
776 pci_name(np->pci_dev), mdio_read(dev, MII_BMSR), in natsemi_init_media()
777 np->advertising); in natsemi_init_media()
802 int i, option, irq, chip_idx = ent->driver_data; in natsemi_probe1()
803 static int find_cnt = -1; in natsemi_probe1()
821 /* natsemi has a non-standard PM control register in natsemi_probe1()
835 irq = pdev->irq; in natsemi_probe1()
841 return -ENOMEM; in natsemi_probe1()
842 SET_NETDEV_DEV(dev, &pdev->dev); in natsemi_probe1()
850 i = -ENOMEM; in natsemi_probe1()
858 dev->dev_addr[i*2] = (eedata << 1) + (prev_eedata >> 15); in natsemi_probe1()
859 dev->dev_addr[i*2+1] = eedata >> 7; in natsemi_probe1()
864 memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN); in natsemi_probe1()
866 dev->base_addr = (unsigned long __force) ioaddr; in natsemi_probe1()
867 dev->irq = irq; in natsemi_probe1()
870 netif_napi_add(dev, &np->napi, natsemi_poll, 64); in natsemi_probe1()
871 np->dev = dev; in natsemi_probe1()
873 np->pci_dev = pdev; in natsemi_probe1()
875 np->iosize = iosize; in natsemi_probe1()
876 spin_lock_init(&np->lock); in natsemi_probe1()
877 np->msg_enable = (debug >= 0) ? (1<<debug)-1 : NATSEMI_DEF_MSG; in natsemi_probe1()
878 np->hands_off = 0; in natsemi_probe1()
879 np->intr_status = 0; in natsemi_probe1()
880 np->eeprom_size = natsemi_pci_info[chip_idx].eeprom_size; in natsemi_probe1()
882 np->ignore_phy = 1; in natsemi_probe1()
884 np->ignore_phy = 0; in natsemi_probe1()
885 np->dspcfg_workaround = dspcfg_workaround; in natsemi_probe1()
888 * - If configured to ignore the PHY set up for external. in natsemi_probe1()
889 * - If the nic was configured to use an external phy and if find_mii in natsemi_probe1()
891 * - Otherwise: internal port. in natsemi_probe1()
896 if (np->ignore_phy || readl(ioaddr + ChipConfig) & CfgExtPhy) in natsemi_probe1()
897 dev->if_port = PORT_MII; in natsemi_probe1()
899 dev->if_port = PORT_TP; in natsemi_probe1()
904 if (dev->if_port != PORT_TP) { in natsemi_probe1()
905 np->phy_addr_external = find_mii(dev); in natsemi_probe1()
908 if (!np->ignore_phy && np->phy_addr_external == PHY_ADDR_NONE) { in natsemi_probe1()
909 dev->if_port = PORT_TP; in natsemi_probe1()
910 np->phy_addr_external = PHY_ADDR_INTERNAL; in natsemi_probe1()
913 np->phy_addr_external = PHY_ADDR_INTERNAL; in natsemi_probe1()
917 if (dev->mem_start) in natsemi_probe1()
918 option = dev->mem_start; in natsemi_probe1()
923 np->full_duplex = 1; in natsemi_probe1()
927 pci_name(np->pci_dev), option & 15); in natsemi_probe1()
930 np->full_duplex = 1; in natsemi_probe1()
932 dev->netdev_ops = &natsemi_netdev_ops; in natsemi_probe1()
933 dev->watchdog_timeo = TX_TIMEOUT; in natsemi_probe1()
938 dev->mtu = mtu; in natsemi_probe1()
943 np->srr = readl(ioaddr + SiliconRev); in natsemi_probe1()
946 pci_name(np->pci_dev), np->srr); in natsemi_probe1()
958 dev->name, natsemi_pci_info[chip_idx].name, in natsemi_probe1()
959 (unsigned long long)iostart, pci_name(np->pci_dev), in natsemi_probe1()
960 dev->dev_addr, irq); in natsemi_probe1()
961 if (dev->if_port == PORT_TP) in natsemi_probe1()
963 else if (np->ignore_phy) in natsemi_probe1()
966 printk(", port MII, phy ad %d.\n", np->phy_addr_external); in natsemi_probe1()
991 a delay. Note that pre-2.0.34 kernels had a cache-alignment bug that
1001 /* The EEPROM commands include the always-set leading bit. */
1016 for (i = 10; i >= 0; i--) { in eeprom_read()
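
A host-side sketch of how such a command might be built and clocked out MSB-first, mirroring the 11-bit loop above; the opcode value and 6-bit word-address width follow the usual small serial-EEPROM convention and are assumptions, not quoted from this driver:

#include <stdint.h>

/* Sketch: the always-set start bit and the read opcode sit just above the
 * word address ("110" then 6 address bits); any higher bits shift out as
 * zeros, as in the "for (i = 10; i >= 0; i--)" loop above. */
uint16_t eeprom_read_cmd_sketch(uint8_t word_addr)
{
        return (uint16_t)((0x6u << 6) | (word_addr & 0x3f));
}

void shift_out_cmd_sketch(uint16_t cmd, void (*clock_bit)(int bit))
{
        for (int i = 10; i >= 0; i--)   /* 11 bits, MSB first */
                clock_bit((cmd >> i) & 1);
}
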
1068 for (i = (1 << (len-1)); i; i >>= 1) in mii_send_bits()
1080 static int miiport_read(struct net_device *dev, int phy_id, int reg) in miiport_read() argument
1088 /* ST(2), OP(2), ADDR(5), REG#(5), TA(2), Data(16) total 32 bits */ in miiport_read()
1090 cmd = (0x06 << 10) | (phy_id << 5) | reg; in miiport_read()
1105 static void miiport_write(struct net_device *dev, int phy_id, int reg, u16 data) in miiport_write() argument
1111 /* ST(2), OP(2), ADDR(5), REG#(5), TA(2), Data(16) total 32 bits */ in miiport_write()
1113 cmd = (0x5002 << 16) | (phy_id << 23) | (reg << 18) | data; in miiport_write()
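
Both command words above follow the standard clause-22 MII management frame: ST=01, OP=10 (read) or 01 (write), a 5-bit PHY address, a 5-bit register number, then the turnaround bits. The read path packs the first four fields into 14 bits before the bus is turned around; the write path builds the full 32-bit word. A small self-contained check that rebuilds the write word from named fields and compares it with the literal (0x5002 << 16) packing above:

#include <assert.h>
#include <stdint.h>

/* Clause-22 MII management write frame, field by field. */
uint32_t mii_write_frame(uint32_t phy_id, uint32_t reg, uint16_t data)
{
        return (0x1u << 30)               /* ST = 01 (start) */
             | (0x1u << 28)               /* OP = 01 (write) */
             | ((phy_id & 0x1f) << 23)    /* 5-bit PHY address */
             | ((reg    & 0x1f) << 18)    /* 5-bit register number */
             | (0x2u << 16)               /* TA = 10 (turnaround) */
             | data;                      /* 16 data bits */
}

int main(void)
{
        /* sanity check against the packed constant used in miiport_write() */
        assert(mii_write_frame(3, 5, 0xabcd) ==
               ((0x5002u << 16) | (3u << 23) | (5u << 18) | 0xabcd));
        return 0;
}
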
1119 static int mdio_read(struct net_device *dev, int reg) in mdio_read() argument
1125 * - an internal transceiver in mdio_read()
1126 * - an external mii bus in mdio_read()
1128 if (dev->if_port == PORT_TP) in mdio_read()
1129 return readw(ioaddr+BasicControl+(reg<<2)); in mdio_read()
1131 return miiport_read(dev, np->phy_addr_external, reg); in mdio_read()
1134 static void mdio_write(struct net_device *dev, int reg, u16 data) in mdio_write() argument
1140 if (dev->if_port == PORT_TP) in mdio_write()
1141 writew(data, ioaddr+BasicControl+(reg<<2)); in mdio_write()
1143 miiport_write(dev, np->phy_addr_external, reg, data); in mdio_write()
1156 if (np->autoneg == AUTONEG_ENABLE) { in init_phy_fixup()
1159 np->advertising != mdio_read(dev, MII_ADVERTISE)) in init_phy_fixup()
1163 mdio_write(dev, MII_ADVERTISE, np->advertising); in init_phy_fixup()
1168 if (np->speed == SPEED_100) in init_phy_fixup()
1170 if (np->duplex == DUPLEX_FULL) in init_phy_fixup()
1185 np->mii = (mdio_read(dev, MII_PHYSID1) << 16) in init_phy_fixup()
1189 switch (np->mii) { in init_phy_fixup()
1194 if (dev->if_port == PORT_FIBRE) in init_phy_fixup()
1224 np->dspcfg = (np->srr <= SRR_DP83815_C)? in init_phy_fixup()
1226 writew(np->dspcfg, ioaddr + DSPCFG); in init_phy_fixup()
1235 if (np->dspcfg == dspcfg) in init_phy_fixup()
1243 dev->name, i*10); in init_phy_fixup()
1247 dev->name, i*10); in init_phy_fixup()
1252 * and Auto-Negotiation Completion are among the affected. in init_phy_fixup()
1271 dev->name); in switch_port_external()
1286 move_int_phy(dev, np->phy_addr_external); in switch_port_external()
1306 dev->name); in switch_port_internal()
1328 dev->name, i*10); in switch_port_internal()
1338 * - Do not scan while the internal phy is enabled. The internal phy will
1341 * - The internal phy must be moved around, an external phy could
1365 np->mii = (mdio_read(dev, MII_PHYSID1) << 16) in find_mii()
1369 pci_name(np->pci_dev), np->mii, i); in find_mii()
1402 * on a normal power-up (see the spec EEPROM map). This assumes in natsemi_reset()
1432 dev->name, i*5); in natsemi_reset()
1435 dev->name, i*5); in natsemi_reset()
1441 if (dev->if_port == PORT_TP) in natsemi_reset()
1470 np->intr_status &= ~RxResetDone; in reset_rx()
1475 np->intr_status |= readl(ioaddr + IntrStatus); in reset_rx()
1476 if (np->intr_status & RxResetDone) in reset_rx()
1482 dev->name, i*15); in reset_rx()
1485 dev->name, i*15); in reset_rx()
1503 pci_name(np->pci_dev), i*50); in natsemi_reload_eeprom()
1506 pci_name(np->pci_dev), i*50); in natsemi_reload_eeprom()
1524 dev->name, i*5); in natsemi_stop_rxtx()
1527 dev->name, i*5); in natsemi_stop_rxtx()
1540 i = request_irq(dev->irq, intr_handler, IRQF_SHARED, dev->name, dev); in netdev_open()
1545 dev->name, dev->irq); in netdev_open()
1548 free_irq(dev->irq, dev); in netdev_open()
1551 napi_enable(&np->napi); in netdev_open()
1554 spin_lock_irq(&np->lock); in netdev_open()
1556 /* now set the MAC address according to dev->dev_addr */ in netdev_open()
1558 u16 mac = (dev->dev_addr[2*i+1]<<8) + dev->dev_addr[2*i]; in netdev_open()
1563 writel(np->cur_rx_mode, ioaddr + RxFilterAddr); in netdev_open()
1564 spin_unlock_irq(&np->lock); in netdev_open()
1570 dev->name, (int)readl(ioaddr + ChipCmd)); in netdev_open()
1573 init_timer(&np->timer); in netdev_open()
1574 np->timer.expires = round_jiffies(jiffies + NATSEMI_TIMER_FREQ); in netdev_open()
1575 np->timer.data = (unsigned long)dev; in netdev_open()
1576 np->timer.function = netdev_timer; /* timer handler */ in netdev_open()
1577 add_timer(&np->timer); in netdev_open()
1587 if (dev->if_port != PORT_TP) in do_cable_magic()
1590 if (np->srr >= SRR_DP83816_A5) in do_cable_magic()
1615 /* the bug has been triggered - fix the coefficient */ in do_cable_magic()
1619 np->dspcfg = data | DSPCFG_LOCK; in do_cable_magic()
1620 writew(np->dspcfg, ioaddr + DSPCFG); in do_cable_magic()
1632 if (dev->if_port != PORT_TP) in undo_cable_magic()
1635 if (np->srr >= SRR_DP83816_A5) in undo_cable_magic()
1641 np->dspcfg = data & ~DSPCFG_LOCK; in undo_cable_magic()
1642 writew(np->dspcfg, ioaddr + DSPCFG); in undo_cable_magic()
1650 int duplex = np->duplex; in check_link()
1654 if (np->ignore_phy) in check_link()
1668 dev->name); in check_link()
1676 printk(KERN_NOTICE "%s: link up.\n", dev->name); in check_link()
1681 duplex = np->full_duplex; in check_link()
1685 np->advertising & mdio_read(dev, MII_LPA)); in check_link()
1694 if (duplex ^ !!(np->rx_config & RxAcceptTx)) { in check_link()
1697 "%s: Setting %s-duplex based on negotiated " in check_link()
1698 "link capability.\n", dev->name, in check_link()
1701 np->rx_config |= RxAcceptTx; in check_link()
1702 np->tx_config |= TxCarrierIgn | TxHeartIgn; in check_link()
1704 np->rx_config &= ~RxAcceptTx; in check_link()
1705 np->tx_config &= ~(TxCarrierIgn | TxHeartIgn); in check_link()
1707 writel(np->tx_config, ioaddr + TxConfig); in check_link()
1708 writel(np->rx_config, ioaddr + RxConfig); in check_link()
1722 writel(np->ring_dma, ioaddr + RxRingPtr); in init_registers()
1723 writel(np->ring_dma + RX_RING_SIZE * sizeof(struct netdev_desc), in init_registers()
1728 * Configure for standard, in-spec Ethernet. in init_registers()
1729 * Start with half-duplex. check_link will update in init_registers()
1740 np->tx_config = TxAutoPad | TxCollRetry | TxMxdma_256 | in init_registers()
1742 writel(np->tx_config, ioaddr + TxConfig); in init_registers()
1747 np->rx_config = RxMxdma_256 | RX_DRTH_VAL; in init_registers()
1749 if (np->rx_buf_sz > NATSEMI_LONGPKT) in init_registers()
1750 np->rx_config |= RxAcceptLong; in init_registers()
1752 writel(np->rx_config, ioaddr + RxConfig); in init_registers()
1760 np->SavedClkRun = readl(ioaddr + ClkRun); in init_registers()
1761 writel(np->SavedClkRun & ~PMEEnable, ioaddr + ClkRun); in init_registers()
1762 if (np->SavedClkRun & PMEStatus && netif_msg_wol(np)) { in init_registers()
1763 printk(KERN_NOTICE "%s: Wake-up event %#08x\n", in init_registers()
1764 dev->name, readl(ioaddr + WOLCmd)); in init_registers()
1803 dev->name); in netdev_timer()
1806 if (dev->if_port == PORT_TP) { in netdev_timer()
1809 spin_lock_irq(&np->lock); in netdev_timer()
1810 /* check for a nasty random phy-reset - use dspcfg as a flag */ in netdev_timer()
1814 if (np->dspcfg_workaround && dspcfg != np->dspcfg) { in netdev_timer()
1816 spin_unlock_irq(&np->lock); in netdev_timer()
1819 "re-initializing\n", dev->name); in netdev_timer()
1820 disable_irq(dev->irq); in netdev_timer()
1821 spin_lock_irq(&np->lock); in netdev_timer()
1826 spin_unlock_irq(&np->lock); in netdev_timer()
1827 enable_irq(dev->irq); in netdev_timer()
1831 spin_unlock_irq(&np->lock); in netdev_timer()
1836 spin_unlock_irq(&np->lock); in netdev_timer()
1839 spin_lock_irq(&np->lock); in netdev_timer()
1841 spin_unlock_irq(&np->lock); in netdev_timer()
1843 if (np->oom) { in netdev_timer()
1844 disable_irq(dev->irq); in netdev_timer()
1845 np->oom = 0; in netdev_timer()
1847 enable_irq(dev->irq); in netdev_timer()
1848 if (!np->oom) { in netdev_timer()
1856 mod_timer(&np->timer, round_jiffies(jiffies + next_tick)); in netdev_timer()
1858 mod_timer(&np->timer, jiffies + next_tick); in netdev_timer()
1867 printk(KERN_DEBUG " Tx ring at %p:\n", np->tx_ring); in dump_ring()
1870 i, np->tx_ring[i].next_desc, in dump_ring()
1871 np->tx_ring[i].cmd_status, in dump_ring()
1872 np->tx_ring[i].addr); in dump_ring()
1874 printk(KERN_DEBUG " Rx ring %p:\n", np->rx_ring); in dump_ring()
1877 i, np->rx_ring[i].next_desc, in dump_ring()
1878 np->rx_ring[i].cmd_status, in dump_ring()
1879 np->rx_ring[i].addr); in dump_ring()
1889 disable_irq(dev->irq); in ns_tx_timeout()
1890 spin_lock_irq(&np->lock); in ns_tx_timeout()
1891 if (!np->hands_off) { in ns_tx_timeout()
1896 dev->name, readl(ioaddr + IntrStatus)); in ns_tx_timeout()
1905 dev->name); in ns_tx_timeout()
1907 spin_unlock_irq(&np->lock); in ns_tx_timeout()
1908 enable_irq(dev->irq); in ns_tx_timeout()
1910 dev->trans_start = jiffies; /* prevent tx timeout */ in ns_tx_timeout()
1911 dev->stats.tx_errors++; in ns_tx_timeout()
1918 np->rx_ring = pci_alloc_consistent(np->pci_dev, in alloc_ring()
1920 &np->ring_dma); in alloc_ring()
1921 if (!np->rx_ring) in alloc_ring()
1922 return -ENOMEM; in alloc_ring()
1923 np->tx_ring = &np->rx_ring[RX_RING_SIZE]; in alloc_ring()
1932 for (; np->cur_rx - np->dirty_rx > 0; np->dirty_rx++) { in refill_rx()
1934 int entry = np->dirty_rx % RX_RING_SIZE; in refill_rx()
1935 if (np->rx_skbuff[entry] == NULL) { in refill_rx()
1936 unsigned int buflen = np->rx_buf_sz+NATSEMI_PADDING; in refill_rx()
1938 np->rx_skbuff[entry] = skb; in refill_rx()
1941 skb->dev = dev; /* Mark as being used by this device. */ in refill_rx()
1942 np->rx_dma[entry] = pci_map_single(np->pci_dev, in refill_rx()
1943 skb->data, buflen, PCI_DMA_FROMDEVICE); in refill_rx()
1944 np->rx_ring[entry].addr = cpu_to_le32(np->rx_dma[entry]); in refill_rx()
1946 np->rx_ring[entry].cmd_status = cpu_to_le32(np->rx_buf_sz); in refill_rx()
1948 if (np->cur_rx - np->dirty_rx == RX_RING_SIZE) { in refill_rx()
1950 printk(KERN_WARNING "%s: going OOM.\n", dev->name); in refill_rx()
1951 np->oom = 1; in refill_rx()
1958 if (dev->mtu <= ETH_DATA_LEN) in set_bufsize()
1959 np->rx_buf_sz = ETH_DATA_LEN + NATSEMI_HEADERS; in set_bufsize()
1961 np->rx_buf_sz = dev->mtu + NATSEMI_HEADERS; in set_bufsize()
1971 np->dirty_tx = np->cur_tx = 0; in init_ring()
1973 np->tx_skbuff[i] = NULL; in init_ring()
1974 np->tx_ring[i].next_desc = cpu_to_le32(np->ring_dma in init_ring()
1977 np->tx_ring[i].cmd_status = 0; in init_ring()
1981 np->dirty_rx = 0; in init_ring()
1982 np->cur_rx = RX_RING_SIZE; in init_ring()
1983 np->oom = 0; in init_ring()
1986 np->rx_head_desc = &np->rx_ring[0]; in init_ring()
1988 /* Please be careful before changing this loop - at least gcc-2.95.1 in init_ring()
1993 np->rx_ring[i].next_desc = cpu_to_le32(np->ring_dma in init_ring()
1996 np->rx_ring[i].cmd_status = cpu_to_le32(DescOwn); in init_ring()
1997 np->rx_skbuff[i] = NULL; in init_ring()
2009 if (np->tx_skbuff[i]) { in drain_tx()
2010 pci_unmap_single(np->pci_dev, in drain_tx()
2011 np->tx_dma[i], np->tx_skbuff[i]->len, in drain_tx()
2013 dev_kfree_skb(np->tx_skbuff[i]); in drain_tx()
2014 dev->stats.tx_dropped++; in drain_tx()
2016 np->tx_skbuff[i] = NULL; in drain_tx()
2023 unsigned int buflen = np->rx_buf_sz; in drain_rx()
2028 np->rx_ring[i].cmd_status = 0; in drain_rx()
2029 np->rx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */ in drain_rx()
2030 if (np->rx_skbuff[i]) { in drain_rx()
2031 pci_unmap_single(np->pci_dev, np->rx_dma[i], in drain_rx()
2034 dev_kfree_skb(np->rx_skbuff[i]); in drain_rx()
2036 np->rx_skbuff[i] = NULL; in drain_rx()
2049 pci_free_consistent(np->pci_dev, in free_ring()
2051 np->rx_ring, np->ring_dma); in free_ring()
2060 np->dirty_rx = 0; in reinit_rx()
2061 np->cur_rx = RX_RING_SIZE; in reinit_rx()
2062 np->rx_head_desc = &np->rx_ring[0]; in reinit_rx()
2065 np->rx_ring[i].cmd_status = cpu_to_le32(DescOwn); in reinit_rx()
2077 np->dirty_tx = np->cur_tx = 0; in reinit_ring()
2079 np->tx_ring[i].cmd_status = 0; in reinit_ring()
2095 entry = np->cur_tx % TX_RING_SIZE; in start_tx()
2097 np->tx_skbuff[entry] = skb; in start_tx()
2098 np->tx_dma[entry] = pci_map_single(np->pci_dev, in start_tx()
2099 skb->data, skb->len, PCI_DMA_TODEVICE); in start_tx()
2101 np->tx_ring[entry].addr = cpu_to_le32(np->tx_dma[entry]); in start_tx()
2103 spin_lock_irqsave(&np->lock, flags); in start_tx()
2105 if (!np->hands_off) { in start_tx()
2106 np->tx_ring[entry].cmd_status = cpu_to_le32(DescOwn | skb->len); in start_tx()
2107 /* StrongARM: Explicitly cache flush np->tx_ring and in start_tx()
2108 * skb->data,skb->len. */ in start_tx()
2110 np->cur_tx++; in start_tx()
2111 if (np->cur_tx - np->dirty_tx >= TX_QUEUE_LEN - 1) { in start_tx()
2113 if (np->cur_tx - np->dirty_tx >= TX_QUEUE_LEN - 1) in start_tx()
2116 /* Wake the potentially-idle transmit channel. */ in start_tx()
2120 dev->stats.tx_dropped++; in start_tx()
2122 spin_unlock_irqrestore(&np->lock, flags); in start_tx()
2126 dev->name, np->cur_tx, entry); in start_tx()
2135 for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) { in netdev_tx_done()
2136 int entry = np->dirty_tx % TX_RING_SIZE; in netdev_tx_done()
2137 if (np->tx_ring[entry].cmd_status & cpu_to_le32(DescOwn)) in netdev_tx_done()
2142 dev->name, np->dirty_tx, in netdev_tx_done()
2143 le32_to_cpu(np->tx_ring[entry].cmd_status)); in netdev_tx_done()
2144 if (np->tx_ring[entry].cmd_status & cpu_to_le32(DescPktOK)) { in netdev_tx_done()
2145 dev->stats.tx_packets++; in netdev_tx_done()
2146 dev->stats.tx_bytes += np->tx_skbuff[entry]->len; in netdev_tx_done()
2149 le32_to_cpu(np->tx_ring[entry].cmd_status); in netdev_tx_done()
2151 dev->stats.tx_aborted_errors++; in netdev_tx_done()
2153 dev->stats.tx_fifo_errors++; in netdev_tx_done()
2155 dev->stats.tx_carrier_errors++; in netdev_tx_done()
2157 dev->stats.tx_window_errors++; in netdev_tx_done()
2158 dev->stats.tx_errors++; in netdev_tx_done()
2160 pci_unmap_single(np->pci_dev,np->tx_dma[entry], in netdev_tx_done()
2161 np->tx_skbuff[entry]->len, in netdev_tx_done()
2164 dev_kfree_skb_irq(np->tx_skbuff[entry]); in netdev_tx_done()
2165 np->tx_skbuff[entry] = NULL; in netdev_tx_done()
2168 np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) { in netdev_tx_done()
2185 if (np->hands_off || !readl(ioaddr + IntrEnable)) in intr_handler()
2188 np->intr_status = readl(ioaddr + IntrStatus); in intr_handler()
2190 if (!np->intr_status) in intr_handler()
2196 dev->name, np->intr_status, in intr_handler()
2199 prefetch(&np->rx_skbuff[np->cur_rx % RX_RING_SIZE]); in intr_handler()
2201 if (napi_schedule_prep(&np->napi)) { in intr_handler()
2204 __napi_schedule(&np->napi); in intr_handler()
2208 dev->name, np->intr_status, in intr_handler()
2220 struct net_device *dev = np->dev; in natsemi_poll()
2228 dev->name, np->intr_status, in natsemi_poll()
2233 if (np->intr_status & in natsemi_poll()
2239 if (np->intr_status & in natsemi_poll()
2241 spin_lock(&np->lock); in natsemi_poll()
2243 spin_unlock(&np->lock); in natsemi_poll()
2247 if (np->intr_status & IntrAbnormalSummary) in natsemi_poll()
2248 netdev_error(dev, np->intr_status); in natsemi_poll()
2253 np->intr_status = readl(ioaddr + IntrStatus); in natsemi_poll()
2254 } while (np->intr_status); in natsemi_poll()
2260 spin_lock(&np->lock); in natsemi_poll()
2261 if (!np->hands_off) in natsemi_poll()
2263 spin_unlock(&np->lock); in natsemi_poll()
2273 int entry = np->cur_rx % RX_RING_SIZE; in netdev_rx()
2274 int boguscnt = np->dirty_rx + RX_RING_SIZE - np->cur_rx; in netdev_rx()
2275 s32 desc_status = le32_to_cpu(np->rx_head_desc->cmd_status); in netdev_rx()
2276 unsigned int buflen = np->rx_buf_sz; in netdev_rx()
2286 if (--boguscnt < 0) in netdev_rx()
2294 pkt_len = (desc_status & DescSizeMask) - 4; in netdev_rx()
2304 "status %#08x.\n", dev->name, in netdev_rx()
2305 np->cur_rx, desc_status); in netdev_rx()
2306 dev->stats.rx_length_errors++; in netdev_rx()
2311 * AN-1287. */ in netdev_rx()
2313 spin_lock_irqsave(&np->lock, flags); in netdev_rx()
2316 writel(np->ring_dma, ioaddr + RxRingPtr); in netdev_rx()
2318 spin_unlock_irqrestore(&np->lock, flags); in netdev_rx()
2326 dev->stats.rx_errors++; in netdev_rx()
2328 dev->stats.rx_over_errors++; in netdev_rx()
2330 dev->stats.rx_length_errors++; in netdev_rx()
2332 dev->stats.rx_frame_errors++; in netdev_rx()
2334 dev->stats.rx_crc_errors++; in netdev_rx()
2336 } else if (pkt_len > np->rx_buf_sz) { in netdev_rx()
2345 * without copying to a minimally-sized skbuff. */ in netdev_rx()
2350 pci_dma_sync_single_for_cpu(np->pci_dev, in netdev_rx()
2351 np->rx_dma[entry], in netdev_rx()
2355 np->rx_skbuff[entry]->data, pkt_len); in netdev_rx()
2357 pci_dma_sync_single_for_device(np->pci_dev, in netdev_rx()
2358 np->rx_dma[entry], in netdev_rx()
2362 pci_unmap_single(np->pci_dev, np->rx_dma[entry], in netdev_rx()
2365 skb_put(skb = np->rx_skbuff[entry], pkt_len); in netdev_rx()
2366 np->rx_skbuff[entry] = NULL; in netdev_rx()
2368 skb->protocol = eth_type_trans(skb, dev); in netdev_rx()
2370 dev->stats.rx_packets++; in netdev_rx()
2371 dev->stats.rx_bytes += pkt_len; in netdev_rx()
2373 entry = (++np->cur_rx) % RX_RING_SIZE; in netdev_rx()
2374 np->rx_head_desc = &np->rx_ring[entry]; in netdev_rx()
2375 desc_status = le32_to_cpu(np->rx_head_desc->cmd_status); in netdev_rx()
2380 if (np->oom) in netdev_rx()
2381 mod_timer(&np->timer, jiffies + 1); in netdev_rx()
2391 spin_lock(&np->lock); in netdev_error()
2398 " %#04x partner %#04x.\n", dev->name, in netdev_error()
2399 np->advertising, lpa); in netdev_error()
2410 if ((np->tx_config & TxDrthMask) < TX_DRTH_VAL_LIMIT) { in netdev_error()
2411 np->tx_config += TX_DRTH_VAL_INC; in netdev_error()
2415 dev->name, np->tx_config); in netdev_error()
2420 dev->name, np->tx_config); in netdev_error()
2422 writel(np->tx_config, ioaddr + TxConfig); in netdev_error()
2426 printk(KERN_NOTICE "%s: Link wake-up event %#08x\n", in netdev_error()
2427 dev->name, wol_status); in netdev_error()
2432 dev->name); in netdev_error()
2434 dev->stats.rx_fifo_errors++; in netdev_error()
2435 dev->stats.rx_errors++; in netdev_error()
2439 printk(KERN_NOTICE "%s: PCI error %#08x\n", dev->name, in netdev_error()
2441 dev->stats.tx_fifo_errors++; in netdev_error()
2442 dev->stats.tx_errors++; in netdev_error()
2443 dev->stats.rx_fifo_errors++; in netdev_error()
2444 dev->stats.rx_errors++; in netdev_error()
2446 spin_unlock(&np->lock); in netdev_error()
2454 dev->stats.rx_crc_errors += readl(ioaddr + RxCRCErrs); in __get_stats()
2455 dev->stats.rx_missed_errors += readl(ioaddr + RxMissed); in __get_stats()
2463 spin_lock_irq(&np->lock); in get_stats()
2464 if (netif_running(dev) && !np->hands_off) in get_stats()
2466 spin_unlock_irq(&np->lock); in get_stats()
2468 return &dev->stats; in get_stats()
2474 disable_irq(dev->irq); in natsemi_poll_controller()
2475 intr_handler(dev->irq, dev); in natsemi_poll_controller()
2476 enable_irq(dev->irq); in natsemi_poll_controller()
2488 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */ in __set_rx_mode()
2492 (dev->flags & IFF_ALLMULTI)) { in __set_rx_mode()
2501 int b = (ether_crc(ETH_ALEN, ha->addr) >> 23) & 0x1ff; in __set_rx_mode()
2513 np->cur_rx_mode = rx_mode; in __set_rx_mode()
2518 if (new_mtu < 64 || new_mtu > NATSEMI_RX_LIMIT-NATSEMI_HEADERS) in natsemi_change_mtu()
2519 return -EINVAL; in natsemi_change_mtu()
2521 dev->mtu = new_mtu; in natsemi_change_mtu()
2528 disable_irq(dev->irq); in natsemi_change_mtu()
2529 spin_lock(&np->lock); in natsemi_change_mtu()
2537 writel(np->ring_dma, ioaddr + RxRingPtr); in natsemi_change_mtu()
2540 spin_unlock(&np->lock); in natsemi_change_mtu()
2541 enable_irq(dev->irq); in natsemi_change_mtu()
2549 spin_lock_irq(&np->lock); in set_rx_mode()
2550 if (!np->hands_off) in set_rx_mode()
2552 spin_unlock_irq(&np->lock); in set_rx_mode()
2558 strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); in get_drvinfo()
2559 strlcpy(info->version, DRV_VERSION, sizeof(info->version)); in get_drvinfo()
2560 strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info)); in get_drvinfo()
2571 return np->eeprom_size; in get_eeprom_len()
2577 spin_lock_irq(&np->lock); in get_settings()
2579 spin_unlock_irq(&np->lock); in get_settings()
2587 spin_lock_irq(&np->lock); in set_settings()
2589 spin_unlock_irq(&np->lock); in set_settings()
2596 spin_lock_irq(&np->lock); in get_wol()
2597 netdev_get_wol(dev, &wol->supported, &wol->wolopts); in get_wol()
2598 netdev_get_sopass(dev, wol->sopass); in get_wol()
2599 spin_unlock_irq(&np->lock); in get_wol()
2606 spin_lock_irq(&np->lock); in set_wol()
2607 netdev_set_wol(dev, wol->wolopts); in set_wol()
2608 res = netdev_set_sopass(dev, wol->sopass); in set_wol()
2609 spin_unlock_irq(&np->lock); in set_wol()
2616 regs->version = NATSEMI_REGS_VER; in get_regs()
2617 spin_lock_irq(&np->lock); in get_regs()
2619 spin_unlock_irq(&np->lock); in get_regs()
2625 return np->msg_enable; in get_msglevel()
2631 np->msg_enable = val; in set_msglevel()
2637 int r = -EINVAL; in nway_reset()
2650 /* LSTATUS is latched low until a read - so read twice */ in get_link()
2661 eebuf = kmalloc(np->eeprom_size, GFP_KERNEL); in get_eeprom()
2663 return -ENOMEM; in get_eeprom()
2665 eeprom->magic = PCI_VENDOR_ID_NS | (PCI_DEVICE_ID_NS_83815<<16); in get_eeprom()
2666 spin_lock_irq(&np->lock); in get_eeprom()
2668 spin_unlock_irq(&np->lock); in get_eeprom()
2670 memcpy(data, eebuf+eeprom->offset, eeprom->len); in get_eeprom()
2710 if (np->srr >= SRR_DP83815_D) { in netdev_set_wol()
2730 if (np->srr >= SRR_DP83815_D) { in netdev_get_wol()
2764 if (np->srr < SRR_DP83815_D) { in netdev_set_sopass()
2783 /* re-enable the RX filter */ in netdev_set_sopass()
2796 if (np->srr < SRR_DP83815_D) { in netdev_get_sopass()
2823 ecmd->port = dev->if_port; in netdev_get_ecmd()
2824 ethtool_cmd_speed_set(ecmd, np->speed); in netdev_get_ecmd()
2825 ecmd->duplex = np->duplex; in netdev_get_ecmd()
2826 ecmd->autoneg = np->autoneg; in netdev_get_ecmd()
2827 ecmd->advertising = 0; in netdev_get_ecmd()
2828 if (np->advertising & ADVERTISE_10HALF) in netdev_get_ecmd()
2829 ecmd->advertising |= ADVERTISED_10baseT_Half; in netdev_get_ecmd()
2830 if (np->advertising & ADVERTISE_10FULL) in netdev_get_ecmd()
2831 ecmd->advertising |= ADVERTISED_10baseT_Full; in netdev_get_ecmd()
2832 if (np->advertising & ADVERTISE_100HALF) in netdev_get_ecmd()
2833 ecmd->advertising |= ADVERTISED_100baseT_Half; in netdev_get_ecmd()
2834 if (np->advertising & ADVERTISE_100FULL) in netdev_get_ecmd()
2835 ecmd->advertising |= ADVERTISED_100baseT_Full; in netdev_get_ecmd()
2836 ecmd->supported = (SUPPORTED_Autoneg | in netdev_get_ecmd()
2840 ecmd->phy_address = np->phy_addr_external; in netdev_get_ecmd()
2847 * # ethtool -s ethX port mii in netdev_get_ecmd()
2852 * # ethtool -s ethX port tp;ethtool -s ethX port mii in netdev_get_ecmd()
2860 switch (ecmd->port) { in netdev_get_ecmd()
2863 ecmd->advertising |= ADVERTISED_TP; in netdev_get_ecmd()
2864 ecmd->transceiver = XCVR_INTERNAL; in netdev_get_ecmd()
2867 ecmd->advertising |= ADVERTISED_MII; in netdev_get_ecmd()
2868 ecmd->transceiver = XCVR_EXTERNAL; in netdev_get_ecmd()
2871 ecmd->advertising |= ADVERTISED_FIBRE; in netdev_get_ecmd()
2872 ecmd->transceiver = XCVR_EXTERNAL; in netdev_get_ecmd()
2877 if (ecmd->autoneg == AUTONEG_ENABLE) { in netdev_get_ecmd()
2878 ecmd->advertising |= ADVERTISED_Autoneg; in netdev_get_ecmd()
2880 np->advertising & mdio_read(dev, MII_LPA)); in netdev_get_ecmd()
2886 ecmd->duplex = DUPLEX_FULL; in netdev_get_ecmd()
2888 ecmd->duplex = DUPLEX_HALF; in netdev_get_ecmd()
2900 if (ecmd->port != PORT_TP && ecmd->port != PORT_MII && ecmd->port != PORT_FIBRE) in netdev_set_ecmd()
2901 return -EINVAL; in netdev_set_ecmd()
2902 if (ecmd->transceiver != XCVR_INTERNAL && ecmd->transceiver != XCVR_EXTERNAL) in netdev_set_ecmd()
2903 return -EINVAL; in netdev_set_ecmd()
2904 if (ecmd->autoneg == AUTONEG_ENABLE) { in netdev_set_ecmd()
2905 if ((ecmd->advertising & (ADVERTISED_10baseT_Half | in netdev_set_ecmd()
2909 return -EINVAL; in netdev_set_ecmd()
2911 } else if (ecmd->autoneg == AUTONEG_DISABLE) { in netdev_set_ecmd()
2914 return -EINVAL; in netdev_set_ecmd()
2915 if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL) in netdev_set_ecmd()
2916 return -EINVAL; in netdev_set_ecmd()
2918 return -EINVAL; in netdev_set_ecmd()
2926 if (np->ignore_phy && (ecmd->autoneg == AUTONEG_ENABLE || in netdev_set_ecmd()
2927 ecmd->port == PORT_TP)) in netdev_set_ecmd()
2928 return -EINVAL; in netdev_set_ecmd()
2935 * XCVR_EXTERNAL. The implementation thus ignores ecmd->transceiver and in netdev_set_ecmd()
2936 * selects based on ecmd->port. in netdev_set_ecmd()
2946 dev->if_port = ecmd->port; in netdev_set_ecmd()
2947 np->autoneg = ecmd->autoneg; in netdev_set_ecmd()
2948 np->phy_addr_external = ecmd->phy_address & PhyAddrMask; in netdev_set_ecmd()
2949 if (np->autoneg == AUTONEG_ENABLE) { in netdev_set_ecmd()
2951 np->advertising &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4); in netdev_set_ecmd()
2952 if (ecmd->advertising & ADVERTISED_10baseT_Half) in netdev_set_ecmd()
2953 np->advertising |= ADVERTISE_10HALF; in netdev_set_ecmd()
2954 if (ecmd->advertising & ADVERTISED_10baseT_Full) in netdev_set_ecmd()
2955 np->advertising |= ADVERTISE_10FULL; in netdev_set_ecmd()
2956 if (ecmd->advertising & ADVERTISED_100baseT_Half) in netdev_set_ecmd()
2957 np->advertising |= ADVERTISE_100HALF; in netdev_set_ecmd()
2958 if (ecmd->advertising & ADVERTISED_100baseT_Full) in netdev_set_ecmd()
2959 np->advertising |= ADVERTISE_100FULL; in netdev_set_ecmd()
2961 np->speed = ethtool_cmd_speed(ecmd); in netdev_set_ecmd()
2962 np->duplex = ecmd->duplex; in netdev_set_ecmd()
2964 if (np->duplex == DUPLEX_HALF) in netdev_set_ecmd()
2965 np->full_duplex = 0; in netdev_set_ecmd()
2969 if (ecmd->port == PORT_TP) in netdev_set_ecmd()
2988 /* read non-mii page 0 of registers */ in netdev_get_regs()
3013 /* the interrupt status is clear-on-read - see if we missed any */ in netdev_get_regs()
3017 dev->name, rbuf[4] & rbuf[5]); in netdev_get_regs()
3040 for (i = 0; i < np->eeprom_size/2; i++) { in netdev_get_eeprom()
3042 /* The EEPROM itself stores data bit-swapped, but eeprom_read in netdev_get_eeprom()
3057 data->phy_id = np->phy_addr_external; in netdev_ioctl()
3065 if (dev->if_port == PORT_TP) { in netdev_ioctl()
3066 if ((data->phy_id & 0x1f) == np->phy_addr_external) in netdev_ioctl()
3067 data->val_out = mdio_read(dev, in netdev_ioctl()
3068 data->reg_num & 0x1f); in netdev_ioctl()
3070 data->val_out = 0; in netdev_ioctl()
3072 move_int_phy(dev, data->phy_id & 0x1f); in netdev_ioctl()
3073 data->val_out = miiport_read(dev, data->phy_id & 0x1f, in netdev_ioctl()
3074 data->reg_num & 0x1f); in netdev_ioctl()
3079 if (dev->if_port == PORT_TP) { in netdev_ioctl()
3080 if ((data->phy_id & 0x1f) == np->phy_addr_external) { in netdev_ioctl()
3081 if ((data->reg_num & 0x1f) == MII_ADVERTISE) in netdev_ioctl()
3082 np->advertising = data->val_in; in netdev_ioctl()
3083 mdio_write(dev, data->reg_num & 0x1f, in netdev_ioctl()
3084 data->val_in); in netdev_ioctl()
3087 if ((data->phy_id & 0x1f) == np->phy_addr_external) { in netdev_ioctl()
3088 if ((data->reg_num & 0x1f) == MII_ADVERTISE) in netdev_ioctl()
3089 np->advertising = data->val_in; in netdev_ioctl()
3091 move_int_phy(dev, data->phy_id & 0x1f); in netdev_ioctl()
3092 miiport_write(dev, data->phy_id & 0x1f, in netdev_ioctl()
3093 data->reg_num & 0x1f, in netdev_ioctl()
3094 data->val_in); in netdev_ioctl()
3098 return -EOPNOTSUPP; in netdev_ioctl()
3108 printk(KERN_INFO "%s: remaining active for wake-on-lan\n", in enable_wol_mode()
3109 dev->name); in enable_wol_mode()
3121 writel(np->SavedClkRun | PMEEnable | PMEStatus, ioaddr + ClkRun); in enable_wol_mode()
3143 dev->name, (int)readl(ioaddr + ChipCmd)); in netdev_close()
3147 dev->name, np->cur_tx, np->dirty_tx, in netdev_close()
3148 np->cur_rx, np->dirty_rx); in netdev_close()
3150 napi_disable(&np->napi); in netdev_close()
3159 del_timer_sync(&np->timer); in netdev_close()
3160 disable_irq(dev->irq); in netdev_close()
3161 spin_lock_irq(&np->lock); in netdev_close()
3163 np->hands_off = 1; in netdev_close()
3164 spin_unlock_irq(&np->lock); in netdev_close()
3165 enable_irq(dev->irq); in netdev_close()
3167 free_irq(dev->irq, dev); in netdev_close()
3173 spin_lock_irq(&np->lock); in netdev_close()
3174 np->hands_off = 0; in netdev_close()
3185 spin_unlock_irq(&np->lock); in netdev_close()
3187 /* clear the carrier last - an interrupt could reenable it otherwise */ in netdev_close()
3204 writel(np->SavedClkRun, ioaddr + ClkRun); in netdev_close()
3236 * No function accesses the hardware without checking np->hands_off.
3237 The check occurs under spin_lock_irq(&np->lock);
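
That convention can be sketched as a user-space analogy, with a pthread mutex standing in for the irq spinlock; the structure and function names are illustrative only:

#include <pthread.h>
#include <stdbool.h>

struct nic_sketch {
        pthread_mutex_t lock;   /* stands in for np->lock (spin_lock_irq in the driver) */
        bool hands_off;         /* set during suspend; cleared on resume */
};

/* Every hardware access checks hands_off while holding the lock, so a
 * suspend in progress can never race with register accesses. */
void touch_hardware_sketch(struct nic_sketch *nic)
{
        pthread_mutex_lock(&nic->lock);
        if (!nic->hands_off) {
                /* ... program registers here ... */
        }
        pthread_mutex_unlock(&nic->lock);
}
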
3260 del_timer_sync(&np->timer); in natsemi_suspend()
3262 disable_irq(dev->irq); in natsemi_suspend()
3263 spin_lock_irq(&np->lock); in natsemi_suspend()
3266 np->hands_off = 1; in natsemi_suspend()
3270 spin_unlock_irq(&np->lock); in natsemi_suspend()
3271 enable_irq(dev->irq); in natsemi_suspend()
3273 napi_disable(&np->napi); in natsemi_suspend()
3278 /* pci_power_off(pdev, -1); */ in natsemi_suspend()
3291 writel(np->SavedClkRun, ioaddr + ClkRun); in natsemi_suspend()
3311 BUG_ON(!np->hands_off); in natsemi_resume()
3314 dev_err(&pdev->dev, in natsemi_resume()
3320 napi_enable(&np->napi); in natsemi_resume()
3324 disable_irq(dev->irq); in natsemi_resume()
3325 spin_lock_irq(&np->lock); in natsemi_resume()
3326 np->hands_off = 0; in natsemi_resume()
3329 spin_unlock_irq(&np->lock); in natsemi_resume()
3330 enable_irq(dev->irq); in natsemi_resume()
3332 mod_timer(&np->timer, round_jiffies(jiffies + 1*HZ)); in natsemi_resume()