Lines matching defs:lp — occurrences of the struct pcnet32_private *lp private-data pointer in the pcnet32 driver

448 struct pcnet32_private *lp = netdev_priv(dev);
451 napi_disable(&lp->napi);
457 struct pcnet32_private *lp = netdev_priv(dev);
462 val = lp->a->read_csr(ioaddr, CSR3);
464 lp->a->write_csr(ioaddr, CSR3, val);
465 napi_enable_locked(&lp->napi);
473 * Must be called with lp->lock held.
476 struct pcnet32_private *lp,
488 dma_alloc_coherent(&lp->pci_dev->dev,
502 kfree(lp->tx_skbuff);
503 kfree(lp->tx_dma_addr);
504 dma_free_coherent(&lp->pci_dev->dev,
505 sizeof(struct pcnet32_tx_head) * lp->tx_ring_size,
506 lp->tx_ring, lp->tx_ring_dma_addr);
508 lp->tx_ring_size = entries;
509 lp->tx_mod_mask = lp->tx_ring_size - 1;
510 lp->tx_len_bits = (size << 12);
511 lp->tx_ring = new_tx_ring;
512 lp->tx_ring_dma_addr = new_ring_dma_addr;
513 lp->tx_dma_addr = new_dma_addr_list;
514 lp->tx_skbuff = new_skb_list;
520 dma_free_coherent(&lp->pci_dev->dev,
533 * Must be called with lp->lock held.
536 struct pcnet32_private *lp,
547 dma_alloc_coherent(&lp->pci_dev->dev,
562 overlap = min(entries, lp->rx_ring_size);
564 new_rx_ring[new] = lp->rx_ring[new];
565 new_dma_addr_list[new] = lp->rx_dma_addr[new];
566 new_skb_list[new] = lp->rx_skbuff[new];
575 netif_err(lp, drv, dev, "%s netdev_alloc_skb failed\n",
582 dma_map_single(&lp->pci_dev->dev, rx_skbuff->data,
584 if (dma_mapping_error(&lp->pci_dev->dev, new_dma_addr_list[new])) {
585 netif_err(lp, drv, dev, "%s dma mapping failed\n",
595 for (; new < lp->rx_ring_size; new++) {
596 if (lp->rx_skbuff[new]) {
597 if (!dma_mapping_error(&lp->pci_dev->dev, lp->rx_dma_addr[new]))
598 dma_unmap_single(&lp->pci_dev->dev,
599 lp->rx_dma_addr[new],
602 dev_kfree_skb(lp->rx_skbuff[new]);
606 kfree(lp->rx_skbuff);
607 kfree(lp->rx_dma_addr);
608 dma_free_coherent(&lp->pci_dev->dev,
609 sizeof(struct pcnet32_rx_head) * lp->rx_ring_size,
610 lp->rx_ring, lp->rx_ring_dma_addr);
612 lp->rx_ring_size = entries;
613 lp->rx_mod_mask = lp->rx_ring_size - 1;
614 lp->rx_len_bits = (size << 4);
615 lp->rx_ring = new_rx_ring;
616 lp->rx_ring_dma_addr = new_ring_dma_addr;
617 lp->rx_dma_addr = new_dma_addr_list;
618 lp->rx_skbuff = new_skb_list;
622 while (--new >= lp->rx_ring_size) {
624 if (!dma_mapping_error(&lp->pci_dev->dev, new_dma_addr_list[new]))
625 dma_unmap_single(&lp->pci_dev->dev,
636 dma_free_coherent(&lp->pci_dev->dev,
643 struct pcnet32_private *lp = netdev_priv(dev);
647 for (i = 0; i < lp->rx_ring_size; i++) {
648 lp->rx_ring[i].status = 0; /* CPU owns buffer */
650 if (lp->rx_skbuff[i]) {
651 if (!dma_mapping_error(&lp->pci_dev->dev, lp->rx_dma_addr[i]))
652 dma_unmap_single(&lp->pci_dev->dev,
653 lp->rx_dma_addr[i],
656 dev_kfree_skb_any(lp->rx_skbuff[i]);
658 lp->rx_skbuff[i] = NULL;
659 lp->rx_dma_addr[i] = 0;
673 * lp->lock must be held.
679 struct pcnet32_private *lp = netdev_priv(dev);
680 const struct pcnet32_access *a = lp->a;
685 if (lp->chip_version < PCNET32_79C970A)
695 spin_unlock_irqrestore(&lp->lock, *flags);
700 spin_lock_irqsave(&lp->lock, *flags);
703 netif_printk(lp, hw, KERN_DEBUG, dev,
711 static void pcnet32_clr_suspend(struct pcnet32_private *lp, ulong ioaddr)
713 int csr5 = lp->a->read_csr(ioaddr, CSR5);
715 lp->a->write_csr(ioaddr, CSR5, csr5 & ~CSR5_SUSPEND);
721 struct pcnet32_private *lp = netdev_priv(dev);
724 spin_lock_irqsave(&lp->lock, flags);
725 if (lp->mii) {
726 mii_ethtool_get_link_ksettings(&lp->mii_if, cmd);
727 } else if (lp->chip_version == PCNET32_79C970A) {
728 if (lp->autoneg) {
730 if (lp->a->read_bcr(dev->base_addr, 4) == 0xc0)
736 cmd->base.port = lp->port_tp ? PORT_TP : PORT_AUI;
738 cmd->base.duplex = lp->fdx ? DUPLEX_FULL : DUPLEX_HALF;
744 spin_unlock_irqrestore(&lp->lock, flags);
751 struct pcnet32_private *lp = netdev_priv(dev);
757 spin_lock_irqsave(&lp->lock, flags);
758 if (lp->mii) {
759 r = mii_ethtool_set_link_ksettings(&lp->mii_if, cmd);
760 } else if (lp->chip_version == PCNET32_79C970A) {
763 lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
765 lp->autoneg = cmd->base.autoneg == AUTONEG_ENABLE;
766 bcr2 = lp->a->read_bcr(ioaddr, 2);
768 lp->a->write_bcr(ioaddr, 2, bcr2 | 0x0002);
770 lp->a->write_bcr(ioaddr, 2, bcr2 & ~0x0002);
772 lp->port_tp = cmd->base.port == PORT_TP;
773 csr15 = lp->a->read_csr(ioaddr, CSR15) & ~0x0180;
776 lp->a->write_csr(ioaddr, CSR15, csr15);
777 lp->init_block->mode = cpu_to_le16(csr15);
779 lp->fdx = cmd->base.duplex == DUPLEX_FULL;
780 bcr9 = lp->a->read_bcr(ioaddr, 9) & ~0x0003;
783 lp->a->write_bcr(ioaddr, 9, bcr9);
786 pcnet32_clr_suspend(lp, ioaddr);
791 spin_unlock_irqrestore(&lp->lock, flags);
798 struct pcnet32_private *lp = netdev_priv(dev);
801 if (lp->pci_dev)
802 strscpy(info->bus_info, pci_name(lp->pci_dev),
811 struct pcnet32_private *lp = netdev_priv(dev);
815 spin_lock_irqsave(&lp->lock, flags);
816 if (lp->mii) {
817 r = mii_link_ok(&lp->mii_if);
818 } else if (lp->chip_version == PCNET32_79C970A) {
821 if (!lp->autoneg && lp->port_tp)
822 r = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
825 } else if (lp->chip_version > PCNET32_79C970A) {
827 r = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
831 spin_unlock_irqrestore(&lp->lock, flags);
838 struct pcnet32_private *lp = netdev_priv(dev);
839 return lp->msg_enable;
844 struct pcnet32_private *lp = netdev_priv(dev);
845 lp->msg_enable = value;
850 struct pcnet32_private *lp = netdev_priv(dev);
854 if (lp->mii) {
855 spin_lock_irqsave(&lp->lock, flags);
856 r = mii_nway_restart(&lp->mii_if);
857 spin_unlock_irqrestore(&lp->lock, flags);
867 struct pcnet32_private *lp = netdev_priv(dev);
870 ering->tx_pending = lp->tx_ring_size;
872 ering->rx_pending = lp->rx_ring_size;
880 struct pcnet32_private *lp = netdev_priv(dev);
893 spin_lock_irqsave(&lp->lock, flags);
894 lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
905 if ((1 << i) != lp->tx_ring_size)
906 pcnet32_realloc_tx_ring(dev, lp, i);
913 if ((1 << i) != lp->rx_ring_size)
914 pcnet32_realloc_rx_ring(dev, lp, i);
916 lp->napi.weight = lp->rx_ring_size / 2;
923 spin_unlock_irqrestore(&lp->lock, flags);
926 netif_info(lp, drv, dev, "Ring Param Settings: RX: %d, TX: %d\n",
927 lp->rx_ring_size, lp->tx_ring_size);
951 struct pcnet32_private *lp = netdev_priv(dev);
957 netif_printk(lp, hw, KERN_DEBUG, dev,
961 netif_printk(lp, hw, KERN_DEBUG, dev,
964 netif_printk(lp, hw, KERN_DEBUG, dev,
970 struct pcnet32_private *lp = netdev_priv(dev);
971 const struct pcnet32_access *a = lp->a; /* access to registers */
991 spin_lock_irqsave(&lp->lock, flags);
992 lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
994 numbuffs = min(numbuffs, (int)min(lp->rx_ring_size, lp->tx_ring_size));
997 lp->a->reset(ioaddr);
998 lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
1001 lp->a->write_bcr(ioaddr, 20, 2);
1006 lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
1013 netif_printk(lp, hw, KERN_DEBUG, dev,
1020 lp->tx_skbuff[x] = skb;
1021 lp->tx_ring[x].length = cpu_to_le16(-skb->len);
1022 lp->tx_ring[x].misc = 0;
1038 lp->tx_dma_addr[x] =
1039 dma_map_single(&lp->pci_dev->dev, skb->data, skb->len,
1041 if (dma_mapping_error(&lp->pci_dev->dev, lp->tx_dma_addr[x])) {
1042 netif_printk(lp, hw, KERN_DEBUG, dev,
1047 lp->tx_ring[x].base = cpu_to_le32(lp->tx_dma_addr[x]);
1049 lp->tx_ring[x].status = cpu_to_le16(status);
1057 lp->a->write_csr(ioaddr, CSR15, x | 0x0044);
1060 lp->a->write_csr(ioaddr, CSR0, CSR0_START); /* Set STRT bit */
1066 while ((lp->rx_ring[x].status & teststatus) && (ticks < 200)) {
1067 spin_unlock_irqrestore(&lp->lock, flags);
1069 spin_lock_irqsave(&lp->lock, flags);
1074 netif_err(lp, hw, dev, "Desc %d failed to reset!\n", x);
1079 lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
1081 if (netif_msg_hw(lp) && netif_msg_pktdata(lp)) {
1086 skb = lp->rx_skbuff[x];
1096 skb = lp->rx_skbuff[x];
1097 packet = lp->tx_skbuff[x]->data;
1100 netif_printk(lp, hw, KERN_DEBUG, dev,
1125 lp->a->write_bcr(ioaddr, 20, 4); /* return to 16bit mode */
1127 spin_unlock_irqrestore(&lp->lock, flags);
1136 struct pcnet32_private *lp = netdev_priv(dev);
1137 const struct pcnet32_access *a = lp->a;
1145 spin_lock_irqsave(&lp->lock, flags);
1147 lp->save_regs[i - 4] = a->read_bcr(ioaddr, i);
1148 spin_unlock_irqrestore(&lp->lock, flags);
1154 spin_lock_irqsave(&lp->lock, flags);
1157 spin_unlock_irqrestore(&lp->lock, flags);
1162 spin_lock_irqsave(&lp->lock, flags);
1164 a->write_bcr(ioaddr, i, lp->save_regs[i - 4]);
1165 spin_unlock_irqrestore(&lp->lock, flags);
1175 struct pcnet32_private *lp,
1208 netif_err(lp, drv, dev, "Impossible packet size %d!\n",
1214 netif_err(lp, rx_err, dev, "Runt packet!\n");
1230 new_dma_addr = dma_map_single(&lp->pci_dev->dev,
1234 if (dma_mapping_error(&lp->pci_dev->dev, new_dma_addr)) {
1235 netif_err(lp, rx_err, dev,
1240 skb = lp->rx_skbuff[entry];
1241 dma_unmap_single(&lp->pci_dev->dev,
1242 lp->rx_dma_addr[entry],
1246 lp->rx_skbuff[entry] = newskb;
1247 lp->rx_dma_addr[entry] = new_dma_addr;
1263 dma_sync_single_for_cpu(&lp->pci_dev->dev,
1264 lp->rx_dma_addr[entry], pkt_len,
1267 (unsigned char *)(lp->rx_skbuff[entry]->data),
1269 dma_sync_single_for_device(&lp->pci_dev->dev,
1270 lp->rx_dma_addr[entry], pkt_len,
1281 struct pcnet32_private *lp = netdev_priv(dev);
1282 int entry = lp->cur_rx & lp->rx_mod_mask;
1283 struct pcnet32_rx_head *rxp = &lp->rx_ring[entry];
1288 pcnet32_rx_entry(dev, lp, rxp, entry);
1297 entry = (++lp->cur_rx) & lp->rx_mod_mask;
1298 rxp = &lp->rx_ring[entry];
1306 struct pcnet32_private *lp = netdev_priv(dev);
1307 unsigned int dirty_tx = lp->dirty_tx;
1311 while (dirty_tx != lp->cur_tx) {
1312 int entry = dirty_tx & lp->tx_mod_mask;
1313 int status = (short)le16_to_cpu(lp->tx_ring[entry].status);
1318 lp->tx_ring[entry].base = 0;
1322 int err_status = le32_to_cpu(lp->tx_ring[entry].misc);
1324 netif_err(lp, tx_err, dev,
1338 netif_err(lp, tx_err, dev, "Tx FIFO error!\n");
1344 if (!lp->dxsuflo) { /* If controller doesn't recover ... */
1347 netif_err(lp, tx_err, dev, "Tx FIFO error!\n");
1359 if (lp->tx_skbuff[entry]) {
1360 dma_unmap_single(&lp->pci_dev->dev,
1361 lp->tx_dma_addr[entry],
1362 lp->tx_skbuff[entry]->len,
1364 dev_kfree_skb_any(lp->tx_skbuff[entry]);
1365 lp->tx_skbuff[entry] = NULL;
1366 lp->tx_dma_addr[entry] = 0;
1371 delta = (lp->cur_tx - dirty_tx) & (lp->tx_mod_mask + lp->tx_ring_size);
1372 if (delta > lp->tx_ring_size) {
1373 netif_err(lp, drv, dev, "out-of-sync dirty pointer, %d vs. %d, full=%d\n",
1374 dirty_tx, lp->cur_tx, lp->tx_full);
1375 dirty_tx += lp->tx_ring_size;
1376 delta -= lp->tx_ring_size;
1379 if (lp->tx_full &&
1381 delta < lp->tx_ring_size - 2) {
1383 lp->tx_full = 0;
1386 lp->dirty_tx = dirty_tx;
1393 struct pcnet32_private *lp = container_of(napi, struct pcnet32_private, napi);
1394 struct net_device *dev = lp->dev;
1402 spin_lock_irqsave(&lp->lock, flags);
1405 lp->a->reset(ioaddr);
1406 lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
1413 val = lp->a->read_csr(ioaddr, CSR3);
1415 lp->a->write_csr(ioaddr, CSR3, val);
1418 lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN);
1421 spin_unlock_irqrestore(&lp->lock, flags);
1429 struct pcnet32_private *lp = netdev_priv(dev);
1430 int j = lp->phycount * PCNET32_REGS_PER_PHY;
1440 struct pcnet32_private *lp = netdev_priv(dev);
1441 const struct pcnet32_access *a = lp->a;
1445 spin_lock_irqsave(&lp->lock, flags);
1472 if (lp->mii) {
1475 if (lp->phymask & (1 << j)) {
1477 lp->a->write_bcr(ioaddr, 33,
1479 *buff++ = lp->a->read_bcr(ioaddr, 34);
1486 pcnet32_clr_suspend(lp, ioaddr);
1488 spin_unlock_irqrestore(&lp->lock, flags);
1598 struct pcnet32_private *lp;
1749 dev = alloc_etherdev(sizeof(*lp));
1841 lp = netdev_priv(dev);
1843 lp->init_block = dma_alloc_coherent(&pdev->dev,
1844 sizeof(*lp->init_block),
1845 &lp->init_dma_addr, GFP_KERNEL);
1846 if (!lp->init_block) {
1852 lp->pci_dev = pdev;
1854 lp->dev = dev;
1856 spin_lock_init(&lp->lock);
1858 lp->name = chipname;
1859 lp->shared_irq = shared;
1860 lp->tx_ring_size = TX_RING_SIZE; /* default tx ring size */
1861 lp->rx_ring_size = RX_RING_SIZE; /* default rx ring size */
1862 lp->tx_mod_mask = lp->tx_ring_size - 1;
1863 lp->rx_mod_mask = lp->rx_ring_size - 1;
1864 lp->tx_len_bits = (PCNET32_LOG_TX_BUFFERS << 12);
1865 lp->rx_len_bits = (PCNET32_LOG_RX_BUFFERS << 4);
1866 lp->mii_if.full_duplex = fdx;
1867 lp->mii_if.phy_id_mask = 0x1f;
1868 lp->mii_if.reg_num_mask = 0x1f;
1869 lp->dxsuflo = dxsuflo;
1870 lp->mii = mii;
1871 lp->chip_version = chip_version;
1872 lp->msg_enable = pcnet32_debug;
1875 lp->options = PCNET32_PORT_ASEL;
1877 lp->options = options_mapping[options[cards_found]];
1879 if (lp->chip_version == PCNET32_79C970A)
1880 lp->options = PCNET32_PORT_10BT;
1881 lp->mii_if.dev = dev;
1882 lp->mii_if.mdio_read = mdio_read;
1883 lp->mii_if.mdio_write = mdio_write;
1886 lp->napi.weight = lp->rx_ring_size / 2;
1888 netif_napi_add_weight(dev, &lp->napi, pcnet32_poll,
1889 lp->rx_ring_size / 2);
1891 if (fdx && !(lp->options & PCNET32_PORT_ASEL) &&
1893 lp->options |= PCNET32_PORT_FD;
1895 lp->a = a;
1898 if (pcnet32_alloc_ring(dev, pci_name(lp->pci_dev))) {
1905 lp->options = PCNET32_PORT_FD | PCNET32_PORT_GPSI;
1907 lp->init_block->mode = cpu_to_le16(0x0003); /* Disable Rx and Tx. */
1908 lp->init_block->tlen_rlen =
1909 cpu_to_le16(lp->tx_len_bits | lp->rx_len_bits);
1911 lp->init_block->phys_addr[i] = dev->dev_addr[i];
1912 lp->init_block->filter[0] = 0x00000000;
1913 lp->init_block->filter[1] = 0x00000000;
1914 lp->init_block->rx_ring = cpu_to_le32(lp->rx_ring_dma_addr);
1915 lp->init_block->tx_ring = cpu_to_le32(lp->tx_ring_dma_addr);
1920 a->write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
1921 a->write_csr(ioaddr, 2, (lp->init_dma_addr >> 16));
1951 if (lp->mii) {
1952 /* lp->phycount and lp->phymask are set to 0 by memset above */
1954 lp->mii_if.phy_id = ((lp->a->read_bcr(ioaddr, 33)) >> 5) & 0x1f;
1967 lp->phycount++;
1968 lp->phymask |= (1 << i);
1969 lp->mii_if.phy_id = i;
1974 lp->a->write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5);
1975 if (lp->phycount > 1)
1976 lp->options |= PCNET32_PORT_MII;
1979 timer_setup(&lp->watchdog_timer, pcnet32_watchdog, 0);
1993 lp->next = pcnet32_dev;
1998 pr_info("%s: registered as %s\n", dev->name, lp->name);
2008 dma_free_coherent(&lp->pci_dev->dev, sizeof(*lp->init_block),
2009 lp->init_block, lp->init_dma_addr);
2020 struct pcnet32_private *lp = netdev_priv(dev);
2022 lp->tx_ring = dma_alloc_coherent(&lp->pci_dev->dev,
2023 sizeof(struct pcnet32_tx_head) * lp->tx_ring_size,
2024 &lp->tx_ring_dma_addr, GFP_KERNEL);
2025 if (!lp->tx_ring) {
2026 netif_err(lp, drv, dev, "Coherent memory allocation failed\n");
2030 lp->rx_ring = dma_alloc_coherent(&lp->pci_dev->dev,
2031 sizeof(struct pcnet32_rx_head) * lp->rx_ring_size,
2032 &lp->rx_ring_dma_addr, GFP_KERNEL);
2033 if (!lp->rx_ring) {
2034 netif_err(lp, drv, dev, "Coherent memory allocation failed\n");
2038 lp->tx_dma_addr = kcalloc(lp->tx_ring_size, sizeof(dma_addr_t),
2040 if (!lp->tx_dma_addr)
2043 lp->rx_dma_addr = kcalloc(lp->rx_ring_size, sizeof(dma_addr_t),
2045 if (!lp->rx_dma_addr)
2048 lp->tx_skbuff = kcalloc(lp->tx_ring_size, sizeof(struct sk_buff *),
2050 if (!lp->tx_skbuff)
2053 lp->rx_skbuff = kcalloc(lp->rx_ring_size, sizeof(struct sk_buff *),
2055 if (!lp->rx_skbuff)
2063 struct pcnet32_private *lp = netdev_priv(dev);
2065 kfree(lp->tx_skbuff);
2066 lp->tx_skbuff = NULL;
2068 kfree(lp->rx_skbuff);
2069 lp->rx_skbuff = NULL;
2071 kfree(lp->tx_dma_addr);
2072 lp->tx_dma_addr = NULL;
2074 kfree(lp->rx_dma_addr);
2075 lp->rx_dma_addr = NULL;
2077 if (lp->tx_ring) {
2078 dma_free_coherent(&lp->pci_dev->dev,
2079 sizeof(struct pcnet32_tx_head) * lp->tx_ring_size,
2080 lp->tx_ring, lp->tx_ring_dma_addr);
2081 lp->tx_ring = NULL;
2084 if (lp->rx_ring) {
2085 dma_free_coherent(&lp->pci_dev->dev,
2086 sizeof(struct pcnet32_rx_head) * lp->rx_ring_size,
2087 lp->rx_ring, lp->rx_ring_dma_addr);
2088 lp->rx_ring = NULL;
2094 struct pcnet32_private *lp = netdev_priv(dev);
2095 struct pci_dev *pdev = lp->pci_dev;
2103 lp->shared_irq ? IRQF_SHARED : 0, dev->name,
2109 spin_lock_irqsave(&lp->lock, flags);
2117 lp->a->reset(ioaddr);
2120 lp->a->write_bcr(ioaddr, 20, 2);
2122 netif_printk(lp, ifup, KERN_DEBUG, dev,
2124 __func__, dev->irq, (u32) (lp->tx_ring_dma_addr),
2125 (u32) (lp->rx_ring_dma_addr),
2126 (u32) (lp->init_dma_addr));
2128 lp->autoneg = !!(lp->options & PCNET32_PORT_ASEL);
2129 lp->port_tp = !!(lp->options & PCNET32_PORT_10BT);
2130 lp->fdx = !!(lp->options & PCNET32_PORT_FD);
2133 val = lp->a->read_bcr(ioaddr, 2) & ~2;
2134 if (lp->options & PCNET32_PORT_ASEL)
2136 lp->a->write_bcr(ioaddr, 2, val);
2139 if (lp->mii_if.full_duplex) {
2140 val = lp->a->read_bcr(ioaddr, 9) & ~3;
2141 if (lp->options & PCNET32_PORT_FD) {
2143 if (lp->options == (PCNET32_PORT_FD | PCNET32_PORT_AUI))
2145 } else if (lp->options & PCNET32_PORT_ASEL) {
2147 if (lp->chip_version == 0x2627)
2150 lp->a->write_bcr(ioaddr, 9, val);
2154 val = lp->a->read_csr(ioaddr, 124) & ~0x10;
2155 if ((lp->options & PCNET32_PORT_PORTSEL) == PCNET32_PORT_GPSI)
2157 lp->a->write_csr(ioaddr, 124, val);
2163 if (lp->options & PCNET32_PORT_ASEL) {
2164 lp->options = PCNET32_PORT_FD | PCNET32_PORT_100;
2165 netif_printk(lp, link, KERN_DEBUG, dev,
2169 if (lp->phycount < 2) {
2175 if (lp->mii && !(lp->options & PCNET32_PORT_ASEL)) {
2176 lp->a->write_bcr(ioaddr, 32,
2177 lp->a->read_bcr(ioaddr, 32) | 0x0080);
2179 val = lp->a->read_bcr(ioaddr, 32) & ~0xb8;
2180 if (lp->options & PCNET32_PORT_FD)
2182 if (lp->options & PCNET32_PORT_100)
2184 lp->a->write_bcr(ioaddr, 32, val);
2186 if (lp->options & PCNET32_PORT_ASEL) {
2187 lp->a->write_bcr(ioaddr, 32,
2188 lp->a->read_bcr(ioaddr,
2191 val = lp->a->read_bcr(ioaddr, 32) & ~0x98;
2193 lp->a->write_bcr(ioaddr, 32, val);
2206 val = lp->a->read_bcr(ioaddr, 2);
2207 lp->a->write_bcr(ioaddr, 2, val & ~2);
2208 val = lp->a->read_bcr(ioaddr, 32);
2209 lp->a->write_bcr(ioaddr, 32, val & ~(1 << 7)); /* stop MII manager */
2211 if (!(lp->options & PCNET32_PORT_ASEL)) {
2217 (lp->options & PCNET32_PORT_100) ?
2219 bcr9 = lp->a->read_bcr(ioaddr, 9);
2221 if (lp->options & PCNET32_PORT_FD) {
2228 lp->a->write_bcr(ioaddr, 9, bcr9);
2232 if (lp->phymask & (1 << i)) {
2244 lp->mii_if.phy_id = i;
2246 if (lp->options & PCNET32_PORT_ASEL) {
2247 mii_ethtool_gset(&lp->mii_if, &ecmd);
2250 mii_ethtool_sset(&lp->mii_if, &ecmd);
2253 lp->mii_if.phy_id = first_phy;
2254 netif_info(lp, link, dev, "Using PHY number %d\n", first_phy);
2258 if (lp->dxsuflo) { /* Disable transmit stop on underflow */
2259 val = lp->a->read_csr(ioaddr, CSR3);
2261 lp->a->write_csr(ioaddr, CSR3, val);
2265 lp->init_block->mode =
2266 cpu_to_le16((lp->options & PCNET32_PORT_PORTSEL) << 7);
2274 napi_enable_locked(&lp->napi);
2277 lp->a->write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
2278 lp->a->write_csr(ioaddr, 2, (lp->init_dma_addr >> 16));
2280 lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
2281 lp->a->write_csr(ioaddr, CSR0, CSR0_INIT);
2285 if (lp->chip_version >= PCNET32_79C970A) {
2288 mod_timer(&lp->watchdog_timer, PCNET32_WATCHDOG_TIMEOUT);
2293 if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON)
2299 lp->a->write_csr(ioaddr, CSR0, CSR0_NORMAL);
2301 netif_printk(lp, ifup, KERN_DEBUG, dev,
2304 (u32) (lp->init_dma_addr),
2305 lp->a->read_csr(ioaddr, CSR0));
2307 spin_unlock_irqrestore(&lp->lock, flags);
2320 lp->a->write_bcr(ioaddr, 20, 4);
2323 spin_unlock_irqrestore(&lp->lock, flags);
2344 struct pcnet32_private *lp = netdev_priv(dev);
2347 for (i = 0; i < lp->tx_ring_size; i++) {
2348 lp->tx_ring[i].status = 0; /* CPU owns buffer */
2350 if (lp->tx_skbuff[i]) {
2351 if (!dma_mapping_error(&lp->pci_dev->dev, lp->tx_dma_addr[i]))
2352 dma_unmap_single(&lp->pci_dev->dev,
2353 lp->tx_dma_addr[i],
2354 lp->tx_skbuff[i]->len,
2356 dev_kfree_skb_any(lp->tx_skbuff[i]);
2358 lp->tx_skbuff[i] = NULL;
2359 lp->tx_dma_addr[i] = 0;
2366 struct pcnet32_private *lp = netdev_priv(dev);
2369 lp->tx_full = 0;
2370 lp->cur_rx = lp->cur_tx = 0;
2371 lp->dirty_rx = lp->dirty_tx = 0;
2373 for (i = 0; i < lp->rx_ring_size; i++) {
2374 struct sk_buff *rx_skbuff = lp->rx_skbuff[i];
2376 lp->rx_skbuff[i] = netdev_alloc_skb(dev, PKT_BUF_SKB);
2377 rx_skbuff = lp->rx_skbuff[i];
2380 netif_err(lp, drv, dev, "%s netdev_alloc_skb failed\n",
2388 if (lp->rx_dma_addr[i] == 0) {
2389 lp->rx_dma_addr[i] =
2390 dma_map_single(&lp->pci_dev->dev, rx_skbuff->data,
2392 if (dma_mapping_error(&lp->pci_dev->dev, lp->rx_dma_addr[i])) {
2394 netif_err(lp, drv, dev,
2400 lp->rx_ring[i].base = cpu_to_le32(lp->rx_dma_addr[i]);
2401 lp->rx_ring[i].buf_length = cpu_to_le16(NEG_BUF_SIZE);
2403 lp->rx_ring[i].status = cpu_to_le16(0x8000);
2407 for (i = 0; i < lp->tx_ring_size; i++) {
2408 lp->tx_ring[i].status = 0; /* CPU owns buffer */
2410 lp->tx_ring[i].base = 0;
2411 lp->tx_dma_addr[i] = 0;
2414 lp->init_block->tlen_rlen =
2415 cpu_to_le16(lp->tx_len_bits | lp->rx_len_bits);
2417 lp->init_block->phys_addr[i] = dev->dev_addr[i];
2418 lp->init_block->rx_ring = cpu_to_le32(lp->rx_ring_dma_addr);
2419 lp->init_block->tx_ring = cpu_to_le32(lp->tx_ring_dma_addr);
2430 struct pcnet32_private *lp = netdev_priv(dev);
2436 if (lp->a->read_csr(ioaddr, CSR0) & CSR0_STOP)
2440 netif_err(lp, drv, dev, "%s timed out waiting for stop\n",
2448 lp->a->write_csr(ioaddr, CSR0, CSR0_INIT);
2451 if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON)
2454 lp->a->write_csr(ioaddr, CSR0, csr0_bits);
2459 struct pcnet32_private *lp = netdev_priv(dev);
2462 spin_lock_irqsave(&lp->lock, flags);
2466 dev->name, lp->a->read_csr(ioaddr, CSR0));
2467 lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
2469 if (netif_msg_tx_err(lp)) {
2473 lp->dirty_tx, lp->cur_tx, lp->tx_full ? " (full)" : "",
2474 lp->cur_rx);
2475 for (i = 0; i < lp->rx_ring_size; i++)
2477 le32_to_cpu(lp->rx_ring[i].base),
2478 (-le16_to_cpu(lp->rx_ring[i].buf_length)) &
2479 0xffff, le32_to_cpu(lp->rx_ring[i].msg_length),
2480 le16_to_cpu(lp->rx_ring[i].status));
2481 for (i = 0; i < lp->tx_ring_size; i++)
2483 le32_to_cpu(lp->tx_ring[i].base),
2484 (-le16_to_cpu(lp->tx_ring[i].length)) & 0xffff,
2485 le32_to_cpu(lp->tx_ring[i].misc),
2486 le16_to_cpu(lp->tx_ring[i].status));
2494 spin_unlock_irqrestore(&lp->lock, flags);
2500 struct pcnet32_private *lp = netdev_priv(dev);
2506 spin_lock_irqsave(&lp->lock, flags);
2508 netif_printk(lp, tx_queued, KERN_DEBUG, dev,
2510 __func__, lp->a->read_csr(ioaddr, CSR0));
2520 entry = lp->cur_tx & lp->tx_mod_mask;
2525 lp->tx_ring[entry].length = cpu_to_le16(-skb->len);
2527 lp->tx_ring[entry].misc = 0x00000000;
2529 lp->tx_dma_addr[entry] =
2530 dma_map_single(&lp->pci_dev->dev, skb->data, skb->len,
2532 if (dma_mapping_error(&lp->pci_dev->dev, lp->tx_dma_addr[entry])) {
2537 lp->tx_skbuff[entry] = skb;
2538 lp->tx_ring[entry].base = cpu_to_le32(lp->tx_dma_addr[entry]);
2540 lp->tx_ring[entry].status = cpu_to_le16(status);
2542 lp->cur_tx++;
2546 lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_TXPOLL);
2548 if (lp->tx_ring[(entry + 1) & lp->tx_mod_mask].base != 0) {
2549 lp->tx_full = 1;
2553 spin_unlock_irqrestore(&lp->lock, flags);
2562 struct pcnet32_private *lp;
2568 lp = netdev_priv(dev);
2570 spin_lock(&lp->lock);
2572 csr0 = lp->a->read_csr(ioaddr, CSR0);
2577 lp->a->write_csr(ioaddr, CSR0, csr0 & ~0x004f);
2579 netif_printk(lp, intr, KERN_DEBUG, dev,
2581 csr0, lp->a->read_csr(ioaddr, CSR0));
2601 netif_err(lp, drv, dev, "Bus master arbitration failure, status %4.4x\n",
2605 if (napi_schedule_prep(&lp->napi)) {
2608 val = lp->a->read_csr(ioaddr, CSR3);
2610 lp->a->write_csr(ioaddr, CSR3, val);
2612 __napi_schedule(&lp->napi);
2615 csr0 = lp->a->read_csr(ioaddr, CSR0);
2618 netif_printk(lp, intr, KERN_DEBUG, dev,
2620 lp->a->read_csr(ioaddr, CSR0));
2622 spin_unlock(&lp->lock);
2630 struct pcnet32_private *lp = netdev_priv(dev);
2633 timer_delete_sync(&lp->watchdog_timer);
2636 napi_disable(&lp->napi);
2638 spin_lock_irqsave(&lp->lock, flags);
2640 dev->stats.rx_missed_errors = lp->a->read_csr(ioaddr, 112);
2642 netif_printk(lp, ifdown, KERN_DEBUG, dev,
2644 lp->a->read_csr(ioaddr, CSR0));
2647 lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
2653 lp->a->write_bcr(ioaddr, 20, 4);
2655 spin_unlock_irqrestore(&lp->lock, flags);
2659 spin_lock_irqsave(&lp->lock, flags);
2664 spin_unlock_irqrestore(&lp->lock, flags);
2671 struct pcnet32_private *lp = netdev_priv(dev);
2675 spin_lock_irqsave(&lp->lock, flags);
2676 dev->stats.rx_missed_errors = lp->a->read_csr(ioaddr, 112);
2677 spin_unlock_irqrestore(&lp->lock, flags);
2685 struct pcnet32_private *lp = netdev_priv(dev);
2686 volatile struct pcnet32_init_block *ib = lp->init_block;
2697 lp->a->write_csr(ioaddr, PCNET32_MC_FILTER, 0xffff);
2698 lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+1, 0xffff);
2699 lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+2, 0xffff);
2700 lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+3, 0xffff);
2714 lp->a->write_csr(ioaddr, PCNET32_MC_FILTER + i,
2724 struct pcnet32_private *lp = netdev_priv(dev);
2727 spin_lock_irqsave(&lp->lock, flags);
2729 csr15 = lp->a->read_csr(ioaddr, CSR15);
2732 netif_info(lp, hw, dev, "Promiscuous mode enabled\n");
2733 lp->init_block->mode =
2734 cpu_to_le16(0x8000 | (lp->options & PCNET32_PORT_PORTSEL) <<
2736 lp->a->write_csr(ioaddr, CSR15, csr15 | 0x8000);
2738 lp->init_block->mode =
2739 cpu_to_le16((lp->options & PCNET32_PORT_PORTSEL) << 7);
2740 lp->a->write_csr(ioaddr, CSR15, csr15 & 0x7fff);
2745 pcnet32_clr_suspend(lp, ioaddr);
2747 lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
2752 spin_unlock_irqrestore(&lp->lock, flags);
2755 /* This routine assumes that the lp->lock is held */
2758 struct pcnet32_private *lp = netdev_priv(dev);
2762 if (!lp->mii)
2765 lp->a->write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
2766 val_out = lp->a->read_bcr(ioaddr, 34);
2771 /* This routine assumes that the lp->lock is held */
2774 struct pcnet32_private *lp = netdev_priv(dev);
2777 if (!lp->mii)
2780 lp->a->write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
2781 lp->a->write_bcr(ioaddr, 34, val);
2786 struct pcnet32_private *lp = netdev_priv(dev);
2791 if (lp->mii) {
2792 spin_lock_irqsave(&lp->lock, flags);
2793 rc = generic_mii_ioctl(&lp->mii_if, if_mii(rq), cmd, NULL);
2794 spin_unlock_irqrestore(&lp->lock, flags);
2804 struct pcnet32_private *lp = netdev_priv(dev);
2805 struct mii_if_info mii = lp->mii_if;
2810 if (i == lp->mii_if.phy_id)
2812 if (lp->phymask & (1 << i)) {
2816 netif_info(lp, link, dev, "Using PHY number %d\n",
2821 mdio_read(dev, lp->mii_if.phy_id, MII_BMCR);
2822 mdio_write(dev, lp->mii_if.phy_id, MII_BMCR,
2831 lp->mii_if.phy_id = i;
2844 * Caller is assumed to hold and release the lp->lock.
2849 struct pcnet32_private *lp = netdev_priv(dev);
2854 if (lp->mii) {
2855 curr_link = mii_link_ok(&lp->mii_if);
2856 } else if (lp->chip_version == PCNET32_79C970A) {
2859 if (!lp->autoneg && lp->port_tp)
2860 curr_link = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
2865 curr_link = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
2870 netif_info(lp, link, dev, "link down\n");
2872 if (lp->phycount > 1) {
2877 if (lp->mii) {
2878 if (netif_msg_link(lp)) {
2881 mii_ethtool_gset(&lp->mii_if, &ecmd);
2887 bcr9 = lp->a->read_bcr(dev->base_addr, 9);
2888 if ((bcr9 & (1 << 0)) != lp->mii_if.full_duplex) {
2889 if (lp->mii_if.full_duplex)
2893 lp->a->write_bcr(dev->base_addr, 9, bcr9);
2896 netif_info(lp, link, dev, "link up\n");
2908 struct pcnet32_private *lp = timer_container_of(lp, t, watchdog_timer);
2909 struct net_device *dev = lp->dev;
2913 spin_lock_irqsave(&lp->lock, flags);
2915 spin_unlock_irqrestore(&lp->lock, flags);
2917 mod_timer(&lp->watchdog_timer, round_jiffies(PCNET32_WATCHDOG_TIMEOUT));
2949 struct pcnet32_private *lp = netdev_priv(dev);
2954 dma_free_coherent(&lp->pci_dev->dev, sizeof(*lp->init_block),
2955 lp->init_block, lp->init_dma_addr);
3032 struct pcnet32_private *lp = netdev_priv(pcnet32_dev);
3033 next_dev = lp->next;
3037 dma_free_coherent(&lp->pci_dev->dev, sizeof(*lp->init_block),
3038 lp->init_block, lp->init_dma_addr);
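
Most of the matches above follow a small number of recurring patterns: the descriptor rings (lp->tx_ring, lp->rx_ring) live in coherent DMA memory from dma_alloc_coherent(), the shadow arrays (lp->*_dma_addr, lp->*_skbuff) come from kcalloc(), every packet buffer is streaming-mapped with dma_map_single() and checked with dma_mapping_error(), and teardown unwinds with dma_unmap_single()/dma_free_coherent(). Below is a minimal sketch of that receive-ring setup under those assumptions; the names (my_priv, my_rx_desc, my_alloc_rx_ring) are hypothetical stand-ins for the driver's own functions, not the driver code itself.

#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/slab.h>

/* Hypothetical private struct mirroring the lp->rx_* members above. */
struct my_rx_desc {
	__le32 base;
	__le16 buf_length;
	__le16 status;
};

struct my_priv {
	struct pci_dev *pci_dev;
	struct my_rx_desc *rx_ring;	/* coherent descriptor ring */
	dma_addr_t rx_ring_dma_addr;
	dma_addr_t *rx_dma_addr;	/* per-buffer streaming mappings */
	struct sk_buff **rx_skbuff;
	unsigned int rx_ring_size;
};

static int my_alloc_rx_ring(struct net_device *dev, struct my_priv *lp,
			    unsigned int entries, int buf_size)
{
	unsigned int i = 0;

	/* Descriptor ring in coherent DMA memory, like lp->rx_ring. */
	lp->rx_ring = dma_alloc_coherent(&lp->pci_dev->dev,
					 sizeof(*lp->rx_ring) * entries,
					 &lp->rx_ring_dma_addr, GFP_KERNEL);
	if (!lp->rx_ring)
		return -ENOMEM;

	lp->rx_dma_addr = kcalloc(entries, sizeof(*lp->rx_dma_addr), GFP_KERNEL);
	lp->rx_skbuff = kcalloc(entries, sizeof(*lp->rx_skbuff), GFP_KERNEL);
	if (!lp->rx_dma_addr || !lp->rx_skbuff)
		goto err_free;

	for (i = 0; i < entries; i++) {
		struct sk_buff *skb = netdev_alloc_skb(dev, buf_size);

		if (!skb)
			goto err_free;
		/* Streaming mapping per buffer, always checked for errors. */
		lp->rx_dma_addr[i] = dma_map_single(&lp->pci_dev->dev, skb->data,
						    buf_size, DMA_FROM_DEVICE);
		if (dma_mapping_error(&lp->pci_dev->dev, lp->rx_dma_addr[i])) {
			dev_kfree_skb(skb);
			goto err_free;
		}
		lp->rx_skbuff[i] = skb;
		/* 32-bit DMA mask assumed, as in the driver. */
		lp->rx_ring[i].base = cpu_to_le32(lp->rx_dma_addr[i]);
		lp->rx_ring[i].buf_length = cpu_to_le16(-buf_size);
		wmb();	/* descriptor fields visible before ownership flips */
		lp->rx_ring[i].status = cpu_to_le16(0x8000); /* device owns it */
	}
	lp->rx_ring_size = entries;
	return 0;

err_free:
	/* Unwind: unmap and free what was set up, then free the ring itself. */
	while (i--) {
		dma_unmap_single(&lp->pci_dev->dev, lp->rx_dma_addr[i],
				 buf_size, DMA_FROM_DEVICE);
		dev_kfree_skb(lp->rx_skbuff[i]);
	}
	kfree(lp->rx_skbuff);
	kfree(lp->rx_dma_addr);
	dma_free_coherent(&lp->pci_dev->dev, sizeof(*lp->rx_ring) * entries,
			  lp->rx_ring, lp->rx_ring_dma_addr);
	return -ENOMEM;
}

The split mirrors the driver's own choice visible above: descriptor rings that both CPU and device poll stay in coherent memory for their whole lifetime, while packet buffers get cheaper streaming mappings that are synced or remapped per packet (see the dma_sync_single_for_cpu/dma_sync_single_for_device calls around lines 1263–1270).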