Lines Matching +full:suppress +full:- +full:preamble

8  *	-----<snip>-----
12 * Copyright (C) 2000-2001 Toshiba Corporation
15 * -----<snip>-----
21 * (C) Copyright TOSHIBA CORPORATION 2004-2005
122 /* DMA_Ctl bit assign ------------------------------------------------------- */
137 /* RxFragSize bit assign ---------------------------------------------------- */
141 /* MAC_Ctl bit assign ------------------------------------------------------- */
155 /* PROM_Ctl bit assign ------------------------------------------------------ */
165 /* CAM_Ctl bit assign ------------------------------------------------------- */
173 /* CAM_Ena bit assign ------------------------------------------------------- */
175 #define CAM_Ena_Mask ((1<<CAM_ENTRY_MAX)-1) /* CAM Enable bits (Max 21bits) */
181 /* Tx_Ctl bit assign -------------------------------------------------------- */
184 #define Tx_NoPad 0x00000004 /* 1:Suppress Padding */
185 #define Tx_NoCRC 0x00000008 /* 1:Suppress CRC */
186 #define Tx_FBack 0x00000010 /* 1:Fast Back-off */
195 /* Tx_Stat bit assign ------------------------------------------------------- */
211 /* Rx_Ctl bit assign -------------------------------------------------------- */
225 /* Rx_Stat bit assign ------------------------------------------------------- */
241 /* Int_En bit assign -------------------------------------------------------- */
242 #define Int_NRAbtEn 0x00000800 /* 1:Non-recoverable Abort Enable */
256 /* Int_Src bit assign ------------------------------------------------------- */
273 /* MD_CA bit assign --------------------------------------------------------- */
274 #define MD_CA_PreSup 0x00001000 /* 1:Preamble Suppress */
299 /* Frame Descriptor bit assign ---------------------------------------------- */
312 /* Buffer Descriptor bit assign --------------------------------------------- */
326 /* Do not use Rx_StripCRC -- it causes trouble on BLEx/FDAEx condition */
335 #define HAVE_DMA_RXALIGN(lp) likely((lp)->chiptype != TC35815CF)
416 * Receiving: Non-Packing Mode.
441 return lp->fd_buf_dma + ((u8 *)virt - (u8 *)lp->fd_buf); in fd_virt_to_bus()
446 return (void *)((u8 *)lp->fd_buf + (bus - lp->fd_buf_dma)); in fd_bus_to_virt()
457 *dma_handle = dma_map_single(&hwdev->dev, skb->data, RX_BUF_SIZE, in alloc_rxbuf_skb()
459 if (dma_mapping_error(&hwdev->dev, *dma_handle)) { in alloc_rxbuf_skb()
469 dma_unmap_single(&hwdev->dev, dma_handle, RX_BUF_SIZE, in free_rxbuf_skb()
492 /* Example routines you must write ;->. */
504 struct net_device *dev = bus->priv; in tc_mdio_read()
506 (struct tc35815_regs __iomem *)dev->base_addr; in tc_mdio_read()
509 tc_writel(MD_CA_Busy | (mii_id << 5) | (regnum & 0x1f), &tr->MD_CA); in tc_mdio_read()
511 while (tc_readl(&tr->MD_CA) & MD_CA_Busy) { in tc_mdio_read()
513 return -EIO; in tc_mdio_read()
516 return tc_readl(&tr->MD_Data) & 0xffff; in tc_mdio_read()
521 struct net_device *dev = bus->priv; in tc_mdio_write()
523 (struct tc35815_regs __iomem *)dev->base_addr; in tc_mdio_write()
526 tc_writel(val, &tr->MD_Data); in tc_mdio_write()
528 &tr->MD_CA); in tc_mdio_write()
530 while (tc_readl(&tr->MD_CA) & MD_CA_Busy) { in tc_mdio_write()
532 return -EIO; in tc_mdio_write()
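
Taken together, the tc_mdio_read/tc_mdio_write fragments above show the MD_CA handshake: the driver writes one command word (busy bit, PHY address in bits 9:5, register number in bits 4:0, plus a write-direction flag on writes, with MD_CA_PreSup available to suppress the preamble), then polls MD_CA_Busy until the management cycle completes. A minimal read-side sketch assembled from those fragments; the 2000-iteration timeout and the udelay(1) pacing are illustrative assumptions, not the driver's exact values:

    /* Sketch: MDIO read through MD_CA, mirroring lines 509-516 above.
     * Timeout count and delay are assumptions for illustration only. */
    static int mdio_read_sketch(struct tc35815_regs __iomem *tr,
                                int mii_id, int regnum)
    {
            int timeout = 2000;

            /* Start the management cycle: PHY address in bits 9:5,
             * register number in bits 4:0, MD_CA_Busy kicks it off. */
            tc_writel(MD_CA_Busy | (mii_id << 5) | (regnum & 0x1f),
                      &tr->MD_CA);
            /* The controller clears MD_CA_Busy when the cycle is done. */
            while (tc_readl(&tr->MD_CA) & MD_CA_Busy) {
                    if (--timeout <= 0)
                            return -EIO;    /* PHY never answered */
                    udelay(1);
            }
            /* The 16-bit read result is latched in MD_Data. */
            return tc_readl(&tr->MD_Data) & 0xffff;
    }
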
541 struct phy_device *phydev = dev->phydev; in tc_handle_link_change()
545 spin_lock_irqsave(&lp->lock, flags); in tc_handle_link_change()
546 if (phydev->link && in tc_handle_link_change()
547 (lp->speed != phydev->speed || lp->duplex != phydev->duplex)) { in tc_handle_link_change()
549 (struct tc35815_regs __iomem *)dev->base_addr; in tc_handle_link_change()
552 reg = tc_readl(&tr->MAC_Ctl); in tc_handle_link_change()
554 tc_writel(reg, &tr->MAC_Ctl); in tc_handle_link_change()
555 if (phydev->duplex == DUPLEX_FULL) in tc_handle_link_change()
559 tc_writel(reg, &tr->MAC_Ctl); in tc_handle_link_change()
561 tc_writel(reg, &tr->MAC_Ctl); in tc_handle_link_change()
572 if (phydev->duplex == DUPLEX_HALF && in tc_handle_link_change()
573 lp->chiptype != TC35815_TX4939) in tc_handle_link_change()
574 tc_writel(tc_readl(&tr->Tx_Ctl) | Tx_EnLCarr, in tc_handle_link_change()
575 &tr->Tx_Ctl); in tc_handle_link_change()
577 lp->speed = phydev->speed; in tc_handle_link_change()
578 lp->duplex = phydev->duplex; in tc_handle_link_change()
582 if (phydev->link != lp->link) { in tc_handle_link_change()
583 if (phydev->link) { in tc_handle_link_change()
585 if (dev->flags & IFF_PROMISC) in tc_handle_link_change()
588 lp->speed = 0; in tc_handle_link_change()
589 lp->duplex = -1; in tc_handle_link_change()
591 lp->link = phydev->link; in tc_handle_link_change()
595 spin_unlock_irqrestore(&lp->lock, flags); in tc_handle_link_change()
600 dev->name, in tc_handle_link_change()
613 phydev = phy_find_first(lp->mii_bus); in tc_mii_probe()
615 printk(KERN_ERR "%s: no PHY found\n", dev->name); in tc_mii_probe()
616 return -ENODEV; in tc_mii_probe()
622 lp->chiptype == TC35815_TX4939 ? PHY_INTERFACE_MODE_RMII : PHY_INTERFACE_MODE_MII); in tc_mii_probe()
624 printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name); in tc_mii_probe()
646 linkmode_andnot(phydev->supported, phydev->supported, mask); in tc_mii_probe()
647 linkmode_copy(phydev->advertising, phydev->supported); in tc_mii_probe()
649 lp->link = 0; in tc_mii_probe()
650 lp->speed = 0; in tc_mii_probe()
651 lp->duplex = -1; in tc_mii_probe()
661 lp->mii_bus = mdiobus_alloc(); in tc_mii_init()
662 if (lp->mii_bus == NULL) { in tc_mii_init()
663 err = -ENOMEM; in tc_mii_init()
667 lp->mii_bus->name = "tc35815_mii_bus"; in tc_mii_init()
668 lp->mii_bus->read = tc_mdio_read; in tc_mii_init()
669 lp->mii_bus->write = tc_mdio_write; in tc_mii_init()
670 snprintf(lp->mii_bus->id, MII_BUS_ID_SIZE, "%x", in tc_mii_init()
671 (lp->pci_dev->bus->number << 8) | lp->pci_dev->devfn); in tc_mii_init()
672 lp->mii_bus->priv = dev; in tc_mii_init()
673 lp->mii_bus->parent = &lp->pci_dev->dev; in tc_mii_init()
674 err = mdiobus_register(lp->mii_bus); in tc_mii_init()
683 mdiobus_unregister(lp->mii_bus); in tc_mii_init()
685 mdiobus_free(lp->mii_bus); in tc_mii_init()
693 * should provide a "tc35815-mac" device with a MAC address in its
700 unsigned int id = pci_dev->irq; in tc35815_mac_match()
701 return !strcmp(plat_dev->name, "tc35815-mac") && plat_dev->id == id; in tc35815_mac_match()
708 lp->pci_dev, tc35815_mac_match); in tc35815_read_plat_dev_addr()
710 if (pd->platform_data) in tc35815_read_plat_dev_addr()
711 memcpy(dev->dev_addr, pd->platform_data, ETH_ALEN); in tc35815_read_plat_dev_addr()
713 return is_valid_ether_addr(dev->dev_addr) ? 0 : -ENODEV; in tc35815_read_plat_dev_addr()
715 return -ENODEV; in tc35815_read_plat_dev_addr()
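
On boards without a usable PROM, the fragments above show the fallback: the driver looks up a platform device named "tc35815-mac" whose id equals the NIC's PCI IRQ and copies its platform_data into dev_addr. A hedged sketch of how board-support code might publish such a device; 'tc35815_irq' and the address bytes are placeholders, not taken from any real board file:

    /* Sketch: publishing a MAC address for tc35815_read_plat_dev_addr().
     * platform_device_register_data() copies 'mac' into platform_data;
     * the id must equal the NIC's IRQ so tc35815_mac_match() accepts it.
     * 'tc35815_irq' and the address bytes are hypothetical; the returned
     * platform_device pointer is ignored in this sketch. */
    static const u8 mac[ETH_ALEN] = { 0x00, 0x1a, 0x2b, 0x3c, 0x4d, 0x5e };

    platform_device_register_data(NULL, "tc35815-mac", tc35815_irq,
                                  mac, ETH_ALEN);
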
720 return -ENODEV; in tc35815_read_plat_dev_addr()
727 (struct tc35815_regs __iomem *)dev->base_addr; in tc35815_init_dev_addr()
730 while (tc_readl(&tr->PROM_Ctl) & PROM_Busy) in tc35815_init_dev_addr()
734 tc_writel(PROM_Busy | PROM_Read | (i / 2 + 2), &tr->PROM_Ctl); in tc35815_init_dev_addr()
735 while (tc_readl(&tr->PROM_Ctl) & PROM_Busy) in tc35815_init_dev_addr()
737 data = tc_readl(&tr->PROM_Data); in tc35815_init_dev_addr()
738 dev->dev_addr[i] = data & 0xff; in tc35815_init_dev_addr()
739 dev->dev_addr[i+1] = data >> 8; in tc35815_init_dev_addr()
741 if (!is_valid_ether_addr(dev->dev_addr)) in tc35815_init_dev_addr()
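
With no platform override, the station address comes out of the on-board serial PROM one 16-bit word at a time: a PROM_Read command at word offset i/2 + 2 is issued, PROM_Busy is polled, and each PROM_Data word yields two address bytes, low byte first. A sketch of the loop those fragments imply, assuming it steps over the six address bytes in pairs; the cpu_relax() in the busy-wait is an assumption:

    /* Sketch of the PROM read loop around lines 734-739 above. */
    for (i = 0; i < ETH_ALEN; i += 2) {
            unsigned short data;

            /* Kick a read of PROM word (i/2 + 2) and wait for completion. */
            tc_writel(PROM_Busy | PROM_Read | (i / 2 + 2), &tr->PROM_Ctl);
            while (tc_readl(&tr->PROM_Ctl) & PROM_Busy)
                    cpu_relax();
            /* Each word carries two address bytes, low byte first. */
            data = tc_readl(&tr->PROM_Data);
            dev->dev_addr[i]     = data & 0xff;
            dev->dev_addr[i + 1] = data >> 8;
    }
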
772 dev_printk(KERN_DEBUG, &pdev->dev, in tc35815_init_one()
777 if (!pdev->irq) { in tc35815_init_one()
778 dev_warn(&pdev->dev, "no IRQ assigned.\n"); in tc35815_init_one()
779 return -ENODEV; in tc35815_init_one()
785 return -ENOMEM; in tc35815_init_one()
787 SET_NETDEV_DEV(dev, &pdev->dev); in tc35815_init_one()
789 lp->dev = dev; in tc35815_init_one()
791 /* enable device (incl. PCI PM wakeup), and bus-mastering */ in tc35815_init_one()
802 dev->netdev_ops = &tc35815_netdev_ops; in tc35815_init_one()
803 dev->ethtool_ops = &tc35815_ethtool_ops; in tc35815_init_one()
804 dev->watchdog_timeo = TC35815_TX_TIMEOUT; in tc35815_init_one()
805 netif_napi_add(dev, &lp->napi, tc35815_poll, NAPI_WEIGHT); in tc35815_init_one()
807 dev->irq = pdev->irq; in tc35815_init_one()
808 dev->base_addr = (unsigned long)ioaddr; in tc35815_init_one()
810 INIT_WORK(&lp->restart_work, tc35815_restart_work); in tc35815_init_one()
811 spin_lock_init(&lp->lock); in tc35815_init_one()
812 spin_lock_init(&lp->rx_lock); in tc35815_init_one()
813 lp->pci_dev = pdev; in tc35815_init_one()
814 lp->chiptype = ent->driver_data; in tc35815_init_one()
816 lp->msg_enable = NETIF_MSG_TX_ERR | NETIF_MSG_HW | NETIF_MSG_DRV | NETIF_MSG_LINK; in tc35815_init_one()
824 dev_warn(&pdev->dev, "not valid ether addr\n"); in tc35815_init_one()
833 dev->name, in tc35815_init_one()
834 chip_info[ent->driver_data].name, in tc35815_init_one()
835 dev->base_addr, in tc35815_init_one()
836 dev->dev_addr, in tc35815_init_one()
837 dev->irq); in tc35815_init_one()
858 phy_disconnect(dev->phydev); in tc35815_remove_one()
859 mdiobus_unregister(lp->mii_bus); in tc35815_remove_one()
860 mdiobus_free(lp->mii_bus); in tc35815_remove_one()
872 if (!lp->fd_buf) { in tc35815_init_queues()
879 lp->fd_buf = dma_alloc_coherent(&lp->pci_dev->dev, in tc35815_init_queues()
881 &lp->fd_buf_dma, GFP_ATOMIC); in tc35815_init_queues()
882 if (!lp->fd_buf) in tc35815_init_queues()
883 return -ENOMEM; in tc35815_init_queues()
885 lp->rx_skbs[i].skb = in tc35815_init_queues()
886 alloc_rxbuf_skb(dev, lp->pci_dev, in tc35815_init_queues()
887 &lp->rx_skbs[i].skb_dma); in tc35815_init_queues()
888 if (!lp->rx_skbs[i].skb) { in tc35815_init_queues()
889 while (--i >= 0) { in tc35815_init_queues()
890 free_rxbuf_skb(lp->pci_dev, in tc35815_init_queues()
891 lp->rx_skbs[i].skb, in tc35815_init_queues()
892 lp->rx_skbs[i].skb_dma); in tc35815_init_queues()
893 lp->rx_skbs[i].skb = NULL; in tc35815_init_queues()
895 dma_free_coherent(&lp->pci_dev->dev, in tc35815_init_queues()
897 lp->fd_buf, lp->fd_buf_dma); in tc35815_init_queues()
898 lp->fd_buf = NULL; in tc35815_init_queues()
899 return -ENOMEM; in tc35815_init_queues()
903 dev->name, lp->fd_buf); in tc35815_init_queues()
907 clear_page((void *)((unsigned long)lp->fd_buf + in tc35815_init_queues()
910 fd_addr = (unsigned long)lp->fd_buf; in tc35815_init_queues()
913 lp->rfd_base = (struct RxFD *)fd_addr; in tc35815_init_queues()
916 lp->rfd_base[i].fd.FDCtl = cpu_to_le32(FD_CownsFD); in tc35815_init_queues()
917 lp->rfd_cur = lp->rfd_base; in tc35815_init_queues()
918 lp->rfd_limit = (struct RxFD *)fd_addr - (RX_FD_RESERVE + 1); in tc35815_init_queues()
921 lp->tfd_base = (struct TxFD *)fd_addr; in tc35815_init_queues()
924 lp->tfd_base[i].fd.FDNext = cpu_to_le32(fd_virt_to_bus(lp, &lp->tfd_base[i+1])); in tc35815_init_queues()
925 lp->tfd_base[i].fd.FDSystem = cpu_to_le32(0xffffffff); in tc35815_init_queues()
926 lp->tfd_base[i].fd.FDCtl = cpu_to_le32(0); in tc35815_init_queues()
928 lp->tfd_base[TX_FD_NUM-1].fd.FDNext = cpu_to_le32(fd_virt_to_bus(lp, &lp->tfd_base[0])); in tc35815_init_queues()
929 lp->tfd_start = 0; in tc35815_init_queues()
930 lp->tfd_end = 0; in tc35815_init_queues()
933 lp->fbl_ptr = (struct FrFD *)fd_addr; in tc35815_init_queues()
934 lp->fbl_ptr->fd.FDNext = cpu_to_le32(fd_virt_to_bus(lp, lp->fbl_ptr)); in tc35815_init_queues()
935 lp->fbl_ptr->fd.FDCtl = cpu_to_le32(RX_BUF_NUM | FD_CownsFD); in tc35815_init_queues()
941 lp->fbl_count = 0; in tc35815_init_queues()
943 if (lp->rx_skbs[i].skb) { in tc35815_init_queues()
944 if (i != lp->fbl_count) { in tc35815_init_queues()
945 lp->rx_skbs[lp->fbl_count].skb = in tc35815_init_queues()
946 lp->rx_skbs[i].skb; in tc35815_init_queues()
947 lp->rx_skbs[lp->fbl_count].skb_dma = in tc35815_init_queues()
948 lp->rx_skbs[i].skb_dma; in tc35815_init_queues()
950 lp->fbl_count++; in tc35815_init_queues()
954 if (i >= lp->fbl_count) { in tc35815_init_queues()
955 lp->fbl_ptr->bd[i].BuffData = 0; in tc35815_init_queues()
956 lp->fbl_ptr->bd[i].BDCtl = 0; in tc35815_init_queues()
959 lp->fbl_ptr->bd[i].BuffData = in tc35815_init_queues()
960 cpu_to_le32(lp->rx_skbs[i].skb_dma); in tc35815_init_queues()
962 lp->fbl_ptr->bd[i].BDCtl = in tc35815_init_queues()
968 dev->name, lp->tfd_base, lp->rfd_base, lp->fbl_ptr); in tc35815_init_queues()
979 u32 fdsystem = le32_to_cpu(lp->tfd_base[i].fd.FDSystem); in tc35815_clear_queues()
982 lp->tx_skbs[fdsystem].skb : NULL; in tc35815_clear_queues()
984 if (lp->tx_skbs[i].skb != skb) { in tc35815_clear_queues()
985 printk("%s: tx_skbs mismatch(%d).\n", dev->name, i); in tc35815_clear_queues()
989 BUG_ON(lp->tx_skbs[i].skb != skb); in tc35815_clear_queues()
992 dma_unmap_single(&lp->pci_dev->dev, in tc35815_clear_queues()
993 lp->tx_skbs[i].skb_dma, skb->len, in tc35815_clear_queues()
995 lp->tx_skbs[i].skb = NULL; in tc35815_clear_queues()
996 lp->tx_skbs[i].skb_dma = 0; in tc35815_clear_queues()
999 lp->tfd_base[i].fd.FDSystem = cpu_to_le32(0xffffffff); in tc35815_clear_queues()
1011 if (lp->tfd_base) { in tc35815_free_queues()
1013 u32 fdsystem = le32_to_cpu(lp->tfd_base[i].fd.FDSystem); in tc35815_free_queues()
1016 lp->tx_skbs[fdsystem].skb : NULL; in tc35815_free_queues()
1018 if (lp->tx_skbs[i].skb != skb) { in tc35815_free_queues()
1019 printk("%s: tx_skbs mismatch(%d).\n", dev->name, i); in tc35815_free_queues()
1023 BUG_ON(lp->tx_skbs[i].skb != skb); in tc35815_free_queues()
1026 dma_unmap_single(&lp->pci_dev->dev, in tc35815_free_queues()
1027 lp->tx_skbs[i].skb_dma, in tc35815_free_queues()
1028 skb->len, DMA_TO_DEVICE); in tc35815_free_queues()
1030 lp->tx_skbs[i].skb = NULL; in tc35815_free_queues()
1031 lp->tx_skbs[i].skb_dma = 0; in tc35815_free_queues()
1033 lp->tfd_base[i].fd.FDSystem = cpu_to_le32(0xffffffff); in tc35815_free_queues()
1037 lp->rfd_base = NULL; in tc35815_free_queues()
1038 lp->rfd_limit = NULL; in tc35815_free_queues()
1039 lp->rfd_cur = NULL; in tc35815_free_queues()
1040 lp->fbl_ptr = NULL; in tc35815_free_queues()
1043 if (lp->rx_skbs[i].skb) { in tc35815_free_queues()
1044 free_rxbuf_skb(lp->pci_dev, lp->rx_skbs[i].skb, in tc35815_free_queues()
1045 lp->rx_skbs[i].skb_dma); in tc35815_free_queues()
1046 lp->rx_skbs[i].skb = NULL; in tc35815_free_queues()
1049 if (lp->fd_buf) { in tc35815_free_queues()
1050 dma_free_coherent(&lp->pci_dev->dev, PAGE_SIZE * FD_PAGE_NUM, in tc35815_free_queues()
1051 lp->fd_buf, lp->fd_buf_dma); in tc35815_free_queues()
1052 lp->fd_buf = NULL; in tc35815_free_queues()
1060 le32_to_cpu(fd->fd.FDNext), in dump_txfd()
1061 le32_to_cpu(fd->fd.FDSystem), in dump_txfd()
1062 le32_to_cpu(fd->fd.FDStat), in dump_txfd()
1063 le32_to_cpu(fd->fd.FDCtl)); in dump_txfd()
1066 le32_to_cpu(fd->bd.BuffData), in dump_txfd()
1067 le32_to_cpu(fd->bd.BDCtl)); in dump_txfd()
1074 int i, bd_count = (le32_to_cpu(fd->fd.FDCtl) & FD_BDCnt_MASK) >> FD_BDCnt_SHIFT; in dump_rxfd()
1078 le32_to_cpu(fd->fd.FDNext), in dump_rxfd()
1079 le32_to_cpu(fd->fd.FDSystem), in dump_rxfd()
1080 le32_to_cpu(fd->fd.FDStat), in dump_rxfd()
1081 le32_to_cpu(fd->fd.FDCtl)); in dump_rxfd()
1082 if (le32_to_cpu(fd->fd.FDCtl) & FD_CownsFD) in dump_rxfd()
1087 le32_to_cpu(fd->bd[i].BuffData), in dump_rxfd()
1088 le32_to_cpu(fd->bd[i].BDCtl)); in dump_rxfd()
1099 le32_to_cpu(fd->fd.FDNext), in dump_frfd()
1100 le32_to_cpu(fd->fd.FDSystem), in dump_frfd()
1101 le32_to_cpu(fd->fd.FDStat), in dump_frfd()
1102 le32_to_cpu(fd->fd.FDCtl)); in dump_frfd()
1106 le32_to_cpu(fd->bd[i].BuffData), in dump_frfd()
1107 le32_to_cpu(fd->bd[i].BDCtl)); in dump_frfd()
1118 lp->tfd_base, lp->tfd_start, lp->tfd_end); in panic_queues()
1120 lp->rfd_base, lp->rfd_limit, lp->rfd_cur); in panic_queues()
1121 printk("FrFD %p\n", lp->fbl_ptr); in panic_queues()
1123 dump_txfd(&lp->tfd_base[i]); in panic_queues()
1125 int bd_count = dump_rxfd(&lp->rfd_base[i]); in panic_queues()
1128 dump_frfd(lp->fbl_ptr); in panic_queues()
1129 panic("%s: Illegal queue state.", dev->name); in panic_queues()
1143 return (lp->tfd_start + 1) % TX_FD_NUM == lp->tfd_end; in tc35815_tx_full()
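
Here tfd_start is the slot the next frame will use and tfd_end is the oldest descriptor still owned by the controller, so the ring is declared full one slot early; that sacrificed slot is what keeps "full" distinguishable from "empty" (start == end). A worked example with a hypothetical ring of 4 descriptors:

    /* Hypothetical TX_FD_NUM = 4:
     *   start == end           -> ring empty; send_packet kicks TxFrmPtr
     *   (start + 1) % 4 == end -> ring full; the netif queue is stopped
     * e.g. start = 2, end = 3: (2 + 1) % 4 == 3, so tc35815_tx_full()
     * returns true until tc35815_txdone() advances tfd_end. */
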
1151 if (dev->phydev) { in tc35815_restart()
1152 ret = phy_init_hw(dev->phydev); in tc35815_restart()
1154 printk(KERN_ERR "%s: PHY init failed.\n", dev->name); in tc35815_restart()
1157 spin_lock_bh(&lp->rx_lock); in tc35815_restart()
1158 spin_lock_irq(&lp->lock); in tc35815_restart()
1164 spin_unlock_irq(&lp->lock); in tc35815_restart()
1165 spin_unlock_bh(&lp->rx_lock); in tc35815_restart()
1174 struct net_device *dev = lp->dev; in tc35815_restart_work()
1183 (struct tc35815_regs __iomem *)dev->base_addr; in tc35815_schedule_restart()
1187 spin_lock_irqsave(&lp->lock, flags); in tc35815_schedule_restart()
1188 tc_writel(0, &tr->Int_En); in tc35815_schedule_restart()
1189 tc_writel(tc_readl(&tr->DMA_Ctl) | DMA_IntMask, &tr->DMA_Ctl); in tc35815_schedule_restart()
1190 schedule_work(&lp->restart_work); in tc35815_schedule_restart()
1191 spin_unlock_irqrestore(&lp->lock, flags); in tc35815_schedule_restart()
1197 (struct tc35815_regs __iomem *)dev->base_addr; in tc35815_tx_timeout()
1200 dev->name, tc_readl(&tr->Tx_Stat)); in tc35815_tx_timeout()
1204 dev->stats.tx_errors++; in tc35815_tx_timeout()
1213 * there is a non-reboot way to recover if something goes wrong.
1222 * See 3c503.c for an example of selecting the IRQ at config-time. in tc35815_open()
1224 if (request_irq(dev->irq, tc35815_interrupt, IRQF_SHARED, in tc35815_open()
1225 dev->name, dev)) in tc35815_open()
1226 return -EAGAIN; in tc35815_open()
1231 free_irq(dev->irq, dev); in tc35815_open()
1232 return -EAGAIN; in tc35815_open()
1235 napi_enable(&lp->napi); in tc35815_open()
1238 spin_lock_irq(&lp->lock); in tc35815_open()
1240 spin_unlock_irq(&lp->lock); in tc35815_open()
1244 phy_start(dev->phydev); in tc35815_open()
1280 spin_lock_irqsave(&lp->lock, flags); in tc35815_send_packet()
1283 if ((lp->tfd_start + TX_FD_NUM - lp->tfd_end) % TX_FD_NUM > in tc35815_send_packet()
1288 print_eth(skb->data); in tc35815_send_packet()
1290 if (lp->tx_skbs[lp->tfd_start].skb) { in tc35815_send_packet()
1291 printk("%s: tx_skbs conflict.\n", dev->name); in tc35815_send_packet()
1295 BUG_ON(lp->tx_skbs[lp->tfd_start].skb); in tc35815_send_packet()
1297 lp->tx_skbs[lp->tfd_start].skb = skb; in tc35815_send_packet()
1298 lp->tx_skbs[lp->tfd_start].skb_dma = dma_map_single(&lp->pci_dev->dev, in tc35815_send_packet()
1299 skb->data, in tc35815_send_packet()
1300 skb->len, in tc35815_send_packet()
1304 txfd = &lp->tfd_base[lp->tfd_start]; in tc35815_send_packet()
1305 txfd->bd.BuffData = cpu_to_le32(lp->tx_skbs[lp->tfd_start].skb_dma); in tc35815_send_packet()
1306 txfd->bd.BDCtl = cpu_to_le32(skb->len); in tc35815_send_packet()
1307 txfd->fd.FDSystem = cpu_to_le32(lp->tfd_start); in tc35815_send_packet()
1308 txfd->fd.FDCtl = cpu_to_le32(FD_CownsFD | (1 << FD_BDCnt_SHIFT)); in tc35815_send_packet()
1310 if (lp->tfd_start == lp->tfd_end) { in tc35815_send_packet()
1312 (struct tc35815_regs __iomem *)dev->base_addr; in tc35815_send_packet()
1314 txfd->fd.FDNext |= cpu_to_le32(FD_Next_EOL); in tc35815_send_packet()
1315 txfd->fd.FDCtl |= cpu_to_le32(FD_FrmOpt_IntTx); in tc35815_send_packet()
1317 printk("%s: starting TxFD.\n", dev->name); in tc35815_send_packet()
1320 tc_writel(fd_virt_to_bus(lp, txfd), &tr->TxFrmPtr); in tc35815_send_packet()
1322 txfd->fd.FDNext &= cpu_to_le32(~FD_Next_EOL); in tc35815_send_packet()
1324 printk("%s: queueing TxFD.\n", dev->name); in tc35815_send_packet()
1328 lp->tfd_start = (lp->tfd_start + 1) % TX_FD_NUM; in tc35815_send_packet()
1336 printk(KERN_WARNING "%s: TxFD Exhausted.\n", dev->name); in tc35815_send_packet()
1344 spin_unlock_irqrestore(&lp->lock, flags); in tc35815_send_packet()
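
The fragments from tc35815_send_packet() show how a new descriptor joins the chain: if the ring was idle (tfd_start == tfd_end) the frame is marked end-of-list, asked to interrupt on completion, and its bus address is written to TxFrmPtr to start the DMA engine; otherwise only the FD_Next_EOL bit is cleared so the already-running engine reaches it by following FDNext. A condensed sketch of that decision, using the same names as above and omitting error handling:

    /* Sketch of handing a freshly built txfd to the controller
     * (compressed from lines 1310-1328 above). */
    if (lp->tfd_start == lp->tfd_end) {
            /* Ring was idle: terminate the list here, request a Tx
             * interrupt, and point the DMA engine at this frame. */
            txfd->fd.FDNext |= cpu_to_le32(FD_Next_EOL);
            txfd->fd.FDCtl  |= cpu_to_le32(FD_FrmOpt_IntTx);
            tc_writel(fd_virt_to_bus(lp, txfd), &tr->TxFrmPtr);
    } else {
            /* Engine already running: splice the frame in by clearing
             * its end-of-list marker. */
            txfd->fd.FDNext &= cpu_to_le32(~FD_Next_EOL);
    }
    lp->tfd_start = (lp->tfd_start + 1) % TX_FD_NUM;
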
1354 dev->name, status); in tc35815_fatal_error_interrupt()
1363 panic("%s: Too many fatal errors.", dev->name); in tc35815_fatal_error_interrupt()
1364 printk(KERN_WARNING "%s: Resetting ...\n", dev->name); in tc35815_fatal_error_interrupt()
1372 int ret = -1; in tc35815_do_interrupt()
1382 dev_warn(&dev->dev, in tc35815_do_interrupt()
1385 dev->stats.rx_dropped++; in tc35815_do_interrupt()
1390 dev_warn(&dev->dev, in tc35815_do_interrupt()
1393 dev->stats.rx_dropped++; in tc35815_do_interrupt()
1398 dev_warn(&dev->dev, in tc35815_do_interrupt()
1401 dev->stats.rx_length_errors++; in tc35815_do_interrupt()
1409 lp->lstats.rx_ints++; in tc35815_do_interrupt()
1413 lp->lstats.tx_ints++; in tc35815_do_interrupt()
1414 spin_lock_irq(&lp->lock); in tc35815_do_interrupt()
1416 spin_unlock_irq(&lp->lock); in tc35815_do_interrupt()
1432 (struct tc35815_regs __iomem *)dev->base_addr; in tc35815_interrupt()
1433 u32 dmactl = tc_readl(&tr->DMA_Ctl); in tc35815_interrupt()
1437 tc_writel(dmactl | DMA_IntMask, &tr->DMA_Ctl); in tc35815_interrupt()
1438 if (napi_schedule_prep(&lp->napi)) in tc35815_interrupt()
1439 __napi_schedule(&lp->napi); in tc35815_interrupt()
1442 dev->name); in tc35815_interrupt()
1445 (void)tc_readl(&tr->Int_Src); /* flush */ in tc35815_interrupt()
1454 disable_irq(dev->irq); in tc35815_poll_controller()
1455 tc35815_interrupt(dev->irq, dev); in tc35815_poll_controller()
1456 enable_irq(dev->irq); in tc35815_poll_controller()
1469 while (!((fdctl = le32_to_cpu(lp->rfd_cur->fd.FDCtl)) & FD_CownsFD)) { in tc35815_rx()
1470 int status = le32_to_cpu(lp->rfd_cur->fd.FDStat); in tc35815_rx()
1477 pkt_len -= ETH_FCS_LEN; in tc35815_rx()
1481 dump_rxfd(lp->rfd_cur); in tc35815_rx()
1487 if (--limit < 0) in tc35815_rx()
1490 cur_bd = (le32_to_cpu(lp->rfd_cur->bd[0].BDCtl) in tc35815_rx()
1494 printk("%s: invalid BDID.\n", dev->name); in tc35815_rx()
1497 BUG_ON(lp->rx_skbs[cur_bd].skb_dma != in tc35815_rx()
1498 (le32_to_cpu(lp->rfd_cur->bd[0].BuffData) & ~3)); in tc35815_rx()
1499 if (!lp->rx_skbs[cur_bd].skb) { in tc35815_rx()
1500 printk("%s: NULL skb.\n", dev->name); in tc35815_rx()
1506 skb = lp->rx_skbs[cur_bd].skb; in tc35815_rx()
1507 prefetch(skb->data); in tc35815_rx()
1508 lp->rx_skbs[cur_bd].skb = NULL; in tc35815_rx()
1509 dma_unmap_single(&lp->pci_dev->dev, in tc35815_rx()
1510 lp->rx_skbs[cur_bd].skb_dma, in tc35815_rx()
1513 memmove(skb->data, skb->data - NET_IP_ALIGN, in tc35815_rx()
1518 skb->protocol = eth_type_trans(skb, dev); in tc35815_rx()
1521 dev->stats.rx_packets++; in tc35815_rx()
1522 dev->stats.rx_bytes += pkt_len; in tc35815_rx()
1524 dev->stats.rx_errors++; in tc35815_rx()
1526 dev_info(&dev->dev, "Rx error (status %x)\n", in tc35815_rx()
1534 dev->stats.rx_length_errors++; in tc35815_rx()
1536 dev->stats.rx_fifo_errors++; in tc35815_rx()
1538 dev->stats.rx_crc_errors++; in tc35815_rx()
1540 dev->stats.rx_frame_errors++; in tc35815_rx()
1545 int bdctl = le32_to_cpu(lp->rfd_cur->bd[bd_count - 1].BDCtl); in tc35815_rx()
1550 printk("%s: invalid BDID.\n", dev->name); in tc35815_rx()
1557 lp->fbl_count--; in tc35815_rx()
1558 while (lp->fbl_count < RX_BUF_NUM) in tc35815_rx()
1561 (id + 1 + lp->fbl_count) % RX_BUF_NUM; in tc35815_rx()
1562 struct BDesc *bd = &lp->fbl_ptr->bd[curid]; in tc35815_rx()
1564 bdctl = le32_to_cpu(bd->BDCtl); in tc35815_rx()
1567 dev->name); in tc35815_rx()
1572 if (!lp->rx_skbs[curid].skb) { in tc35815_rx()
1573 lp->rx_skbs[curid].skb = in tc35815_rx()
1575 lp->pci_dev, in tc35815_rx()
1576 &lp->rx_skbs[curid].skb_dma); in tc35815_rx()
1577 if (!lp->rx_skbs[curid].skb) in tc35815_rx()
1579 bd->BuffData = cpu_to_le32(lp->rx_skbs[curid].skb_dma); in tc35815_rx()
1582 bd->BDCtl = cpu_to_le32(BD_CownsBD | in tc35815_rx()
1585 lp->fbl_count++; in tc35815_rx()
1592 le32_to_cpu(lp->rfd_cur->fd.FDNext)); in tc35815_rx()
1593 if (next_rfd < lp->rfd_base || next_rfd > lp->rfd_limit) { in tc35815_rx()
1594 printk("%s: RxFD FDNext invalid.\n", dev->name); in tc35815_rx()
1601 lp->rfd_cur->fd.FDNext = cpu_to_le32(0xdeaddead); in tc35815_rx()
1603 lp->rfd_cur->fd.FDNext = cpu_to_le32(FD_Next_EOL); in tc35815_rx()
1605 lp->rfd_cur->fd.FDCtl = cpu_to_le32(FD_CownsFD); in tc35815_rx()
1606 lp->rfd_cur++; in tc35815_rx()
1608 if (lp->rfd_cur > lp->rfd_limit) in tc35815_rx()
1609 lp->rfd_cur = lp->rfd_base; in tc35815_rx()
1611 if (lp->rfd_cur != next_rfd) in tc35815_rx()
1613 lp->rfd_cur, next_rfd); in tc35815_rx()
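
Receive completion is an ownership handshake on FD_CownsFD: the loop at line 1469 consumes RxFDs while the bit is clear (host-owned), and each descriptor is handed back by setting FD_CownsFD again before rfd_cur advances, wrapping at rfd_limit. A skeleton of that loop with the per-packet work and the FDNext bookkeeping elided:

    /* Sketch of the RxFD ownership loop (frame handling elided).
     *   FD_CownsFD clear -> completed descriptor, host may process it
     *   FD_CownsFD set   -> descriptor returned to the controller */
    while (!(le32_to_cpu(lp->rfd_cur->fd.FDCtl) & FD_CownsFD)) {
            /* ... deliver the frame described by lp->rfd_cur ... */

            /* Give the descriptor back and step the cursor, wrapping
             * at the end of the RxFD area. */
            lp->rfd_cur->fd.FDCtl = cpu_to_le32(FD_CownsFD);
            lp->rfd_cur++;
            if (lp->rfd_cur > lp->rfd_limit)
                    lp->rfd_cur = lp->rfd_base;
    }
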
1623 struct net_device *dev = lp->dev; in tc35815_poll()
1625 (struct tc35815_regs __iomem *)dev->base_addr; in tc35815_poll()
1632 spin_lock(&lp->rx_lock); in tc35815_poll()
1633 status = tc_readl(&tr->Int_Src); in tc35815_poll()
1637 &tr->Int_Src); /* write to clear */ in tc35815_poll()
1639 handled = tc35815_do_interrupt(dev, status, budget - received); in tc35815_poll()
1642 &tr->Int_Src); in tc35815_poll()
1648 status = tc_readl(&tr->Int_Src); in tc35815_poll()
1650 spin_unlock(&lp->rx_lock); in tc35815_poll()
1655 tc_writel(tc_readl(&tr->DMA_Ctl) & ~DMA_IntMask, &tr->DMA_Ctl); in tc35815_poll()
1670 dev->stats.collisions += 16; in tc35815_check_tx_stat()
1672 dev->stats.collisions += status & Tx_TxColl_MASK; in tc35815_check_tx_stat()
1675 if (lp->chiptype == TC35815_TX4939) in tc35815_check_tx_stat()
1678 if (!lp->link || lp->duplex == DUPLEX_FULL) in tc35815_check_tx_stat()
1683 dev->stats.tx_packets++; in tc35815_check_tx_stat()
1687 dev->stats.tx_errors++; in tc35815_check_tx_stat()
1689 dev->stats.tx_aborted_errors++; in tc35815_check_tx_stat()
1693 dev->stats.tx_fifo_errors++; in tc35815_check_tx_stat()
1695 if (lp->lstats.tx_underrun < TX_THRESHOLD_KEEP_LIMIT) { in tc35815_check_tx_stat()
1696 lp->lstats.tx_underrun++; in tc35815_check_tx_stat()
1697 if (lp->lstats.tx_underrun >= TX_THRESHOLD_KEEP_LIMIT) { in tc35815_check_tx_stat()
1699 (struct tc35815_regs __iomem *)dev->base_addr; in tc35815_check_tx_stat()
1700 tc_writel(TX_THRESHOLD_MAX, &tr->TxThrsh); in tc35815_check_tx_stat()
1706 dev->stats.tx_fifo_errors++; in tc35815_check_tx_stat()
1710 dev->stats.tx_carrier_errors++; in tc35815_check_tx_stat()
1714 dev->stats.tx_aborted_errors++; in tc35815_check_tx_stat()
1718 dev->stats.tx_fifo_errors++; in tc35815_check_tx_stat()
1722 dev->stats.tx_heartbeat_errors++; in tc35815_check_tx_stat()
1726 printk(KERN_WARNING "%s: %s (%#x)\n", dev->name, msg, status); in tc35815_check_tx_stat()
1739 txfd = &lp->tfd_base[lp->tfd_end]; in tc35815_txdone()
1740 while (lp->tfd_start != lp->tfd_end && in tc35815_txdone()
1741 !((fdctl = le32_to_cpu(txfd->fd.FDCtl)) & FD_CownsFD)) { in tc35815_txdone()
1742 int status = le32_to_cpu(txfd->fd.FDStat); in tc35815_txdone()
1744 unsigned long fdnext = le32_to_cpu(txfd->fd.FDNext); in tc35815_txdone()
1745 u32 fdsystem = le32_to_cpu(txfd->fd.FDSystem); in tc35815_txdone()
1748 printk("%s: complete TxFD.\n", dev->name); in tc35815_txdone()
1754 lp->tx_skbs[fdsystem].skb : NULL; in tc35815_txdone()
1756 if (lp->tx_skbs[lp->tfd_end].skb != skb) { in tc35815_txdone()
1757 printk("%s: tx_skbs mismatch.\n", dev->name); in tc35815_txdone()
1761 BUG_ON(lp->tx_skbs[lp->tfd_end].skb != skb); in tc35815_txdone()
1764 dev->stats.tx_bytes += skb->len; in tc35815_txdone()
1765 dma_unmap_single(&lp->pci_dev->dev, in tc35815_txdone()
1766 lp->tx_skbs[lp->tfd_end].skb_dma, in tc35815_txdone()
1767 skb->len, DMA_TO_DEVICE); in tc35815_txdone()
1768 lp->tx_skbs[lp->tfd_end].skb = NULL; in tc35815_txdone()
1769 lp->tx_skbs[lp->tfd_end].skb_dma = 0; in tc35815_txdone()
1772 txfd->fd.FDSystem = cpu_to_le32(0xffffffff); in tc35815_txdone()
1774 lp->tfd_end = (lp->tfd_end + 1) % TX_FD_NUM; in tc35815_txdone()
1775 txfd = &lp->tfd_base[lp->tfd_end]; in tc35815_txdone()
1778 printk("%s: TxFD FDNext invalid.\n", dev->name); in tc35815_txdone()
1784 if (lp->tfd_end != lp->tfd_start) { in tc35815_txdone()
1786 (struct tc35815_regs __iomem *)dev->base_addr; in tc35815_txdone()
1787 int head = (lp->tfd_start + TX_FD_NUM - 1) % TX_FD_NUM; in tc35815_txdone()
1788 struct TxFD *txhead = &lp->tfd_base[head]; in tc35815_txdone()
1789 int qlen = (lp->tfd_start + TX_FD_NUM in tc35815_txdone()
1790 - lp->tfd_end) % TX_FD_NUM; in tc35815_txdone()
1793 if (!(le32_to_cpu(txfd->fd.FDCtl) & FD_CownsFD)) { in tc35815_txdone()
1794 printk("%s: TxFD FDCtl invalid.\n", dev->name); in tc35815_txdone()
1799 if (lp->lstats.max_tx_qlen < qlen) in tc35815_txdone()
1800 lp->lstats.max_tx_qlen = qlen; in tc35815_txdone()
1804 txhead->fd.FDNext |= cpu_to_le32(FD_Next_EOL); in tc35815_txdone()
1805 txhead->fd.FDCtl |= cpu_to_le32(FD_FrmOpt_IntTx); in tc35815_txdone()
1808 dev->name); in tc35815_txdone()
1811 tc_writel(fd_virt_to_bus(lp, txfd), &tr->TxFrmPtr); in tc35815_txdone()
1832 napi_disable(&lp->napi); in tc35815_close()
1833 if (dev->phydev) in tc35815_close()
1834 phy_stop(dev->phydev); in tc35815_close()
1835 cancel_work_sync(&lp->restart_work); in tc35815_close()
1839 free_irq(dev->irq, dev); in tc35815_close()
1854 (struct tc35815_regs __iomem *)dev->base_addr; in tc35815_get_stats()
1857 dev->stats.rx_missed_errors += tc_readl(&tr->Miss_Cnt); in tc35815_get_stats()
1859 return &dev->stats; in tc35815_get_stats()
1866 (struct tc35815_regs __iomem *)dev->base_addr; in tc35815_set_cam_entry()
1871 saved_addr = tc_readl(&tr->CAM_Adr); in tc35815_set_cam_entry()
1875 dev->name, index, addr); in tc35815_set_cam_entry()
1878 tc_writel(cam_index - 2, &tr->CAM_Adr); in tc35815_set_cam_entry()
1879 cam_data = tc_readl(&tr->CAM_Data) & 0xffff0000; in tc35815_set_cam_entry()
1881 tc_writel(cam_data, &tr->CAM_Data); in tc35815_set_cam_entry()
1883 tc_writel(cam_index + 2, &tr->CAM_Adr); in tc35815_set_cam_entry()
1885 tc_writel(cam_data, &tr->CAM_Data); in tc35815_set_cam_entry()
1888 tc_writel(cam_index, &tr->CAM_Adr); in tc35815_set_cam_entry()
1890 tc_writel(cam_data, &tr->CAM_Data); in tc35815_set_cam_entry()
1892 tc_writel(cam_index + 4, &tr->CAM_Adr); in tc35815_set_cam_entry()
1893 cam_data = tc_readl(&tr->CAM_Data) & 0x0000ffff; in tc35815_set_cam_entry()
1895 tc_writel(cam_data, &tr->CAM_Data); in tc35815_set_cam_entry()
1898 tc_writel(saved_addr, &tr->CAM_Adr); in tc35815_set_cam_entry()
1904 * num_addrs == -1 Promiscuous mode, receive all packets
1907 * and do best-effort filtering.
1913 (struct tc35815_regs __iomem *)dev->base_addr; in tc35815_set_multicast_list()
1915 if (dev->flags & IFF_PROMISC) { in tc35815_set_multicast_list()
1920 if (!lp->link) in tc35815_set_multicast_list()
1923 tc_writel(CAM_CompEn | CAM_BroadAcc | CAM_GroupAcc | CAM_StationAcc, &tr->CAM_Ctl); in tc35815_set_multicast_list()
1924 } else if ((dev->flags & IFF_ALLMULTI) || in tc35815_set_multicast_list()
1925 netdev_mc_count(dev) > CAM_ENTRY_MAX - 3) { in tc35815_set_multicast_list()
1928 tc_writel(CAM_CompEn | CAM_BroadAcc | CAM_GroupAcc, &tr->CAM_Ctl); in tc35815_set_multicast_list()
1934 tc_writel(0, &tr->CAM_Ctl); in tc35815_set_multicast_list()
1939 tc35815_set_cam_entry(dev, i + 2, ha->addr); in tc35815_set_multicast_list()
1943 tc_writel(ena_bits, &tr->CAM_Ena); in tc35815_set_multicast_list()
1944 tc_writel(CAM_CompEn | CAM_BroadAcc, &tr->CAM_Ctl); in tc35815_set_multicast_list()
1946 tc_writel(CAM_Ena_Bit(CAM_ENTRY_SOURCE), &tr->CAM_Ena); in tc35815_set_multicast_list()
1947 tc_writel(CAM_CompEn | CAM_BroadAcc, &tr->CAM_Ctl); in tc35815_set_multicast_list()
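
Multicast filtering uses the on-chip CAM: CAM_ENTRY_SOURCE holds the station address, promiscuous mode and over-long group lists fall back to the accept-all control bits, and otherwise each group address is programmed into entry i + 2 with its bit accumulated into CAM_Ena. A hedged sketch of that exact-match branch, assuming ena_bits starts from the station-address enable bit as the surrounding fragments suggest:

    /* Sketch of the exact-match branch (around lines 1934-1944 above). */
    struct netdev_hw_addr *ha;
    int i = 0;
    int ena_bits = CAM_Ena_Bit(CAM_ENTRY_SOURCE);   /* keep own address */

    tc_writel(0, &tr->CAM_Ctl);             /* disable CAM while loading */
    netdev_for_each_mc_addr(ha, dev) {
            /* Entries 0 and 1 are reserved; groups start at entry 2. */
            tc35815_set_cam_entry(dev, i + 2, ha->addr);
            ena_bits |= CAM_Ena_Bit(i + 2);
            i++;
    }
    tc_writel(ena_bits, &tr->CAM_Ena);
    tc_writel(CAM_CompEn | CAM_BroadAcc, &tr->CAM_Ctl);
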
1955 strlcpy(info->driver, MODNAME, sizeof(info->driver)); in tc35815_get_drvinfo()
1956 strlcpy(info->version, DRV_VERSION, sizeof(info->version)); in tc35815_get_drvinfo()
1957 strlcpy(info->bus_info, pci_name(lp->pci_dev), sizeof(info->bus_info)); in tc35815_get_drvinfo()
1963 return lp->msg_enable; in tc35815_get_msglevel()
1969 lp->msg_enable = datum; in tc35815_set_msglevel()
1978 return sizeof(lp->lstats) / sizeof(int); in tc35815_get_sset_count()
1980 return -EOPNOTSUPP; in tc35815_get_sset_count()
1987 data[0] = lp->lstats.max_tx_qlen; in tc35815_get_ethtool_stats()
1988 data[1] = lp->lstats.tx_ints; in tc35815_get_ethtool_stats()
1989 data[2] = lp->lstats.rx_ints; in tc35815_get_ethtool_stats()
1990 data[3] = lp->lstats.tx_underrun; in tc35815_get_ethtool_stats()
2022 (struct tc35815_regs __iomem *)dev->base_addr; in tc35815_chip_reset()
2025 tc_writel(MAC_Reset, &tr->MAC_Ctl); in tc35815_chip_reset()
2028 while (tc_readl(&tr->MAC_Ctl) & MAC_Reset) { in tc35815_chip_reset()
2030 printk(KERN_ERR "%s: MAC reset failed.\n", dev->name); in tc35815_chip_reset()
2035 tc_writel(0, &tr->MAC_Ctl); in tc35815_chip_reset()
2038 tc_writel(0, &tr->DMA_Ctl); in tc35815_chip_reset()
2039 tc_writel(0, &tr->TxThrsh); in tc35815_chip_reset()
2040 tc_writel(0, &tr->TxPollCtr); in tc35815_chip_reset()
2041 tc_writel(0, &tr->RxFragSize); in tc35815_chip_reset()
2042 tc_writel(0, &tr->Int_En); in tc35815_chip_reset()
2043 tc_writel(0, &tr->FDA_Bas); in tc35815_chip_reset()
2044 tc_writel(0, &tr->FDA_Lim); in tc35815_chip_reset()
2045 tc_writel(0xffffffff, &tr->Int_Src); /* Write 1 to clear */ in tc35815_chip_reset()
2046 tc_writel(0, &tr->CAM_Ctl); in tc35815_chip_reset()
2047 tc_writel(0, &tr->Tx_Ctl); in tc35815_chip_reset()
2048 tc_writel(0, &tr->Rx_Ctl); in tc35815_chip_reset()
2049 tc_writel(0, &tr->CAM_Ena); in tc35815_chip_reset()
2050 (void)tc_readl(&tr->Miss_Cnt); /* Read to clear */ in tc35815_chip_reset()
2053 tc_writel(DMA_TestMode, &tr->DMA_Ctl); in tc35815_chip_reset()
2055 tc_writel(i, &tr->CAM_Adr); in tc35815_chip_reset()
2056 tc_writel(0, &tr->CAM_Data); in tc35815_chip_reset()
2058 tc_writel(0, &tr->DMA_Ctl); in tc35815_chip_reset()
2065 (struct tc35815_regs __iomem *)dev->base_addr; in tc35815_chip_init()
2069 tc35815_set_cam_entry(dev, CAM_ENTRY_SOURCE, dev->dev_addr); in tc35815_chip_init()
2072 tc_writel(CAM_Ena_Bit(CAM_ENTRY_SOURCE), &tr->CAM_Ena); in tc35815_chip_init()
2073 tc_writel(CAM_CompEn | CAM_BroadAcc, &tr->CAM_Ctl); in tc35815_chip_init()
2075 /* Use DMA_RxAlign_2 to make IP header 4-byte aligned. */ in tc35815_chip_init()
2077 tc_writel(DMA_BURST_SIZE | DMA_RxAlign_2, &tr->DMA_Ctl); in tc35815_chip_init()
2079 tc_writel(DMA_BURST_SIZE, &tr->DMA_Ctl); in tc35815_chip_init()
2080 tc_writel(0, &tr->TxPollCtr); /* Batch mode */ in tc35815_chip_init()
2081 tc_writel(TX_THRESHOLD, &tr->TxThrsh); in tc35815_chip_init()
2082 tc_writel(INT_EN_CMD, &tr->Int_En); in tc35815_chip_init()
2085 tc_writel(fd_virt_to_bus(lp, lp->rfd_base), &tr->FDA_Bas); in tc35815_chip_init()
2086 tc_writel((unsigned long)lp->rfd_limit - (unsigned long)lp->rfd_base, in tc35815_chip_init()
2087 &tr->FDA_Lim); in tc35815_chip_init()
2093 tc_writel(fd_virt_to_bus(lp, lp->fbl_ptr), &tr->BLFrmPtr); /* start DMA receiver */ in tc35815_chip_init()
2094 tc_writel(RX_CTL_CMD, &tr->Rx_Ctl); /* start MAC receiver */ in tc35815_chip_init()
2098 if (lp->chiptype == TC35815_TX4939) in tc35815_chip_init()
2101 if (!dev->phydev || !lp->link || lp->duplex == DUPLEX_FULL) in tc35815_chip_init()
2103 tc_writel(txctl, &tr->Tx_Ctl); in tc35815_chip_init()
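
Receive alignment is chip-dependent: HAVE_DMA_RXALIGN (line 335) lets the newer parts insert two pad bytes in hardware via DMA_RxAlign_2 so the IP header lands on a 4-byte boundary, while the original TC35815CF has to shift the received data by NET_IP_ALIGN in software after DMA, which is what the memmove at line 1513 is doing. A condensed sketch tying the two paths together; the memmove length argument is assumed from context:

    /* Sketch: the two Rx-alignment strategies used by this driver. */

    /* chip_init: newer chips pad in hardware. */
    if (HAVE_DMA_RXALIGN(lp))
            tc_writel(DMA_BURST_SIZE | DMA_RxAlign_2, &tr->DMA_Ctl);
    else
            tc_writel(DMA_BURST_SIZE, &tr->DMA_Ctl);

    /* tc35815_rx, per packet: TC35815CF realigns in software instead
     * (pkt_len as the copy length is an assumption from context). */
    if (!HAVE_DMA_RXALIGN(lp) && NET_IP_ALIGN)
            memmove(skb->data, skb->data - NET_IP_ALIGN, pkt_len);
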
2117 if (dev->phydev) in tc35815_suspend()
2118 phy_stop(dev->phydev); in tc35815_suspend()
2119 spin_lock_irqsave(&lp->lock, flags); in tc35815_suspend()
2121 spin_unlock_irqrestore(&lp->lock, flags); in tc35815_suspend()
2136 if (dev->phydev) in tc35815_resume()
2137 phy_start(dev->phydev); in tc35815_resume()