Lines Matching defs:lp

Every match below is a reference to lp, the driver's per-device private data pointer (struct tc35815_local *, obtained with netdev_priv()); the number leading each match is its line number in the tc35815 driver source.

335 #define HAVE_DMA_RXALIGN(lp)	likely((lp)->chiptype != TC35815CF)
439 static inline dma_addr_t fd_virt_to_bus(struct tc35815_local *lp, void *virt)
441 return lp->fd_buf_dma + ((u8 *)virt - (u8 *)lp->fd_buf);
444 static inline void *fd_bus_to_virt(struct tc35815_local *lp, dma_addr_t bus)
446 return (void *)((u8 *)lp->fd_buf + (bus - lp->fd_buf_dma));
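
The two helpers at 439-446 convert between the CPU and bus views of the single coherent descriptor block: an offset taken relative to fd_buf in one address space is re-applied to fd_buf_dma in the other. A minimal user-space sketch of the same arithmetic (the struct and base address below are hypothetical stand-ins, with dma_addr_t modeled as uintptr_t):

#include <assert.h>
#include <stdint.h>

/* Hypothetical stand-in: one block mapped at both a CPU and a bus address. */
struct dma_block {
	void      *virt;	/* CPU-visible base (cf. lp->fd_buf)        */
	uintptr_t  bus;		/* device-visible base (cf. lp->fd_buf_dma) */
};

static uintptr_t virt_to_bus(const struct dma_block *b, void *p)
{
	return b->bus + (uintptr_t)((uint8_t *)p - (uint8_t *)b->virt);
}

static void *bus_to_virt(const struct dma_block *b, uintptr_t a)
{
	return (uint8_t *)b->virt + (a - b->bus);
}

int main(void)
{
	uint8_t buf[4096];
	struct dma_block b = { .virt = buf, .bus = 0x80000000u };

	/* round trip: the offset survives the change of address space */
	assert(bus_to_virt(&b, virt_to_bus(&b, buf + 100)) == buf + 100);
	return 0;
}
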
540 struct tc35815_local *lp = netdev_priv(dev);
545 spin_lock_irqsave(&lp->lock, flags);
547 (lp->speed != phydev->speed || lp->duplex != phydev->duplex)) {
573 lp->chiptype != TC35815_TX4939)
577 lp->speed = phydev->speed;
578 lp->duplex = phydev->duplex;
582 if (phydev->link != lp->link) {
588 lp->speed = 0;
589 lp->duplex = -1;
591 lp->link = phydev->link;
595 spin_unlock_irqrestore(&lp->lock, flags);
597 if (status_change && netif_msg_link(lp)) {
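
540-597 form the phylib adjust_link callback: under lp->lock it compares the cached speed/duplex/link against what the PHY now reports, reprograms the MAC on a mismatch, caches the new values (poisoning speed and duplex when the carrier drops, 588-589), and defers any logging until after the unlock at 595. A user-space sketch of that compare-and-cache shape (fields are hypothetical; a pthread mutex stands in for spin_lock_irqsave):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct phy_state { int speed, duplex; bool link; };

struct mac {
	pthread_mutex_t  lock;	 /* stands in for spin_lock_irqsave(&lp->lock) */
	struct phy_state cached; /* cf. lp->speed / lp->duplex / lp->link */
};

static void adjust_link(struct mac *m, const struct phy_state *phy)
{
	bool status_change = false;

	pthread_mutex_lock(&m->lock);
	if (phy->link &&
	    (m->cached.speed != phy->speed || m->cached.duplex != phy->duplex)) {
		/* ...reprogram MAC duplex / flow control here... */
		m->cached.speed = phy->speed;
		m->cached.duplex = phy->duplex;
		status_change = true;
	}
	if (phy->link != m->cached.link) {
		if (!phy->link) {	/* carrier lost: poison the cache */
			m->cached.speed = 0;
			m->cached.duplex = -1;
		}
		m->cached.link = phy->link;
		status_change = true;
	}
	pthread_mutex_unlock(&m->lock);

	if (status_change)		/* report outside the lock */
		printf("link %s\n", phy->link ? "up" : "down");
}

int main(void)
{
	struct mac m = { .cached = { 0, -1, false } };
	struct phy_state up = { 100, 1, true };

	pthread_mutex_init(&m.lock, NULL);
	adjust_link(&m, &up);
	return 0;
}
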
610 struct tc35815_local *lp = netdev_priv(dev);
613 phydev = phy_find_first(lp->mii_bus);
622 lp->chiptype == TC35815_TX4939 ? PHY_INTERFACE_MODE_RMII : PHY_INTERFACE_MODE_MII);
649 lp->link = 0;
650 lp->speed = 0;
651 lp->duplex = -1;
658 struct tc35815_local *lp = netdev_priv(dev);
661 lp->mii_bus = mdiobus_alloc();
662 if (lp->mii_bus == NULL) {
667 lp->mii_bus->name = "tc35815_mii_bus";
668 lp->mii_bus->read = tc_mdio_read;
669 lp->mii_bus->write = tc_mdio_write;
670 snprintf(lp->mii_bus->id, MII_BUS_ID_SIZE, "%x", pci_dev_id(lp->pci_dev));
671 lp->mii_bus->priv = dev;
672 lp->mii_bus->parent = &lp->pci_dev->dev;
673 err = mdiobus_register(lp->mii_bus);
682 mdiobus_unregister(lp->mii_bus);
684 mdiobus_free(lp->mii_bus);
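
658-684 are the stock mdiobus life cycle: allocate, fill in name/read/write/id/priv/parent, register, and later tear down with mdiobus_unregister plus mdiobus_free. Collected into one function for readability (the function name is illustrative; the calls and field assignments are exactly the ones listed above):

static int example_mdio_init(struct net_device *dev)
{
	struct tc35815_local *lp = netdev_priv(dev);
	int err;

	lp->mii_bus = mdiobus_alloc();
	if (lp->mii_bus == NULL)
		return -ENOMEM;

	lp->mii_bus->name = "tc35815_mii_bus";
	lp->mii_bus->read = tc_mdio_read;
	lp->mii_bus->write = tc_mdio_write;
	snprintf(lp->mii_bus->id, MII_BUS_ID_SIZE, "%x",
		 pci_dev_id(lp->pci_dev));
	lp->mii_bus->priv = dev;
	lp->mii_bus->parent = &lp->pci_dev->dev;

	err = mdiobus_register(lp->mii_bus);
	if (err) {
		mdiobus_free(lp->mii_bus);	/* unwind the allocation */
		return err;
	}
	return 0;
}
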
705 struct tc35815_local *lp = netdev_priv(dev);
707 lp->pci_dev, tc35815_mac_match);
767 struct tc35815_local *lp;
784 dev = alloc_etherdev(sizeof(*lp));
789 lp = netdev_priv(dev);
790 lp->dev = dev;
806 netif_napi_add_weight(dev, &lp->napi, tc35815_poll, NAPI_WEIGHT);
811 INIT_WORK(&lp->restart_work, tc35815_restart_work);
812 spin_lock_init(&lp->lock);
813 spin_lock_init(&lp->rx_lock);
814 lp->pci_dev = pdev;
815 lp->chiptype = ent->driver_data;
817 lp->msg_enable = NETIF_MSG_TX_ERR | NETIF_MSG_HW | NETIF_MSG_DRV | NETIF_MSG_LINK;
857 struct tc35815_local *lp = netdev_priv(dev);
860 mdiobus_unregister(lp->mii_bus);
861 mdiobus_free(lp->mii_bus);
869 struct tc35815_local *lp = netdev_priv(dev);
873 if (!lp->fd_buf) {
880 lp->fd_buf = dma_alloc_coherent(&lp->pci_dev->dev,
882 &lp->fd_buf_dma, GFP_ATOMIC);
883 if (!lp->fd_buf)
886 lp->rx_skbs[i].skb =
887 alloc_rxbuf_skb(dev, lp->pci_dev,
888 &lp->rx_skbs[i].skb_dma);
889 if (!lp->rx_skbs[i].skb) {
891 free_rxbuf_skb(lp->pci_dev,
892 lp->rx_skbs[i].skb,
893 lp->rx_skbs[i].skb_dma);
894 lp->rx_skbs[i].skb = NULL;
896 dma_free_coherent(&lp->pci_dev->dev,
898 lp->fd_buf, lp->fd_buf_dma);
899 lp->fd_buf = NULL;
904 dev->name, lp->fd_buf);
908 clear_page((void *)((unsigned long)lp->fd_buf +
911 fd_addr = (unsigned long)lp->fd_buf;
914 lp->rfd_base = (struct RxFD *)fd_addr;
917 lp->rfd_base[i].fd.FDCtl = cpu_to_le32(FD_CownsFD);
918 lp->rfd_cur = lp->rfd_base;
919 lp->rfd_limit = (struct RxFD *)fd_addr - (RX_FD_RESERVE + 1);
922 lp->tfd_base = (struct TxFD *)fd_addr;
925 lp->tfd_base[i].fd.FDNext = cpu_to_le32(fd_virt_to_bus(lp, &lp->tfd_base[i+1]));
926 lp->tfd_base[i].fd.FDSystem = cpu_to_le32(0xffffffff);
927 lp->tfd_base[i].fd.FDCtl = cpu_to_le32(0);
929 lp->tfd_base[TX_FD_NUM-1].fd.FDNext = cpu_to_le32(fd_virt_to_bus(lp, &lp->tfd_base[0]));
930 lp->tfd_start = 0;
931 lp->tfd_end = 0;
934 lp->fbl_ptr = (struct FrFD *)fd_addr;
935 lp->fbl_ptr->fd.FDNext = cpu_to_le32(fd_virt_to_bus(lp, lp->fbl_ptr));
936 lp->fbl_ptr->fd.FDCtl = cpu_to_le32(RX_BUF_NUM | FD_CownsFD);
942 lp->fbl_count = 0;
944 if (lp->rx_skbs[i].skb) {
945 if (i != lp->fbl_count) {
946 lp->rx_skbs[lp->fbl_count].skb =
947 lp->rx_skbs[i].skb;
948 lp->rx_skbs[lp->fbl_count].skb_dma =
949 lp->rx_skbs[i].skb_dma;
951 lp->fbl_count++;
955 if (i >= lp->fbl_count) {
956 lp->fbl_ptr->bd[i].BuffData = 0;
957 lp->fbl_ptr->bd[i].BDCtl = 0;
960 lp->fbl_ptr->bd[i].BuffData =
961 cpu_to_le32(lp->rx_skbs[i].skb_dma);
963 lp->fbl_ptr->bd[i].BDCtl =
969 dev->name, lp->tfd_base, lp->rfd_base, lp->fbl_ptr);
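
869-969 carve one dma_alloc_coherent block into three regions: RX frame descriptors initially owned by the controller (FD_CownsFD, 917), a TX descriptor ring whose FDNext fields hold the bus address of the next entry with the last one wrapping back to the first (925, 929), and a free-buffer FrFD whose bd[] slots carry the DMA addresses of preallocated receive skbs (960-961). A runnable user-space sketch of just the TX-ring chaining (descriptor layout and base addresses are hypothetical; the driver additionally byte-swaps each field with cpu_to_le32):

#include <assert.h>
#include <stdint.h>

#define TX_FD_NUM 64

struct txfd {			/* hypothetical, mirrors fd.FDNext/FDSystem */
	uint32_t fd_next;	/* bus address of the next descriptor       */
	uint32_t fd_system;	/* driver cookie, 0xffffffff = free         */
};

int main(void)
{
	static struct txfd ring[TX_FD_NUM];
	uintptr_t virt = (uintptr_t)ring, bus = 0x80000000u;
	int i;

	/* chain each descriptor to the next by *bus* address, cf. 925... */
	for (i = 0; i < TX_FD_NUM - 1; i++) {
		ring[i].fd_next = (uint32_t)(bus + ((uintptr_t)&ring[i + 1] - virt));
		ring[i].fd_system = 0xffffffff;
	}
	/* ...and close the loop so the DMA engine can circulate, cf. 929 */
	ring[TX_FD_NUM - 1].fd_next = (uint32_t)bus;
	ring[TX_FD_NUM - 1].fd_system = 0xffffffff;

	assert(ring[0].fd_next == bus + sizeof(struct txfd));
	return 0;
}
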
976 struct tc35815_local *lp = netdev_priv(dev);
980 u32 fdsystem = le32_to_cpu(lp->tfd_base[i].fd.FDSystem);
983 lp->tx_skbs[fdsystem].skb : NULL;
985 if (lp->tx_skbs[i].skb != skb) {
990 BUG_ON(lp->tx_skbs[i].skb != skb);
993 dma_unmap_single(&lp->pci_dev->dev,
994 lp->tx_skbs[i].skb_dma, skb->len,
996 lp->tx_skbs[i].skb = NULL;
997 lp->tx_skbs[i].skb_dma = 0;
1000 lp->tfd_base[i].fd.FDSystem = cpu_to_le32(0xffffffff);
1009 struct tc35815_local *lp = netdev_priv(dev);
1012 if (lp->tfd_base) {
1014 u32 fdsystem = le32_to_cpu(lp->tfd_base[i].fd.FDSystem);
1017 lp->tx_skbs[fdsystem].skb : NULL;
1019 if (lp->tx_skbs[i].skb != skb) {
1024 BUG_ON(lp->tx_skbs[i].skb != skb);
1027 dma_unmap_single(&lp->pci_dev->dev,
1028 lp->tx_skbs[i].skb_dma,
1031 lp->tx_skbs[i].skb = NULL;
1032 lp->tx_skbs[i].skb_dma = 0;
1034 lp->tfd_base[i].fd.FDSystem = cpu_to_le32(0xffffffff);
1038 lp->rfd_base = NULL;
1039 lp->rfd_limit = NULL;
1040 lp->rfd_cur = NULL;
1041 lp->fbl_ptr = NULL;
1044 if (lp->rx_skbs[i].skb) {
1045 free_rxbuf_skb(lp->pci_dev, lp->rx_skbs[i].skb,
1046 lp->rx_skbs[i].skb_dma);
1047 lp->rx_skbs[i].skb = NULL;
1050 if (lp->fd_buf) {
1051 dma_free_coherent(&lp->pci_dev->dev, PAGE_SIZE * FD_PAGE_NUM,
1052 lp->fd_buf, lp->fd_buf_dma);
1053 lp->fd_buf = NULL;
1115 struct tc35815_local *lp = netdev_priv(dev);
1119 lp->tfd_base, lp->tfd_start, lp->tfd_end);
1121 lp->rfd_base, lp->rfd_limit, lp->rfd_cur);
1122 printk("FrFD %p\n", lp->fbl_ptr);
1124 dump_txfd(&lp->tfd_base[i]);
1126 int bd_count = dump_rxfd(&lp->rfd_base[i]);
1129 dump_frfd(lp->fbl_ptr);
1143 struct tc35815_local *lp = netdev_priv(dev);
1144 return (lp->tfd_start + 1) % TX_FD_NUM == lp->tfd_end;
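
1144 is the classic one-slot-open ring-full test: the ring reports full when advancing tfd_start would land on tfd_end, deliberately sacrificing one slot so that full and empty stay distinguishable; 1284 computes occupancy with the matching modular formula. A self-checking sketch:

#include <assert.h>

#define TX_FD_NUM 8

static int tx_full(int start, int end)
{
	return (start + 1) % TX_FD_NUM == end;		/* cf. 1144 */
}

static int tx_queued(int start, int end)
{
	return (start + TX_FD_NUM - end) % TX_FD_NUM;	/* cf. 1284 */
}

int main(void)
{
	assert(!tx_full(0, 0) && tx_queued(0, 0) == 0);	/* empty ring      */
	assert(tx_full(TX_FD_NUM - 1, 0));		/* 7 of 8 in use   */
	assert(tx_queued(TX_FD_NUM - 1, 0) == TX_FD_NUM - 1);
	return 0;
}
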
1149 struct tc35815_local *lp = netdev_priv(dev);
1158 spin_lock_bh(&lp->rx_lock);
1159 spin_lock_irq(&lp->lock);
1165 spin_unlock_irq(&lp->lock);
1166 spin_unlock_bh(&lp->rx_lock);
1173 struct tc35815_local *lp =
1175 struct net_device *dev = lp->dev;
1182 struct tc35815_local *lp = netdev_priv(dev);
1188 spin_lock_irqsave(&lp->lock, flags);
1191 schedule_work(&lp->restart_work);
1192 spin_unlock_irqrestore(&lp->lock, flags);
1219 struct tc35815_local *lp = netdev_priv(dev);
1236 napi_enable(&lp->napi);
1239 spin_lock_irq(&lp->lock);
1241 spin_unlock_irq(&lp->lock);
1263 struct tc35815_local *lp = netdev_priv(dev);
1281 spin_lock_irqsave(&lp->lock, flags);
1284 if ((lp->tfd_start + TX_FD_NUM - lp->tfd_end) % TX_FD_NUM >
1288 if (netif_msg_pktdata(lp))
1291 if (lp->tx_skbs[lp->tfd_start].skb) {
1296 BUG_ON(lp->tx_skbs[lp->tfd_start].skb);
1298 lp->tx_skbs[lp->tfd_start].skb = skb;
1299 lp->tx_skbs[lp->tfd_start].skb_dma = dma_map_single(&lp->pci_dev->dev,
1305 txfd = &lp->tfd_base[lp->tfd_start];
1306 txfd->bd.BuffData = cpu_to_le32(lp->tx_skbs[lp->tfd_start].skb_dma);
1308 txfd->fd.FDSystem = cpu_to_le32(lp->tfd_start);
1311 if (lp->tfd_start == lp->tfd_end) {
1317 if (netif_msg_tx_queued(lp)) {
1321 tc_writel(fd_virt_to_bus(lp, txfd), &tr->TxFrmPtr);
1324 if (netif_msg_tx_queued(lp)) {
1329 lp->tfd_start = (lp->tfd_start + 1) % TX_FD_NUM;
1336 if (netif_msg_tx_queued(lp))
1345 spin_unlock_irqrestore(&lp->lock, flags);
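
1263-1345 queue one frame: under lp->lock the skb is recorded and DMA-mapped into the tfd_start slot, its descriptor gets the buffer bus address and an FDSystem cookie for completion matching (1308), the DMA engine is kicked via TxFrmPtr only when the ring was idle (1311, 1321), and the queue is stopped once full. A condensed schematic assembled from those lines (hypothetical function name; FDCtl frame flags, statistics, and all error paths elided):

static netdev_tx_t example_start_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct tc35815_local *lp = netdev_priv(dev);
	struct tc35815_regs __iomem *tr =
		(struct tc35815_regs __iomem *)dev->base_addr;
	struct TxFD *txfd;
	unsigned long flags;

	spin_lock_irqsave(&lp->lock, flags);

	/* claim the slot at tfd_start: remember the skb and map it for DMA */
	lp->tx_skbs[lp->tfd_start].skb = skb;
	lp->tx_skbs[lp->tfd_start].skb_dma =
		dma_map_single(&lp->pci_dev->dev, skb->data, skb->len,
			       DMA_TO_DEVICE);

	txfd = &lp->tfd_base[lp->tfd_start];
	txfd->bd.BuffData = cpu_to_le32(lp->tx_skbs[lp->tfd_start].skb_dma);
	txfd->fd.FDSystem = cpu_to_le32(lp->tfd_start);	/* completion cookie */

	if (lp->tfd_start == lp->tfd_end)	/* ring was idle: kick the DMA */
		tc_writel(fd_virt_to_bus(lp, txfd), &tr->TxFrmPtr);

	lp->tfd_start = (lp->tfd_start + 1) % TX_FD_NUM;
	if (tc35815_tx_full(dev))		/* backpressure the stack */
		netif_stop_queue(dev);

	spin_unlock_irqrestore(&lp->lock, flags);
	return NETDEV_TX_OK;
}
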
1372 struct tc35815_local *lp = netdev_priv(dev);
1382 if (netif_msg_rx_err(lp))
1390 if (netif_msg_rx_err(lp))
1398 if (netif_msg_rx_err(lp))
1410 lp->lstats.rx_ints++;
1414 lp->lstats.tx_ints++;
1415 spin_lock_irq(&lp->lock);
1417 spin_unlock_irq(&lp->lock);
1431 struct tc35815_local *lp = netdev_priv(dev);
1437 if (napi_schedule_prep(&lp->napi)) {
1440 __napi_schedule(&lp->napi);
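
1431-1440 are the standard NAPI hand-off: the hard-IRQ handler claims the poll with napi_schedule_prep, masks the chip's interrupt sources, and defers the real work to softirq context via __napi_schedule. Schematically (hypothetical handler name; the register write that masks interrupts is elided):

static irqreturn_t example_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct tc35815_local *lp = netdev_priv(dev);

	if (napi_schedule_prep(&lp->napi)) {
		/* ...mask the chip's interrupt sources here... */
		__napi_schedule(&lp->napi);	/* poll runs in softirq */
	}
	return IRQ_HANDLED;
}
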
1461 struct tc35815_local *lp = netdev_priv(dev);
1466 while (!((fdctl = le32_to_cpu(lp->rfd_cur->fd.FDCtl)) & FD_CownsFD)) {
1467 int status = le32_to_cpu(lp->rfd_cur->fd.FDStat);
1477 if (netif_msg_rx_status(lp))
1478 dump_rxfd(lp->rfd_cur);
1487 cur_bd = (le32_to_cpu(lp->rfd_cur->bd[0].BDCtl)
1494 BUG_ON(lp->rx_skbs[cur_bd].skb_dma !=
1495 (le32_to_cpu(lp->rfd_cur->bd[0].BuffData) & ~3));
1496 if (!lp->rx_skbs[cur_bd].skb) {
1503 skb = lp->rx_skbs[cur_bd].skb;
1505 lp->rx_skbs[cur_bd].skb = NULL;
1506 dma_unmap_single(&lp->pci_dev->dev,
1507 lp->rx_skbs[cur_bd].skb_dma,
1509 if (!HAVE_DMA_RXALIGN(lp) && NET_IP_ALIGN != 0)
1513 if (netif_msg_pktdata(lp))
1522 if (netif_msg_rx_err(lp))
1542 int bdctl = le32_to_cpu(lp->rfd_cur->bd[bd_count - 1].BDCtl);
1554 lp->fbl_count--;
1555 while (lp->fbl_count < RX_BUF_NUM)
1558 (id + 1 + lp->fbl_count) % RX_BUF_NUM;
1559 struct BDesc *bd = &lp->fbl_ptr->bd[curid];
1569 if (!lp->rx_skbs[curid].skb) {
1570 lp->rx_skbs[curid].skb =
1572 lp->pci_dev,
1573 &lp->rx_skbs[curid].skb_dma);
1574 if (!lp->rx_skbs[curid].skb)
1576 bd->BuffData = cpu_to_le32(lp->rx_skbs[curid].skb_dma);
1582 lp->fbl_count++;
1588 next_rfd = fd_bus_to_virt(lp,
1589 le32_to_cpu(lp->rfd_cur->fd.FDNext));
1590 if (next_rfd < lp->rfd_base || next_rfd > lp->rfd_limit) {
1598 lp->rfd_cur->fd.FDNext = cpu_to_le32(0xdeaddead);
1600 lp->rfd_cur->fd.FDNext = cpu_to_le32(FD_Next_EOL);
1602 lp->rfd_cur->fd.FDCtl = cpu_to_le32(FD_CownsFD);
1603 lp->rfd_cur++;
1605 if (lp->rfd_cur > lp->rfd_limit)
1606 lp->rfd_cur = lp->rfd_base;
1608 if (lp->rfd_cur != next_rfd)
1610 lp->rfd_cur, next_rfd);
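
1461-1610 is an ownership handshake with the DMA engine: the driver consumes receive descriptors until it reaches one the controller still owns (FD_CownsFD at 1466), then returns each processed descriptor by restoring FD_CownsFD (1602) and advancing rfd_cur with wraparound (1605-1606). Stripped to that handshake in a runnable user-space sketch (the flag value and byte-count encoding are hypothetical, and the driver's rfd_limit actually sits RX_FD_RESERVE entries before the end of the ring):

#include <stdint.h>
#include <stdio.h>

#define RX_FD_NUM   8
#define FD_COWNS_FD 0x80000000u	/* hypothetical: controller owns this FD */

struct rxfd { uint32_t fdctl; };

int main(void)
{
	struct rxfd ring[RX_FD_NUM], *cur = ring, *limit = &ring[RX_FD_NUM - 1];
	int i;

	for (i = 0; i < RX_FD_NUM; i++)
		ring[i].fdctl = FD_COWNS_FD;	/* all owned by hardware      */
	ring[0].fdctl = 100;			/* "DMA" delivered two frames, */
	ring[1].fdctl = 60;			/* byte counts in the low bits */

	while (!(cur->fdctl & FD_COWNS_FD)) {	/* cf. 1466 */
		printf("frame, %u bytes\n", (unsigned)cur->fdctl);
		cur->fdctl = FD_COWNS_FD;	/* hand it back, cf. 1602 */
		if (++cur > limit)		/* wrap, cf. 1605-1606 */
			cur = ring;
	}
	return 0;
}
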
1619 struct tc35815_local *lp = container_of(napi, struct tc35815_local, napi);
1620 struct net_device *dev = lp->dev;
1629 spin_lock(&lp->rx_lock);
1647 spin_unlock(&lp->rx_lock);
1662 struct tc35815_local *lp = netdev_priv(dev);
1672 if (lp->chiptype == TC35815_TX4939)
1675 if (!lp->link || lp->duplex == DUPLEX_FULL)
1692 if (lp->lstats.tx_underrun < TX_THRESHOLD_KEEP_LIMIT) {
1693 lp->lstats.tx_underrun++;
1694 if (lp->lstats.tx_underrun >= TX_THRESHOLD_KEEP_LIMIT) {
1722 if (msg && netif_msg_tx_err(lp))
1732 struct tc35815_local *lp = netdev_priv(dev);
1736 txfd = &lp->tfd_base[lp->tfd_end];
1737 while (lp->tfd_start != lp->tfd_end &&
1744 if (netif_msg_tx_done(lp)) {
1751 lp->tx_skbs[fdsystem].skb : NULL;
1753 if (lp->tx_skbs[lp->tfd_end].skb != skb) {
1758 BUG_ON(lp->tx_skbs[lp->tfd_end].skb != skb);
1762 dma_unmap_single(&lp->pci_dev->dev,
1763 lp->tx_skbs[lp->tfd_end].skb_dma,
1765 lp->tx_skbs[lp->tfd_end].skb = NULL;
1766 lp->tx_skbs[lp->tfd_end].skb_dma = 0;
1771 lp->tfd_end = (lp->tfd_end + 1) % TX_FD_NUM;
1772 txfd = &lp->tfd_base[lp->tfd_end];
1774 if ((fdnext & ~FD_Next_EOL) != fd_virt_to_bus(lp, txfd)) {
1781 if (lp->tfd_end != lp->tfd_start) {
1784 int head = (lp->tfd_start + TX_FD_NUM - 1) % TX_FD_NUM;
1785 struct TxFD *txhead = &lp->tfd_base[head];
1786 int qlen = (lp->tfd_start + TX_FD_NUM
1787 - lp->tfd_end) % TX_FD_NUM;
1796 if (lp->lstats.max_tx_qlen < qlen)
1797 lp->lstats.max_tx_qlen = qlen;
1803 if (netif_msg_tx_queued(lp)) {
1808 tc_writel(fd_virt_to_bus(lp, txfd), &tr->TxFrmPtr);
1826 struct tc35815_local *lp = netdev_priv(dev);
1829 napi_disable(&lp->napi);
1832 cancel_work_sync(&lp->restart_work);
1862 struct tc35815_local *lp = netdev_priv(dev);
1871 if (netif_msg_hw(lp))
1917 struct tc35815_local *lp = netdev_priv(dev);
1919 if (!lp->link)
1952 struct tc35815_local *lp = netdev_priv(dev);
1956 strscpy(info->bus_info, pci_name(lp->pci_dev), sizeof(info->bus_info));
1961 struct tc35815_local *lp = netdev_priv(dev);
1962 return lp->msg_enable;
1967 struct tc35815_local *lp = netdev_priv(dev);
1968 lp->msg_enable = datum;
1973 struct tc35815_local *lp = netdev_priv(dev);
1977 return sizeof(lp->lstats) / sizeof(int);
1985 struct tc35815_local *lp = netdev_priv(dev);
1986 data[0] = lp->lstats.max_tx_qlen;
1987 data[1] = lp->lstats.tx_ints;
1988 data[2] = lp->lstats.rx_ints;
1989 data[3] = lp->lstats.tx_underrun;
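
1973-1989 back the ethtool statistics hooks: get_sset_count derives the entry count from sizeof(lp->lstats)/sizeof(int), and the copy-out order of the four counters must match the driver's string table. A user-space sketch of that contract (the struct and name table mirror the lstats fields shown above):

#include <assert.h>

struct tc35815_stats {		/* mirrors lp->lstats */
	int max_tx_qlen, tx_ints, rx_ints, tx_underrun;
};

static const char *stat_names[] = {
	"max_tx_qlen", "tx_ints", "rx_ints", "tx_underrun",
};

static void get_stats(const struct tc35815_stats *s, unsigned long long *data)
{
	data[0] = s->max_tx_qlen;	/* order must match stat_names[] */
	data[1] = s->tx_ints;
	data[2] = s->rx_ints;
	data[3] = s->tx_underrun;
}

int main(void)
{
	struct tc35815_stats s = { 1, 2, 3, 4 };
	unsigned long long data[4];

	get_stats(&s, data);
	assert(sizeof(s) / sizeof(int) ==
	       sizeof(stat_names) / sizeof(stat_names[0]));
	assert(data[3] == 4);
	return 0;
}
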
2062 struct tc35815_local *lp = netdev_priv(dev);
2075 if (HAVE_DMA_RXALIGN(lp))
2084 tc_writel(fd_virt_to_bus(lp, lp->rfd_base), &tr->FDA_Bas);
2085 tc_writel((unsigned long)lp->rfd_limit - (unsigned long)lp->rfd_base,
2092 tc_writel(fd_virt_to_bus(lp, lp->fbl_ptr), &tr->BLFrmPtr); /* start DMA receiver */
2097 if (lp->chiptype == TC35815_TX4939)
2100 if (!dev->phydev || !lp->link || lp->duplex == DUPLEX_FULL)
2109 struct tc35815_local *lp = netdev_priv(dev);
2118 spin_lock_irqsave(&lp->lock, flags);
2120 spin_unlock_irqrestore(&lp->lock, flags);