Lines Matching refs:lp

378 static dma_addr_t korina_tx_dma(struct korina_private *lp, int idx)
380 return lp->td_dma + (idx * sizeof(struct dma_desc));
383 static dma_addr_t korina_rx_dma(struct korina_private *lp, int idx)
385 return lp->rd_dma + (idx * sizeof(struct dma_desc));
406 struct korina_private *lp = netdev_priv(dev);
408 korina_abort_dma(dev, lp->tx_dma_regs);
413 struct korina_private *lp = netdev_priv(dev);
415 korina_abort_dma(dev, lp->rx_dma_regs);
422 struct korina_private *lp = netdev_priv(dev);
430 spin_lock_irqsave(&lp->lock, flags);
432 idx = lp->tx_chain_tail;
433 td = &lp->td_ring[idx];
436 if (lp->tx_count >= (KORINA_NUM_TDS - 2)) {
437 lp->tx_full = 1;
439 if (lp->tx_count == (KORINA_NUM_TDS - 2))
445 lp->tx_count++;
447 lp->tx_skb[idx] = skb;
452 ca = dma_map_single(lp->dmadev, skb->data, length, DMA_TO_DEVICE);
453 if (dma_mapping_error(lp->dmadev, ca))
456 lp->tx_skb_dma[idx] = ca;
462 if (readl(&(lp->tx_dma_regs->dmandptr)) == 0) {
463 if (lp->tx_chain_status == desc_is_empty) {
468 lp->tx_chain_tail = chain_next;
470 writel(korina_tx_dma(lp, lp->tx_chain_head),
471 &lp->tx_dma_regs->dmandptr);
473 lp->tx_chain_head = lp->tx_chain_tail;
479 lp->td_ring[chain_prev].control &=
482 lp->td_ring[chain_prev].link = korina_tx_dma(lp, idx);
484 lp->tx_chain_tail = chain_next;
486 writel(korina_tx_dma(lp, lp->tx_chain_head),
487 &lp->tx_dma_regs->dmandptr);
489 lp->tx_chain_head = lp->tx_chain_tail;
490 lp->tx_chain_status = desc_is_empty;
493 if (lp->tx_chain_status == desc_is_empty) {
498 lp->tx_chain_tail = chain_next;
499 lp->tx_chain_status = desc_filled;
504 lp->td_ring[chain_prev].control &=
506 lp->td_ring[chain_prev].link = korina_tx_dma(lp, idx);
507 lp->tx_chain_tail = chain_next;
512 spin_unlock_irqrestore(&lp->lock, flags);
519 spin_unlock_irqrestore(&lp->lock, flags);
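
Lines 422-519 are the transmit path: the skb is streaming-mapped, its bus address is stored in the tail descriptor, and if the DMA engine is idle (dmandptr reads back 0) the chain head address is written to dmandptr to restart it. A rough sketch of just the map-and-check step (hypothetical helper, error handling trimmed to the essentials):

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/skbuff.h>

static int map_tx_skb(struct device *dmadev, struct sk_buff *skb,
		      dma_addr_t *ca)
{
	/* Map the packet for device reads, as done around lines 452-456. */
	*ca = dma_map_single(dmadev, skb->data, skb->len, DMA_TO_DEVICE);
	if (dma_mapping_error(dmadev, *ca))
		return -ENOMEM;	/* caller drops the skb and counts the error */
	return 0;
}
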
524 static int korina_mdio_wait(struct korina_private *lp)
528 return readl_poll_timeout_atomic(&lp->eth_regs->miimind,
535 struct korina_private *lp = netdev_priv(dev);
538 ret = korina_mdio_wait(lp);
542 writel(phy << 8 | reg, &lp->eth_regs->miimaddr);
543 writel(1, &lp->eth_regs->miimcmd);
545 ret = korina_mdio_wait(lp);
549 if (readl(&lp->eth_regs->miimind) & ETH_MII_IND_NV)
552 ret = readl(&lp->eth_regs->miimrdd);
553 writel(0, &lp->eth_regs->miimcmd);
559 struct korina_private *lp = netdev_priv(dev);
561 if (korina_mdio_wait(lp))
564 writel(0, &lp->eth_regs->miimcmd);
565 writel(phy << 8 | reg, &lp->eth_regs->miimaddr);
566 writel(val, &lp->eth_regs->miimwtd);
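
The MDIO accessors at lines 524-566 poll the MII indication register until the previous operation completes before programming the address and command registers. A sketch of the same readl_poll_timeout_atomic() usage; the register pointer and busy bit below are placeholders, not the korina register layout:

#include <linux/bits.h>
#include <linux/iopoll.h>

#define MY_MII_BUSY	BIT(0)	/* assumed busy flag */

static int my_mdio_wait(void __iomem *miimind)
{
	u32 val;

	/* Poll every 1 us, give up after 1000 us, succeed once BUSY clears. */
	return readl_poll_timeout_atomic(miimind, val,
					 !(val & MY_MII_BUSY), 1, 1000);
}
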
573 struct korina_private *lp = netdev_priv(dev);
577 dmas = readl(&lp->rx_dma_regs->dmas);
579 dmasm = readl(&lp->rx_dma_regs->dmasm);
582 &lp->rx_dma_regs->dmasm);
584 napi_schedule(&lp->napi);
598 struct korina_private *lp = netdev_priv(dev);
599 struct dma_desc *rd = &lp->rd_ring[lp->rx_next_done];
606 skb = lp->rx_skb[lp->rx_next_done];
643 ca = dma_map_single(lp->dmadev, skb_new->data, KORINA_RBSIZE,
645 if (dma_mapping_error(lp->dmadev, ca)) {
651 dma_unmap_single(lp->dmadev, lp->rx_skb_dma[lp->rx_next_done],
659 napi_gro_receive(&lp->napi, skb);
667 lp->rx_skb[lp->rx_next_done] = skb_new;
668 lp->rx_skb_dma[lp->rx_next_done] = ca;
674 rd->ca = lp->rx_skb_dma[lp->rx_next_done];
678 lp->rd_ring[(lp->rx_next_done - 1) &
682 lp->rx_next_done = (lp->rx_next_done + 1) & KORINA_RDS_MASK;
683 rd = &lp->rd_ring[lp->rx_next_done];
684 writel((u32)~DMA_STAT_DONE, &lp->rx_dma_regs->dmas);
687 dmas = readl(&lp->rx_dma_regs->dmas);
691 &lp->rx_dma_regs->dmas);
693 lp->dma_halt_cnt++;
695 rd->ca = lp->rx_skb_dma[lp->rx_next_done];
696 writel(korina_rx_dma(lp, rd - lp->rd_ring),
697 &lp->rx_dma_regs->dmandptr);
705 struct korina_private *lp =
707 struct net_device *dev = lp->dev;
714 writel(readl(&lp->rx_dma_regs->dmasm) &
716 &lp->rx_dma_regs->dmasm);
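
Lines 573-584 and 705-716 show the usual NAPI handshake: the RX interrupt handler masks further DMA interrupts by setting bits in dmasm and schedules the poll routine, and the poll routine clears those mask bits again once the ring has been drained. A sketch of the handler half, with an assumed context struct and interrupt bit:

#include <linux/bits.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/netdevice.h>

#define MY_DMA_DONE	BIT(0)	/* assumed "descriptor done" interrupt bit */

struct my_ctx {
	void __iomem *dmasm;		/* DMA interrupt mask register */
	struct napi_struct napi;
};

static irqreturn_t my_rx_irq(int irq, void *dev_id)
{
	struct my_ctx *ctx = dev_id;

	/* Mask further RX DMA interrupts, then let NAPI do the work. */
	writel(readl(ctx->dmasm) | MY_DMA_DONE, ctx->dmasm);
	napi_schedule(&ctx->napi);
	return IRQ_HANDLED;
}
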
726 struct korina_private *lp = netdev_priv(dev);
754 &lp->eth_regs->ethhash0);
756 &lp->eth_regs->ethhash1);
759 spin_lock_irqsave(&lp->lock, flags);
760 writel(recognise, &lp->eth_regs->etharc);
761 spin_unlock_irqrestore(&lp->lock, flags);
766 struct korina_private *lp = netdev_priv(dev);
767 struct dma_desc *td = &lp->td_ring[lp->tx_next_done];
771 spin_lock(&lp->lock);
775 if (lp->tx_full == 1) {
777 lp->tx_full = 0;
780 devcs = lp->td_ring[lp->tx_next_done].devcs;
792 lp->tx_skb[lp->tx_next_done]->len;
819 if (lp->tx_skb[lp->tx_next_done]) {
820 dma_unmap_single(lp->dmadev,
821 lp->tx_skb_dma[lp->tx_next_done],
822 lp->tx_skb[lp->tx_next_done]->len,
824 dev_kfree_skb_any(lp->tx_skb[lp->tx_next_done]);
825 lp->tx_skb[lp->tx_next_done] = NULL;
828 lp->td_ring[lp->tx_next_done].control = DMA_DESC_IOF;
829 lp->td_ring[lp->tx_next_done].devcs = ETH_TX_FD | ETH_TX_LD;
830 lp->td_ring[lp->tx_next_done].link = 0;
831 lp->td_ring[lp->tx_next_done].ca = 0;
832 lp->tx_count--;
835 lp->tx_next_done = (lp->tx_next_done + 1) & KORINA_TDS_MASK;
836 td = &lp->td_ring[lp->tx_next_done];
841 dmas = readl(&lp->tx_dma_regs->dmas);
842 writel(~dmas, &lp->tx_dma_regs->dmas);
844 writel(readl(&lp->tx_dma_regs->dmasm) &
846 &lp->tx_dma_regs->dmasm);
848 spin_unlock(&lp->lock);
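
The completion routine at lines 766-848 walks tx_next_done, unmapping and freeing each transmitted skb before resetting its descriptor and advancing under KORINA_TDS_MASK. A sketch of the per-slot reclaim step (the struct holding the slot is left out; field names only mirror the listing):

#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

static void reclaim_tx_slot(struct device *dmadev, struct sk_buff **slot,
			    dma_addr_t mapping)
{
	if (!*slot)
		return;
	/* Undo the DMA_TO_DEVICE mapping made at transmit time... */
	dma_unmap_single(dmadev, mapping, (*slot)->len, DMA_TO_DEVICE);
	/* ...then free the skb; the _any() variant is safe in any context. */
	dev_kfree_skb_any(*slot);
	*slot = NULL;
}
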
855 struct korina_private *lp = netdev_priv(dev);
859 dmas = readl(&lp->tx_dma_regs->dmas);
862 dmasm = readl(&lp->tx_dma_regs->dmasm);
864 &lp->tx_dma_regs->dmasm);
868 if (lp->tx_chain_status == desc_filled &&
869 (readl(&(lp->tx_dma_regs->dmandptr)) == 0)) {
870 writel(korina_tx_dma(lp, lp->tx_chain_head),
871 &lp->tx_dma_regs->dmandptr);
872 lp->tx_chain_status = desc_is_empty;
873 lp->tx_chain_head = lp->tx_chain_tail;
889 struct korina_private *lp = netdev_priv(dev);
891 mii_check_media(&lp->mii_if, 1, init_media);
893 if (lp->mii_if.full_duplex)
894 writel(readl(&lp->eth_regs->ethmac2) | ETH_MAC2_FD,
895 &lp->eth_regs->ethmac2);
897 writel(readl(&lp->eth_regs->ethmac2) & ~ETH_MAC2_FD,
898 &lp->eth_regs->ethmac2);
903 struct korina_private *lp = timer_container_of(lp, t,
905 struct net_device *dev = lp->dev;
908 mod_timer(&lp->media_check_timer, jiffies + HZ);
923 struct korina_private *lp = netdev_priv(dev);
929 spin_lock_irq(&lp->lock);
930 rc = generic_mii_ioctl(&lp->mii_if, data, cmd, NULL);
931 spin_unlock_irq(&lp->lock);
932 korina_set_carrier(&lp->mii_if);
941 struct korina_private *lp = netdev_priv(dev);
945 strscpy(info->bus_info, lp->dev->name, sizeof(info->bus_info));
951 struct korina_private *lp = netdev_priv(dev);
953 spin_lock_irq(&lp->lock);
954 mii_ethtool_get_link_ksettings(&lp->mii_if, cmd);
955 spin_unlock_irq(&lp->lock);
963 struct korina_private *lp = netdev_priv(dev);
966 spin_lock_irq(&lp->lock);
967 rc = mii_ethtool_set_link_ksettings(&lp->mii_if, cmd);
968 spin_unlock_irq(&lp->lock);
969 korina_set_carrier(&lp->mii_if);
976 struct korina_private *lp = netdev_priv(dev);
978 return mii_link_ok(&lp->mii_if);
990 struct korina_private *lp = netdev_priv(dev);
997 lp->td_ring[i].control = DMA_DESC_IOF;
998 lp->td_ring[i].devcs = ETH_TX_FD | ETH_TX_LD;
999 lp->td_ring[i].ca = 0;
1000 lp->td_ring[i].link = 0;
1002 lp->tx_next_done = lp->tx_chain_head = lp->tx_chain_tail =
1003 lp->tx_full = lp->tx_count = 0;
1004 lp->tx_chain_status = desc_is_empty;
1011 lp->rx_skb[i] = skb;
1012 lp->rd_ring[i].control = DMA_DESC_IOD |
1014 lp->rd_ring[i].devcs = 0;
1015 ca = dma_map_single(lp->dmadev, skb->data, KORINA_RBSIZE,
1017 if (dma_mapping_error(lp->dmadev, ca))
1019 lp->rd_ring[i].ca = ca;
1020 lp->rx_skb_dma[i] = ca;
1021 lp->rd_ring[i].link = korina_rx_dma(lp, i + 1);
1026 lp->rd_ring[i - 1].link = lp->rd_dma;
1027 lp->rd_ring[i - 1].control |= DMA_DESC_COD;
1029 lp->rx_next_done = 0;
1030 lp->rx_chain_head = 0;
1031 lp->rx_chain_tail = 0;
1032 lp->rx_chain_status = desc_is_empty;
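
korina_alloc_ring() (lines 990-1032) maps one receive buffer per descriptor and chains the descriptors by bus address, with the last entry pointing back at rd_dma. A sketch of that wiring, reusing the my_desc/my_priv types from the first sketch above; NUM_RDS is an assumed ring size:

#define NUM_RDS	64	/* assumed; the driver uses KORINA_NUM_RDS */

static void wire_rx_ring(struct my_priv *p)
{
	int i;

	/* Each link holds the bus address of the next descriptor; the
	 * final entry wraps to ring[0], as lines 1021-1026 do with rd_dma.
	 */
	for (i = 0; i < NUM_RDS; i++)
		p->ring[i].link = my_desc_dma(p, (i + 1) % NUM_RDS);
}
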
1039 struct korina_private *lp = netdev_priv(dev);
1043 lp->rd_ring[i].control = 0;
1044 if (lp->rx_skb[i]) {
1045 dma_unmap_single(lp->dmadev, lp->rx_skb_dma[i],
1047 dev_kfree_skb_any(lp->rx_skb[i]);
1048 lp->rx_skb[i] = NULL;
1053 lp->td_ring[i].control = 0;
1054 if (lp->tx_skb[i]) {
1055 dma_unmap_single(lp->dmadev, lp->tx_skb_dma[i],
1056 lp->tx_skb[i]->len, DMA_TO_DEVICE);
1057 dev_kfree_skb_any(lp->tx_skb[i]);
1058 lp->tx_skb[i] = NULL;
1068 struct korina_private *lp = netdev_priv(dev);
1075 writel(0, &lp->eth_regs->ethintfc);
1076 while ((readl(&lp->eth_regs->ethintfc) & ETH_INT_FC_RIP))
1080 writel(ETH_INT_FC_EN, &lp->eth_regs->ethintfc);
1089 writel(0, &lp->rx_dma_regs->dmas);
1091 writel(0, &lp->rx_dma_regs->dmandptr);
1092 writel(korina_rx_dma(lp, 0), &lp->rx_dma_regs->dmadptr);
1094 writel(readl(&lp->tx_dma_regs->dmasm) &
1096 &lp->tx_dma_regs->dmasm);
1097 writel(readl(&lp->rx_dma_regs->dmasm) &
1099 &lp->rx_dma_regs->dmasm);
1102 writel(ETH_ARC_AB, &lp->eth_regs->etharc);
1105 writel(STATION_ADDRESS_LOW(dev), &lp->eth_regs->ethsal0);
1106 writel(STATION_ADDRESS_HIGH(dev), &lp->eth_regs->ethsah0);
1108 writel(STATION_ADDRESS_LOW(dev), &lp->eth_regs->ethsal1);
1109 writel(STATION_ADDRESS_HIGH(dev), &lp->eth_regs->ethsah1);
1111 writel(STATION_ADDRESS_LOW(dev), &lp->eth_regs->ethsal2);
1112 writel(STATION_ADDRESS_HIGH(dev), &lp->eth_regs->ethsah2);
1114 writel(STATION_ADDRESS_LOW(dev), &lp->eth_regs->ethsal3);
1115 writel(STATION_ADDRESS_HIGH(dev), &lp->eth_regs->ethsah3);
1120 &lp->eth_regs->ethmac2);
1123 writel(0x15, &lp->eth_regs->ethipgt);
1125 writel(0x12, &lp->eth_regs->ethipgr);
1129 writel(((lp->mii_clock_freq) / MII_CLOCK + 1) & ~1,
1130 &lp->eth_regs->ethmcp);
1131 writel(0, &lp->eth_regs->miimcfg);
1134 writel(48, &lp->eth_regs->ethfifott);
1136 writel(ETH_MAC1_RE, &lp->eth_regs->ethmac1);
1140 napi_enable(&lp->napi);
1151 struct korina_private *lp = container_of(work,
1153 struct net_device *dev = lp->dev;
1158 disable_irq(lp->rx_irq);
1159 disable_irq(lp->tx_irq);
1161 writel(readl(&lp->tx_dma_regs->dmasm) |
1163 &lp->tx_dma_regs->dmasm);
1164 writel(readl(&lp->rx_dma_regs->dmasm) |
1166 &lp->rx_dma_regs->dmasm);
1168 napi_disable(&lp->napi);
1178 enable_irq(lp->tx_irq);
1179 enable_irq(lp->rx_irq);
1184 struct korina_private *lp = netdev_priv(dev);
1186 schedule_work(&lp->restart_task);
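
korina_tx_timeout() (lines 1184-1186) only schedules restart_task; the heavy lifting at lines 1151-1179 runs from the workqueue, where it is safe to sleep while disabling interrupts and NAPI and reinitializing the rings. A sketch of that defer-to-workqueue split, with a hypothetical context struct:

#include <linux/kernel.h>
#include <linux/workqueue.h>

struct my_restart_ctx {
	struct work_struct restart_task;	/* set up with INIT_WORK() */
	/* ... device state needed for the restart ... */
};

static void my_restart_work(struct work_struct *work)
{
	struct my_restart_ctx *ctx =
		container_of(work, struct my_restart_ctx, restart_task);

	/* May sleep here: disable_irq(), napi_disable(), ring re-init, ... */
	(void)ctx;
}

static void my_tx_timeout(struct my_restart_ctx *ctx)
{
	/* Cheap and non-blocking, so callable from the watchdog context. */
	schedule_work(&ctx->restart_task);
}
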
1200 struct korina_private *lp = netdev_priv(dev);
1212 ret = request_irq(lp->rx_irq, korina_rx_dma_interrupt,
1216 dev->name, lp->rx_irq);
1219 ret = request_irq(lp->tx_irq, korina_tx_dma_interrupt,
1223 dev->name, lp->tx_irq);
1227 mod_timer(&lp->media_check_timer, jiffies + 1);
1232 free_irq(lp->rx_irq, dev);
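
korina_open() (lines 1200-1232) requests the rx and tx DMA interrupts and unwinds the first request if the second one fails. A sketch of that pattern; the handler functions are assumed to exist elsewhere:

#include <linux/interrupt.h>
#include <linux/netdevice.h>

irqreturn_t my_rx_handler(int irq, void *dev_id);	/* assumed handlers */
irqreturn_t my_tx_handler(int irq, void *dev_id);

static int my_request_irqs(struct net_device *dev, int rx_irq, int tx_irq)
{
	int ret;

	ret = request_irq(rx_irq, my_rx_handler, 0, dev->name, dev);
	if (ret)
		return ret;

	ret = request_irq(tx_irq, my_tx_handler, 0, dev->name, dev);
	if (ret)
		free_irq(rx_irq, dev);	/* unwind the first request on failure */

	return ret;
}
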
1240 struct korina_private *lp = netdev_priv(dev);
1243 timer_delete(&lp->media_check_timer);
1246 disable_irq(lp->rx_irq);
1247 disable_irq(lp->tx_irq);
1250 tmp = readl(&lp->tx_dma_regs->dmasm);
1252 writel(tmp, &lp->tx_dma_regs->dmasm);
1255 tmp = readl(&lp->rx_dma_regs->dmasm);
1257 writel(tmp, &lp->rx_dma_regs->dmasm);
1259 napi_disable(&lp->napi);
1261 cancel_work_sync(&lp->restart_task);
1265 free_irq(lp->rx_irq, dev);
1266 free_irq(lp->tx_irq, dev);
1288 struct korina_private *lp;
1299 lp = netdev_priv(dev);
1310 lp->mii_clock_freq = clk_get_rate(clk);
1312 lp->mii_clock_freq = 200000000; /* max possible input clk */
1315 lp->rx_irq = platform_get_irq_byname(pdev, "rx");
1316 lp->tx_irq = platform_get_irq_byname(pdev, "tx");
1323 lp->eth_regs = p;
1330 lp->rx_dma_regs = p;
1337 lp->tx_dma_regs = p;
1339 lp->td_ring = dmam_alloc_coherent(&pdev->dev, TD_RING_SIZE,
1340 &lp->td_dma, GFP_KERNEL);
1341 if (!lp->td_ring)
1344 lp->rd_ring = dmam_alloc_coherent(&pdev->dev, RD_RING_SIZE,
1345 &lp->rd_dma, GFP_KERNEL);
1346 if (!lp->rd_ring)
1349 spin_lock_init(&lp->lock);
1351 dev->irq = lp->rx_irq;
1352 lp->dev = dev;
1353 lp->dmadev = &pdev->dev;
1358 netif_napi_add(dev, &lp->napi, korina_poll);
1360 lp->mii_if.dev = dev;
1361 lp->mii_if.mdio_read = korina_mdio_read;
1362 lp->mii_if.mdio_write = korina_mdio_write;
1363 lp->mii_if.phy_id = 1;
1364 lp->mii_if.phy_id_mask = 0x1f;
1365 lp->mii_if.reg_num_mask = 0x1f;
1375 timer_setup(&lp->media_check_timer, korina_poll_media, 0);
1377 INIT_WORK(&lp->restart_task, korina_restart_task);
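
The probe path (lines 1288-1377) allocates both descriptor rings with dmam_alloc_coherent(), so the CPU pointer and the bus address come back as a matched pair and the memory is released automatically on driver detach. A sketch of one such allocation (lines 1339-1346), again reusing the my_desc/my_priv types from the first sketch; the ring size is an assumption:

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/gfp.h>

#define MY_RING_BYTES	(64 * sizeof(struct my_desc))	/* assumed size */

static int alloc_tx_ring(struct device *dev, struct my_priv *p)
{
	/* Managed variant: no explicit free needed in the remove path. */
	p->ring = dmam_alloc_coherent(dev, MY_RING_BYTES,
				      &p->ring_dma, GFP_KERNEL);
	return p->ring ? 0 : -ENOMEM;
}
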