Lines matching lp (the driver-private struct temac_local *) in the Xilinx LL TEMAC Ethernet driver, drivers/net/ethernet/xilinx/ll_temac_main.c; each match is prefixed with its line number in that file.
69 static u32 _temac_ior_be(struct temac_local *lp, int offset)
71 return ioread32be(lp->regs + offset);
74 static void _temac_iow_be(struct temac_local *lp, int offset, u32 value)
76 return iowrite32be(value, lp->regs + offset);
79 static u32 _temac_ior_le(struct temac_local *lp, int offset)
81 return ioread32(lp->regs + offset);
84 static void _temac_iow_le(struct temac_local *lp, int offset, u32 value)
86 return iowrite32(value, lp->regs + offset);
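Note: these four accessors exist so the rest of the driver never hard-codes register endianness; probe binds lp->temac_ior and lp->temac_iow to either the _le or the _be pair exactly once (lines 1462-1466 below), and lp->dma_in/lp->dma_out are selected the same way. A minimal user-space sketch of that dispatch pattern, with hypothetical names standing in for ioread32()/ioread32be():

#include <stdint.h>
#include <stdio.h>

struct dev {
        uint8_t *regs;
        uint32_t (*ior)(struct dev *d, int offset);
};

static uint32_t ior_le(struct dev *d, int offset)
{
        const uint8_t *p = d->regs + offset;   /* ioread32() stand-in */
        return p[0] | p[1] << 8 | p[2] << 16 | (uint32_t)p[3] << 24;
}

static uint32_t ior_be(struct dev *d, int offset)
{
        const uint8_t *p = d->regs + offset;   /* ioread32be() stand-in */
        return (uint32_t)p[0] << 24 | p[1] << 16 | p[2] << 8 | p[3];
}

int main(void)
{
        uint8_t regs[4] = { 0x12, 0x34, 0x56, 0x78 };
        struct dev d = { .regs = regs, .ior = ior_be };

        printf("0x%08x\n", d.ior(&d, 0));      /* 0x12345678 */
        d.ior = ior_le;                        /* rebind, as probe does */
        printf("0x%08x\n", d.ior(&d, 0));      /* 0x78563412 */
        return 0;
}

Binding the accessor once at probe time keeps the per-register hot path down to a single indirect call.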
89 static bool hard_acs_rdy(struct temac_local *lp)
91 return temac_ior(lp, XTE_RDY0_OFFSET) & XTE_RDY0_HARD_ACS_RDY_MASK;
94 static bool hard_acs_rdy_or_timeout(struct temac_local *lp, ktime_t timeout)
98 return hard_acs_rdy(lp) || ktime_after(cur, timeout);
111 int temac_indirect_busywait(struct temac_local *lp)
115 spin_until_cond(hard_acs_rdy_or_timeout(lp, timeout));
116 if (WARN_ON(!hard_acs_rdy(lp)))
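Note: temac_indirect_busywait() spins via spin_until_cond() until hard_acs_rdy() reports the hard access ready bit or the ktime deadline passes, then WARNs on timeout. A self-contained sketch of the same poll-until-ready-or-deadline shape, using clock_gettime() in place of ktime and a hypothetical always-busy predicate:

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

/* Hypothetical predicate; the driver polls XTE_RDY0 instead. */
static bool ready(void)
{
        return false;
}

static long long now_ns(void)
{
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return ts.tv_sec * 1000000000LL + ts.tv_nsec;
}

/* Spin until ready() or the deadline passes; 0 on success. */
static int busywait(long long budget_ns)
{
        long long deadline = now_ns() + budget_ns;

        while (!ready() && now_ns() < deadline)
                ;       /* the kernel version uses spin_until_cond() */
        return ready() ? 0 : -1;
}

int main(void)
{
        if (busywait(1000000))          /* 1 ms budget */
                fprintf(stderr, "timed out waiting for ready\n");
        return 0;
}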
124 * must be called without lp->indirect_lock being held.
126 u32 temac_indirect_in32(struct temac_local *lp, int reg)
131 spin_lock_irqsave(lp->indirect_lock, flags);
132 val = temac_indirect_in32_locked(lp, reg);
133 spin_unlock_irqrestore(lp->indirect_lock, flags);
139 * function must be called with lp->indirect_lock being held. Use
144 u32 temac_indirect_in32_locked(struct temac_local *lp, int reg)
150 if (WARN_ON(temac_indirect_busywait(lp)))
153 temac_iow(lp, XTE_CTL0_OFFSET, reg);
159 if (WARN_ON(temac_indirect_busywait(lp)))
162 return temac_ior(lp, XTE_LSW0_OFFSET);
167 * must be called without lp->indirect_lock being held.
169 void temac_indirect_out32(struct temac_local *lp, int reg, u32 value)
173 spin_lock_irqsave(lp->indirect_lock, flags);
174 temac_indirect_out32_locked(lp, reg, value);
175 spin_unlock_irqrestore(lp->indirect_lock, flags);
180 * function must be called with lp->indirect_lock being held. Use
185 void temac_indirect_out32_locked(struct temac_local *lp, int reg, u32 value)
191 if (WARN_ON(temac_indirect_busywait(lp)))
194 temac_iow(lp, XTE_LSW0_OFFSET, value);
195 temac_iow(lp, XTE_CTL0_OFFSET, CNTLREG_WRITE_ENABLE_MASK | reg);
200 WARN_ON(temac_indirect_busywait(lp));
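Note: the comments at lines 124, 139, 167 and 180 encode a two-tier locking convention: the *_locked variants assume the caller already holds lp->indirect_lock, while the plain wrappers take and release it around one access. A user-space sketch of the convention, assuming pthread spinlocks and a fake register:

#include <pthread.h>
#include <stdint.h>

static pthread_spinlock_t indirect_lock;
static uint32_t fake_reg;       /* stand-in for the indirect register file */

/* Caller must hold indirect_lock (cf. temac_indirect_in32_locked). */
static uint32_t reg_in_locked(int reg)
{
        (void)reg;
        return fake_reg;
}

/* Caller must NOT hold indirect_lock (cf. temac_indirect_in32). */
static uint32_t reg_in(int reg)
{
        uint32_t val;

        pthread_spin_lock(&indirect_lock);
        val = reg_in_locked(reg);
        pthread_spin_unlock(&indirect_lock);
        return val;
}

int main(void)
{
        pthread_spin_init(&indirect_lock, PTHREAD_PROCESS_PRIVATE);
        return (int)reg_in(0);
}

Batching several *_locked calls under a single acquisition is exactly what temac_set_multicast_list() does around the MAW0/MAW1 writes further down.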
207 * lp->dma_in32.
209 static u32 temac_dma_in32_be(struct temac_local *lp, int reg)
211 return ioread32be(lp->sdma_regs + (reg << 2));
214 static u32 temac_dma_in32_le(struct temac_local *lp, int reg)
216 return ioread32(lp->sdma_regs + (reg << 2));
223 * lp->dma_out32.
225 static void temac_dma_out32_be(struct temac_local *lp, int reg, u32 value)
227 iowrite32be(value, lp->sdma_regs + (reg << 2));
230 static void temac_dma_out32_le(struct temac_local *lp, int reg, u32 value)
232 iowrite32(value, lp->sdma_regs + (reg << 2));
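Note: unlike the TEMAC accessors above, the SDMA accessors take a register index rather than a byte offset; reg << 2 scales the index by the 4-byte register width. A trivial worked example:

#include <stdio.h>

int main(void)
{
        /* Register index -> byte offset for 32-bit-wide registers. */
        for (int reg = 0; reg < 4; reg++)
                printf("reg %d -> byte offset 0x%x\n", reg, reg << 2);
        return 0;       /* 0x0, 0x4, 0x8, 0xc */
}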
244 static u32 temac_dma_dcr_in(struct temac_local *lp, int reg)
246 return dcr_read(lp->sdma_dcrs, reg);
252 static void temac_dma_dcr_out(struct temac_local *lp, int reg, u32 value)
254 dcr_write(lp->sdma_dcrs, reg, value);
261 static int temac_dcr_setup(struct temac_local *lp, struct platform_device *op,
270 lp->sdma_dcrs = dcr_map(np, dcrs, dcr_resource_len(np, 0));
271 lp->dma_in = temac_dma_dcr_in;
272 lp->dma_out = temac_dma_dcr_out;
286 static int temac_dcr_setup(struct temac_local *lp, struct platform_device *op,
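Note: temac_dcr_setup appears twice in the listing (lines 261 and 286) because the driver compiles a real DCR implementation when PowerPC DCR support is configured and a failing stub otherwise, letting probe fall back to memory-mapped SDMA registers (lines 1510 onward). A compile-time sketch of that pattern, assuming a CONFIG_PPC_DCR-style macro:

#include <stdio.h>

#ifdef CONFIG_PPC_DCR
static int dcr_setup_sketch(void)
{
        return 0;       /* would dcr_map() and install the DCR accessors */
}
#else
static int dcr_setup_sketch(void)
{
        return -1;      /* no DCR support on this build */
}
#endif

int main(void)
{
        if (dcr_setup_sketch())
                puts("falling back to memory-mapped SDMA registers");
        return 0;
}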
299 struct temac_local *lp = netdev_priv(ndev);
303 lp->dma_out(lp, DMA_CONTROL_REG, DMA_CONTROL_RST);
305 for (i = 0; i < lp->rx_bd_num; i++) {
306 if (!lp->rx_skb[i])
308 dma_unmap_single(ndev->dev.parent, lp->rx_bd_v[i].phys,
310 dev_kfree_skb(lp->rx_skb[i]);
312 if (lp->rx_bd_v)
314 sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
315 lp->rx_bd_v, lp->rx_bd_p);
316 if (lp->tx_bd_v)
318 sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
319 lp->tx_bd_v, lp->tx_bd_p);
327 struct temac_local *lp = netdev_priv(ndev);
332 lp->rx_skb = devm_kcalloc(&ndev->dev, lp->rx_bd_num,
333 sizeof(*lp->rx_skb), GFP_KERNEL);
334 if (!lp->rx_skb)
339 lp->tx_bd_v = dma_alloc_coherent(ndev->dev.parent,
340 sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
341 &lp->tx_bd_p, GFP_KERNEL);
342 if (!lp->tx_bd_v)
345 lp->rx_bd_v = dma_alloc_coherent(ndev->dev.parent,
346 sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
347 &lp->rx_bd_p, GFP_KERNEL);
348 if (!lp->rx_bd_v)
351 for (i = 0; i < lp->tx_bd_num; i++) {
352 lp->tx_bd_v[i].next = cpu_to_be32(lp->tx_bd_p
353 + sizeof(*lp->tx_bd_v) * ((i + 1) % lp->tx_bd_num));
356 for (i = 0; i < lp->rx_bd_num; i++) {
357 lp->rx_bd_v[i].next = cpu_to_be32(lp->rx_bd_p
358 + sizeof(*lp->rx_bd_v) * ((i + 1) % lp->rx_bd_num));
366 lp->rx_skb[i] = skb;
373 lp->rx_bd_v[i].phys = cpu_to_be32(skb_dma_addr);
374 lp->rx_bd_v[i].len = cpu_to_be32(XTE_MAX_JUMBO_FRAME_SIZE);
375 lp->rx_bd_v[i].app0 = cpu_to_be32(STS_CTRL_APP0_IRQONEND);
379 lp->dma_out(lp, TX_CHNL_CTRL,
380 lp->coalesce_delay_tx << 24 | lp->coalesce_count_tx << 16 |
384 lp->dma_out(lp, RX_CHNL_CTRL,
385 lp->coalesce_delay_rx << 24 | lp->coalesce_count_rx << 16 |
391 lp->tx_bd_ci = 0;
392 lp->tx_bd_tail = 0;
393 lp->rx_bd_ci = 0;
394 lp->rx_bd_tail = lp->rx_bd_num - 1;
398 lp->dma_out(lp, RX_CURDESC_PTR, lp->rx_bd_p);
399 lp->dma_out(lp, RX_TAILDESC_PTR,
400 lp->rx_bd_p + (sizeof(*lp->rx_bd_v) * lp->rx_bd_tail));
403 lp->dma_out(lp, TX_CURDESC_PTR, lp->tx_bd_p);
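Note: temac_dma_bd_init() builds two circular descriptor rings: each buffer descriptor's next field holds the DMA address of the following descriptor, and the modulo at lines 352-353 and 357-358 makes the last entry wrap back to the first, so the hardware follows an endless chain. A sketch of the next-pointer arithmetic, with a hypothetical descriptor size and base address:

#include <stdint.h>
#include <stdio.h>

#define BD_NUM  4               /* illustrative ring size */
#define BD_SIZE 32              /* hypothetical sizeof(struct cdmac_bd) */

int main(void)
{
        uint32_t ring_phys = 0x10000000;        /* hypothetical DMA base */

        /* Each descriptor points at the next; the last wraps to the first. */
        for (int i = 0; i < BD_NUM; i++) {
                uint32_t next = ring_phys + BD_SIZE * ((i + 1) % BD_NUM);

                printf("bd[%d].next = 0x%08x\n", i, next);
        }
        return 0;
}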
418 struct temac_local *lp = netdev_priv(ndev);
422 spin_lock_irqsave(lp->indirect_lock, flags);
423 temac_indirect_out32_locked(lp, XTE_UAW0_OFFSET,
431 temac_indirect_out32_locked(lp, XTE_UAW1_OFFSET,
434 spin_unlock_irqrestore(lp->indirect_lock, flags);
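Note: the two writes above appear to split the 48-bit station address across the unicast address registers, bytes 0-3 into UAW0 and bytes 4-5 into the low half of UAW1. A sketch of that packing, assuming this byte order:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        const uint8_t mac[6] = { 0x00, 0x0a, 0x35, 0x01, 0x02, 0x03 };

        /* Bytes 0-3 into one register, bytes 4-5 into the low half
         * of the next (assumed layout). */
        uint32_t uaw0 = mac[0] | mac[1] << 8 | mac[2] << 16 |
                        (uint32_t)mac[3] << 24;
        uint32_t uaw1 = mac[4] | mac[5] << 8;

        printf("UAW0=0x%08x UAW1=0x%08x\n", uaw0, uaw1);
        return 0;
}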
459 struct temac_local *lp = netdev_priv(ndev);
467 temac_indirect_out32(lp, XTE_AFM_OFFSET, XTE_AFM_EPPRM_MASK);
472 spin_lock_irqsave(lp->indirect_lock, flags);
484 temac_indirect_out32_locked(lp, XTE_MAW0_OFFSET,
488 temac_indirect_out32_locked(lp, XTE_MAW1_OFFSET,
496 temac_indirect_out32_locked(lp, XTE_MAW0_OFFSET, 0);
497 temac_indirect_out32_locked(lp, XTE_MAW1_OFFSET, i << 16);
502 if (temac_indirect_in32_locked(lp, XTE_AFM_OFFSET)
504 temac_indirect_out32_locked(lp, XTE_AFM_OFFSET, 0);
508 spin_unlock_irqrestore(lp->indirect_lock, flags);
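Note: MAW0/MAW1 act as a port into the multicast address CAM; judging from the clear path at lines 496-497, the bits of MAW1 above the two MAC bytes carry the entry index, so clearing entry i writes a zero MAW0 and just the index in MAW1. A sketch of the index encoding (an assumption based on the i << 16 above):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* Assumed encoding: MAW1 bits 0-15 hold MAC bytes 4-5, the
         * entry index sits above them, so a bare index clears the slot. */
        for (uint32_t i = 0; i < 4; i++)
                printf("clear CAM entry %u: MAW1 = 0x%08x\n", i, i << 16);
        return 0;
}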
599 struct temac_local *lp = netdev_priv(ndev);
604 spin_lock_irqsave(lp->indirect_lock, flags);
606 reg = temac_indirect_in32_locked(lp, tp->reg) & ~tp->m_or;
609 temac_indirect_out32_locked(lp, tp->reg, reg);
613 spin_unlock_irqrestore(lp->indirect_lock, flags);
614 lp->options |= options;
622 struct temac_local *lp = netdev_priv(ndev);
635 temac_indirect_out32(lp, XTE_RXC1_OFFSET, XTE_RXC1_RXRST_MASK);
637 while (temac_indirect_in32(lp, XTE_RXC1_OFFSET) & XTE_RXC1_RXRST_MASK) {
647 temac_indirect_out32(lp, XTE_TXC_OFFSET, XTE_TXC_TXRST_MASK);
649 while (temac_indirect_in32(lp, XTE_TXC_OFFSET) & XTE_TXC_TXRST_MASK) {
659 spin_lock_irqsave(lp->indirect_lock, flags);
660 val = temac_indirect_in32_locked(lp, XTE_RXC1_OFFSET);
661 temac_indirect_out32_locked(lp, XTE_RXC1_OFFSET,
663 spin_unlock_irqrestore(lp->indirect_lock, flags);
666 lp->dma_out(lp, DMA_CONTROL_REG, DMA_CONTROL_RST);
668 while (lp->dma_in(lp, DMA_CONTROL_REG) & DMA_CONTROL_RST) {
676 lp->dma_out(lp, DMA_CONTROL_REG, DMA_TAIL_ENABLE);
683 spin_lock_irqsave(lp->indirect_lock, flags);
684 temac_indirect_out32_locked(lp, XTE_RXC0_OFFSET, 0);
685 temac_indirect_out32_locked(lp, XTE_RXC1_OFFSET, 0);
686 temac_indirect_out32_locked(lp, XTE_TXC_OFFSET, 0);
687 temac_indirect_out32_locked(lp, XTE_FCC_OFFSET, XTE_FCC_RXFLO_MASK);
688 spin_unlock_irqrestore(lp->indirect_lock, flags);
694 lp->options & ~(XTE_OPTION_TXEN | XTE_OPTION_RXEN));
700 if (temac_setoptions(ndev, lp->options))
709 struct temac_local *lp = netdev_priv(ndev);
718 if (lp->last_link != link_state) {
719 spin_lock_irqsave(lp->indirect_lock, flags);
720 mii_speed = temac_indirect_in32_locked(lp, XTE_EMCFG_OFFSET);
736 temac_indirect_out32_locked(lp, XTE_EMCFG_OFFSET, mii_speed);
737 spin_unlock_irqrestore(lp->indirect_lock, flags);
739 lp->last_link = link_state;
773 struct temac_local *lp = netdev_priv(ndev);
778 cur_p = &lp->tx_bd_v[lp->tx_bd_ci];
805 lp->tx_bd_ci++;
806 if (lp->tx_bd_ci >= lp->tx_bd_num)
807 lp->tx_bd_ci = 0;
809 cur_p = &lp->tx_bd_v[lp->tx_bd_ci];
819 static inline int temac_check_tx_bd_space(struct temac_local *lp, int num_frag)
824 tail = lp->tx_bd_tail;
825 cur_p = &lp->tx_bd_v[tail];
835 if (tail >= lp->tx_bd_num)
838 cur_p = &lp->tx_bd_v[tail];
848 struct temac_local *lp = netdev_priv(ndev);
857 cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
859 if (temac_check_tx_bd_space(lp, num_frag + 1)) {
869 if (temac_check_tx_bd_space(lp, num_frag + 1))
898 if (++lp->tx_bd_tail >= lp->tx_bd_num)
899 lp->tx_bd_tail = 0;
901 cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
907 if (--lp->tx_bd_tail < 0)
908 lp->tx_bd_tail = lp->tx_bd_num - 1;
909 cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
916 if (--lp->tx_bd_tail < 0)
917 lp->tx_bd_tail = lp->tx_bd_num - 1;
918 cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
939 tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * lp->tx_bd_tail;
940 lp->tx_bd_tail++;
941 if (lp->tx_bd_tail >= lp->tx_bd_num)
942 lp->tx_bd_tail = 0;
948 lp->dma_out(lp, TX_TAILDESC_PTR, tail_p); /* DMA start */
950 if (temac_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1))
956 static int ll_temac_recv_buffers_available(struct temac_local *lp)
960 if (!lp->rx_skb[lp->rx_bd_ci])
962 available = 1 + lp->rx_bd_tail - lp->rx_bd_ci;
964 available += lp->rx_bd_num;
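Note: ll_temac_recv_buffers_available() derives how many descriptors the hardware can still fill from the consumer index (rx_bd_ci) and the inclusive producer index (rx_bd_tail); the +1 accounts for tail being inclusive, and a non-positive difference means the window wrapped, so the ring size is added back (lines 962-964). The computation, extracted with the indices as plain parameters:

#include <stdio.h>

/* The computation from lines 960-964, indices as parameters. */
static int buffers_available(int tail, int ci, int num)
{
        int available = 1 + tail - ci;          /* tail is inclusive */

        if (available <= 0)
                available += num;               /* window wrapped */
        return available;
}

int main(void)
{
        printf("%d\n", buffers_available(63, 0, 64));   /* 64: fully stocked */
        printf("%d\n", buffers_available(2, 60, 64));   /* 7: nearly drained */
        return 0;
}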
970 struct temac_local *lp = netdev_priv(ndev);
975 spin_lock_irqsave(&lp->rx_lock, flags);
983 struct cdmac_bd *bd = &lp->rx_bd_v[lp->rx_bd_ci];
984 struct sk_buff *skb = lp->rx_skb[lp->rx_bd_ci];
1011 if (((lp->temac_features & TEMAC_FEATURE_RX_CSUM) != 0) &&
1026 lp->rx_skb[lp->rx_bd_ci] = NULL;
1031 rx_bd = lp->rx_bd_ci;
1032 if (++lp->rx_bd_ci >= lp->rx_bd_num)
1033 lp->rx_bd_ci = 0;
1034 } while (rx_bd != lp->rx_bd_tail);
1045 if (ll_temac_recv_buffers_available(lp) < lp->coalesce_count_rx)
1046 schedule_delayed_work(&lp->restart_work, HZ / 1000);
1062 rx_bd = lp->rx_bd_tail + 1;
1063 if (rx_bd >= lp->rx_bd_num)
1065 bd = &lp->rx_bd_v[rx_bd];
1088 lp->rx_skb[rx_bd] = skb;
1090 lp->rx_bd_tail = rx_bd;
1096 lp->dma_out(lp, RX_TAILDESC_PTR,
1097 lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_tail);
1100 spin_unlock_irqrestore(&lp->rx_lock, flags);
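Note: when the refill loop cannot restock enough buffers to guarantee another rx interrupt (fewer than coalesce_count_rx available), lines 1045-1046 schedule restart_work roughly a millisecond out; HZ / 1000 converts that to jiffies, and with HZ below 1000 the integer division yields 0, meaning "run at the next opportunity":

#include <stdio.h>

int main(void)
{
        /* HZ / 1000 converts ~1 ms into jiffies; below HZ=1000 the
         * integer division gives 0, i.e. "run at the next opportunity". */
        int hz_values[] = { 100, 250, 1000 };

        for (int i = 0; i < 3; i++)
                printf("HZ=%d -> delay %d jiffies\n",
                       hz_values[i], hz_values[i] / 1000);
        return 0;
}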
1108 struct temac_local *lp = container_of(work, struct temac_local,
1110 struct net_device *ndev = lp->ndev;
1118 struct temac_local *lp = netdev_priv(ndev);
1121 status = lp->dma_in(lp, TX_IRQ_REG);
1122 lp->dma_out(lp, TX_IRQ_REG, status);
1125 temac_start_xmit_done(lp->ndev);
1129 status, lp->dma_in(lp, TX_CHNL_STS));
1137 struct temac_local *lp = netdev_priv(ndev);
1141 status = lp->dma_in(lp, RX_IRQ_REG);
1142 lp->dma_out(lp, RX_IRQ_REG, status);
1145 ll_temac_recv(lp->ndev);
1149 status, lp->dma_in(lp, RX_CHNL_STS));
1156 struct temac_local *lp = netdev_priv(ndev);
1162 if (lp->phy_node) {
1163 phydev = of_phy_connect(lp->ndev, lp->phy_node,
1166 dev_err(lp->dev, "of_phy_connect() failed\n");
1170 } else if (strlen(lp->phy_name) > 0) {
1171 phydev = phy_connect(lp->ndev, lp->phy_name, temac_adjust_link,
1172 lp->phy_interface);
1174 dev_err(lp->dev, "phy_connect() failed\n");
1182 rc = request_irq(lp->tx_irq, ll_temac_tx_irq, 0, ndev->name, ndev);
1185 rc = request_irq(lp->rx_irq, ll_temac_rx_irq, 0, ndev->name, ndev);
1192 free_irq(lp->tx_irq, ndev);
1196 dev_err(lp->dev, "request_irq() failed\n");
1202 struct temac_local *lp = netdev_priv(ndev);
1207 cancel_delayed_work_sync(&lp->restart_work);
1209 free_irq(lp->tx_irq, ndev);
1210 free_irq(lp->rx_irq, ndev);
1224 struct temac_local *lp = netdev_priv(ndev);
1226 disable_irq(lp->tx_irq);
1227 disable_irq(lp->rx_irq);
1229 ll_temac_rx_irq(lp->tx_irq, ndev);
1230 ll_temac_tx_irq(lp->rx_irq, ndev);
1232 enable_irq(lp->tx_irq);
1233 enable_irq(lp->rx_irq);
1257 struct temac_local *lp = netdev_priv(ndev);
1261 len += sprintf(buf + len, "%.8x%s", lp->dma_in(lp, i),
1289 struct temac_local *lp = netdev_priv(ndev);
1295 ering->rx_pending = lp->rx_bd_num;
1298 ering->tx_pending = lp->tx_bd_num;
1307 struct temac_local *lp = netdev_priv(ndev);
1318 lp->rx_bd_num = ering->rx_pending;
1319 lp->tx_bd_num = ering->tx_pending;
1329 struct temac_local *lp = netdev_priv(ndev);
1331 ec->rx_max_coalesced_frames = lp->coalesce_count_rx;
1332 ec->tx_max_coalesced_frames = lp->coalesce_count_tx;
1333 ec->rx_coalesce_usecs = (lp->coalesce_delay_rx * 512) / 100;
1334 ec->tx_coalesce_usecs = (lp->coalesce_delay_tx * 512) / 100;
1344 struct temac_local *lp = netdev_priv(ndev);
1353 lp->coalesce_count_rx = ec->rx_max_coalesced_frames;
1355 lp->coalesce_count_tx = ec->tx_max_coalesced_frames;
1360 lp->coalesce_delay_rx =
1363 lp->coalesce_delay_tx =
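Note: ethtool speaks microseconds, but the hardware delay field counts ticks of 5.12 us, hence ticks * 512 / 100 on the get side (lines 1333-1334); the set side presumably applies the inverse usecs * 100 / 512. Worked numbers for the default rx delay from line 1493:

#include <stdio.h>

int main(void)
{
        unsigned int ticks = 0xff;      /* default rx delay, line 1493 */
        unsigned int usecs = ticks * 512 / 100;

        printf("%u ticks = %u us\n", ticks, usecs);     /* 255 -> 1305 us */

        /* Inverse for the set path (assumed): */
        printf("%u us -> %u ticks\n", usecs, usecs * 100 / 512);  /* 254 */
        return 0;
}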
1387 struct temac_local *lp;
1395 ndev = devm_alloc_etherdev(&pdev->dev, sizeof(*lp));
1419 lp = netdev_priv(ndev);
1420 lp->ndev = ndev;
1421 lp->dev = &pdev->dev;
1422 lp->options = XTE_OPTION_DEFAULTS;
1423 lp->rx_bd_num = RX_BD_NUM_DEFAULT;
1424 lp->tx_bd_num = TX_BD_NUM_DEFAULT;
1425 spin_lock_init(&lp->rx_lock);
1426 INIT_DELAYED_WORK(&lp->restart_work, ll_temac_restart_work_func);
1435 lp->indirect_lock = pdata->indirect_lock;
1437 lp->indirect_lock = devm_kmalloc(&pdev->dev,
1438 sizeof(*lp->indirect_lock),
1440 if (!lp->indirect_lock)
1442 spin_lock_init(lp->indirect_lock);
1446 lp->regs = devm_platform_ioremap_resource(pdev, 0);
1447 if (IS_ERR(lp->regs)) {
1462 lp->temac_ior = _temac_ior_le;
1463 lp->temac_iow = _temac_iow_le;
1465 lp->temac_ior = _temac_ior_be;
1466 lp->temac_iow = _temac_iow_be;
1470 lp->temac_features = 0;
1474 lp->temac_features |= TEMAC_FEATURE_TX_CSUM;
1477 lp->temac_features |= TEMAC_FEATURE_RX_CSUM;
1480 lp->temac_features |= TEMAC_FEATURE_TX_CSUM;
1482 lp->temac_features |= TEMAC_FEATURE_RX_CSUM;
1484 if (lp->temac_features & TEMAC_FEATURE_TX_CSUM)
1491 lp->coalesce_delay_tx = 0x10;
1492 lp->coalesce_count_tx = 0x22;
1493 lp->coalesce_delay_rx = 0xff;
1494 lp->coalesce_count_rx = 0x07;
1510 if (temac_dcr_setup(lp, pdev, dma_np)) {
1512 lp->sdma_regs = devm_of_iomap(&pdev->dev, dma_np, 0,
1514 if (IS_ERR(lp->sdma_regs)) {
1518 return PTR_ERR(lp->sdma_regs);
1521 lp->dma_in = temac_dma_in32_le;
1522 lp->dma_out = temac_dma_out32_le;
1524 lp->dma_in = temac_dma_in32_be;
1525 lp->dma_out = temac_dma_out32_be;
1527 dev_dbg(&pdev->dev, "MEM base: %p\n", lp->sdma_regs);
1531 lp->rx_irq = irq_of_parse_and_map(dma_np, 0);
1532 lp->tx_irq = irq_of_parse_and_map(dma_np, 1);
1538 lp->sdma_regs = devm_platform_ioremap_resource(pdev, 1);
1539 if (IS_ERR(lp->sdma_regs)) {
1542 return PTR_ERR(lp->sdma_regs);
1545 lp->dma_in = temac_dma_in32_le;
1546 lp->dma_out = temac_dma_out32_le;
1548 lp->dma_in = temac_dma_in32_be;
1549 lp->dma_out = temac_dma_out32_be;
1553 lp->rx_irq = platform_get_irq(pdev, 0);
1554 lp->tx_irq = platform_get_irq(pdev, 1);
1558 lp->coalesce_delay_tx = pdata->tx_irq_timeout;
1559 lp->coalesce_count_tx = pdata->tx_irq_count;
1562 lp->coalesce_delay_rx = pdata->rx_irq_timeout;
1563 lp->coalesce_count_rx = pdata->rx_irq_count;
1568 if (lp->rx_irq <= 0) {
1569 rc = lp->rx_irq ?: -EINVAL;
1573 if (lp->tx_irq <= 0) {
1574 rc = lp->tx_irq ?: -EINVAL;
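Note: rc = lp->rx_irq ?: -EINVAL uses the GNU a ?: b extension, equivalent to a ? a : b with a evaluated only once; an IRQ value of 0 ("not found") becomes -EINVAL, while a negative error code such as -EPROBE_DEFER propagates unchanged:

#include <stdio.h>

int main(void)
{
        int irq;

        /* GNU extension: (a ?: b) == (a ? a : b), a evaluated once. */
        irq = 0;                        /* "no IRQ found" */
        printf("%d\n", irq ?: -22);     /* -22, i.e. -EINVAL */

        irq = -517;                     /* -EPROBE_DEFER from the core */
        printf("%d\n", irq ?: -22);     /* -517 passes through */
        return 0;
}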
1591 rc = temac_mdio_setup(lp, pdev);
1596 lp->phy_node = of_parse_phandle(temac_np, "phy-handle", 0);
1597 if (lp->phy_node)
1598 dev_dbg(lp->dev, "using PHY node %pOF\n", lp->phy_node);
1600 snprintf(lp->phy_name, sizeof(lp->phy_name),
1601 PHY_ID_FMT, lp->mii_bus->id, pdata->phy_addr);
1602 lp->phy_interface = pdata->phy_interface;
1606 rc = sysfs_create_group(&lp->dev->kobj, &temac_attr_group);
1608 dev_err(lp->dev, "Error creating sysfs files\n");
1612 rc = register_netdev(lp->ndev);
1614 dev_err(lp->dev, "register_netdev() error (%i)\n", rc);
1621 sysfs_remove_group(&lp->dev->kobj, &temac_attr_group);
1623 if (lp->phy_node)
1624 of_node_put(lp->phy_node);
1625 temac_mdio_teardown(lp);
1632 struct temac_local *lp = netdev_priv(ndev);
1635 sysfs_remove_group(&lp->dev->kobj, &temac_attr_group);
1636 if (lp->phy_node)
1637 of_node_put(lp->phy_node);
1638 temac_mdio_teardown(lp);