Lines Matching defs:sp

53 #define TX_BUFFS_AVAIL(sp) ((sp->tx_old <= sp->tx_new) ? \
54 sp->tx_old + (SEEQ_TX_BUFFERS - 1) - sp->tx_new : \
55 sp->tx_old - sp->tx_new - 1)
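
TX_BUFFS_AVAIL (lines 53-55) reports how many transmit descriptors are still free, always leaving one slot unused so a full ring can be told apart from an empty one. Below is a minimal, self-contained sketch of the same arithmetic; SEEQ_TX_BUFFERS and the field names mirror the driver, while the struct and the sample values are purely illustrative.

    #include <stdio.h>

    #define SEEQ_TX_BUFFERS 16              /* assumed ring size; the driver defines its own */

    struct ring {
            int tx_old;                     /* oldest descriptor not yet reclaimed */
            int tx_new;                     /* next descriptor to be filled */
    };

    /* Same formula as TX_BUFFS_AVAIL(); one slot always stays empty. */
    static int tx_buffs_avail(const struct ring *r)
    {
            return (r->tx_old <= r->tx_new)
                    ? r->tx_old + (SEEQ_TX_BUFFERS - 1) - r->tx_new
                    : r->tx_old - r->tx_new - 1;
    }

    int main(void)
    {
            struct ring empty   = { .tx_old = 0, .tx_new = 0 };
            struct ring wrapped = { .tx_old = 5, .tx_new = 2 };

            printf("empty ring:   %d free\n", tx_buffs_avail(&empty));   /* 15 */
            printf("wrapped ring: %d free\n", tx_buffs_avail(&wrapped)); /*  2 */
            return 0;
    }
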
57 #define VIRT_TO_DMA(sp, v) ((sp)->srings_dma + \
58 ((unsigned long)(v) - \
59 (unsigned long)((sp)->rx_desc)))
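
VIRT_TO_DMA (lines 57-59) converts a descriptor's kernel virtual address into its DMA (bus) address by adding the descriptor's byte offset within the ring block to the block's DMA base, sp->srings_dma. A hedged userspace sketch of that offset arithmetic follows; the struct layout and the sample base address are made up for illustration.

    #include <stdio.h>

    /* Stand-in descriptor type; only the offset arithmetic matters here. */
    struct desc { unsigned long pad[8]; };

    struct priv {
            unsigned long srings_dma;       /* DMA address of the descriptor block */
            struct desc  *rx_desc;          /* kernel virtual address of that block */
    };

    /* Same idea as VIRT_TO_DMA(sp, v): DMA base + (v's offset into the block). */
    static unsigned long virt_to_dma(const struct priv *sp, const void *v)
    {
            return sp->srings_dma +
                   ((unsigned long)v - (unsigned long)sp->rx_desc);
    }

    int main(void)
    {
            struct desc ring[4];
            struct priv sp = { .srings_dma = 0x08000000UL, .rx_desc = ring };

            /* Descriptor 2 lands at DMA base + 2 * sizeof(struct desc). */
            printf("desc 2 -> DMA 0x%lx\n", virt_to_dma(&sp, &ring[2]));
            return 0;
    }
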
115 struct sgiseeq_private *sp = netdev_priv(dev);
117 dma_sync_single_for_cpu(dev->dev.parent, VIRT_TO_DMA(sp, addr),
123 struct sgiseeq_private *sp = netdev_priv(dev);
125 dma_sync_single_for_device(dev->dev.parent, VIRT_TO_DMA(sp, addr),
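
The two helpers around lines 115-125 wrap dma_sync_single_for_cpu() and dma_sync_single_for_device(): the descriptor block lives in non-coherent DMA memory, so the CPU must claim a descriptor before touching it and hand it back before the HPC3 DMA engine may look at it again. A hedged kernel-style sketch of that ownership hand-off is below; the function name, the size parameter, and the use of DMA_BIDIRECTIONAL are assumptions for illustration, not the driver's exact code.

    #include <linux/dma-mapping.h>

    /*
     * Sketch: hand a descriptor to the CPU, update it, then hand it back to
     * the device. 'dma' is the descriptor's bus address (what VIRT_TO_DMA
     * yields) and 'size' is the size of one descriptor.
     */
    static void update_descriptor(struct device *dmadev, dma_addr_t dma, size_t size)
    {
            dma_sync_single_for_cpu(dmadev, dma, size, DMA_BIDIRECTIONAL);
            /* ... CPU reads or rewrites the descriptor fields here ... */
            dma_sync_single_for_device(dmadev, dma, size, DMA_BIDIRECTIONAL);
    }
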
146 static inline void seeq_go(struct sgiseeq_private *sp,
150 sregs->rstat = sp->mode | RSTAT_GO_BITS;
156 struct sgiseeq_private *sp = netdev_priv(dev);
157 struct sgiseeq_regs *sregs = sp->sregs;
167 struct sgiseeq_private *sp = netdev_priv(dev);
172 spin_lock_irq(&sp->tx_lock);
174 spin_unlock_irq(&sp->tx_lock);
185 struct sgiseeq_private *sp = netdev_priv(dev);
189 sp->rx_new = sp->tx_new = 0;
190 sp->rx_old = sp->tx_old = 0;
196 sp->tx_desc[i].tdma.cntinfo = TCNTINFO_INIT;
197 dma_sync_desc_dev(dev, &sp->tx_desc[i]);
202 if (!sp->rx_desc[i].skb) {
212 sp->rx_desc[i].skb = skb;
213 sp->rx_desc[i].rdma.pbuf = dma_addr;
215 sp->rx_desc[i].rdma.cntinfo = RCNTINFO_INIT;
216 dma_sync_desc_dev(dev, &sp->rx_desc[i]);
218 sp->rx_desc[i - 1].rdma.cntinfo |= HPCDMA_EOR;
219 dma_sync_desc_dev(dev, &sp->rx_desc[i - 1]);
225 struct sgiseeq_private *sp = netdev_priv(dev);
230 if (sp->tx_desc[i].skb) {
231 dev_kfree_skb(sp->tx_desc[i].skb);
232 sp->tx_desc[i].skb = NULL;
238 if (sp->rx_desc[i].skb) {
239 dev_kfree_skb(sp->rx_desc[i].skb);
240 sp->rx_desc[i].skb = NULL;
291 static int init_seeq(struct net_device *dev, struct sgiseeq_private *sp,
294 struct hpc3_ethregs *hregs = sp->hregs;
303 if (sp->is_edlc) {
305 sregs->rw.wregs.control = sp->control;
311 hregs->rx_ndptr = VIRT_TO_DMA(sp, sp->rx_desc);
312 hregs->tx_ndptr = VIRT_TO_DMA(sp, sp->tx_desc);
314 seeq_go(sp, hregs, sregs);
331 static inline void rx_maybe_restart(struct sgiseeq_private *sp,
336 hregs->rx_ndptr = VIRT_TO_DMA(sp, sp->rx_desc + sp->rx_new);
337 seeq_go(sp, hregs, sregs);
341 static inline void sgiseeq_rx(struct net_device *dev, struct sgiseeq_private *sp,
350 unsigned int orig_end = PREV_RX(sp->rx_new);
353 rd = &sp->rx_desc[sp->rx_new];
405 sp->rx_new = NEXT_RX(sp->rx_new);
407 rd = &sp->rx_desc[sp->rx_new];
412 dma_sync_desc_cpu(dev, &sp->rx_desc[orig_end]);
413 sp->rx_desc[orig_end].rdma.cntinfo &= ~(HPCDMA_EOR);
414 dma_sync_desc_dev(dev, &sp->rx_desc[orig_end]);
415 dma_sync_desc_cpu(dev, &sp->rx_desc[PREV_RX(sp->rx_new)]);
416 sp->rx_desc[PREV_RX(sp->rx_new)].rdma.cntinfo |= HPCDMA_EOR;
417 dma_sync_desc_dev(dev, &sp->rx_desc[PREV_RX(sp->rx_new)]);
418 rx_maybe_restart(sp, hregs, sregs);
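
Lines 412-418 move the end-of-ring (EOR) mark after a burst of received frames: it is cleared on the descriptor that used to terminate the ring and set on the one just before rx_new, so the DMA engine always stops one slot short of the descriptor the CPU will refill next. A minimal sketch of that marker hand-off; the bit value, PREV_RX(), and the ring size are stand-ins, not the driver's definitions.

    #include <stdio.h>

    #define RX_BUFFERS  16
    #define HPCDMA_EOR  0x80000000u                         /* stand-in bit value */
    #define PREV_RX(i)  ((i) ? (i) - 1 : (RX_BUFFERS - 1))

    struct rdesc { unsigned int cntinfo; };

    int main(void)
    {
            struct rdesc rx[RX_BUFFERS] = { { 0 } };
            unsigned int rx_new = 0, orig_end;

            rx[PREV_RX(rx_new)].cntinfo |= HPCDMA_EOR;      /* initial ring end */

            orig_end = PREV_RX(rx_new);                     /* remember the old end */
            rx_new = (rx_new + 3) % RX_BUFFERS;             /* pretend 3 frames were handled */

            rx[orig_end].cntinfo &= ~HPCDMA_EOR;            /* old end is no longer last */
            rx[PREV_RX(rx_new)].cntinfo |= HPCDMA_EOR;      /* new end sits just before rx_new */

            printf("EOR now on descriptor %u\n", PREV_RX(rx_new));
            return 0;
    }
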
421 static inline void tx_maybe_reset_collisions(struct sgiseeq_private *sp,
424 if (sp->is_edlc) {
425 sregs->rw.wregs.control = sp->control & ~(SEEQ_CTRL_XCNT);
426 sregs->rw.wregs.control = sp->control;
431 struct sgiseeq_private *sp,
435 int i = sp->tx_old;
443 td = &sp->tx_desc[i];
448 td = &sp->tx_desc[i];
453 hregs->tx_ndptr = VIRT_TO_DMA(sp, td);
458 static inline void sgiseeq_tx(struct net_device *dev, struct sgiseeq_private *sp,
466 tx_maybe_reset_collisions(sp, sregs);
479 for (j = sp->tx_old; j != sp->tx_new; j = NEXT_TX(j)) {
480 td = &sp->tx_desc[j];
488 hregs->tx_ndptr = VIRT_TO_DMA(sp, td);
494 sp->tx_old = NEXT_TX(sp->tx_old);
508 struct sgiseeq_private *sp = netdev_priv(dev);
509 struct hpc3_ethregs *hregs = sp->hregs;
510 struct sgiseeq_regs *sregs = sp->sregs;
512 spin_lock(&sp->tx_lock);
518 sgiseeq_rx(dev, sp, hregs, sregs);
521 if (sp->tx_old != sp->tx_new)
522 sgiseeq_tx(dev, sp, hregs, sregs);
524 if ((TX_BUFFS_AVAIL(sp) > 0) && netif_queue_stopped(dev)) {
527 spin_unlock(&sp->tx_lock);
534 struct sgiseeq_private *sp = netdev_priv(dev);
535 struct sgiseeq_regs *sregs = sp->sregs;
544 err = init_seeq(dev, sp, sregs);
560 struct sgiseeq_private *sp = netdev_priv(dev);
561 struct sgiseeq_regs *sregs = sp->sregs;
567 reset_hpc3_and_seeq(sp->hregs, sregs);
576 struct sgiseeq_private *sp = netdev_priv(dev);
577 struct sgiseeq_regs *sregs = sp->sregs;
580 err = init_seeq(dev, sp, sregs);
593 struct sgiseeq_private *sp = netdev_priv(dev);
594 struct hpc3_ethregs *hregs = sp->hregs;
599 spin_lock_irqsave(&sp->tx_lock, flags);
605 spin_unlock_irqrestore(&sp->tx_lock, flags);
612 entry = sp->tx_new;
613 td = &sp->tx_desc[entry];
635 if (sp->tx_old != sp->tx_new) {
638 backend = &sp->tx_desc[PREV_TX(sp->tx_new)];
643 sp->tx_new = NEXT_TX(sp->tx_new); /* Advance. */
647 kick_tx(dev, sp, hregs);
649 if (!TX_BUFFS_AVAIL(sp))
651 spin_unlock_irqrestore(&sp->tx_lock, flags);
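
The transmit path stops the queue as soon as TX_BUFFS_AVAIL() drops to zero (line 649), and the interrupt handler restarts it once the reclaim loop has advanced tx_old again (lines 494 and 524). A minimal sketch of that producer/consumer flow control, reusing the TX_BUFFS_AVAIL formula from the sketch above; NEXT_TX() and the boolean flag stand in for the driver's macros and for netif_stop_queue()/netif_wake_queue().

    #include <stdbool.h>
    #include <stdio.h>

    #define TX_BUFFERS  16
    #define NEXT_TX(i)  (((i) + 1) % TX_BUFFERS)

    struct ring { int tx_old, tx_new; bool stopped; };

    static int tx_buffs_avail(const struct ring *r)
    {
            return (r->tx_old <= r->tx_new)
                    ? r->tx_old + (TX_BUFFERS - 1) - r->tx_new
                    : r->tx_old - r->tx_new - 1;
    }

    /* start_xmit side: claim a slot, then stop the queue if the ring is full. */
    static void xmit_one(struct ring *r)
    {
            r->tx_new = NEXT_TX(r->tx_new);
            if (!tx_buffs_avail(r))
                    r->stopped = true;      /* stands in for netif_stop_queue() */
    }

    /* interrupt side: reclaim one completed slot, then restart the queue. */
    static void reclaim_one(struct ring *r)
    {
            r->tx_old = NEXT_TX(r->tx_old);
            if (tx_buffs_avail(r) > 0 && r->stopped)
                    r->stopped = false;     /* stands in for netif_wake_queue() */
    }

    int main(void)
    {
            struct ring r = { 0, 0, false };

            for (int i = 0; i < TX_BUFFERS - 1; i++)
                    xmit_one(&r);                            /* fill the ring */
            printf("queue stopped: %d\n", r.stopped);        /* 1 */
            reclaim_one(&r);
            printf("queue stopped: %d\n", r.stopped);        /* 0 */
            return 0;
    }
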
667 struct sgiseeq_private *sp = netdev_priv(dev);
668 unsigned char oldmode = sp->mode;
671 sp->mode = SEEQ_RCMD_RANY;
673 sp->mode = SEEQ_RCMD_RBMCAST;
675 sp->mode = SEEQ_RCMD_RBCAST;
681 if (oldmode != sp->mode)
689 struct sgiseeq_private *sp = netdev_priv(dev);
693 buf[i].tdma.pnext = VIRT_TO_DMA(sp, buf + i + 1);
698 buf[i].tdma.pnext = VIRT_TO_DMA(sp, buf);
706 struct sgiseeq_private *sp = netdev_priv(dev);
710 buf[i].rdma.pnext = VIRT_TO_DMA(sp, buf + i + 1);
716 buf[i].rdma.pnext = VIRT_TO_DMA(sp, buf);
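
setup_tx_ring() and setup_rx_ring() (lines 689-716) chain the descriptors into a circle the hardware can follow: each descriptor's pnext holds the DMA address of the next one, and the last descriptor points back to the first. A small sketch of that linking, reusing the virt_to_dma() idea from the earlier sketch; the struct layout and base address are illustrative.

    #include <stdio.h>

    struct desc {
            unsigned long pnext;            /* DMA address of the next descriptor */
            unsigned long pad[7];
    };

    static unsigned long virt_to_dma(unsigned long dma_base, const struct desc *base,
                                     const struct desc *d)
    {
            return dma_base + ((unsigned long)d - (unsigned long)base);
    }

    static void setup_ring(struct desc *buf, int nbufs, unsigned long dma_base)
    {
            int i;

            for (i = 0; i < nbufs - 1; i++)                   /* forward links */
                    buf[i].pnext = virt_to_dma(dma_base, buf, buf + i + 1);
            buf[i].pnext = virt_to_dma(dma_base, buf, buf);   /* close the circle */
    }

    int main(void)
    {
            struct desc ring[4];

            setup_ring(ring, 4, 0x08000000UL);
            for (int i = 0; i < 4; i++)
                    printf("desc %d -> pnext 0x%lx\n", i, ring[i].pnext);
            return 0;
    }
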
736 struct sgiseeq_private *sp;
748 sp = netdev_priv(dev);
751 sr = dma_alloc_noncoherent(&pdev->dev, sizeof(*sp->srings),
752 &sp->srings_dma, DMA_BIDIRECTIONAL, GFP_KERNEL);
758 sp->srings = sr;
759 sp->rx_desc = sp->srings->rxvector;
760 sp->tx_desc = sp->srings->txvector;
761 spin_lock_init(&sp->tx_lock);
764 setup_rx_ring(dev, sp->rx_desc, SEEQ_RX_BUFFERS);
765 setup_tx_ring(dev, sp->tx_desc, SEEQ_TX_BUFFERS);
770 gpriv = sp;
773 sp->sregs = (struct sgiseeq_regs *) &hpcregs->eth_ext[0];
774 sp->hregs = &hpcregs->ethregs;
775 sp->name = sgiseeqstr;
776 sp->mode = SEEQ_RCMD_RBCAST;
779 sp->hregs->pconfig = 0x161;
780 sp->hregs->dconfig = HPC3_EDCFG_FIRQ | HPC3_EDCFG_FEOP |
784 sp->hregs->pconfig = 0x161;
785 sp->hregs->dconfig = HPC3_EDCFG_FIRQ | HPC3_EDCFG_FEOP |
789 hpc3_eth_reset(sp->hregs);
791 sp->is_edlc = !(sp->sregs->rw.rregs.collision_tx[0] & 0xff);
792 if (sp->is_edlc)
793 sp->control = SEEQ_CTRL_XCNT | SEEQ_CTRL_ACCNT |
813 dma_free_noncoherent(&pdev->dev, sizeof(*sp->srings), sp->srings,
814 sp->srings_dma, DMA_BIDIRECTIONAL);
825 struct sgiseeq_private *sp = netdev_priv(dev);
828 dma_free_noncoherent(&pdev->dev, sizeof(*sp->srings), sp->srings,
829 sp->srings_dma, DMA_BIDIRECTIONAL);
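
Both teardown sites (lines 813-814 on the probe error path and 828-829 in the remove path) free the descriptor block with the same size, handle, and direction that the probe passed to dma_alloc_noncoherent() at lines 751-752. A hedged kernel-style sketch of that pairing; the struct and function names here are illustrative, not the driver's.

    #include <linux/dma-mapping.h>
    #include <linux/gfp.h>

    /* Placeholder block holding both descriptor vectors; sizes are made up. */
    struct rings {
            char rxvector[16 * 128];
            char txvector[16 * 128];
    };

    static struct rings *alloc_rings(struct device *dev, dma_addr_t *handle)
    {
            /* One non-coherent allocation; descriptor accesses are then
             * bracketed with the sync helpers shown earlier. */
            return dma_alloc_noncoherent(dev, sizeof(struct rings), handle,
                                         DMA_BIDIRECTIONAL, GFP_KERNEL);
    }

    static void free_rings(struct device *dev, struct rings *r, dma_addr_t handle)
    {
            /* Size, handle, and direction must match the allocation. */
            dma_free_noncoherent(dev, sizeof(struct rings), r, handle,
                                 DMA_BIDIRECTIONAL);
    }
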