Lines Matching defs:lp
45 struct sonic_local *lp = netdev_priv(dev);
47 lp->msg_enable = netif_msg_init(sonic_debug, 0);
50 netif_dbg(lp, drv, dev, "%s", version);
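Lines 45-50 seed the driver's log mask with netif_msg_init() and gate debug output through netif_dbg(). A minimal standalone sketch of that pattern, assuming a sonic_debug module parameter as in the matches above; the cut-down struct and the my_ names are illustrative, not the driver's own:

#include <linux/netdevice.h>
#include <linux/moduleparam.h>

static int sonic_debug = -1;            /* -1 lets netif_msg_init() pick the default */
module_param(sonic_debug, int, 0);

struct my_priv {                        /* stand-in for struct sonic_local */
        u32 msg_enable;                 /* NETIF_MSG_* bits consulted by netif_dbg() */
};

static void my_msg_init(struct net_device *dev)
{
        struct my_priv *lp = netdev_priv(dev);

        /* Fold the module parameter into a NETIF_MSG_* mask; 0 = no default bits. */
        lp->msg_enable = netif_msg_init(sonic_debug, 0);

        /* Emitted only when NETIF_MSG_DRV is set in lp->msg_enable. */
        netif_dbg(lp, drv, dev, "debug mask %#x\n", lp->msg_enable);
}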
55 struct sonic_local *lp = netdev_priv(dev);
61 lp->descriptors =
62 dma_alloc_coherent(lp->device,
64 SONIC_BUS_SCALE(lp->dma_bitmode),
65 &lp->descriptors_laddr, GFP_KERNEL);
67 if (!lp->descriptors)
70 lp->cda = lp->descriptors;
71 lp->tda = lp->cda + SIZEOF_SONIC_CDA *
72 SONIC_BUS_SCALE(lp->dma_bitmode);
73 lp->rda = lp->tda + SIZEOF_SONIC_TD * SONIC_NUM_TDS *
74 SONIC_BUS_SCALE(lp->dma_bitmode);
75 lp->rra = lp->rda + SIZEOF_SONIC_RD * SONIC_NUM_RDS *
76 SONIC_BUS_SCALE(lp->dma_bitmode);
78 lp->cda_laddr = lp->descriptors_laddr;
79 lp->tda_laddr = lp->cda_laddr + SIZEOF_SONIC_CDA *
80 SONIC_BUS_SCALE(lp->dma_bitmode);
81 lp->rda_laddr = lp->tda_laddr + SIZEOF_SONIC_TD * SONIC_NUM_TDS *
82 SONIC_BUS_SCALE(lp->dma_bitmode);
83 lp->rra_laddr = lp->rda_laddr + SIZEOF_SONIC_RD * SONIC_NUM_RDS *
84 SONIC_BUS_SCALE(lp->dma_bitmode);
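Lines 55-84 allocate one coherent DMA block and carve it into the four descriptor areas, keeping the kernel virtual pointers (cda/tda/rda/rra) and the bus addresses (*_laddr) at matching offsets. A sketch of that carving, assuming the driver's SIZEOF_SONIC_* / SONIC_NUM_* constants and SONIC_BUS_SCALE(); the total size on the elided line 63 (it does not contain "lp", so it is absent from the matches) is assumed to be SIZEOF_SONIC_DESC times the bus scale:

static int my_alloc_descriptors(struct net_device *dev)
{
        struct sonic_local *lp = netdev_priv(dev);

        /* One allocation backs the CAM (CDA), transmit (TDA), receive (RDA)
         * and receive resource (RRA) areas. */
        lp->descriptors =
                dma_alloc_coherent(lp->device,
                                   SIZEOF_SONIC_DESC *          /* assumed size */
                                   SONIC_BUS_SCALE(lp->dma_bitmode),
                                   &lp->descriptors_laddr, GFP_KERNEL);
        if (!lp->descriptors)
                return -ENOMEM;

        /* CPU-side pointers... */
        lp->cda = lp->descriptors;
        lp->tda = lp->cda + SIZEOF_SONIC_CDA * SONIC_BUS_SCALE(lp->dma_bitmode);
        lp->rda = lp->tda + SIZEOF_SONIC_TD * SONIC_NUM_TDS *
                            SONIC_BUS_SCALE(lp->dma_bitmode);
        lp->rra = lp->rda + SIZEOF_SONIC_RD * SONIC_NUM_RDS *
                            SONIC_BUS_SCALE(lp->dma_bitmode);

        /* ...and bus addresses at identical offsets, so arithmetic on one
         * side always corresponds to the other. */
        lp->cda_laddr = lp->descriptors_laddr;
        lp->tda_laddr = lp->cda_laddr + SIZEOF_SONIC_CDA * SONIC_BUS_SCALE(lp->dma_bitmode);
        lp->rda_laddr = lp->tda_laddr + SIZEOF_SONIC_TD * SONIC_NUM_TDS *
                                        SONIC_BUS_SCALE(lp->dma_bitmode);
        lp->rra_laddr = lp->rda_laddr + SIZEOF_SONIC_RD * SONIC_NUM_RDS *
                                        SONIC_BUS_SCALE(lp->dma_bitmode);
        return 0;
}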
98 struct sonic_local *lp = netdev_priv(dev);
101 netif_dbg(lp, ifup, dev, "%s: initializing sonic driver\n", __func__);
103 spin_lock_init(&lp->lock);
110 dev_kfree_skb(lp->rx_skb[i]);
111 lp->rx_skb[i] = NULL;
118 if (SONIC_BUS_SCALE(lp->dma_bitmode) == 2)
120 lp->rx_skb[i] = skb;
124 dma_addr_t laddr = dma_map_single(lp->device, skb_put(lp->rx_skb[i], SONIC_RBSIZE),
126 if (dma_mapping_error(lp->device, laddr)) {
129 dma_unmap_single(lp->device, lp->rx_laddr[i], SONIC_RBSIZE, DMA_FROM_DEVICE);
130 lp->rx_laddr[i] = (dma_addr_t)0;
133 dev_kfree_skb(lp->rx_skb[i]);
134 lp->rx_skb[i] = NULL;
140 lp->rx_laddr[i] = laddr;
150 netif_dbg(lp, ifup, dev, "%s: Initialization done\n", __func__);
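Lines 98-150 are the open path: one receive skb per slot, a 2-byte skb_reserve() when the bus scale is 2 so the IP header lands on a 4-byte boundary, a streaming dma_map_single() per buffer, and a full unwind of already-mapped slots when a mapping fails. A condensed sketch of that loop; the SONIC_NUM_RRS bound and the undo label are assumptions:

static int my_open_rx_ring(struct net_device *dev)
{
        struct sonic_local *lp = netdev_priv(dev);
        int i;

        for (i = 0; i < SONIC_NUM_RRS; i++) {
                struct sk_buff *skb = netdev_alloc_skb(dev, SONIC_RBSIZE + 2);
                dma_addr_t laddr;

                if (!skb)
                        goto undo;
                /* Align the IP header on 16-bit bus-scale parts. */
                if (SONIC_BUS_SCALE(lp->dma_bitmode) == 2)
                        skb_reserve(skb, 2);

                laddr = dma_map_single(lp->device, skb_put(skb, SONIC_RBSIZE),
                                       SONIC_RBSIZE, DMA_FROM_DEVICE);
                if (dma_mapping_error(lp->device, laddr)) {
                        dev_kfree_skb(skb);
                        goto undo;
                }
                lp->rx_skb[i] = skb;
                lp->rx_laddr[i] = laddr;
        }
        return 0;

undo:   /* Unmap and free whatever the earlier iterations set up. */
        while (--i >= 0) {
                dma_unmap_single(lp->device, lp->rx_laddr[i], SONIC_RBSIZE,
                                 DMA_FROM_DEVICE);
                lp->rx_laddr[i] = (dma_addr_t)0;
                dev_kfree_skb(lp->rx_skb[i]);
                lp->rx_skb[i] = NULL;
        }
        return -ENOMEM;
}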
158 struct sonic_local * __maybe_unused lp = netdev_priv(dev);
179 struct sonic_local *lp = netdev_priv(dev);
182 netif_dbg(lp, ifdown, dev, "%s\n", __func__);
198 if (lp->tx_laddr[i]) {
199 dma_unmap_single(lp->device, lp->tx_laddr[i], lp->tx_len[i], DMA_TO_DEVICE);
200 lp->tx_laddr[i] = (dma_addr_t)0;
202 if (lp->tx_skb[i]) {
203 dev_kfree_skb(lp->tx_skb[i]);
204 lp->tx_skb[i] = NULL;
210 if (lp->rx_laddr[i]) {
211 dma_unmap_single(lp->device, lp->rx_laddr[i], SONIC_RBSIZE, DMA_FROM_DEVICE);
212 lp->rx_laddr[i] = (dma_addr_t)0;
214 if (lp->rx_skb[i]) {
215 dev_kfree_skb(lp->rx_skb[i]);
216 lp->rx_skb[i] = NULL;
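Lines 179-216 tear the rings down on close: every transmit buffer still mapped is unmapped with the length recorded at queue time (lp->tx_len[i]) and its skb freed, then the receive side gets the same treatment with SONIC_RBSIZE and DMA_FROM_DEVICE. A sketch of the transmit half; the SONIC_NUM_TDS bound is an assumption, and the receive loop at lines 210-216 mirrors it:

/* Drop any transmit buffer still owned by the driver (sketch). */
static void my_drop_tx_buffers(struct net_device *dev)
{
        struct sonic_local *lp = netdev_priv(dev);
        int i;

        for (i = 0; i < SONIC_NUM_TDS; i++) {
                if (lp->tx_laddr[i]) {
                        /* Unmap with the length recorded at queue time. */
                        dma_unmap_single(lp->device, lp->tx_laddr[i],
                                         lp->tx_len[i], DMA_TO_DEVICE);
                        lp->tx_laddr[i] = (dma_addr_t)0;
                }
                if (lp->tx_skb[i]) {
                        dev_kfree_skb(lp->tx_skb[i]);
                        lp->tx_skb[i] = NULL;
                }
        }
}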
225 struct sonic_local *lp = netdev_priv(dev);
239 if (lp->tx_laddr[i]) {
240 dma_unmap_single(lp->device, lp->tx_laddr[i], lp->tx_len[i], DMA_TO_DEVICE);
241 lp->tx_laddr[i] = (dma_addr_t)0;
243 if (lp->tx_skb[i]) {
244 dev_kfree_skb(lp->tx_skb[i]);
245 lp->tx_skb[i] = NULL;
250 lp->stats.tx_errors++;
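Lines 225-250 show the transmit-timeout path reusing the same reclaim loop as the close path, then counting one tx_errors per timeout. A sketch reusing the helper above; reinitializing the chip and waking the queue afterwards is an assumption, since those lines do not match lp:

static void my_tx_timeout(struct net_device *dev)
{
        struct sonic_local *lp = netdev_priv(dev);

        my_drop_tx_buffers(dev);        /* same loop as the close path */
        lp->stats.tx_errors++;          /* line 250: one error per timeout */
        netif_wake_queue(dev);          /* assumed: let the stack retry */
}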
275 struct sonic_local *lp = netdev_priv(dev);
281 netif_dbg(lp, tx_queued, dev, "%s: skb=%p\n", __func__, skb);
294 laddr = dma_map_single(lp->device, skb->data, length, DMA_TO_DEVICE);
295 if (dma_mapping_error(lp->device, laddr)) {
301 spin_lock_irqsave(&lp->lock, flags);
303 entry = (lp->eol_tx + 1) & SONIC_TDS_MASK;
314 sonic_tda_put(dev, lp->eol_tx, SONIC_TD_LINK, ~SONIC_EOL &
315 sonic_tda_get(dev, lp->eol_tx, SONIC_TD_LINK));
317 netif_dbg(lp, tx_queued, dev, "%s: issuing Tx command\n", __func__);
321 lp->tx_len[entry] = length;
322 lp->tx_laddr[entry] = laddr;
323 lp->tx_skb[entry] = skb;
325 lp->eol_tx = entry;
328 if (lp->tx_skb[entry]) {
330 netif_dbg(lp, tx_queued, dev, "%s: stopping queue\n", __func__);
335 spin_unlock_irqrestore(&lp->lock, flags);
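Lines 275-335 queue one frame: map the payload first (so failure costs nothing), then take lp->lock with interrupts off because the interrupt handler shares the ring indices, fill the slot after eol_tx, clear SONIC_EOL on the old tail so the chip chains onward, and stop the queue when the next slot is still occupied. A sketch of that skeleton; the descriptor writes through sonic_tda_put() and the short-frame padding are elided:

static netdev_tx_t my_send_packet(struct sk_buff *skb, struct net_device *dev)
{
        struct sonic_local *lp = netdev_priv(dev);
        unsigned int length = skb->len;
        unsigned long flags;
        dma_addr_t laddr;
        int entry;

        laddr = dma_map_single(lp->device, skb->data, length, DMA_TO_DEVICE);
        if (dma_mapping_error(lp->device, laddr)) {
                dev_kfree_skb_any(skb);
                return NETDEV_TX_OK;    /* drop; nothing the stack can retry */
        }

        spin_lock_irqsave(&lp->lock, flags);    /* ring shared with the ISR */

        entry = (lp->eol_tx + 1) & SONIC_TDS_MASK;
        /* ...fill descriptor 'entry' with sonic_tda_put(), then clear
         * SONIC_EOL on the old tail so the chip chains into it... */

        lp->tx_len[entry] = length;
        lp->tx_laddr[entry] = laddr;
        lp->tx_skb[entry] = skb;
        lp->eol_tx = entry;

        /* If the slot after the new tail still holds an skb, the ring is full. */
        entry = (entry + 1) & SONIC_TDS_MASK;
        if (lp->tx_skb[entry])
                netif_stop_queue(dev);

        spin_unlock_irqrestore(&lp->lock, flags);
        return NETDEV_TX_OK;
}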
347 struct sonic_local *lp = netdev_priv(dev);
356 spin_lock_irqsave(&lp->lock, flags);
360 spin_unlock_irqrestore(&lp->lock, flags);
369 netif_dbg(lp, intr, dev, "%s: packet rx\n", __func__);
374 int entry = lp->cur_tx;
386 netif_dbg(lp, intr, dev, "%s: tx done\n", __func__);
388 while (lp->tx_skb[entry] != NULL) {
393 lp->stats.tx_packets++;
394 lp->stats.tx_bytes += sonic_tda_get(dev, entry, SONIC_TD_PKTSIZE);
398 lp->stats.tx_aborted_errors++;
401 lp->stats.tx_carrier_errors++;
403 lp->stats.tx_window_errors++;
405 lp->stats.tx_fifo_errors++;
409 dev_consume_skb_irq(lp->tx_skb[entry]);
410 lp->tx_skb[entry] = NULL;
412 dma_unmap_single(lp->device, lp->tx_laddr[entry], lp->tx_len[entry], DMA_TO_DEVICE);
413 lp->tx_laddr[entry] = (dma_addr_t)0;
423 if (freed_some || lp->tx_skb[entry] == NULL)
425 lp->cur_tx = entry;
432 netif_dbg(lp, rx_err, dev, "%s: rx fifo overrun\n",
436 netif_dbg(lp, rx_err, dev, "%s: rx descriptors exhausted\n",
440 netif_dbg(lp, rx_err, dev, "%s: rx buffer area exceeded\n",
446 lp->stats.rx_frame_errors += 65536;
448 lp->stats.rx_crc_errors += 65536;
450 lp->stats.rx_missed_errors += 65536;
456 netif_dbg(lp, tx_err, dev, "%s: TXER intr, TCR %04x\n",
479 spin_unlock_irqrestore(&lp->lock, flags);
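Lines 347-479 are the interrupt handler, which does all of its ring bookkeeping under spin_lock_irqsave(&lp->lock, ...). The transmit side walks lp->cur_tx, reaping descriptors whose status word is nonzero: good frames feed tx_packets/tx_bytes, failed ones the per-cause error counters, and each reaped skb is released with dev_consume_skb_irq() and unmapped. A sketch of that reap loop; the SONIC_TCR_* bit names follow the driver's header and the error grouping is abbreviated:

/* Reap finished transmit descriptors; caller holds lp->lock. */
static void my_reap_tx(struct net_device *dev)
{
        struct sonic_local *lp = netdev_priv(dev);
        int entry = lp->cur_tx;
        bool freed_some = false;

        while (lp->tx_skb[entry]) {
                int status = sonic_tda_get(dev, entry, SONIC_TD_STATUS);

                if (status == 0)        /* chip has not finished this one yet */
                        break;
                if (status & SONIC_TCR_PTX) {
                        lp->stats.tx_packets++;
                        lp->stats.tx_bytes += sonic_tda_get(dev, entry, SONIC_TD_PKTSIZE);
                } else {
                        if (status & SONIC_TCR_EXD)
                                lp->stats.tx_aborted_errors++;
                        if (status & SONIC_TCR_NCRS)
                                lp->stats.tx_carrier_errors++;
                        /* ...window and FIFO errors counted the same way... */
                }

                dev_consume_skb_irq(lp->tx_skb[entry]);
                lp->tx_skb[entry] = NULL;
                dma_unmap_single(lp->device, lp->tx_laddr[entry],
                                 lp->tx_len[entry], DMA_TO_DEVICE);
                lp->tx_laddr[entry] = (dma_addr_t)0;
                freed_some = true;

                entry = (entry + 1) & SONIC_TDS_MASK;
        }

        /* Room again: undo a netif_stop_queue() from the transmit path. */
        if (freed_some || !lp->tx_skb[entry])
                netif_wake_queue(dev);
        lp->cur_tx = entry;
}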
485 static int index_from_addr(struct sonic_local *lp, dma_addr_t addr,
492 if (addr == lp->rx_laddr[i])
500 static bool sonic_alloc_rb(struct net_device *dev, struct sonic_local *lp,
507 if (SONIC_BUS_SCALE(lp->dma_bitmode) == 2)
510 *new_addr = dma_map_single(lp->device, skb_put(*new_skb, SONIC_RBSIZE),
512 if (dma_mapping_error(lp->device, *new_addr)) {
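Lines 485-512 are the receive helpers: index_from_addr() turns a buffer's DMA address back into its ring slot, and sonic_alloc_rb() produces a freshly mapped replacement buffer before the filled one is handed upstream. The allocator can be reconstructed almost entirely from the matches above; a sketch:

/* Allocate and DMA-map one replacement receive buffer.
 * Returns false (leaving the ring untouched) on failure. */
static bool my_alloc_rb(struct net_device *dev, struct sonic_local *lp,
                        struct sk_buff **new_skb, dma_addr_t *new_addr)
{
        *new_skb = netdev_alloc_skb(dev, SONIC_RBSIZE + 2);
        if (!*new_skb)
                return false;

        if (SONIC_BUS_SCALE(lp->dma_bitmode) == 2)
                skb_reserve(*new_skb, 2);       /* IP header alignment */

        *new_addr = dma_map_single(lp->device, skb_put(*new_skb, SONIC_RBSIZE),
                                   SONIC_RBSIZE, DMA_FROM_DEVICE);
        if (dma_mapping_error(lp->device, *new_addr)) {
                dev_kfree_skb(*new_skb);
                *new_skb = NULL;
                return false;
        }
        return true;
}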
522 static void sonic_update_rra(struct net_device *dev, struct sonic_local *lp,
557 struct sonic_local *lp = netdev_priv(dev);
558 int entry = lp->cur_rx;
559 int prev_entry = lp->eol_rx;
572 int i = index_from_addr(lp, addr, entry);
579 if (sonic_alloc_rb(dev, lp, &new_skb, &new_laddr)) {
580 struct sk_buff *used_skb = lp->rx_skb[i];
584 dma_unmap_single(lp->device, addr, SONIC_RBSIZE,
593 lp->stats.rx_packets++;
594 lp->stats.rx_bytes += pkt_len;
596 lp->rx_skb[i] = new_skb;
597 lp->rx_laddr[i] = new_laddr;
601 lp->stats.rx_dropped++;
607 sonic_update_rra(dev, lp, addr, new_laddr);
619 lp->cur_rx = entry;
621 if (prev_entry != lp->eol_rx) {
625 sonic_rda_put(dev, lp->eol_rx, SONIC_RD_LINK, ~SONIC_EOL &
626 sonic_rda_get(dev, lp->eol_rx, SONIC_RD_LINK));
627 lp->eol_rx = prev_entry;
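Lines 557-627 drain the receive ring: each completed buffer is resolved to its slot, swapped for a fresh buffer when sonic_alloc_rb() succeeds, or recycled in place with rx_dropped bumped when it fails; sonic_update_rra() (line 522) then rewrites the resource entry with whichever address stays in service, and the EOL marker is advanced behind the hardware. A sketch of the per-descriptor swap-or-drop step; addr, pkt_len and i stand for values read from the descriptor and index_from_addr():

static void my_rx_one(struct net_device *dev, struct sonic_local *lp,
                      int i, dma_addr_t addr, int pkt_len)
{
        struct sk_buff *new_skb;
        dma_addr_t new_laddr;

        if (sonic_alloc_rb(dev, lp, &new_skb, &new_laddr)) {
                struct sk_buff *used_skb = lp->rx_skb[i];

                /* Hand the filled buffer to the stack... */
                dma_unmap_single(lp->device, addr, SONIC_RBSIZE, DMA_FROM_DEVICE);
                skb_trim(used_skb, pkt_len);
                used_skb->protocol = eth_type_trans(used_skb, dev);
                netif_rx(used_skb);
                lp->stats.rx_packets++;
                lp->stats.rx_bytes += pkt_len;

                /* ...and park the fresh one in its slot. */
                lp->rx_skb[i] = new_skb;
                lp->rx_laddr[i] = new_laddr;
        } else {
                new_laddr = addr;       /* no memory: keep the old buffer in service */
                lp->stats.rx_dropped++;
        }
        /* Either way the RRA entry is rewritten with the buffer to reuse. */
        sonic_update_rra(dev, lp, addr, new_laddr);
}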
641 struct sonic_local *lp = netdev_priv(dev);
644 lp->stats.rx_crc_errors += SONIC_READ(SONIC_CRCT);
646 lp->stats.rx_frame_errors += SONIC_READ(SONIC_FAET);
648 lp->stats.rx_missed_errors += SONIC_READ(SONIC_MPT);
651 return &lp->stats;
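Lines 641-651 merge the chip's hardware tally counters (CRC, frame alignment, missed packets) into the software stats on each read; this pairs with the += 65536 at lines 446-450, where the interrupt handler accounts a full 16-bit counter wrap. A sketch; rearming each counter by writing 0xffff back is an assumption, since those writes do not match lp:

static struct net_device_stats *my_get_stats(struct net_device *dev)
{
        struct sonic_local *lp = netdev_priv(dev);

        /* Read and rearm each hardware tally counter. */
        lp->stats.rx_crc_errors += SONIC_READ(SONIC_CRCT);
        SONIC_WRITE(SONIC_CRCT, 0xffff);
        lp->stats.rx_frame_errors += SONIC_READ(SONIC_FAET);
        SONIC_WRITE(SONIC_FAET, 0xffff);
        lp->stats.rx_missed_errors += SONIC_READ(SONIC_MPT);
        SONIC_WRITE(SONIC_MPT, 0xffff);

        return &lp->stats;
}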
660 struct sonic_local *lp = netdev_priv(dev);
678 netif_dbg(lp, ifup, dev, "%s: mc_count %d\n", __func__,
691 SONIC_WRITE(SONIC_CDP, lp->cda_laddr & 0xffff);
694 spin_lock_irqsave(&lp->lock, flags);
698 spin_unlock_irqrestore(&lp->lock, flags);
702 netif_dbg(lp, ifup, dev, "%s: setting RCR=%x\n", __func__, rcr);
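Lines 660-702 compute the receive mode: promiscuous or accept-all-multicast sets a mode bit in RCR, otherwise the multicast list is loaded into the CAM by pointing SONIC_CDP at lp->cda_laddr and issuing a load-CAM command under the lock (lines 691-698). A sketch of the flag-to-RCR mapping; the SONIC_RCR_* bit names and the 15-entry CAM limit are assumptions:

static void my_set_rx_mode(struct net_device *dev)
{
        struct sonic_local *lp = netdev_priv(dev);
        unsigned int rcr = SONIC_READ(SONIC_RCR) &
                           ~(SONIC_RCR_PRO | SONIC_RCR_AMC);

        if (dev->flags & IFF_PROMISC)
                rcr |= SONIC_RCR_PRO;           /* accept everything */
        else if ((dev->flags & IFF_ALLMULTI) || netdev_mc_count(dev) > 15)
                rcr |= SONIC_RCR_AMC;           /* accept all multicast */
        /* else: load the addresses into the CAM and issue the load-CAM
         * command under lp->lock, as at lines 691-698 */

        netif_dbg(lp, ifup, dev, "%s: setting RCR=%x\n", __func__, rcr);
        SONIC_WRITE(SONIC_RCR, rcr);
}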
713 struct sonic_local *lp = netdev_priv(dev);
738 netif_dbg(lp, ifup, dev, "%s: initialize receive resource area\n",
742 u16 bufadr_l = (unsigned long)lp->rx_laddr[i] & 0xffff;
743 u16 bufadr_h = (unsigned long)lp->rx_laddr[i] >> 16;
755 SONIC_WRITE(SONIC_URRA, lp->rra_laddr >> 16);
756 SONIC_WRITE(SONIC_EOBC, (SONIC_RBSIZE >> 1) - (lp->dma_bitmode ? 2 : 1));
759 netif_dbg(lp, ifup, dev, "%s: issuing RRRA command\n", __func__);
769 netif_dbg(lp, ifup, dev, "%s: initialize receive descriptors\n",
780 lp->rda_laddr +
781 ((i+1) * SIZEOF_SONIC_RD * SONIC_BUS_SCALE(lp->dma_bitmode)));
785 (lp->rda_laddr & 0xffff) | SONIC_EOL);
786 lp->eol_rx = SONIC_NUM_RDS - 1;
787 lp->cur_rx = 0;
788 SONIC_WRITE(SONIC_URDA, lp->rda_laddr >> 16);
789 SONIC_WRITE(SONIC_CRDA, lp->rda_laddr & 0xffff);
794 netif_dbg(lp, ifup, dev, "%s: initialize transmit descriptors\n",
803 (lp->tda_laddr & 0xffff) +
804 (i + 1) * SIZEOF_SONIC_TD * SONIC_BUS_SCALE(lp->dma_bitmode));
805 lp->tx_skb[i] = NULL;
809 (lp->tda_laddr & 0xffff));
811 SONIC_WRITE(SONIC_UTDA, lp->tda_laddr >> 16);
812 SONIC_WRITE(SONIC_CTDA, lp->tda_laddr & 0xffff);
813 lp->cur_tx = 0;
814 lp->eol_tx = SONIC_NUM_TDS - 1;
830 SONIC_WRITE(SONIC_CDP, lp->cda_laddr & 0xffff);
849 netif_dbg(lp, ifup, dev, "%s: new status=%x\n", __func__,
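Lines 713-849 reinitialize the chip: the RRA is filled from lp->rx_laddr[] split into 16-bit halves (lines 742-743), URRA/EOBC and the receive ring registers are programmed, and both descriptor rings are chained by writing each descriptor's link field with the bus address of the next one, the tail linking back to the head. A sketch of the transmit-ring chaining; sonic_tda_put() writes one field of descriptor i, and the SONIC_TD_CONFIG clear is an assumption:

static void my_init_tx_ring(struct net_device *dev)
{
        struct sonic_local *lp = netdev_priv(dev);
        int i;

        for (i = 0; i < SONIC_NUM_TDS; i++) {
                sonic_tda_put(dev, i, SONIC_TD_STATUS, 0);
                sonic_tda_put(dev, i, SONIC_TD_CONFIG, 0);      /* assumed */
                sonic_tda_put(dev, i, SONIC_TD_LINK,
                              (lp->tda_laddr & 0xffff) +
                              (i + 1) * SIZEOF_SONIC_TD *
                              SONIC_BUS_SCALE(lp->dma_bitmode));
                lp->tx_skb[i] = NULL;
        }
        /* The tail links back to the head of the ring. */
        sonic_tda_put(dev, SONIC_NUM_TDS - 1, SONIC_TD_LINK,
                      (lp->tda_laddr & 0xffff));

        SONIC_WRITE(SONIC_UTDA, lp->tda_laddr >> 16);    /* upper 16 address bits */
        SONIC_WRITE(SONIC_CTDA, lp->tda_laddr & 0xffff); /* lower 16 address bits */
        lp->cur_tx = 0;
        lp->eol_tx = SONIC_NUM_TDS - 1;
}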