Lines matching full:card (identifier search for "card" in the NICStAR ATM driver, drivers/atm/nicstar.c; each entry gives the source line number, the matching code, and the enclosing function or usage context)
26 * 1 - Per card interrupt spinlock (to protect structures and such)
28 * 3 - Per card resource spinlock (to access registers, etc.)
105 #define CMD_BUSY(card) (readl((card)->membase + STAT) & NS_STAT_CMDBZ) argument
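The two per-card spinlocks described above split duties: int_lock serializes the interrupt path, while res_lock guards the CMD/DR0-DR3 command window that CMD_BUSY() polls. Below is a minimal sketch of the command-issue pattern shared by the ns_read_sram, push_rxbufs and ns_phy_get fragments further down; the helper name ns_issue_cmd is hypothetical, the driver open-codes this sequence at each call site:

static u32 ns_issue_cmd(ns_dev *card, u32 cmd)
{
	unsigned long flags;
	u32 data;

	/* res_lock, not int_lock, protects the CMD/DR register window */
	spin_lock_irqsave(&card->res_lock, flags);
	while (CMD_BUSY(card))
		;				/* let the previous command drain */
	writel(cmd, card->membase + CMD);	/* kick off the new command */
	while (CMD_BUSY(card))
		;				/* wait for completion */
	data = readl(card->membase + DR0);	/* results come back in DR0 */
	spin_unlock_irqrestore(&card->res_lock, flags);
	return data;
}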
120 static u32 ns_read_sram(ns_dev * card, u32 sram_address);
121 static void ns_write_sram(ns_dev * card, u32 sram_address, u32 * value,
124 static void ns_init_card_error(ns_dev * card, int error);
125 static scq_info *get_scq(ns_dev *card, int size, u32 scd);
126 static void free_scq(ns_dev *card, scq_info * scq, struct atm_vcc *vcc);
131 static void fill_tst(ns_dev * card, int n, vc_map * vc);
133 static int push_scqe(ns_dev * card, vc_map * vc, scq_info * scq, ns_scqe * tbd,
135 static void process_tsq(ns_dev * card);
136 static void drain_scq(ns_dev * card, scq_info * scq, int pos);
137 static void process_rsq(ns_dev * card);
138 static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe);
139 static void recycle_rx_buf(ns_dev * card, struct sk_buff *skb);
140 static void recycle_iovec_rx_bufs(ns_dev * card, struct iovec *iov, int count);
141 static void recycle_iov_buf(ns_dev * card, struct sk_buff *iovb);
142 static void dequeue_sm_buf(ns_dev * card, struct sk_buff *sb);
143 static void dequeue_lg_buf(ns_dev * card, struct sk_buff *lb);
147 static void which_list(ns_dev * card, struct sk_buff *skb);
199 ns_dev *card = pci_get_drvdata(pcidev); in nicstar_remove_one() local
205 i = card->index; in nicstar_remove_one()
210 if (card->atmdev->phy && card->atmdev->phy->stop) in nicstar_remove_one()
211 card->atmdev->phy->stop(card->atmdev); in nicstar_remove_one()
214 writel(0x00000000, card->membase + CFG); in nicstar_remove_one()
217 atm_dev_deregister(card->atmdev); in nicstar_remove_one()
224 PRINTK("nicstar%d: freeing %d huge buffers.\n", i, card->hbpool.count); in nicstar_remove_one()
225 while ((hb = skb_dequeue(&card->hbpool.queue)) != NULL) { in nicstar_remove_one()
232 card->iovpool.count); in nicstar_remove_one()
233 while ((iovb = skb_dequeue(&card->iovpool.queue)) != NULL) { in nicstar_remove_one()
238 while ((lb = skb_dequeue(&card->lbpool.queue)) != NULL) in nicstar_remove_one()
240 while ((sb = skb_dequeue(&card->sbpool.queue)) != NULL) in nicstar_remove_one()
242 free_scq(card, card->scq0, NULL); in nicstar_remove_one()
244 if (card->scd2vc[j] != NULL) in nicstar_remove_one()
245 free_scq(card, card->scd2vc[j]->scq, card->scd2vc[j]->tx_vcc); in nicstar_remove_one()
247 idr_destroy(&card->idr); in nicstar_remove_one()
248 dma_free_coherent(&card->pcidev->dev, NS_RSQSIZE + NS_RSQ_ALIGNMENT, in nicstar_remove_one()
249 card->rsq.org, card->rsq.dma); in nicstar_remove_one()
250 dma_free_coherent(&card->pcidev->dev, NS_TSQSIZE + NS_TSQ_ALIGNMENT, in nicstar_remove_one()
251 card->tsq.org, card->tsq.dma); in nicstar_remove_one()
252 free_irq(card->pcidev->irq, card); in nicstar_remove_one()
253 iounmap(card->membase); in nicstar_remove_one()
254 kfree(card); in nicstar_remove_one()
307 static u32 ns_read_sram(ns_dev * card, u32 sram_address) in ns_read_sram() argument
314 spin_lock_irqsave(&card->res_lock, flags); in ns_read_sram()
315 while (CMD_BUSY(card)) ; in ns_read_sram()
316 writel(sram_address, card->membase + CMD); in ns_read_sram()
317 while (CMD_BUSY(card)) ; in ns_read_sram()
318 data = readl(card->membase + DR0); in ns_read_sram()
319 spin_unlock_irqrestore(&card->res_lock, flags); in ns_read_sram()
323 static void ns_write_sram(ns_dev * card, u32 sram_address, u32 * value, in ns_write_sram() argument
331 spin_lock_irqsave(&card->res_lock, flags); in ns_write_sram()
332 while (CMD_BUSY(card)) ; in ns_write_sram()
334 writel(*(value++), card->membase + i); in ns_write_sram()
336 so card->membase + DR0 == card->membase */ in ns_write_sram()
340 writel(sram_address, card->membase + CMD); in ns_write_sram()
341 spin_unlock_irqrestore(&card->res_lock, flags); in ns_write_sram()
347 struct ns_dev *card = NULL; in ns_init_card() local
361 ns_init_card_error(card, error); in ns_init_card()
368 ns_init_card_error(card, error); in ns_init_card()
372 card = kmalloc(sizeof(*card), GFP_KERNEL); in ns_init_card()
373 if (!card) { in ns_init_card()
378 ns_init_card_error(card, error); in ns_init_card()
381 cards[i] = card; in ns_init_card()
382 spin_lock_init(&card->int_lock); in ns_init_card()
383 spin_lock_init(&card->res_lock); in ns_init_card()
385 pci_set_drvdata(pcidev, card); in ns_init_card()
387 card->index = i; in ns_init_card()
388 card->atmdev = NULL; in ns_init_card()
389 card->pcidev = pcidev; in ns_init_card()
391 card->membase = ioremap(membase, NS_IOREMAP_SIZE); in ns_init_card()
392 if (!card->membase) { in ns_init_card()
395 ns_init_card_error(card, error); in ns_init_card()
398 PRINTK("nicstar%d: membase at 0x%p.\n", i, card->membase); in ns_init_card()
405 ns_init_card_error(card, error); in ns_init_card()
422 ns_init_card_error(card, error); in ns_init_card()
429 data = readl(card->membase + STAT); in ns_init_card()
431 writel(NS_STAT_TMROF, card->membase + STAT); in ns_init_card()
434 writel(NS_CFG_SWRST, card->membase + CFG); in ns_init_card()
436 writel(0x00000000, card->membase + CFG); in ns_init_card()
439 writel(0x00000008, card->membase + GP); in ns_init_card()
441 writel(0x00000001, card->membase + GP); in ns_init_card()
443 while (CMD_BUSY(card)) ; in ns_init_card()
444 writel(NS_CMD_WRITE_UTILITY | 0x00000100, card->membase + CMD); /* Sync UTOPIA with SAR clock */ in ns_init_card()
448 while (CMD_BUSY(card)) ; in ns_init_card()
449 writel(NS_CMD_READ_UTILITY | 0x00000200, card->membase + CMD); in ns_init_card()
450 while (CMD_BUSY(card)) ; in ns_init_card()
451 data = readl(card->membase + DR0); in ns_init_card()
455 card->max_pcr = ATM_25_PCR; in ns_init_card()
456 while (CMD_BUSY(card)) ; in ns_init_card()
457 writel(0x00000008, card->membase + DR0); in ns_init_card()
458 writel(NS_CMD_WRITE_UTILITY | 0x00000200, card->membase + CMD); in ns_init_card()
460 writel(NS_STAT_SFBQF, card->membase + STAT); in ns_init_card()
462 while (CMD_BUSY(card)) ; in ns_init_card()
463 writel(0x00000022, card->membase + DR0); in ns_init_card()
464 writel(NS_CMD_WRITE_UTILITY | 0x00000202, card->membase + CMD); in ns_init_card()
470 card->max_pcr = ATM_OC3_PCR; in ns_init_card()
472 while (CMD_BUSY(card)) ; in ns_init_card()
473 writel(0x00000002, card->membase + DR0); in ns_init_card()
474 writel(NS_CMD_WRITE_UTILITY | 0x00000205, card->membase + CMD); in ns_init_card()
480 ns_init_card_error(card, error); in ns_init_card()
483 writel(0x00000000, card->membase + GP); in ns_init_card()
487 ns_write_sram(card, 0x1C003, &data, 1); in ns_init_card()
489 ns_write_sram(card, 0x14003, &data, 1); in ns_init_card()
490 if (ns_read_sram(card, 0x14003) == 0x89ABCDEF && in ns_init_card()
491 ns_read_sram(card, 0x1C003) == 0x76543210) in ns_init_card()
492 card->sram_size = 128; in ns_init_card()
494 card->sram_size = 32; in ns_init_card()
495 PRINTK("nicstar%d: %dK x 32bit SRAM size.\n", i, card->sram_size); in ns_init_card()
497 card->rct_size = NS_MAX_RCTSIZE; in ns_init_card()
500 if (card->sram_size == 128) in ns_init_card()
505 if (card->sram_size == 32) { in ns_init_card()
509 card->rct_size = 4096; in ns_init_card()
515 card->vpibits = NS_VPIBITS; in ns_init_card()
516 if (card->rct_size == 4096) in ns_init_card()
517 card->vcibits = 12 - NS_VPIBITS; in ns_init_card()
518 else /* card->rct_size == 16384 */ in ns_init_card()
519 card->vcibits = 14 - NS_VPIBITS; in ns_init_card()
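The vpibits/vcibits split above fixes how a VPI/VCI pair is folded into a single index into card->vcmap and the receive connection table; the same expression recurs in the ns_open, ns_close and dequeue_rx fragments below. A small sketch (the helper name is hypothetical):

static unsigned int ns_vc_index(const ns_dev *card, unsigned int vpi,
				unsigned int vci)
{
	/* VPI occupies the high bits, VCI the low card->vcibits bits */
	return (vpi << card->vcibits) | vci;
}

With a 4096-entry RCT the card uses 12 bits in total, so if NS_VPIBITS were 2 then vcibits is 10 and VPI 1 / VCI 42 maps to entry (1 << 10) | 42 = 1066; the RCT word address is that index times NS_RCT_ENTRY_SIZE, as in the ns_close fragment at source line 1430.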
523 nicstar_init_eprom(card->membase); in ns_init_card()
526 writel(0x00000000, card->membase + VPM); in ns_init_card()
529 card->tsq.org = dma_alloc_coherent(&card->pcidev->dev, in ns_init_card()
531 &card->tsq.dma, GFP_KERNEL); in ns_init_card()
532 if (card->tsq.org == NULL) { in ns_init_card()
535 ns_init_card_error(card, error); in ns_init_card()
538 card->tsq.base = PTR_ALIGN(card->tsq.org, NS_TSQ_ALIGNMENT); in ns_init_card()
539 card->tsq.next = card->tsq.base; in ns_init_card()
540 card->tsq.last = card->tsq.base + (NS_TSQ_NUM_ENTRIES - 1); in ns_init_card()
542 ns_tsi_init(card->tsq.base + j); in ns_init_card()
543 writel(0x00000000, card->membase + TSQH); in ns_init_card()
544 writel(ALIGN(card->tsq.dma, NS_TSQ_ALIGNMENT), card->membase + TSQB); in ns_init_card()
545 PRINTK("nicstar%d: TSQ base at 0x%p.\n", i, card->tsq.base); in ns_init_card()
548 card->rsq.org = dma_alloc_coherent(&card->pcidev->dev, in ns_init_card()
550 &card->rsq.dma, GFP_KERNEL); in ns_init_card()
551 if (card->rsq.org == NULL) { in ns_init_card()
554 ns_init_card_error(card, error); in ns_init_card()
557 card->rsq.base = PTR_ALIGN(card->rsq.org, NS_RSQ_ALIGNMENT); in ns_init_card()
558 card->rsq.next = card->rsq.base; in ns_init_card()
559 card->rsq.last = card->rsq.base + (NS_RSQ_NUM_ENTRIES - 1); in ns_init_card()
561 ns_rsqe_init(card->rsq.base + j); in ns_init_card()
562 writel(0x00000000, card->membase + RSQH); in ns_init_card()
563 writel(ALIGN(card->rsq.dma, NS_RSQ_ALIGNMENT), card->membase + RSQB); in ns_init_card()
564 PRINTK("nicstar%d: RSQ base at 0x%p.\n", i, card->rsq.base); in ns_init_card()
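Both status queues follow the same over-allocate-then-align pattern: dma_alloc_coherent() hands back a buffer of queue size plus alignment slack, the raw pointer is kept in .org so it can be freed later (see the dma_free_coherent() calls in nicstar_remove_one), PTR_ALIGN() derives the aligned CPU-side base, and the equally aligned bus address is what gets programmed into TSQB/RSQB. A condensed sketch for the TSQ, assuming nothing changes when the steps are wrapped in a helper (ns_setup_tsq is a hypothetical name); the RSQ setup at source lines 548-564 is identical apart from names, and each entry is additionally cleared with ns_tsi_init()/ns_rsqe_init():

static int ns_setup_tsq(ns_dev *card)
{
	/* allocate NS_TSQSIZE plus slack so the base can be realigned */
	card->tsq.org = dma_alloc_coherent(&card->pcidev->dev,
					   NS_TSQSIZE + NS_TSQ_ALIGNMENT,
					   &card->tsq.dma, GFP_KERNEL);
	if (card->tsq.org == NULL)
		return -ENOMEM;
	/* keep .org for the eventual free; work from the aligned base */
	card->tsq.base = PTR_ALIGN(card->tsq.org, NS_TSQ_ALIGNMENT);
	card->tsq.next = card->tsq.base;
	card->tsq.last = card->tsq.base + (NS_TSQ_NUM_ENTRIES - 1);
	writel(0x00000000, card->membase + TSQH);
	/* the card must be given the same alignment on the bus address */
	writel(ALIGN(card->tsq.dma, NS_TSQ_ALIGNMENT), card->membase + TSQB);
	return 0;
}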
567 card->scq1 = NULL; in ns_init_card()
568 card->scq2 = NULL; in ns_init_card()
569 card->scq0 = get_scq(card, VBR_SCQSIZE, NS_VRSCD0); in ns_init_card()
570 if (card->scq0 == NULL) { in ns_init_card()
573 ns_init_card_error(card, error); in ns_init_card()
576 u32d[0] = scq_virt_to_bus(card->scq0, card->scq0->base); in ns_init_card()
580 ns_write_sram(card, NS_VRSCD0, u32d, 4); in ns_init_card()
581 ns_write_sram(card, NS_VRSCD1, u32d, 4); /* These last two won't be used */ in ns_init_card()
582 ns_write_sram(card, NS_VRSCD2, u32d, 4); /* but are initialized, just in case... */ in ns_init_card()
583 card->scq0->scd = NS_VRSCD0; in ns_init_card()
584 PRINTK("nicstar%d: VBR-SCQ0 base at 0x%p.\n", i, card->scq0->base); in ns_init_card()
587 card->tst_addr = NS_TST0; in ns_init_card()
588 card->tst_free_entries = NS_TST_NUM_ENTRIES; in ns_init_card()
591 ns_write_sram(card, NS_TST0 + j, &data, 1); in ns_init_card()
593 ns_write_sram(card, NS_TST0 + NS_TST_NUM_ENTRIES, &data, 1); in ns_init_card()
595 ns_write_sram(card, NS_TST1 + j, &data, 1); in ns_init_card()
597 ns_write_sram(card, NS_TST1 + NS_TST_NUM_ENTRIES, &data, 1); in ns_init_card()
599 card->tste2vc[j] = NULL; in ns_init_card()
600 writel(NS_TST0 << 2, card->membase + TSTB); in ns_init_card()
611 for (j = 0; j < card->rct_size; j++) in ns_init_card()
612 ns_write_sram(card, j * 4, u32d, 4); in ns_init_card()
614 memset(card->vcmap, 0, sizeof(card->vcmap)); in ns_init_card()
617 card->scd2vc[j] = NULL; in ns_init_card()
620 card->sbnr.min = MIN_SB; in ns_init_card()
621 card->sbnr.init = NUM_SB; in ns_init_card()
622 card->sbnr.max = MAX_SB; in ns_init_card()
623 card->lbnr.min = MIN_LB; in ns_init_card()
624 card->lbnr.init = NUM_LB; in ns_init_card()
625 card->lbnr.max = MAX_LB; in ns_init_card()
626 card->iovnr.min = MIN_IOVB; in ns_init_card()
627 card->iovnr.init = NUM_IOVB; in ns_init_card()
628 card->iovnr.max = MAX_IOVB; in ns_init_card()
629 card->hbnr.min = MIN_HB; in ns_init_card()
630 card->hbnr.init = NUM_HB; in ns_init_card()
631 card->hbnr.max = MAX_HB; in ns_init_card()
633 card->sm_handle = NULL; in ns_init_card()
634 card->sm_addr = 0x00000000; in ns_init_card()
635 card->lg_handle = NULL; in ns_init_card()
636 card->lg_addr = 0x00000000; in ns_init_card()
638 card->efbie = 1; /* To prevent push_rxbufs from enabling the interrupt */ in ns_init_card()
640 idr_init(&card->idr); in ns_init_card()
643 skb_queue_head_init(&card->hbpool.queue); in ns_init_card()
644 card->hbpool.count = 0; in ns_init_card()
653 ns_init_card_error(card, error); in ns_init_card()
657 skb_queue_tail(&card->hbpool.queue, hb); in ns_init_card()
658 card->hbpool.count++; in ns_init_card()
662 skb_queue_head_init(&card->lbpool.queue); in ns_init_card()
663 card->lbpool.count = 0; /* Not used */ in ns_init_card()
672 ns_init_card_error(card, error); in ns_init_card()
676 skb_queue_tail(&card->lbpool.queue, lb); in ns_init_card()
678 push_rxbufs(card, lb); in ns_init_card()
681 card->rcbuf = lb; in ns_init_card()
682 card->rawcell = (struct ns_rcqe *) lb->data; in ns_init_card()
683 card->rawch = NS_PRV_DMA(lb); in ns_init_card()
688 ns_stat_lfbqc_get(readl(card->membase + STAT))) < card->lbnr.min) { in ns_init_card()
693 ns_init_card_error(card, error); in ns_init_card()
698 skb_queue_head_init(&card->sbpool.queue); in ns_init_card()
699 card->sbpool.count = 0; /* Not used */ in ns_init_card()
708 ns_init_card_error(card, error); in ns_init_card()
712 skb_queue_tail(&card->sbpool.queue, sb); in ns_init_card()
714 push_rxbufs(card, sb); in ns_init_card()
718 ns_stat_sfbqc_get(readl(card->membase + STAT))) < card->sbnr.min) { in ns_init_card()
723 ns_init_card_error(card, error); in ns_init_card()
728 skb_queue_head_init(&card->iovpool.queue); in ns_init_card()
729 card->iovpool.count = 0; in ns_init_card()
738 ns_init_card_error(card, error); in ns_init_card()
742 skb_queue_tail(&card->iovpool.queue, iovb); in ns_init_card()
743 card->iovpool.count++; in ns_init_card()
747 if (card->rct_size == 4096) in ns_init_card()
749 else /* (card->rct_size == 16384) */ in ns_init_card()
752 card->efbie = 1; in ns_init_card()
754 card->intcnt = 0; in ns_init_card()
756 (pcidev->irq, &ns_irq_handler, IRQF_SHARED, "nicstar", card) != 0) { in ns_init_card()
759 ns_init_card_error(card, error); in ns_init_card()
764 card->atmdev = atm_dev_register("nicstar", &card->pcidev->dev, &atm_ops, in ns_init_card()
766 if (card->atmdev == NULL) { in ns_init_card()
769 ns_init_card_error(card, error); in ns_init_card()
773 if (mac[i] == NULL || !mac_pton(mac[i], card->atmdev->esi)) { in ns_init_card()
774 nicstar_read_eprom(card->membase, NICSTAR_EPROM_MAC_ADDR_OFFSET, in ns_init_card()
775 card->atmdev->esi, 6); in ns_init_card()
776 if (ether_addr_equal(card->atmdev->esi, "\x00\x00\x00\x00\x00\x00")) { in ns_init_card()
777 nicstar_read_eprom(card->membase, in ns_init_card()
779 card->atmdev->esi, 6); in ns_init_card()
783 printk("nicstar%d: MAC address %pM\n", i, card->atmdev->esi); in ns_init_card()
785 card->atmdev->dev_data = card; in ns_init_card()
786 card->atmdev->ci_range.vpi_bits = card->vpibits; in ns_init_card()
787 card->atmdev->ci_range.vci_bits = card->vcibits; in ns_init_card()
788 card->atmdev->link_rate = card->max_pcr; in ns_init_card()
789 card->atmdev->phy = NULL; in ns_init_card()
792 if (card->max_pcr == ATM_OC3_PCR) in ns_init_card()
793 suni_init(card->atmdev); in ns_init_card()
797 if (card->max_pcr == ATM_25_PCR) in ns_init_card()
798 idt77105_init(card->atmdev); in ns_init_card()
801 if (card->atmdev->phy && card->atmdev->phy->start) in ns_init_card()
802 card->atmdev->phy->start(card->atmdev); in ns_init_card()
806 NS_CFG_PHYIE, card->membase + CFG); in ns_init_card()
813 static void ns_init_card_error(ns_dev *card, int error) in ns_init_card_error() argument
816 writel(0x00000000, card->membase + CFG); in ns_init_card_error()
820 while ((iovb = skb_dequeue(&card->iovpool.queue)) != NULL) in ns_init_card_error()
825 while ((sb = skb_dequeue(&card->sbpool.queue)) != NULL) in ns_init_card_error()
827 free_scq(card, card->scq0, NULL); in ns_init_card_error()
831 while ((lb = skb_dequeue(&card->lbpool.queue)) != NULL) in ns_init_card_error()
836 while ((hb = skb_dequeue(&card->hbpool.queue)) != NULL) in ns_init_card_error()
840 kfree(card->rsq.org); in ns_init_card_error()
843 kfree(card->tsq.org); in ns_init_card_error()
846 free_irq(card->pcidev->irq, card); in ns_init_card_error()
849 iounmap(card->membase); in ns_init_card_error()
852 pci_disable_device(card->pcidev); in ns_init_card_error()
853 kfree(card); in ns_init_card_error()
857 static scq_info *get_scq(ns_dev *card, int size, u32 scd) in get_scq() argument
868 scq->org = dma_alloc_coherent(&card->pcidev->dev, in get_scq()
878 dma_free_coherent(&card->pcidev->dev, in get_scq()
902 static void free_scq(ns_dev *card, scq_info *scq, struct atm_vcc *vcc) in free_scq() argument
933 dma_free_coherent(&card->pcidev->dev, in free_scq()
942 static void push_rxbufs(ns_dev * card, struct sk_buff *skb) in push_rxbufs() argument
954 addr1 = dma_map_single(&card->pcidev->dev, in push_rxbufs()
964 card->index); in push_rxbufs()
967 stat = readl(card->membase + STAT); in push_rxbufs()
968 card->sbfqc = ns_stat_sfbqc_get(stat); in push_rxbufs()
969 card->lbfqc = ns_stat_lfbqc_get(stat); in push_rxbufs()
972 if (card->sm_addr) { in push_rxbufs()
973 addr2 = card->sm_addr; in push_rxbufs()
974 handle2 = card->sm_handle; in push_rxbufs()
975 card->sm_addr = 0x00000000; in push_rxbufs()
976 card->sm_handle = NULL; in push_rxbufs()
979 card->sm_addr = addr1; in push_rxbufs()
980 card->sm_handle = handle1; in push_rxbufs()
986 if (card->lg_addr) { in push_rxbufs()
987 addr2 = card->lg_addr; in push_rxbufs()
988 handle2 = card->lg_handle; in push_rxbufs()
989 card->lg_addr = 0x00000000; in push_rxbufs()
990 card->lg_handle = NULL; in push_rxbufs()
993 card->lg_addr = addr1; in push_rxbufs()
994 card->lg_handle = handle1; in push_rxbufs()
1001 if (card->sbfqc >= card->sbnr.max) { in push_rxbufs()
1002 skb_unlink(handle1, &card->sbpool.queue); in push_rxbufs()
1004 skb_unlink(handle2, &card->sbpool.queue); in push_rxbufs()
1008 card->sbfqc += 2; in push_rxbufs()
1011 if (card->lbfqc >= card->lbnr.max) { in push_rxbufs()
1012 skb_unlink(handle1, &card->lbpool.queue); in push_rxbufs()
1014 skb_unlink(handle2, &card->lbpool.queue); in push_rxbufs()
1018 card->lbfqc += 2; in push_rxbufs()
1021 id1 = idr_alloc(&card->idr, handle1, 0, 0, GFP_ATOMIC); in push_rxbufs()
1025 id2 = idr_alloc(&card->idr, handle2, 0, 0, GFP_ATOMIC); in push_rxbufs()
1029 spin_lock_irqsave(&card->res_lock, flags); in push_rxbufs()
1030 while (CMD_BUSY(card)) ; in push_rxbufs()
1031 writel(addr2, card->membase + DR3); in push_rxbufs()
1032 writel(id2, card->membase + DR2); in push_rxbufs()
1033 writel(addr1, card->membase + DR1); in push_rxbufs()
1034 writel(id1, card->membase + DR0); in push_rxbufs()
1036 card->membase + CMD); in push_rxbufs()
1037 spin_unlock_irqrestore(&card->res_lock, flags); in push_rxbufs()
1040 card->index, in push_rxbufs()
1045 if (!card->efbie && card->sbfqc >= card->sbnr.min && in push_rxbufs()
1046 card->lbfqc >= card->lbnr.min) { in push_rxbufs()
1047 card->efbie = 1; in push_rxbufs()
1048 writel((readl(card->membase + CFG) | NS_CFG_EFBIE), in push_rxbufs()
1049 card->membase + CFG); in push_rxbufs()
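push_rxbufs() cannot hand the hardware a kernel pointer, so each receive buffer's skb is mapped to a small integer with idr_alloc(), and that id, paired with the buffer's bus address, is what goes out through DR0-DR3; on the receive side the id comes back in the RSQ entry (or via the raw cell queue) and idr_find()/idr_remove() recover the skb, as the ns_irq_handler and dequeue_rx fragments below show. Two hypothetical helpers condensing the round trip the driver does inline:

static int ns_buf_to_id(ns_dev *card, struct sk_buff *skb)
{
	/* GFP_ATOMIC because this runs in atomic context (push_rxbufs) */
	return idr_alloc(&card->idr, skb, 0, 0, GFP_ATOMIC);
}

static struct sk_buff *ns_id_to_buf(ns_dev *card, int id)
{
	/* id taken from the receive status queue entry; drop the mapping */
	return idr_remove(&card->idr, id);
}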
1059 ns_dev *card; in ns_irq_handler() local
1063 card = (ns_dev *) dev_id; in ns_irq_handler()
1064 dev = card->atmdev; in ns_irq_handler()
1065 card->intcnt++; in ns_irq_handler()
1067 PRINTK("nicstar%d: NICStAR generated an interrupt\n", card->index); in ns_irq_handler()
1069 spin_lock_irqsave(&card->int_lock, flags); in ns_irq_handler()
1071 stat_r = readl(card->membase + STAT); in ns_irq_handler()
1075 TXPRINTK("nicstar%d: TSI interrupt\n", card->index); in ns_irq_handler()
1076 process_tsq(card); in ns_irq_handler()
1077 writel(NS_STAT_TSIF, card->membase + STAT); in ns_irq_handler()
1082 writel(NS_STAT_TXICP, card->membase + STAT); in ns_irq_handler()
1084 card->index); in ns_irq_handler()
1089 writel(NS_STAT_TSQF, card->membase + STAT); in ns_irq_handler()
1090 PRINTK("nicstar%d: TSQ full.\n", card->index); in ns_irq_handler()
1091 process_tsq(card); in ns_irq_handler()
1096 writel(NS_STAT_TMROF, card->membase + STAT); in ns_irq_handler()
1097 PRINTK("nicstar%d: Timer overflow.\n", card->index); in ns_irq_handler()
1102 writel(NS_STAT_PHYI, card->membase + STAT); in ns_irq_handler()
1103 PRINTK("nicstar%d: PHY interrupt.\n", card->index); in ns_irq_handler()
1111 writel(NS_STAT_SFBQF, card->membase + STAT); in ns_irq_handler()
1113 card->index); in ns_irq_handler()
1118 writel(NS_STAT_LFBQF, card->membase + STAT); in ns_irq_handler()
1120 card->index); in ns_irq_handler()
1125 writel(NS_STAT_RSQF, card->membase + STAT); in ns_irq_handler()
1126 printk("nicstar%d: RSQ full.\n", card->index); in ns_irq_handler()
1127 process_rsq(card); in ns_irq_handler()
1132 RXPRINTK("nicstar%d: End of CS-PDU received.\n", card->index); in ns_irq_handler()
1133 process_rsq(card); in ns_irq_handler()
1134 writel(NS_STAT_EOPDU, card->membase + STAT); in ns_irq_handler()
1139 writel(NS_STAT_RAWCF, card->membase + STAT); in ns_irq_handler()
1142 card->index); in ns_irq_handler()
1147 while (readl(card->membase + RAWCT) != card->rawch) { in ns_irq_handler()
1149 if (ns_rcqe_islast(card->rawcell)) { in ns_irq_handler()
1152 oldbuf = card->rcbuf; in ns_irq_handler()
1153 card->rcbuf = idr_find(&card->idr, in ns_irq_handler()
1154 ns_rcqe_nextbufhandle(card->rawcell)); in ns_irq_handler()
1155 card->rawch = NS_PRV_DMA(card->rcbuf); in ns_irq_handler()
1156 card->rawcell = (struct ns_rcqe *) in ns_irq_handler()
1157 card->rcbuf->data; in ns_irq_handler()
1158 recycle_rx_buf(card, oldbuf); in ns_irq_handler()
1160 card->rawch += NS_RCQE_SIZE; in ns_irq_handler()
1161 card->rawcell++; in ns_irq_handler()
1171 writel(NS_STAT_SFBQE, card->membase + STAT); in ns_irq_handler()
1173 card->index); in ns_irq_handler()
1174 for (i = 0; i < card->sbnr.min; i++) { in ns_irq_handler()
1177 writel(readl(card->membase + CFG) & in ns_irq_handler()
1178 ~NS_CFG_EFBIE, card->membase + CFG); in ns_irq_handler()
1179 card->efbie = 0; in ns_irq_handler()
1183 skb_queue_tail(&card->sbpool.queue, sb); in ns_irq_handler()
1185 push_rxbufs(card, sb); in ns_irq_handler()
1187 card->sbfqc = i; in ns_irq_handler()
1188 process_rsq(card); in ns_irq_handler()
1196 writel(NS_STAT_LFBQE, card->membase + STAT); in ns_irq_handler()
1198 card->index); in ns_irq_handler()
1199 for (i = 0; i < card->lbnr.min; i++) { in ns_irq_handler()
1202 writel(readl(card->membase + CFG) & in ns_irq_handler()
1203 ~NS_CFG_EFBIE, card->membase + CFG); in ns_irq_handler()
1204 card->efbie = 0; in ns_irq_handler()
1208 skb_queue_tail(&card->lbpool.queue, lb); in ns_irq_handler()
1210 push_rxbufs(card, lb); in ns_irq_handler()
1212 card->lbfqc = i; in ns_irq_handler()
1213 process_rsq(card); in ns_irq_handler()
1218 writel(NS_STAT_RSQAF, card->membase + STAT); in ns_irq_handler()
1219 RXPRINTK("nicstar%d: RSQ almost full.\n", card->index); in ns_irq_handler()
1220 process_rsq(card); in ns_irq_handler()
1223 spin_unlock_irqrestore(&card->int_lock, flags); in ns_irq_handler()
1224 PRINTK("nicstar%d: end of interrupt service\n", card->index); in ns_irq_handler()
1230 ns_dev *card; in ns_open() local
1245 card = (ns_dev *) vcc->dev->dev_data; in ns_open()
1246 PRINTK("nicstar%d: opening vpi.vci %d.%d \n", card->index, (int)vpi, in ns_open()
1249 PRINTK("nicstar%d: unsupported AAL.\n", card->index); in ns_open()
1253 vc = &(card->vcmap[vpi << card->vcibits | vci]); in ns_open()
1262 printk("nicstar%d: %s vci already in use.\n", card->index, in ns_open()
1282 card->index); in ns_open()
1292 card->index, vcc->qos.txtp.max_pcr); in ns_open()
1297 modl = tmpl % card->max_pcr; in ns_open()
1299 n = (int)(tmpl / card->max_pcr); in ns_open()
1305 (card->tst_free_entries - in ns_open()
1309 card->index); in ns_open()
1319 card->index); in ns_open()
1325 if (n > (card->tst_free_entries - NS_TST_RESERVED)) { in ns_open()
1328 card->index); in ns_open()
1333 card->tst_free_entries -= n; in ns_open()
1336 card->index, n); in ns_open()
1338 if (card->scd2vc[frscdi] == NULL) { in ns_open()
1339 card->scd2vc[frscdi] = vc; in ns_open()
1346 card->index); in ns_open()
1347 card->tst_free_entries += n; in ns_open()
1355 scq = get_scq(card, CBR_SCQSIZE, vc->cbr_scd); in ns_open()
1358 card->index); in ns_open()
1359 card->scd2vc[frscdi] = NULL; in ns_open()
1360 card->tst_free_entries += n; in ns_open()
1370 ns_write_sram(card, vc->cbr_scd, u32d, 4); in ns_open()
1372 fill_tst(card, n, vc); in ns_open()
1375 vc->scq = card->scq0; in ns_open()
1398 ns_write_sram(card, in ns_open()
1400 (vpi << card->vcibits | vci) * in ns_open()
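The modl/n arithmetic near source lines 1297-1299 sizes a CBR connection's share of the transmit schedule table: the TST holds NS_TST_NUM_ENTRIES cell slots walked at the link rate (card->max_pcr), so a requested PCR maps to a proportional, rounded-up number of slots, later checked against tst_free_entries minus NS_TST_RESERVED (source line 1325). A hedged sketch; the helper name and the tmpl expression are assumptions, since the tmpl assignment itself is not among the matched lines:

static int ns_cbr_slots(const ns_dev *card, unsigned long requested_pcr)
{
	/* assumed: requested slots scale with requested_pcr / max_pcr */
	unsigned long tmpl = requested_pcr * NS_TST_NUM_ENTRIES;
	int n = (int)(tmpl / card->max_pcr);	/* whole slots */

	if (tmpl % card->max_pcr)		/* modl != 0: round up */
		n++;
	return n;
}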
1413 ns_dev *card; in ns_close() local
1418 card = vcc->dev->dev_data; in ns_close()
1419 PRINTK("nicstar%d: closing vpi.vci %d.%d \n", card->index, in ns_close()
1430 (vcc->vpi << card->vcibits | vcc->vci) * NS_RCT_ENTRY_SIZE; in ns_close()
1431 spin_lock_irqsave(&card->res_lock, flags); in ns_close()
1432 while (CMD_BUSY(card)) ; in ns_close()
1434 card->membase + CMD); in ns_close()
1435 spin_unlock_irqrestore(&card->res_lock, flags); in ns_close()
1442 stat = readl(card->membase + STAT); in ns_close()
1443 card->sbfqc = ns_stat_sfbqc_get(stat); in ns_close()
1444 card->lbfqc = ns_stat_lfbqc_get(stat); in ns_close()
1448 card->index); in ns_close()
1450 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data, in ns_close()
1453 spin_lock_irqsave(&card->int_lock, flags); in ns_close()
1454 recycle_iov_buf(card, iovb); in ns_close()
1455 spin_unlock_irqrestore(&card->int_lock, flags); in ns_close()
1504 ns_write_sram(card, scq->scd, &data, 1); in ns_close()
1513 if (card->tste2vc[i] == vc) { in ns_close()
1514 ns_write_sram(card, card->tst_addr + i, &data, in ns_close()
1516 card->tste2vc[i] = NULL; in ns_close()
1517 card->tst_free_entries++; in ns_close()
1521 card->scd2vc[(vc->cbr_scd - NS_FRSCD) / NS_FRSCD_SIZE] = NULL; in ns_close()
1522 free_scq(card, vc->scq, vcc); in ns_close()
1528 scq_info *scq = card->scq0; in ns_close()
1551 stat = readl(card->membase + STAT); in ns_close()
1552 cfg = readl(card->membase + CFG); in ns_close()
1556 card->tsq.base, card->tsq.next, in ns_close()
1557 card->tsq.last, readl(card->membase + TSQT)); in ns_close()
1560 card->rsq.base, card->rsq.next, in ns_close()
1561 card->rsq.last, readl(card->membase + RSQT)); in ns_close()
1563 card->efbie ? "enabled" : "disabled"); in ns_close()
1565 ns_stat_sfbqc_get(stat), card->sbpool.count, in ns_close()
1566 ns_stat_lfbqc_get(stat), card->lbpool.count); in ns_close()
1568 card->hbpool.count, card->iovpool.count); in ns_close()
1573 static void fill_tst(ns_dev * card, int n, vc_map * vc) in fill_tst() argument
1584 new_tst = card->tst_addr; in fill_tst()
1589 if (card->tste2vc[e] == NULL) in fill_tst()
1593 printk("nicstar%d: No free TST entries found. \n", card->index); in fill_tst()
1602 if (cl >= NS_TST_NUM_ENTRIES && card->tste2vc[e] == NULL) { in fill_tst()
1603 card->tste2vc[e] = vc; in fill_tst()
1604 ns_write_sram(card, new_tst + e, &data, 1); in fill_tst()
1618 ns_write_sram(card, new_tst + NS_TST_NUM_ENTRIES, &data, 1); in fill_tst()
1619 ns_write_sram(card, card->tst_addr + NS_TST_NUM_ENTRIES, &data, 1); in fill_tst()
1620 card->tst_addr = new_tst; in fill_tst()
1625 ns_dev *card; in ns_send() local
1632 card = vcc->dev->dev_data; in ns_send()
1633 TXPRINTK("nicstar%d: ns_send() called.\n", card->index); in ns_send()
1636 card->index); in ns_send()
1644 card->index); in ns_send()
1652 card->index); in ns_send()
1659 printk("nicstar%d: No scatter-gather yet.\n", card->index); in ns_send()
1667 NS_PRV_DMA(skb) = dma_map_single(&card->pcidev->dev, skb->data, in ns_send()
1704 scq = card->scq0; in ns_send()
1707 if (push_scqe(card, vc, scq, &scqe, skb) != 0) { in ns_send()
1709 dma_unmap_single(&card->pcidev->dev, NS_PRV_DMA(skb), skb->len, in ns_send()
1719 static int push_scqe(ns_dev * card, vc_map * vc, scq_info * scq, ns_scqe * tbd, in push_scqe() argument
1733 printk("nicstar%d: Error pushing TBD.\n", card->index); in push_scqe()
1746 card->index); in push_scqe()
1754 card->index, skb, index); in push_scqe()
1756 card->index, le32_to_cpu(tbd->word_1), le32_to_cpu(tbd->word_2), in push_scqe()
1778 ns_write_sram(card, scq->scd, &data, 1); in push_scqe()
1781 card->index); in push_scqe()
1810 card->index, le32_to_cpu(tsr.word_1), in push_scqe()
1821 card->index); in push_scqe()
1824 ns_write_sram(card, scq->scd, &data, 1); in push_scqe()
1831 static void process_tsq(ns_dev * card) in process_tsq() argument
1840 if (card->tsq.next == card->tsq.last) in process_tsq()
1841 one_ahead = card->tsq.base; in process_tsq()
1843 one_ahead = card->tsq.next + 1; in process_tsq()
1845 if (one_ahead == card->tsq.last) in process_tsq()
1846 two_ahead = card->tsq.base; in process_tsq()
1850 while (!ns_tsi_isempty(card->tsq.next) || !ns_tsi_isempty(one_ahead) || in process_tsq()
1857 while (ns_tsi_isempty(card->tsq.next)) { in process_tsq()
1858 if (card->tsq.next == card->tsq.last) in process_tsq()
1859 card->tsq.next = card->tsq.base; in process_tsq()
1861 card->tsq.next++; in process_tsq()
1864 if (!ns_tsi_tmrof(card->tsq.next)) { in process_tsq()
1865 scdi = ns_tsi_getscdindex(card->tsq.next); in process_tsq()
1867 scq = card->scq0; in process_tsq()
1869 if (card->scd2vc[scdi] == NULL) { in process_tsq()
1872 card->index); in process_tsq()
1873 ns_tsi_init(card->tsq.next); in process_tsq()
1876 scq = card->scd2vc[scdi]->scq; in process_tsq()
1878 drain_scq(card, scq, ns_tsi_getscqpos(card->tsq.next)); in process_tsq()
1883 ns_tsi_init(card->tsq.next); in process_tsq()
1884 previous = card->tsq.next; in process_tsq()
1885 if (card->tsq.next == card->tsq.last) in process_tsq()
1886 card->tsq.next = card->tsq.base; in process_tsq()
1888 card->tsq.next++; in process_tsq()
1890 if (card->tsq.next == card->tsq.last) in process_tsq()
1891 one_ahead = card->tsq.base; in process_tsq()
1893 one_ahead = card->tsq.next + 1; in process_tsq()
1895 if (one_ahead == card->tsq.last) in process_tsq()
1896 two_ahead = card->tsq.base; in process_tsq()
1902 writel(PTR_DIFF(previous, card->tsq.base), in process_tsq()
1903 card->membase + TSQH); in process_tsq()
1906 static void drain_scq(ns_dev * card, scq_info * scq, int pos) in drain_scq() argument
1914 card->index, scq, pos); in drain_scq()
1916 printk("nicstar%d: Bad index on drain_scq().\n", card->index); in drain_scq()
1927 card->index, skb, i); in drain_scq()
1929 dma_unmap_single(&card->pcidev->dev, in drain_scq()
1948 static void process_rsq(ns_dev * card) in process_rsq() argument
1952 if (!ns_rsqe_valid(card->rsq.next)) in process_rsq()
1955 dequeue_rx(card, card->rsq.next); in process_rsq()
1956 ns_rsqe_init(card->rsq.next); in process_rsq()
1957 previous = card->rsq.next; in process_rsq()
1958 if (card->rsq.next == card->rsq.last) in process_rsq()
1959 card->rsq.next = card->rsq.base; in process_rsq()
1961 card->rsq.next++; in process_rsq()
1962 } while (ns_rsqe_valid(card->rsq.next)); in process_rsq()
1963 writel(PTR_DIFF(previous, card->rsq.base), card->membase + RSQH); in process_rsq()
1966 static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe) in dequeue_rx() argument
1979 stat = readl(card->membase + STAT); in dequeue_rx()
1980 card->sbfqc = ns_stat_sfbqc_get(stat); in dequeue_rx()
1981 card->lbfqc = ns_stat_lfbqc_get(stat); in dequeue_rx()
1984 skb = idr_remove(&card->idr, id); in dequeue_rx()
1987 "nicstar%d: skb not found!\n", card->index); in dequeue_rx()
1990 dma_sync_single_for_cpu(&card->pcidev->dev, in dequeue_rx()
1995 dma_unmap_single(&card->pcidev->dev, in dequeue_rx()
2002 if (vpi >= 1UL << card->vpibits || vci >= 1UL << card->vcibits) { in dequeue_rx()
2004 card->index, vpi, vci); in dequeue_rx()
2005 recycle_rx_buf(card, skb); in dequeue_rx()
2009 vc = &(card->vcmap[vpi << card->vcibits | vci]); in dequeue_rx()
2012 card->index, vpi, vci); in dequeue_rx()
2013 recycle_rx_buf(card, skb); in dequeue_rx()
2030 card->index); in dequeue_rx()
2037 card->index); in dequeue_rx()
2057 recycle_rx_buf(card, skb); in dequeue_rx()
2064 iovb = skb_dequeue(&(card->iovpool.queue)); in dequeue_rx()
2069 card->index); in dequeue_rx()
2071 recycle_rx_buf(card, skb); in dequeue_rx()
2075 } else if (--card->iovpool.count < card->iovnr.min) { in dequeue_rx()
2080 skb_queue_tail(&card->iovpool.queue, new_iovb); in dequeue_rx()
2081 card->iovpool.count++; in dequeue_rx()
2093 printk("nicstar%d: received too big AAL5 SDU.\n", card->index); in dequeue_rx()
2095 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data, in dequeue_rx()
2112 card->index); in dequeue_rx()
2113 which_list(card, skb); in dequeue_rx()
2115 recycle_rx_buf(card, skb); in dequeue_rx()
2117 recycle_iov_buf(card, iovb); in dequeue_rx()
2125 card->index); in dequeue_rx()
2126 which_list(card, skb); in dequeue_rx()
2128 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data, in dequeue_rx()
2131 recycle_iov_buf(card, iovb); in dequeue_rx()
2145 printk("nicstar%d: AAL5 CRC error", card->index); in dequeue_rx()
2151 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data, in dequeue_rx()
2154 recycle_iov_buf(card, iovb); in dequeue_rx()
2163 push_rxbufs(card, skb); in dequeue_rx()
2167 dequeue_sm_buf(card, skb); in dequeue_rx()
2181 push_rxbufs(card, sb); in dequeue_rx()
2185 dequeue_sm_buf(card, sb); in dequeue_rx()
2192 push_rxbufs(card, skb); in dequeue_rx()
2197 push_rxbufs(card, skb); in dequeue_rx()
2200 dequeue_lg_buf(card, skb); in dequeue_rx()
2211 push_rxbufs(card, sb); in dequeue_rx()
2221 hb = skb_dequeue(&(card->hbpool.queue)); in dequeue_rx()
2228 card->index); in dequeue_rx()
2230 recycle_iovec_rx_bufs(card, in dequeue_rx()
2235 recycle_iov_buf(card, iovb); in dequeue_rx()
2237 } else if (card->hbpool.count < card->hbnr.min) { in dequeue_rx()
2242 skb_queue_tail(&card->hbpool. in dequeue_rx()
2244 card->hbpool.count++; in dequeue_rx()
2248 } else if (--card->hbpool.count < card->hbnr.min) { in dequeue_rx()
2253 skb_queue_tail(&card->hbpool.queue, in dequeue_rx()
2255 card->hbpool.count++; in dequeue_rx()
2257 if (card->hbpool.count < card->hbnr.min) { in dequeue_rx()
2263 skb_queue_tail(&card->hbpool. in dequeue_rx()
2265 card->hbpool.count++; in dequeue_rx()
2273 recycle_iovec_rx_bufs(card, iov, in dequeue_rx()
2275 if (card->hbpool.count < card->hbnr.max) { in dequeue_rx()
2276 skb_queue_tail(&card->hbpool.queue, hb); in dequeue_rx()
2277 card->hbpool.count++; in dequeue_rx()
2290 push_rxbufs(card, sb); in dequeue_rx()
2303 push_rxbufs(card, lb); in dequeue_rx()
2309 card->index); in dequeue_rx()
2319 recycle_iov_buf(card, iovb); in dequeue_rx()
2324 static void recycle_rx_buf(ns_dev * card, struct sk_buff *skb) in recycle_rx_buf() argument
2328 card->index); in recycle_rx_buf()
2331 push_rxbufs(card, skb); in recycle_rx_buf()
2334 static void recycle_iovec_rx_bufs(ns_dev * card, struct iovec *iov, int count) in recycle_iovec_rx_bufs() argument
2337 recycle_rx_buf(card, (struct sk_buff *)(iov++)->iov_base); in recycle_iovec_rx_bufs()
2340 static void recycle_iov_buf(ns_dev * card, struct sk_buff *iovb) in recycle_iov_buf() argument
2342 if (card->iovpool.count < card->iovnr.max) { in recycle_iov_buf()
2343 skb_queue_tail(&card->iovpool.queue, iovb); in recycle_iov_buf()
2344 card->iovpool.count++; in recycle_iov_buf()
2349 static void dequeue_sm_buf(ns_dev * card, struct sk_buff *sb) in dequeue_sm_buf() argument
2351 skb_unlink(sb, &card->sbpool.queue); in dequeue_sm_buf()
2352 if (card->sbfqc < card->sbnr.init) { in dequeue_sm_buf()
2356 skb_queue_tail(&card->sbpool.queue, new_sb); in dequeue_sm_buf()
2358 push_rxbufs(card, new_sb); in dequeue_sm_buf()
2361 if (card->sbfqc < card->sbnr.init) in dequeue_sm_buf()
2366 skb_queue_tail(&card->sbpool.queue, new_sb); in dequeue_sm_buf()
2368 push_rxbufs(card, new_sb); in dequeue_sm_buf()
2373 static void dequeue_lg_buf(ns_dev * card, struct sk_buff *lb) in dequeue_lg_buf() argument
2375 skb_unlink(lb, &card->lbpool.queue); in dequeue_lg_buf()
2376 if (card->lbfqc < card->lbnr.init) { in dequeue_lg_buf()
2380 skb_queue_tail(&card->lbpool.queue, new_lb); in dequeue_lg_buf()
2382 push_rxbufs(card, new_lb); in dequeue_lg_buf()
2385 if (card->lbfqc < card->lbnr.init) in dequeue_lg_buf()
2390 skb_queue_tail(&card->lbpool.queue, new_lb); in dequeue_lg_buf()
2392 push_rxbufs(card, new_lb); in dequeue_lg_buf()
2400 ns_dev *card; in ns_proc_read() local
2404 card = (ns_dev *) dev->dev_data; in ns_proc_read()
2405 stat = readl(card->membase + STAT); in ns_proc_read()
2410 ns_stat_sfbqc_get(stat), card->sbnr.min, in ns_proc_read()
2411 card->sbnr.init, card->sbnr.max); in ns_proc_read()
2414 ns_stat_lfbqc_get(stat), card->lbnr.min, in ns_proc_read()
2415 card->lbnr.init, card->lbnr.max); in ns_proc_read()
2418 card->hbpool.count, card->hbnr.min, in ns_proc_read()
2419 card->hbnr.init, card->hbnr.max); in ns_proc_read()
2422 card->iovpool.count, card->iovnr.min, in ns_proc_read()
2423 card->iovnr.init, card->iovnr.max); in ns_proc_read()
2427 sprintf(page, "Interrupt counter: %u \n", card->intcnt); in ns_proc_read()
2428 card->intcnt = 0; in ns_proc_read()
2435 if (card->max_pcr == ATM_25_PCR && !left--) { in ns_proc_read()
2440 while (CMD_BUSY(card)) ; in ns_proc_read()
2442 card->membase + CMD); in ns_proc_read()
2443 while (CMD_BUSY(card)) ; in ns_proc_read()
2444 phy_regs[i] = readl(card->membase + DR0) & 0x000000FF; in ns_proc_read()
2455 if (card->tste2vc[left + 1] == NULL) in ns_proc_read()
2459 card->tste2vc[left + 1]->tx_vcc->vpi, in ns_proc_read()
2460 card->tste2vc[left + 1]->tx_vcc->vci); in ns_proc_read()
2468 ns_dev *card; in ns_ioctl() local
2473 card = dev->dev_data; in ns_ioctl()
2482 ns_stat_sfbqc_get(readl(card->membase + STAT)); in ns_ioctl()
2483 pl.level.min = card->sbnr.min; in ns_ioctl()
2484 pl.level.init = card->sbnr.init; in ns_ioctl()
2485 pl.level.max = card->sbnr.max; in ns_ioctl()
2490 ns_stat_lfbqc_get(readl(card->membase + STAT)); in ns_ioctl()
2491 pl.level.min = card->lbnr.min; in ns_ioctl()
2492 pl.level.init = card->lbnr.init; in ns_ioctl()
2493 pl.level.max = card->lbnr.max; in ns_ioctl()
2497 pl.count = card->hbpool.count; in ns_ioctl()
2498 pl.level.min = card->hbnr.min; in ns_ioctl()
2499 pl.level.init = card->hbnr.init; in ns_ioctl()
2500 pl.level.max = card->hbnr.max; in ns_ioctl()
2504 pl.count = card->iovpool.count; in ns_ioctl()
2505 pl.level.min = card->iovnr.min; in ns_ioctl()
2506 pl.level.init = card->iovnr.init; in ns_ioctl()
2507 pl.level.max = card->iovnr.max; in ns_ioctl()
2533 card->sbnr.min = pl.level.min; in ns_ioctl()
2534 card->sbnr.init = pl.level.init; in ns_ioctl()
2535 card->sbnr.max = pl.level.max; in ns_ioctl()
2541 card->lbnr.min = pl.level.min; in ns_ioctl()
2542 card->lbnr.init = pl.level.init; in ns_ioctl()
2543 card->lbnr.max = pl.level.max; in ns_ioctl()
2549 card->hbnr.min = pl.level.min; in ns_ioctl()
2550 card->hbnr.init = pl.level.init; in ns_ioctl()
2551 card->hbnr.max = pl.level.max; in ns_ioctl()
2557 card->iovnr.min = pl.level.min; in ns_ioctl()
2558 card->iovnr.init = pl.level.init; in ns_ioctl()
2559 card->iovnr.max = pl.level.max; in ns_ioctl()
2574 while (card->sbfqc < card->sbnr.init) { in ns_ioctl()
2581 skb_queue_tail(&card->sbpool.queue, sb); in ns_ioctl()
2583 push_rxbufs(card, sb); in ns_ioctl()
2588 while (card->lbfqc < card->lbnr.init) { in ns_ioctl()
2595 skb_queue_tail(&card->lbpool.queue, lb); in ns_ioctl()
2597 push_rxbufs(card, lb); in ns_ioctl()
2602 while (card->hbpool.count > card->hbnr.init) { in ns_ioctl()
2605 spin_lock_irqsave(&card->int_lock, flags); in ns_ioctl()
2606 hb = skb_dequeue(&card->hbpool.queue); in ns_ioctl()
2607 card->hbpool.count--; in ns_ioctl()
2608 spin_unlock_irqrestore(&card->int_lock, flags); in ns_ioctl()
2612 card->index); in ns_ioctl()
2617 while (card->hbpool.count < card->hbnr.init) { in ns_ioctl()
2624 spin_lock_irqsave(&card->int_lock, flags); in ns_ioctl()
2625 skb_queue_tail(&card->hbpool.queue, hb); in ns_ioctl()
2626 card->hbpool.count++; in ns_ioctl()
2627 spin_unlock_irqrestore(&card->int_lock, flags); in ns_ioctl()
2632 while (card->iovpool.count > card->iovnr.init) { in ns_ioctl()
2635 spin_lock_irqsave(&card->int_lock, flags); in ns_ioctl()
2636 iovb = skb_dequeue(&card->iovpool.queue); in ns_ioctl()
2637 card->iovpool.count--; in ns_ioctl()
2638 spin_unlock_irqrestore(&card->int_lock, flags); in ns_ioctl()
2642 card->index); in ns_ioctl()
2647 while (card->iovpool.count < card->iovnr.init) { in ns_ioctl()
2654 spin_lock_irqsave(&card->int_lock, flags); in ns_ioctl()
2655 skb_queue_tail(&card->iovpool.queue, iovb); in ns_ioctl()
2656 card->iovpool.count++; in ns_ioctl()
2657 spin_unlock_irqrestore(&card->int_lock, flags); in ns_ioctl()
2671 printk("nicstar%d: %s == NULL \n", card->index, in ns_ioctl()
2679 static void which_list(ns_dev * card, struct sk_buff *skb) in which_list() argument
2688 ns_dev *card; in ns_poll() local
2694 card = cards[i]; in ns_poll()
2695 if (!spin_trylock_irqsave(&card->int_lock, flags)) { in ns_poll()
2701 stat_r = readl(card->membase + STAT); in ns_poll()
2707 process_tsq(card); in ns_poll()
2708 process_rsq(card); in ns_poll()
2710 writel(stat_w, card->membase + STAT); in ns_poll()
2711 spin_unlock_irqrestore(&card->int_lock, flags); in ns_poll()
2720 ns_dev *card; in ns_phy_put() local
2723 card = dev->dev_data; in ns_phy_put()
2724 spin_lock_irqsave(&card->res_lock, flags); in ns_phy_put()
2725 while (CMD_BUSY(card)) ; in ns_phy_put()
2726 writel((u32) value, card->membase + DR0); in ns_phy_put()
2728 card->membase + CMD); in ns_phy_put()
2729 spin_unlock_irqrestore(&card->res_lock, flags); in ns_phy_put()
2734 ns_dev *card; in ns_phy_get() local
2738 card = dev->dev_data; in ns_phy_get()
2739 spin_lock_irqsave(&card->res_lock, flags); in ns_phy_get()
2740 while (CMD_BUSY(card)) ; in ns_phy_get()
2742 card->membase + CMD); in ns_phy_get()
2743 while (CMD_BUSY(card)) ; in ns_phy_get()
2744 data = readl(card->membase + DR0) & 0x000000FF; in ns_phy_get()
2745 spin_unlock_irqrestore(&card->res_lock, flags); in ns_phy_get()