Lines Matching +full:auto +full:- +full:string +full:- +full:detection

3 	Written/copyright 1993-1998 by Donald Becker.
11 with most other LANCE-based bus-master (NE2100/NE2500) ethercards.
19 - alignment problem with 1.3.* kernel and some minor changes.
21 - added support for Linux/Alpha, but removed most of it, because
23 - added hook for the 32bit lance driver
24 - added PCnetPCI II (79C970A) to chip table
26 - hopefully fix above so Linux/Alpha can use ISA cards too.
27 8/20/96 Fixed 7990 autoIRQ failure and reversed unneeded alignment -djb
28 v1.12 10/27/97 Module support -djb
29 v1.14 2/3/98 Module support modified, made PCI support optional -djb
30 v1.15 5/27/99 Fixed bug in the cleanup_module(). dev->priv was freed
33 -- Mika Kuoppala <miku@iki.fi>
36 the 2.1 version of the old driver - Alan Cox
39 Arnaldo Carvalho de Melo <acme@conectiva.com.br> - 11/01/2001
41 Reworked detection, added support for Racal InterLan EtherBlaster cards
42 Vesselin Kostadinov <vesok at yahoo dot com > - 22/4/2004
49 #include <linux/string.h>
101 This device driver is designed for the AMD 79C960, the "PCnet-ISA
102 single-chip ethernet controller for ISA". This chip is used in a wide
109 II. Board-specific settings
112 bus-master mode, rather than in shared memory mode. (Only older designs
113 have on-board buffer memory needed to support the slower shared memory mode.)
118 After the board is found it generates a DMA-timeout interrupt and uses
120 of the otherwise-unused dev->mem_start value (aka PARAM1). If unset it is
124 The HP-J2405A board is an exception: with this board it is easy to read the
125 EEPROM-set values for the base, IRQ, and DMA. (Of course you must already
126 _know_ the base address -- that field is for writing the EEPROM.)
139 of entries makes it more difficult to achieve back-to-back packet transmission
141 of receiving back-to-back minimum-sized packets.)
144 statically allocates full-sized (slightly oversized -- PKT_BUF_SZ) buffers to
146 allocating full-sized buffers "just in case", at the expense of a
147 memory-to-memory data copy for each packet received. For most systems this
150 the buffers are only used when needed as low-memory bounce buffers.
161 As mentioned before, low-memory "bounce-buffers" are used when needed.
164 The driver runs as two independent, single-threaded flows of control. One
165 is the send-packet routine, which enforces single-threaded use by the
166 dev->tbusy flag. The other thread is the interrupt handler, which is single
169 The send packet thread has partial control over the Tx ring and 'dev->tbusy'
172 the 'lp->tx_full' flag.
175 from the Tx ring. (The Tx-done interrupt can't be selectively turned off, so
178 the 'base' to zero. Iff the 'lp->tx_full' flag is set, it clears both the
186 This is a compile-time option for efficiency.
194 #define TX_RING_MOD_MASK (TX_RING_SIZE - 1)
198 #define RX_RING_MOD_MASK (RX_RING_SIZE - 1)
227 u16 mode; /* Pre-set mode (reg. 15) */
236 /* The Tx and Rx ring entries must be aligned on 8-byte boundaries. */
241 /* The saved address of a sent-in-place packet/buffer, for skfree(). */
243 /* The addresses of receive-in-place skbuffs. */
246 /* Tx low-memory "bounce buffer" address. */
262 These are from the datasheets -- in real life the '970 version
274 {0x2260, "PCnet/ISA+ 79C961", /* 79C961 PCnet/ISA+, Plug-n-Play. */
277 {0x2420, "PCnet/PCI 79C970", /* 79C970 or 79C974 PCnet-SCSI, PCI. */
285 {0x2621, "PCnet/PCI-II 79C970A", /* 79C970A PCnetPCI II. */
296 /* Non-zero if lance_probe1() needs to allocate low-memory bounce buffers.
328 MODULE_PARM_DESC(lance_debug, "LANCE/PCnet debug level (0-7)");
340 return -EPERM; in init_module()
345 dev->irq = irq[this_dev]; in init_module()
346 dev->base_addr = io[this_dev]; in init_module()
347 dev->dma = dma[this_dev]; in init_module()
357 return -ENXIO; in init_module()
362 struct lance_private *lp = dev->ml_priv; in cleanup_card()
363 if (dev->dma != 4) in cleanup_card()
364 free_dma(dev->dma); in cleanup_card()
365 release_region(dev->base_addr, LANCE_TOTAL_SIZE); in cleanup_card()
366 kfree(lp->tx_bounce_buffs); in cleanup_card()
367 kfree((void*)lp->rx_buffs); in cleanup_card()
389 board probes now that kmalloc() can allocate ISA DMA-able regions.
403 "lance-probe"); in do_lance_probe()
422 struct lance_private *lp = dev->ml_priv; in do_lance_probe()
423 int ver = lp->chip_version; in do_lance_probe()
425 r->name = chip_table[ver].name; in do_lance_probe()
432 return -ENODEV; in do_lance_probe()
442 return ERR_PTR(-ENODEV); in lance_probe()
444 sprintf(dev->name, "eth%d", unit); in lance_probe()
471 unsigned long dma_channels; /* Mark spuriously-busy DMA channels */ in lance_probe1()
476 int hp_builtin = 0; /* HP on-board ethernet. */ in lance_probe1()
479 int err = -ENOMEM; in lance_probe1()
483 Check for HP's on-board ethernet by looking for 'HP' in the BIOS. in lance_probe1()
489 return -ENOMEM; in lance_probe1()
493 /* We can have boards other than the built-in! Verify this is on-board. */ in lance_probe1()
499 /* We also recognize the HP Vectra on-board here, but check below. */ in lance_probe1()
506 /* The Un-Reset needed is only needed for the real NE2100, and will in lance_probe1()
513 return -ENODEV; in lance_probe1()
526 return -ENODEV; in lance_probe1()
535 a ISA DMA-able region. */ in lance_probe1()
537 printk("%s: %s at %#3x, ", dev->name, chipname, ioaddr); in lance_probe1()
542 dev->dev_addr[i] = inb(ioaddr + i); in lance_probe1()
543 printk("%pM", dev->dev_addr); in lance_probe1()
545 dev->base_addr = ioaddr; in lance_probe1()
550 return -ENOMEM; in lance_probe1()
552 dev->ml_priv = lp; in lance_probe1()
553 lp->name = chipname; in lance_probe1()
554 lp->rx_buffs = (unsigned long)kmalloc_array(RX_RING_SIZE, PKT_BUF_SZ, in lance_probe1()
556 if (!lp->rx_buffs) in lance_probe1()
559 lp->tx_bounce_buffs = kmalloc_array(TX_RING_SIZE, PKT_BUF_SZ, in lance_probe1()
561 if (!lp->tx_bounce_buffs) in lance_probe1()
564 lp->tx_bounce_buffs = NULL; in lance_probe1()
566 lp->chip_version = lance_version; in lance_probe1()
567 spin_lock_init(&lp->devlock); in lance_probe1()
569 lp->init_block.mode = 0x0003; /* Disable Rx and Tx. */ in lance_probe1()
571 lp->init_block.phys_addr[i] = dev->dev_addr[i]; in lance_probe1()
572 lp->init_block.filter[0] = 0x00000000; in lance_probe1()
573 lp->init_block.filter[1] = 0x00000000; in lance_probe1()
574 lp->init_block.rx_ring = ((u32)isa_virt_to_bus(lp->rx_ring) & 0xffffff) | RX_RING_LEN_BITS; in lance_probe1()
575 lp->init_block.tx_ring = ((u32)isa_virt_to_bus(lp->tx_ring) & 0xffffff) | TX_RING_LEN_BITS; in lance_probe1()
579 outw((short) (u32) isa_virt_to_bus(&lp->init_block), ioaddr+LANCE_DATA); in lance_probe1()
582 outw(((u32)isa_virt_to_bus(&lp->init_block)) >> 16, ioaddr+LANCE_DATA); in lance_probe1()
587 dev->dma = 4; /* Native bus-master, no DMA channel needed. */ in lance_probe1()
588 dev->irq = irq; in lance_probe1()
593 dev->dma = dma_tbl[(port_val >> 4) & 3]; in lance_probe1()
594 dev->irq = irq_tbl[(port_val >> 2) & 3]; in lance_probe1()
595 printk(" HP Vectra IRQ %d DMA %d.\n", dev->irq, dev->dma); in lance_probe1()
600 dev->dma = dma_tbl[(reset_val >> 2) & 3]; in lance_probe1()
601 dev->irq = irq_tbl[(reset_val >> 4) & 7]; in lance_probe1()
602 printk(" HP J2405A IRQ %d DMA %d.\n", dev->irq, dev->dma); in lance_probe1()
603 } else if (lance_version == PCNET_ISAP) { /* The plug-n-play version. */ in lance_probe1()
607 dev->dma = bus_info & 0x07; in lance_probe1()
608 dev->irq = (bus_info >> 4) & 0x0F; in lance_probe1()
611 if (dev->mem_start & 0x07) in lance_probe1()
612 dev->dma = dev->mem_start & 0x07; in lance_probe1()
615 if (dev->dma == 0) { in lance_probe1()
617 stuck DMA channels in the DMA detection below. */ in lance_probe1()
621 err = -ENODEV; in lance_probe1()
622 if (dev->irq >= 2) in lance_probe1()
623 printk(" assigned IRQ %d", dev->irq); in lance_probe1()
624 else if (lance_version != 0) { /* 7990 boards need DMA detection first. */ in lance_probe1()
627 /* To auto-IRQ we enable the initialization-done and DMA error in lance_probe1()
636 dev->irq = probe_irq_off(irq_mask); in lance_probe1()
637 if (dev->irq) in lance_probe1()
638 printk(", probed IRQ %d", dev->irq); in lance_probe1()
647 dev->dma = 4; in lance_probe1()
650 if (dev->dma == 4) { in lance_probe1()
652 } else if (dev->dma) { in lance_probe1()
653 if (request_dma(dev->dma, chipname)) { in lance_probe1()
654 printk("DMA %d allocation failed.\n", dev->dma); in lance_probe1()
657 printk(", assigned DMA %d.\n", dev->dma); in lance_probe1()
658 } else { /* OK, we have to auto-DMA. */ in lance_probe1()
679 for (boguscnt = 100; boguscnt > 0; --boguscnt) in lance_probe1()
683 dev->dma = dma; in lance_probe1()
684 printk(", DMA %d.\n", dev->dma); in lance_probe1()
694 printk("DMA detection failed.\n"); in lance_probe1()
699 if (lance_version == 0 && dev->irq == 0) { in lance_probe1()
700 /* We may auto-IRQ now that we have a DMA channel. */ in lance_probe1()
708 dev->irq = probe_irq_off(irq_mask); in lance_probe1()
709 if (dev->irq == 0) { in lance_probe1()
713 printk(" Auto-IRQ detected IRQ%d.\n", dev->irq); in lance_probe1()
716 if (chip_table[lp->chip_version].flags & LANCE_ENABLE_AUTOSELECT) { in lance_probe1()
717 /* Turn on auto-select of media (10baseT or BNC) so that the user in lance_probe1()
727 /* The LANCE-specific entries in the device structure. */ in lance_probe1()
728 dev->netdev_ops = &lance_netdev_ops; in lance_probe1()
729 dev->watchdog_timeo = TX_TIMEOUT; in lance_probe1()
736 if (dev->dma != 4) in lance_probe1()
737 free_dma(dev->dma); in lance_probe1()
739 kfree(lp->tx_bounce_buffs); in lance_probe1()
741 kfree((void*)lp->rx_buffs); in lance_probe1()
751 struct lance_private *lp = dev->ml_priv; in lance_open()
752 int ioaddr = dev->base_addr; in lance_open()
755 if (dev->irq == 0 || in lance_open()
756 request_irq(dev->irq, lance_interrupt, 0, dev->name, dev)) { in lance_open()
757 return -EAGAIN; in lance_open()
766 /* The DMA controller is used as a no-operation slave, "cascade mode". */ in lance_open()
767 if (dev->dma != 4) { in lance_open()
769 enable_dma(dev->dma); in lance_open()
770 set_dma_mode(dev->dma, DMA_MODE_CASCADE); in lance_open()
774 /* Un-Reset the LANCE, needed only for the NE2100. */ in lance_open()
775 if (chip_table[lp->chip_version].flags & LANCE_MUST_UNRESET) in lance_open()
778 if (chip_table[lp->chip_version].flags & LANCE_ENABLE_AUTOSELECT) { in lance_open()
779 /* This is 79C960-specific: Turn on auto-select of media (AUI, BNC). */ in lance_open()
787 dev->name, dev->irq, dev->dma, in lance_open()
788 (u32) isa_virt_to_bus(lp->tx_ring), in lance_open()
789 (u32) isa_virt_to_bus(lp->rx_ring), in lance_open()
790 (u32) isa_virt_to_bus(&lp->init_block)); in lance_open()
793 /* Re-initialize the LANCE, and start it when done. */ in lance_open()
795 outw((short) (u32) isa_virt_to_bus(&lp->init_block), ioaddr+LANCE_DATA); in lance_open()
797 outw(((u32)isa_virt_to_bus(&lp->init_block)) >> 16, ioaddr+LANCE_DATA); in lance_open()
819 dev->name, i, (u32) isa_virt_to_bus(&lp->init_block), inw(ioaddr+LANCE_DATA)); in lance_open()
826 etc.). Modern LANCE variants always reload their ring-buffer
830 sent (in effect, drop the packets on the floor) - the higher-level
832 these skbs to a temp list and then actually re-Tx them after
839 struct lance_private *lp = dev->ml_priv; in lance_purge_ring()
844 struct sk_buff *skb = lp->rx_skbuff[i]; in lance_purge_ring()
845 lp->rx_skbuff[i] = NULL; in lance_purge_ring()
846 lp->rx_ring[i].base = 0; /* Not owned by LANCE chip. */ in lance_purge_ring()
851 if (lp->tx_skbuff[i]) { in lance_purge_ring()
852 dev_kfree_skb_any(lp->tx_skbuff[i]); in lance_purge_ring()
853 lp->tx_skbuff[i] = NULL; in lance_purge_ring()
863 struct lance_private *lp = dev->ml_priv; in lance_init_ring()
866 lp->cur_rx = lp->cur_tx = 0; in lance_init_ring()
867 lp->dirty_rx = lp->dirty_tx = 0; in lance_init_ring()
874 lp->rx_skbuff[i] = skb; in lance_init_ring()
876 rx_buff = skb->data; in lance_init_ring()
880 lp->rx_ring[i].base = 0; in lance_init_ring()
882 lp->rx_ring[i].base = (u32)isa_virt_to_bus(rx_buff) | 0x80000000; in lance_init_ring()
883 lp->rx_ring[i].buf_length = -PKT_BUF_SZ; in lance_init_ring()
888 lp->tx_skbuff[i] = NULL; in lance_init_ring()
889 lp->tx_ring[i].base = 0; in lance_init_ring()
892 lp->init_block.mode = 0x0000; in lance_init_ring()
894 lp->init_block.phys_addr[i] = dev->dev_addr[i]; in lance_init_ring()
895 lp->init_block.filter[0] = 0x00000000; in lance_init_ring()
896 lp->init_block.filter[1] = 0x00000000; in lance_init_ring()
897 lp->init_block.rx_ring = ((u32)isa_virt_to_bus(lp->rx_ring) & 0xffffff) | RX_RING_LEN_BITS; in lance_init_ring()
898 lp->init_block.tx_ring = ((u32)isa_virt_to_bus(lp->tx_ring) & 0xffffff) | TX_RING_LEN_BITS; in lance_init_ring()
904 struct lance_private *lp = dev->ml_priv; in lance_restart()
907 (chip_table[lp->chip_version].flags & LANCE_MUST_REINIT_RING)) { in lance_restart()
911 outw(0x0000, dev->base_addr + LANCE_ADDR); in lance_restart()
912 outw(csr0_bits, dev->base_addr + LANCE_DATA); in lance_restart()
918 struct lance_private *lp = (struct lance_private *) dev->ml_priv; in lance_tx_timeout()
919 int ioaddr = dev->base_addr; in lance_tx_timeout()
923 dev->name, inw (ioaddr + LANCE_DATA)); in lance_tx_timeout()
925 dev->stats.tx_errors++; in lance_tx_timeout()
930 lp->dirty_tx, lp->cur_tx, netif_queue_stopped(dev) ? " (full)" : "", in lance_tx_timeout()
931 lp->cur_rx); in lance_tx_timeout()
934 lp->rx_ring[i].base, -lp->rx_ring[i].buf_length, in lance_tx_timeout()
935 lp->rx_ring[i].msg_length); in lance_tx_timeout()
938 lp->tx_ring[i].base, -lp->tx_ring[i].length, in lance_tx_timeout()
939 lp->tx_ring[i].misc); in lance_tx_timeout()
953 struct lance_private *lp = dev->ml_priv; in lance_start_xmit()
954 int ioaddr = dev->base_addr; in lance_start_xmit()
958 spin_lock_irqsave(&lp->devlock, flags); in lance_start_xmit()
962 printk("%s: lance_start_xmit() called, csr0 %4.4x.\n", dev->name, in lance_start_xmit()
970 entry = lp->cur_tx & TX_RING_MOD_MASK; in lance_start_xmit()
976 if (chip_table[lp->chip_version].flags & LANCE_MUST_PAD) { in lance_start_xmit()
977 if (skb->len < ETH_ZLEN) { in lance_start_xmit()
980 lp->tx_ring[entry].length = -ETH_ZLEN; in lance_start_xmit()
983 lp->tx_ring[entry].length = -skb->len; in lance_start_xmit()
985 lp->tx_ring[entry].length = -skb->len; in lance_start_xmit()
987 lp->tx_ring[entry].misc = 0x0000; in lance_start_xmit()
989 dev->stats.tx_bytes += skb->len; in lance_start_xmit()
991 /* If any part of this buffer is >16M we must copy it to a low-memory in lance_start_xmit()
993 if ((u32)isa_virt_to_bus(skb->data) + skb->len > 0x01000000) { in lance_start_xmit()
995 printk("%s: bouncing a high-memory packet (%#x).\n", in lance_start_xmit()
996 dev->name, (u32)isa_virt_to_bus(skb->data)); in lance_start_xmit()
997 skb_copy_from_linear_data(skb, &lp->tx_bounce_buffs[entry], skb->len); in lance_start_xmit()
998 lp->tx_ring[entry].base = in lance_start_xmit()
999 ((u32)isa_virt_to_bus((lp->tx_bounce_buffs + entry)) & 0xffffff) | 0x83000000; in lance_start_xmit()
1002 lp->tx_skbuff[entry] = skb; in lance_start_xmit()
1003 lp->tx_ring[entry].base = ((u32)isa_virt_to_bus(skb->data) & 0xffffff) | 0x83000000; in lance_start_xmit()
1005 lp->cur_tx++; in lance_start_xmit()
1011 if ((lp->cur_tx - lp->dirty_tx) >= TX_RING_SIZE) in lance_start_xmit()
1015 spin_unlock_irqrestore(&lp->devlock, flags); in lance_start_xmit()
1027 ioaddr = dev->base_addr; in lance_interrupt()
1028 lp = dev->ml_priv; in lance_interrupt()
1030 spin_lock (&lp->devlock); in lance_interrupt()
1032 outw(0x00, dev->base_addr + LANCE_ADDR); in lance_interrupt()
1033 while ((csr0 = inw(dev->base_addr + LANCE_DATA)) & 0x8600 && in lance_interrupt()
1034 --boguscnt >= 0) { in lance_interrupt()
1036 outw(csr0 & ~0x004f, dev->base_addr + LANCE_DATA); in lance_interrupt()
1042 dev->name, csr0, inw(dev->base_addr + LANCE_DATA)); in lance_interrupt()
1047 if (csr0 & 0x0200) { /* Tx-done interrupt */ in lance_interrupt()
1048 int dirty_tx = lp->dirty_tx; in lance_interrupt()
1050 while (dirty_tx < lp->cur_tx) { in lance_interrupt()
1052 int status = lp->tx_ring[entry].base; in lance_interrupt()
1057 lp->tx_ring[entry].base = 0; in lance_interrupt()
1061 int err_status = lp->tx_ring[entry].misc; in lance_interrupt()
1062 dev->stats.tx_errors++; in lance_interrupt()
1064 dev->stats.tx_aborted_errors++; in lance_interrupt()
1066 dev->stats.tx_carrier_errors++; in lance_interrupt()
1068 dev->stats.tx_window_errors++; in lance_interrupt()
1071 dev->stats.tx_fifo_errors++; in lance_interrupt()
1074 dev->name, csr0); in lance_interrupt()
1080 dev->stats.collisions++; in lance_interrupt()
1081 dev->stats.tx_packets++; in lance_interrupt()
1084 /* We must free the original skb if it's not a data-only copy in lance_interrupt()
1086 if (lp->tx_skbuff[entry]) { in lance_interrupt()
1087 dev_consume_skb_irq(lp->tx_skbuff[entry]); in lance_interrupt()
1088 lp->tx_skbuff[entry] = NULL; in lance_interrupt()
1094 if (lp->cur_tx - dirty_tx >= TX_RING_SIZE) { in lance_interrupt()
1095 printk("out-of-sync dirty pointer, %d vs. %d, full=%s.\n", in lance_interrupt()
1096 dirty_tx, lp->cur_tx, in lance_interrupt()
1104 dirty_tx > lp->cur_tx - TX_RING_SIZE + 2) in lance_interrupt()
1107 lp->dirty_tx = dirty_tx; in lance_interrupt()
1112 dev->stats.tx_errors++; /* Tx babble. */ in lance_interrupt()
1114 dev->stats.rx_errors++; /* Missed a Rx frame. */ in lance_interrupt()
1117 dev->name, csr0); in lance_interrupt()
1124 outw(0x0000, dev->base_addr + LANCE_ADDR); in lance_interrupt()
1125 outw(0x0004, dev->base_addr + LANCE_DATA); in lance_interrupt()
1131 outw(0x0000, dev->base_addr + LANCE_ADDR); in lance_interrupt()
1132 outw(0x7940, dev->base_addr + LANCE_DATA); in lance_interrupt()
1136 dev->name, inw(ioaddr + LANCE_ADDR), in lance_interrupt()
1137 inw(dev->base_addr + LANCE_DATA)); in lance_interrupt()
1139 spin_unlock (&lp->devlock); in lance_interrupt()
1146 struct lance_private *lp = dev->ml_priv; in lance_rx()
1147 int entry = lp->cur_rx & RX_RING_MOD_MASK; in lance_rx()
1151 while (lp->rx_ring[entry].base >= 0) { in lance_rx()
1152 int status = lp->rx_ring[entry].base >> 24; in lance_rx()
1156 <murf@perftech.com> to Russ Nelson: Even with full-sized in lance_rx()
1160 dev->stats.rx_errors++; /* end of a packet.*/ in lance_rx()
1162 dev->stats.rx_frame_errors++; in lance_rx()
1164 dev->stats.rx_over_errors++; in lance_rx()
1166 dev->stats.rx_crc_errors++; in lance_rx()
1168 dev->stats.rx_fifo_errors++; in lance_rx()
1169 lp->rx_ring[entry].base &= 0x03ffffff; in lance_rx()
1174 short pkt_len = (lp->rx_ring[entry].msg_length & 0xfff)-4; in lance_rx()
1179 printk("%s: Runt packet!\n",dev->name); in lance_rx()
1180 dev->stats.rx_errors++; in lance_rx()
1187 printk("%s: Memory squeeze, deferring packet.\n", dev->name); in lance_rx()
1189 if (lp->rx_ring[(entry+i) & RX_RING_MOD_MASK].base < 0) in lance_rx()
1192 if (i > RX_RING_SIZE -2) in lance_rx()
1194 dev->stats.rx_dropped++; in lance_rx()
1195 lp->rx_ring[entry].base |= 0x80000000; in lance_rx()
1196 lp->cur_rx++; in lance_rx()
1203 (unsigned char *)isa_bus_to_virt((lp->rx_ring[entry].base & 0x00ffffff)), in lance_rx()
1205 skb->protocol=eth_type_trans(skb,dev); in lance_rx()
1207 dev->stats.rx_packets++; in lance_rx()
1208 dev->stats.rx_bytes += pkt_len; in lance_rx()
1213 lp->rx_ring[entry].buf_length = -PKT_BUF_SZ; in lance_rx()
1214 lp->rx_ring[entry].base |= 0x80000000; in lance_rx()
1215 entry = (++lp->cur_rx) & RX_RING_MOD_MASK; in lance_rx()
1219 we should free one and mark stats->rx_dropped++. */ in lance_rx()
1227 int ioaddr = dev->base_addr; in lance_close()
1228 struct lance_private *lp = dev->ml_priv; in lance_close()
1232 if (chip_table[lp->chip_version].flags & LANCE_HAS_MISSED_FRAME) { in lance_close()
1234 dev->stats.rx_missed_errors = inw(ioaddr+LANCE_DATA); in lance_close()
1240 dev->name, inw(ioaddr+LANCE_DATA)); in lance_close()
1242 /* We stop the LANCE here -- it occasionally polls in lance_close()
1246 if (dev->dma != 4) in lance_close()
1249 disable_dma(dev->dma); in lance_close()
1252 free_irq(dev->irq, dev); in lance_close()
1261 struct lance_private *lp = dev->ml_priv; in lance_get_stats()
1263 if (chip_table[lp->chip_version].flags & LANCE_HAS_MISSED_FRAME) { in lance_get_stats()
1264 short ioaddr = dev->base_addr; in lance_get_stats()
1268 spin_lock_irqsave(&lp->devlock, flags); in lance_get_stats()
1271 dev->stats.rx_missed_errors = inw(ioaddr+LANCE_DATA); in lance_get_stats()
1273 spin_unlock_irqrestore(&lp->devlock, flags); in lance_get_stats()
1276 return &dev->stats; in lance_get_stats()
1284 short ioaddr = dev->base_addr; in set_multicast_list()
1289 if (dev->flags&IFF_PROMISC) { in set_multicast_list()
1296 if(dev->flags&IFF_ALLMULTI) in set_multicast_list()
1298 /* FIXIT: We don't use the multicast table, but rely on upper-layer filtering. */ in set_multicast_list()
1299 memset(multicast_table, (num_addrs == 0) ? 0 : -1, sizeof(multicast_table)); in set_multicast_list()