xref: /linux/drivers/net/ethernet/adaptec/starfire.c (revision c771600c6af14749609b49565ffb4cac2959710d)
1  /* starfire.c: Linux device driver for the Adaptec Starfire network adapter. */
2  /*
3  	Written 1998-2000 by Donald Becker.
4  
5  	Current maintainer is Ion Badulescu <ionut ta badula tod org>. Please
6  	send all bug reports to me, and not to Donald Becker, as this code
7  	has been heavily modified from Donald's original version.
8  
9  	This software may be used and distributed according to the terms of
10  	the GNU General Public License (GPL), incorporated herein by reference.
11  	Drivers based on or derived from this code fall under the GPL and must
12  	retain the authorship, copyright and license notice.  This file is not
13  	a complete program and may only be used when the entire operating
14  	system is licensed under the GPL.
15  
16  	The information below comes from Donald Becker's original driver:
17  
18  	The author may be reached as becker@scyld.com, or C/O
19  	Scyld Computing Corporation
20  	410 Severn Ave., Suite 210
21  	Annapolis MD 21403
22  
23  	Support and updates available at
24  	http://www.scyld.com/network/starfire.html
25  	[link no longer provides useful info -jgarzik]
26  
27  */
28  
29  #define DRV_NAME	"starfire"
30  
31  #include <linux/interrupt.h>
32  #include <linux/module.h>
33  #include <linux/kernel.h>
34  #include <linux/pci.h>
35  #include <linux/netdevice.h>
36  #include <linux/etherdevice.h>
37  #include <linux/init.h>
38  #include <linux/delay.h>
39  #include <linux/crc32.h>
40  #include <linux/ethtool.h>
41  #include <linux/mii.h>
42  #include <linux/if_vlan.h>
43  #include <linux/mm.h>
44  #include <linux/firmware.h>
45  #include <asm/processor.h>		/* Processor type for cache alignment. */
46  #include <linux/uaccess.h>
47  #include <asm/io.h>
48  
49  /*
50   * The current frame processor firmware fails to checksum a fragment
51   * of length 1. If and when this is fixed, the #define below can be removed.
52   */
53  #define HAS_BROKEN_FIRMWARE
54  
55  /*
56   * If using the broken firmware, data must be padded to the next 32-bit boundary.
57   */
58  #ifdef HAS_BROKEN_FIRMWARE
59  #define PADDING_MASK 3
60  #endif
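/*
 * In practice this means start_tx() pads checksummed frames up to the next
 * 32-bit boundary: for example, (len + PADDING_MASK) & ~PADDING_MASK rounds
 * a 61-byte frame up to 64 bytes before it is handed to the hardware.
 */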
61  
62  /*
63   * Define this if using the driver with the zero-copy patch
64   */
65  #define ZEROCOPY
66  
67  #if IS_ENABLED(CONFIG_VLAN_8021Q)
68  #define VLAN_SUPPORT
69  #endif
70  
71  /* The user-configurable values.
72     These may be modified when a driver module is loaded.*/
73  
74  /* Used for tuning interrupt latency vs. overhead. */
75  static int intr_latency;
76  static int small_frames;
77  
78  static int debug = 1;			/* 1 normal messages, 0 quiet .. 7 verbose. */
79  static int max_interrupt_work = 20;
80  static int mtu;
81  /* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
82     The Starfire has a 512 element hash table based on the Ethernet CRC. */
83  static const int multicast_filter_limit = 512;
84  /* Whether to do TCP/UDP checksums in hardware */
85  static int enable_hw_cksum = 1;
86  
87  #define PKT_BUF_SZ	1536		/* Size of each temporary Rx buffer.*/
88  /*
89   * Set the copy breakpoint for the copy-only-tiny-frames scheme.
90   * Setting to > 1518 effectively disables this feature.
91   *
92   * NOTE:
93   * The ia64 doesn't allow unaligned loads, even of integers misaligned on
94   * a 2-byte boundary. Thus we always force copying of packets, as the
95   * Starfire doesn't allow misaligned DMAs ;-(
96   * 23/10/2000 - Jes
97   *
98   * The Alpha and the Sparc don't like unaligned loads, either. On Sparc64,
99   * at least, having unaligned frames leads to a rather serious performance
100   * penalty. -Ion
101   */
102  #if defined(__ia64__) || defined(__alpha__) || defined(__sparc__)
103  static int rx_copybreak = PKT_BUF_SZ;
104  #else
105  static int rx_copybreak /* = 0 */;
106  #endif
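/*
 * Example: with rx_copybreak set to 256, __netdev_rx() copies frames shorter
 * than 256 bytes into a freshly allocated skb (leaving the ring buffer in
 * place), while larger frames are passed up in the original ring skb and the
 * slot is refilled later by refill_rx_ring().
 */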
107  
108  /* PCI DMA burst size -- on sparc64 we want to force it to 64 bytes, on the others the default of 128 is fine. */
109  #ifdef __sparc__
110  #define DMA_BURST_SIZE 64
111  #else
112  #define DMA_BURST_SIZE 128
113  #endif
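/*
 * The burst size is programmed in 32-byte units, i.e. DMA_BURST_SIZE / 32 is
 * what actually gets written to the Rx/Tx DMA control registers in
 * netdev_open().
 */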
114  
115  /* Operational parameters that are set at compile time. */
116  
117  /* The "native" ring sizes are either 256 or 2048.
118     However in some modes a descriptor may be marked to wrap the ring earlier.
119  */
120  #define RX_RING_SIZE	256
121  #define TX_RING_SIZE	32
122  /* The completion queues are fixed at 1024 entries i.e. 4K or 8KB. */
123  #define DONE_Q_SIZE	1024
124  /* All queues must be aligned on a 256-byte boundary */
125  #define QUEUE_ALIGN	256
126  
127  #if RX_RING_SIZE > 256
128  #define RX_Q_ENTRIES Rx2048QEntries
129  #else
130  #define RX_Q_ENTRIES Rx256QEntries
131  #endif
132  
133  /* Operational parameters that usually are not changed. */
134  /* Time in jiffies before concluding the transmitter is hung. */
135  #define TX_TIMEOUT	(2 * HZ)
136  
137  #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
138  /* 64-bit dma_addr_t */
139  #define ADDR_64BITS	/* This chip uses 64 bit addresses. */
140  #define netdrv_addr_t __le64
141  #define cpu_to_dma(x) cpu_to_le64(x)
142  #define dma_to_cpu(x) le64_to_cpu(x)
143  #define RX_DESC_Q_ADDR_SIZE RxDescQAddr64bit
144  #define TX_DESC_Q_ADDR_SIZE TxDescQAddr64bit
145  #define RX_COMPL_Q_ADDR_SIZE RxComplQAddr64bit
146  #define TX_COMPL_Q_ADDR_SIZE TxComplQAddr64bit
147  #define RX_DESC_ADDR_SIZE RxDescAddr64bit
148  #else  /* 32-bit dma_addr_t */
149  #define netdrv_addr_t __le32
150  #define cpu_to_dma(x) cpu_to_le32(x)
151  #define dma_to_cpu(x) le32_to_cpu(x)
152  #define RX_DESC_Q_ADDR_SIZE RxDescQAddr32bit
153  #define TX_DESC_Q_ADDR_SIZE TxDescQAddr32bit
154  #define RX_COMPL_Q_ADDR_SIZE RxComplQAddr32bit
155  #define TX_COMPL_Q_ADDR_SIZE TxComplQAddr32bit
156  #define RX_DESC_ADDR_SIZE RxDescAddr32bit
157  #endif
158  
159  #define skb_first_frag_len(skb)	skb_headlen(skb)
160  #define skb_num_frags(skb) (skb_shinfo(skb)->nr_frags + 1)
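/*
 * skb_num_frags() counts the linear header plus each page fragment, so e.g. a
 * zero-copy skb with two page fragments occupies three Tx descriptor slots in
 * start_tx().
 */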
161  
162  /* Firmware names */
163  #define FIRMWARE_RX	"adaptec/starfire_rx.bin"
164  #define FIRMWARE_TX	"adaptec/starfire_tx.bin"
165  
166  MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
167  MODULE_DESCRIPTION("Adaptec Starfire Ethernet driver");
168  MODULE_LICENSE("GPL");
169  MODULE_FIRMWARE(FIRMWARE_RX);
170  MODULE_FIRMWARE(FIRMWARE_TX);
171  
172  module_param(max_interrupt_work, int, 0);
173  module_param(mtu, int, 0);
174  module_param(debug, int, 0);
175  module_param(rx_copybreak, int, 0);
176  module_param(intr_latency, int, 0);
177  module_param(small_frames, int, 0);
178  module_param(enable_hw_cksum, int, 0);
179  MODULE_PARM_DESC(max_interrupt_work, "Maximum events handled per interrupt");
180  MODULE_PARM_DESC(mtu, "MTU (all boards)");
181  MODULE_PARM_DESC(debug, "Debug level (0-6)");
182  MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames");
183  MODULE_PARM_DESC(intr_latency, "Maximum interrupt latency, in microseconds");
184  MODULE_PARM_DESC(small_frames, "Maximum size of receive frames that bypass interrupt latency (0,64,128,256,512)");
185  MODULE_PARM_DESC(enable_hw_cksum, "Enable/disable hardware cksum support (0/1)");
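/*
 * All of the above may also be given at module load time, e.g. (illustrative
 * values only):
 *	modprobe starfire intr_latency=250 small_frames=256 enable_hw_cksum=1
 */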
186  
187  /*
188  				Theory of Operation
189  
190  I. Board Compatibility
191  
192  This driver is for the Adaptec 6915 "Starfire" 64 bit PCI Ethernet adapter.
193  
194  II. Board-specific settings
195  
196  III. Driver operation
197  
198  IIIa. Ring buffers
199  
200  The Starfire hardware uses multiple fixed-size descriptor queues/rings.  The
201  ring sizes are fixed by the hardware, but may optionally be wrapped
202  earlier by the END bit in the descriptor.
203  This driver uses that hardware queue size for the Rx ring, where a large
204  number of entries has no ill effect beyond increasing the potential backlog.
205  The Tx ring is wrapped with the END bit, since a large hardware Tx queue
206  disables the queue layer priority ordering and we have no mechanism to
207  utilize the hardware two-level priority queue.  When modifying the
208  RX/TX_RING_SIZE pay close attention to page sizes and the ring-empty warning
209  levels.
210  
211  IIIb/c. Transmit/Receive Structure
212  
213  See the Adaptec manual for the many possible structures, and options for
214  each structure.  There are far too many to document all of them here.
215  
216  For transmit this driver uses type 0/1 transmit descriptors (depending
217  on the 32/64 bitness of the architecture), and relies on automatic
218  minimum-length padding.  It does not use the completion queue
219  consumer index, but instead checks for non-zero status entries.
220  
221  For receive this driver uses type 2/3 receive descriptors.  The driver
222  allocates full frame size skbuffs for the Rx ring buffers, so all frames
223  should fit in a single descriptor.  The driver does not use the completion
224  queue consumer index, but instead checks for non-zero status entries.
225  
226  When an incoming frame is less than RX_COPYBREAK bytes long, a fresh skbuff
227  is allocated and the frame is copied to the new skbuff.  When the incoming
228  frame is larger, the skbuff is passed directly up the protocol stack.
229  Buffers consumed this way are replaced by newly allocated skbuffs in a later
230  phase of receive.
231  
232  A notable aspect of operation is that unaligned buffers are not permitted by
233  the Starfire hardware.  Thus the IP header at offset 14 in an ethernet frame
234  isn't longword aligned, which may cause problems on some machines,
235  e.g. Alpha and IA64. For these architectures, the driver is forced to copy
236  the frame into a new skbuff unconditionally. Copied frames are put into the
237  skbuff at an offset of "+2", thus 16-byte aligning the IP header.
238  
239  IIId. Synchronization
240  
241  The driver runs as two independent, single-threaded flows of control.  One
242  is the send-packet routine, which enforces single-threaded use by the
243  dev->tbusy flag.  The other thread is the interrupt handler, which is single
244  threaded by the hardware and interrupt handling software.
245  
246  The send packet thread has partial control over the Tx ring and the netif_queue
247  status. If the number of free Tx slots in the ring falls below a certain number
248  (currently hardcoded to 4), it signals the upper layer to stop the queue.
249  
250  The interrupt handler has exclusive control over the Rx ring and records stats
251  from the Tx ring.  After reaping the stats, it marks the Tx queue entry as
252  empty by incrementing the dirty_tx mark. Iff the netif_queue is stopped and the
253  number of free Tx slots is above the threshold, it signals the upper layer to
254  restart the queue.
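As a concrete example with the current TX_RING_SIZE of 32: start_tx() stops
the queue once fewer than 4 slots remain free (cur_tx - dirty_tx + 4 >
TX_RING_SIZE), and the interrupt handler wakes it again as soon as reaping Tx
completions brings the ring back under that threshold.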
255  
256  IV. Notes
257  
258  IVb. References
259  
260  The Adaptec Starfire manuals, available only from Adaptec.
261  http://www.scyld.com/expert/100mbps.html
262  http://www.scyld.com/expert/NWay.html
263  
264  IVc. Errata
265  
266  - StopOnPerr is broken, don't enable
267  - Hardware ethernet padding exposes random data, perform software padding
268    instead (unverified -- works correctly for all the hardware I have)
269  
270  */
271  
272  
273  
274  enum chip_capability_flags {CanHaveMII=1, };
275  
276  enum chipset {
277  	CH_6915 = 0,
278  };
279  
280  static const struct pci_device_id starfire_pci_tbl[] = {
281  	{ PCI_VDEVICE(ADAPTEC, 0x6915), CH_6915 },
282  	{ 0, }
283  };
284  MODULE_DEVICE_TABLE(pci, starfire_pci_tbl);
285  
286  /* A chip capabilities table, matching the CH_xxx entries in xxx_pci_tbl[] above. */
287  static const struct chip_info {
288  	const char *name;
289  	int drv_flags;
290  } netdrv_tbl[] = {
291  	{ "Adaptec Starfire 6915", CanHaveMII },
292  };
293  
294  
295  /* Offsets to the device registers.
296     Unlike software-only systems, device drivers interact with complex hardware.
297     It's not useful to define symbolic names for every register bit in the
298     device.  The name can only partially document the semantics and make
299     the driver longer and more difficult to read.
300     In general, only the important configuration values or bits changed
301     multiple times should be defined symbolically.
302  */
303  enum register_offsets {
304  	PCIDeviceConfig=0x50040, GenCtrl=0x50070, IntrTimerCtrl=0x50074,
305  	IntrClear=0x50080, IntrStatus=0x50084, IntrEnable=0x50088,
306  	MIICtrl=0x52000, TxStationAddr=0x50120, EEPROMCtrl=0x51000,
307  	GPIOCtrl=0x5008C, TxDescCtrl=0x50090,
308  	TxRingPtr=0x50098, HiPriTxRingPtr=0x50094, /* Low and High priority. */
309  	TxRingHiAddr=0x5009C,		/* 64 bit address extension. */
310  	TxProducerIdx=0x500A0, TxConsumerIdx=0x500A4,
311  	TxThreshold=0x500B0,
312  	CompletionHiAddr=0x500B4, TxCompletionAddr=0x500B8,
313  	RxCompletionAddr=0x500BC, RxCompletionQ2Addr=0x500C0,
314  	CompletionQConsumerIdx=0x500C4, RxDMACtrl=0x500D0,
315  	RxDescQCtrl=0x500D4, RxDescQHiAddr=0x500DC, RxDescQAddr=0x500E0,
316  	RxDescQIdx=0x500E8, RxDMAStatus=0x500F0, RxFilterMode=0x500F4,
317  	TxMode=0x55000, VlanType=0x55064,
318  	PerfFilterTable=0x56000, HashTable=0x56100,
319  	TxGfpMem=0x58000, RxGfpMem=0x5a000,
320  };
321  
322  /*
323   * Bits in the interrupt status/mask registers.
324   * Warning: setting Intr[Ab]NormalSummary in the IntrEnable register
325   * enables all the interrupt sources that are or'ed into those status bits.
326   */
327  enum intr_status_bits {
328  	IntrLinkChange=0xf0000000, IntrStatsMax=0x08000000,
329  	IntrAbnormalSummary=0x02000000, IntrGeneralTimer=0x01000000,
330  	IntrSoftware=0x800000, IntrRxComplQ1Low=0x400000,
331  	IntrTxComplQLow=0x200000, IntrPCI=0x100000,
332  	IntrDMAErr=0x080000, IntrTxDataLow=0x040000,
333  	IntrRxComplQ2Low=0x020000, IntrRxDescQ1Low=0x010000,
334  	IntrNormalSummary=0x8000, IntrTxDone=0x4000,
335  	IntrTxDMADone=0x2000, IntrTxEmpty=0x1000,
336  	IntrEarlyRxQ2=0x0800, IntrEarlyRxQ1=0x0400,
337  	IntrRxQ2Done=0x0200, IntrRxQ1Done=0x0100,
338  	IntrRxGFPDead=0x80, IntrRxDescQ2Low=0x40,
339  	IntrNoTxCsum=0x20, IntrTxBadID=0x10,
340  	IntrHiPriTxBadID=0x08, IntrRxGfp=0x04,
341  	IntrTxGfp=0x02, IntrPCIPad=0x01,
342  	/* not quite bits */
343  	IntrRxDone=IntrRxQ2Done | IntrRxQ1Done,
344  	IntrRxEmpty=IntrRxDescQ1Low | IntrRxDescQ2Low,
345  	IntrNormalMask=0xff00, IntrAbnormalMask=0x3ff00fe,
346  };
347  
348  /* Bits in the RxFilterMode register. */
349  enum rx_mode_bits {
350  	AcceptBroadcast=0x04, AcceptAllMulticast=0x02, AcceptAll=0x01,
351  	AcceptMulticast=0x10, PerfectFilter=0x40, HashFilter=0x30,
352  	PerfectFilterVlan=0x80, MinVLANPrio=0xE000, VlanMode=0x0200,
353  	WakeupOnGFP=0x0800,
354  };
355  
356  /* Bits in the TxMode register */
357  enum tx_mode_bits {
358  	MiiSoftReset=0x8000, MIILoopback=0x4000,
359  	TxFlowEnable=0x0800, RxFlowEnable=0x0400,
360  	PadEnable=0x04, FullDuplex=0x02, HugeFrame=0x01,
361  };
362  
363  /* Bits in the TxDescCtrl register. */
364  enum tx_ctrl_bits {
365  	TxDescSpaceUnlim=0x00, TxDescSpace32=0x10, TxDescSpace64=0x20,
366  	TxDescSpace128=0x30, TxDescSpace256=0x40,
367  	TxDescType0=0x00, TxDescType1=0x01, TxDescType2=0x02,
368  	TxDescType3=0x03, TxDescType4=0x04,
369  	TxNoDMACompletion=0x08,
370  	TxDescQAddr64bit=0x80, TxDescQAddr32bit=0,
371  	TxHiPriFIFOThreshShift=24, TxPadLenShift=16,
372  	TxDMABurstSizeShift=8,
373  };
374  
375  /* Bits in the RxDescQCtrl register. */
376  enum rx_ctrl_bits {
377  	RxBufferLenShift=16, RxMinDescrThreshShift=0,
378  	RxPrefetchMode=0x8000, RxVariableQ=0x2000,
379  	Rx2048QEntries=0x4000, Rx256QEntries=0,
380  	RxDescAddr64bit=0x1000, RxDescAddr32bit=0,
381  	RxDescQAddr64bit=0x0100, RxDescQAddr32bit=0,
382  	RxDescSpace4=0x000, RxDescSpace8=0x100,
383  	RxDescSpace16=0x200, RxDescSpace32=0x300,
384  	RxDescSpace64=0x400, RxDescSpace128=0x500,
385  	RxConsumerWrEn=0x80,
386  };
387  
388  /* Bits in the RxDMACtrl register. */
389  enum rx_dmactrl_bits {
390  	RxReportBadFrames=0x80000000, RxDMAShortFrames=0x40000000,
391  	RxDMABadFrames=0x20000000, RxDMACrcErrorFrames=0x10000000,
392  	RxDMAControlFrame=0x08000000, RxDMAPauseFrame=0x04000000,
393  	RxChecksumIgnore=0, RxChecksumRejectTCPUDP=0x02000000,
394  	RxChecksumRejectTCPOnly=0x01000000,
395  	RxCompletionQ2Enable=0x800000,
396  	RxDMAQ2Disable=0, RxDMAQ2FPOnly=0x100000,
397  	RxDMAQ2SmallPkt=0x200000, RxDMAQ2HighPrio=0x300000,
398  	RxDMAQ2NonIP=0x400000,
399  	RxUseBackupQueue=0x080000, RxDMACRC=0x040000,
400  	RxEarlyIntThreshShift=12, RxHighPrioThreshShift=8,
401  	RxBurstSizeShift=0,
402  };
403  
404  /* Bits in the RxCompletionAddr register */
405  enum rx_compl_bits {
406  	RxComplQAddr64bit=0x80, RxComplQAddr32bit=0,
407  	RxComplProducerWrEn=0x40,
408  	RxComplType0=0x00, RxComplType1=0x10,
409  	RxComplType2=0x20, RxComplType3=0x30,
410  	RxComplThreshShift=0,
411  };
412  
413  /* Bits in the TxCompletionAddr register */
414  enum tx_compl_bits {
415  	TxComplQAddr64bit=0x80, TxComplQAddr32bit=0,
416  	TxComplProducerWrEn=0x40,
417  	TxComplIntrStatus=0x20,
418  	CommonQueueMode=0x10,
419  	TxComplThreshShift=0,
420  };
421  
422  /* Bits in the GenCtrl register */
423  enum gen_ctrl_bits {
424  	RxEnable=0x05, TxEnable=0x0a,
425  	RxGFPEnable=0x10, TxGFPEnable=0x20,
426  };
427  
428  /* Bits in the IntrTimerCtrl register */
429  enum intr_ctrl_bits {
430  	Timer10X=0x800, EnableIntrMasking=0x60, SmallFrameBypass=0x100,
431  	SmallFrame64=0, SmallFrame128=0x200, SmallFrame256=0x400, SmallFrame512=0x600,
432  	IntrLatencyMask=0x1f,
433  };
434  
435  /* The Rx and Tx buffer descriptors. */
436  struct starfire_rx_desc {
437  	netdrv_addr_t rxaddr;
438  };
439  enum rx_desc_bits {
440  	RxDescValid=1, RxDescEndRing=2,
441  };
442  
443  /* Completion queue entry. */
444  struct csum_rx_done_desc {
445  	__le32 status;			/* Low 16 bits is length. */
446  	__le16 csum;			/* Partial checksum */
447  	__le16 status2;
448  };
449  struct full_rx_done_desc {
450  	__le32 status;			/* Low 16 bits is length. */
451  	__le16 status3;
452  	__le16 status2;
453  	__le16 vlanid;
454  	__le16 csum;			/* partial checksum */
455  	__le32 timestamp;
456  };
457  /* XXX: this is ugly and I'm not sure it's worth the trouble -Ion */
458  #ifdef VLAN_SUPPORT
459  typedef struct full_rx_done_desc rx_done_desc;
460  #define RxComplType RxComplType3
461  #else  /* not VLAN_SUPPORT */
462  typedef struct csum_rx_done_desc rx_done_desc;
463  #define RxComplType RxComplType2
464  #endif /* not VLAN_SUPPORT */
465  
466  enum rx_done_bits {
467  	RxOK=0x20000000, RxFIFOErr=0x10000000, RxBufQ2=0x08000000,
468  };
469  
470  /* Type 1 Tx descriptor. */
471  struct starfire_tx_desc_1 {
472  	__le32 status;			/* Upper bits are status, lower 16 length. */
473  	__le32 addr;
474  };
475  
476  /* Type 2 Tx descriptor. */
477  struct starfire_tx_desc_2 {
478  	__le32 status;			/* Upper bits are status, lower 16 length. */
479  	__le32 reserved;
480  	__le64 addr;
481  };
482  
483  #ifdef ADDR_64BITS
484  typedef struct starfire_tx_desc_2 starfire_tx_desc;
485  #define TX_DESC_TYPE TxDescType2
486  #else  /* not ADDR_64BITS */
487  typedef struct starfire_tx_desc_1 starfire_tx_desc;
488  #define TX_DESC_TYPE TxDescType1
489  #endif /* not ADDR_64BITS */
490  #define TX_DESC_SPACING TxDescSpaceUnlim
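/*
 * Note: the type 2 descriptor is 16 bytes and the type 1 descriptor 8 bytes;
 * which one is used follows ADDR_64BITS, and the size difference shows up in
 * the producer index arithmetic in start_tx().
 */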
491  
492  enum tx_desc_bits {
493  	TxDescID=0xB0000000,
494  	TxCRCEn=0x01000000, TxDescIntr=0x08000000,
495  	TxRingWrap=0x04000000, TxCalTCP=0x02000000,
496  };
497  struct tx_done_desc {
498  	__le32 status;			/* timestamp, index. */
499  #if 0
500  	__le32 intrstatus;		/* interrupt status */
501  #endif
502  };
503  
504  struct rx_ring_info {
505  	struct sk_buff *skb;
506  	dma_addr_t mapping;
507  };
508  struct tx_ring_info {
509  	struct sk_buff *skb;
510  	dma_addr_t mapping;
511  	unsigned int used_slots;
512  };
513  
514  #define PHY_CNT		2
515  struct netdev_private {
516  	/* Descriptor rings first for alignment. */
517  	struct starfire_rx_desc *rx_ring;
518  	starfire_tx_desc *tx_ring;
519  	dma_addr_t rx_ring_dma;
520  	dma_addr_t tx_ring_dma;
521  	/* The addresses of rx/tx-in-place skbuffs. */
522  	struct rx_ring_info rx_info[RX_RING_SIZE];
523  	struct tx_ring_info tx_info[TX_RING_SIZE];
524  	/* Pointers to completion queues (full pages). */
525  	rx_done_desc *rx_done_q;
526  	dma_addr_t rx_done_q_dma;
527  	unsigned int rx_done;
528  	struct tx_done_desc *tx_done_q;
529  	dma_addr_t tx_done_q_dma;
530  	unsigned int tx_done;
531  	struct napi_struct napi;
532  	struct net_device *dev;
533  	struct pci_dev *pci_dev;
534  #ifdef VLAN_SUPPORT
535  	unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
536  #endif
537  	void *queue_mem;
538  	dma_addr_t queue_mem_dma;
539  	size_t queue_mem_size;
540  
541  	/* Frequently used values: keep some adjacent for cache effect. */
542  	spinlock_t lock;
543  	unsigned int cur_rx, dirty_rx;	/* Producer/consumer ring indices */
544  	unsigned int cur_tx, dirty_tx, reap_tx;
545  	unsigned int rx_buf_sz;		/* Based on MTU+slack. */
546  	/* These values keep track of the transceiver/media in use. */
547  	int speed100;			/* Set if speed == 100MBit. */
548  	u32 tx_mode;
549  	u32 intr_timer_ctrl;
550  	u8 tx_threshold;
551  	/* MII transceiver section. */
552  	struct mii_if_info mii_if;		/* MII lib hooks/info */
553  	int phy_cnt;			/* MII device addresses. */
554  	unsigned char phys[PHY_CNT];	/* MII device addresses. */
555  	void __iomem *base;
556  };
557  
558  
559  static int	mdio_read(struct net_device *dev, int phy_id, int location);
560  static void	mdio_write(struct net_device *dev, int phy_id, int location, int value);
561  static int	netdev_open(struct net_device *dev);
562  static void	check_duplex(struct net_device *dev);
563  static void	tx_timeout(struct net_device *dev, unsigned int txqueue);
564  static void	init_ring(struct net_device *dev);
565  static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev);
566  static irqreturn_t intr_handler(int irq, void *dev_instance);
567  static void	netdev_error(struct net_device *dev, int intr_status);
568  static int	__netdev_rx(struct net_device *dev, int *quota);
569  static int	netdev_poll(struct napi_struct *napi, int budget);
570  static void	refill_rx_ring(struct net_device *dev);
571  static void	netdev_error(struct net_device *dev, int intr_status);
572  static void	set_rx_mode(struct net_device *dev);
573  static struct net_device_stats *get_stats(struct net_device *dev);
574  static int	netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
575  static int	netdev_close(struct net_device *dev);
576  static void	netdev_media_change(struct net_device *dev);
577  static const struct ethtool_ops ethtool_ops;
578  
579  
580  #ifdef VLAN_SUPPORT
581  static int netdev_vlan_rx_add_vid(struct net_device *dev,
582  				  __be16 proto, u16 vid)
583  {
584  	struct netdev_private *np = netdev_priv(dev);
585  
586  	spin_lock(&np->lock);
587  	if (debug > 1)
588  		printk("%s: Adding vlanid %d to vlan filter\n", dev->name, vid);
589  	set_bit(vid, np->active_vlans);
590  	set_rx_mode(dev);
591  	spin_unlock(&np->lock);
592  
593  	return 0;
594  }
595  
596  static int netdev_vlan_rx_kill_vid(struct net_device *dev,
597  				   __be16 proto, u16 vid)
598  {
599  	struct netdev_private *np = netdev_priv(dev);
600  
601  	spin_lock(&np->lock);
602  	if (debug > 1)
603  		printk("%s: removing vlanid %d from vlan filter\n", dev->name, vid);
604  	clear_bit(vid, np->active_vlans);
605  	set_rx_mode(dev);
606  	spin_unlock(&np->lock);
607  
608  	return 0;
609  }
610  #endif /* VLAN_SUPPORT */
611  
612  
613  static const struct net_device_ops netdev_ops = {
614  	.ndo_open		= netdev_open,
615  	.ndo_stop		= netdev_close,
616  	.ndo_start_xmit		= start_tx,
617  	.ndo_tx_timeout		= tx_timeout,
618  	.ndo_get_stats		= get_stats,
619  	.ndo_set_rx_mode	= set_rx_mode,
620  	.ndo_eth_ioctl		= netdev_ioctl,
621  	.ndo_set_mac_address	= eth_mac_addr,
622  	.ndo_validate_addr	= eth_validate_addr,
623  #ifdef VLAN_SUPPORT
624  	.ndo_vlan_rx_add_vid	= netdev_vlan_rx_add_vid,
625  	.ndo_vlan_rx_kill_vid	= netdev_vlan_rx_kill_vid,
626  #endif
627  };
628  
629  static int starfire_init_one(struct pci_dev *pdev,
630  			     const struct pci_device_id *ent)
631  {
632  	struct device *d = &pdev->dev;
633  	struct netdev_private *np;
634  	int i, irq, chip_idx = ent->driver_data;
635  	struct net_device *dev;
636  	u8 addr[ETH_ALEN];
637  	long ioaddr;
638  	void __iomem *base;
639  	int drv_flags, io_size;
640  	int boguscnt;
641  
642  	if (pci_enable_device (pdev))
643  		return -EIO;
644  
645  	ioaddr = pci_resource_start(pdev, 0);
646  	io_size = pci_resource_len(pdev, 0);
647  	if (!ioaddr || ((pci_resource_flags(pdev, 0) & IORESOURCE_MEM) == 0)) {
648  		dev_err(d, "no PCI MEM resources, aborting\n");
649  		return -ENODEV;
650  	}
651  
652  	dev = alloc_etherdev(sizeof(*np));
653  	if (!dev)
654  		return -ENOMEM;
655  
656  	SET_NETDEV_DEV(dev, &pdev->dev);
657  
658  	irq = pdev->irq;
659  
660  	if (pci_request_regions (pdev, DRV_NAME)) {
661  		dev_err(d, "cannot reserve PCI resources, aborting\n");
662  		goto err_out_free_netdev;
663  	}
664  
665  	base = ioremap(ioaddr, io_size);
666  	if (!base) {
667  		dev_err(d, "cannot remap %#x @ %#lx, aborting\n",
668  			io_size, ioaddr);
669  		goto err_out_free_res;
670  	}
671  
672  	pci_set_master(pdev);
673  
674  	/* enable MWI -- it vastly improves Rx performance on sparc64 */
675  	pci_try_set_mwi(pdev);
676  
677  #ifdef ZEROCOPY
678  	/* Starfire can do TCP/UDP checksumming */
679  	if (enable_hw_cksum)
680  		dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
681  #endif /* ZEROCOPY */
682  
683  #ifdef VLAN_SUPPORT
684  	dev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;
685  #endif /* VLAN_SUPPORT */
686  #ifdef ADDR_64BITS
687  	dev->features |= NETIF_F_HIGHDMA;
688  #endif /* ADDR_64BITS */
689  
690  	/* Serial EEPROM reads are hidden by the hardware. */
691  	for (i = 0; i < 6; i++)
692  		addr[i] = readb(base + EEPROMCtrl + 20 - i);
693  	eth_hw_addr_set(dev, addr);
694  
695  #if ! defined(final_version) /* Dump the EEPROM contents during development. */
696  	if (debug > 4)
697  		for (i = 0; i < 0x20; i++)
698  			printk("%2.2x%s",
699  			       (unsigned int)readb(base + EEPROMCtrl + i),
700  			       i % 16 != 15 ? " " : "\n");
701  #endif
702  
703  	/* Issue soft reset */
704  	writel(MiiSoftReset, base + TxMode);
705  	udelay(1000);
706  	writel(0, base + TxMode);
707  
708  	/* Reset the chip to erase previous misconfiguration. */
709  	writel(1, base + PCIDeviceConfig);
710  	boguscnt = 1000;
711  	while (--boguscnt > 0) {
712  		udelay(10);
713  		if ((readl(base + PCIDeviceConfig) & 1) == 0)
714  			break;
715  	}
716  	if (boguscnt == 0)
717  		printk("%s: chipset reset never completed!\n", dev->name);
718  	/* wait a little longer */
719  	udelay(1000);
720  
721  	np = netdev_priv(dev);
722  	np->dev = dev;
723  	np->base = base;
724  	spin_lock_init(&np->lock);
725  	pci_set_drvdata(pdev, dev);
726  
727  	np->pci_dev = pdev;
728  
729  	np->mii_if.dev = dev;
730  	np->mii_if.mdio_read = mdio_read;
731  	np->mii_if.mdio_write = mdio_write;
732  	np->mii_if.phy_id_mask = 0x1f;
733  	np->mii_if.reg_num_mask = 0x1f;
734  
735  	drv_flags = netdrv_tbl[chip_idx].drv_flags;
736  
737  	np->speed100 = 1;
738  
739  	/* timer resolution is 128 * 0.8us */
740  	np->intr_timer_ctrl = (((intr_latency * 10) / 1024) & IntrLatencyMask) |
741  		Timer10X | EnableIntrMasking;
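	/*
	 * Each timer unit is therefore roughly 102.4 us (128 * 0.8 us), so
	 * e.g. intr_latency=250 is rounded down to 2 units, about 205 us.
	 */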
742  
743  	if (small_frames > 0) {
744  		np->intr_timer_ctrl |= SmallFrameBypass;
745  		switch (small_frames) {
746  		case 1 ... 64:
747  			np->intr_timer_ctrl |= SmallFrame64;
748  			break;
749  		case 65 ... 128:
750  			np->intr_timer_ctrl |= SmallFrame128;
751  			break;
752  		case 129 ... 256:
753  			np->intr_timer_ctrl |= SmallFrame256;
754  			break;
755  		default:
756  			np->intr_timer_ctrl |= SmallFrame512;
757  			if (small_frames > 512)
758  				printk("Adjusting small_frames down to 512\n");
759  			break;
760  		}
761  	}
762  
763  	dev->netdev_ops = &netdev_ops;
764  	dev->watchdog_timeo = TX_TIMEOUT;
765  	dev->ethtool_ops = &ethtool_ops;
766  
767  	netif_napi_add_weight(dev, &np->napi, netdev_poll, max_interrupt_work);
768  
769  	if (mtu)
770  		dev->mtu = mtu;
771  
772  	if (register_netdev(dev))
773  		goto err_out_cleardev;
774  
775  	printk(KERN_INFO "%s: %s at %p, %pM, IRQ %d.\n",
776  	       dev->name, netdrv_tbl[chip_idx].name, base,
777  	       dev->dev_addr, irq);
778  
779  	if (drv_flags & CanHaveMII) {
780  		int phy, phy_idx = 0;
781  		int mii_status;
782  		for (phy = 0; phy < 32 && phy_idx < PHY_CNT; phy++) {
783  			mdio_write(dev, phy, MII_BMCR, BMCR_RESET);
784  			msleep(100);
785  			boguscnt = 1000;
786  			while (--boguscnt > 0)
787  				if ((mdio_read(dev, phy, MII_BMCR) & BMCR_RESET) == 0)
788  					break;
789  			if (boguscnt == 0) {
790  				printk("%s: PHY#%d reset never completed!\n", dev->name, phy);
791  				continue;
792  			}
793  			mii_status = mdio_read(dev, phy, MII_BMSR);
794  			if (mii_status != 0) {
795  				np->phys[phy_idx++] = phy;
796  				np->mii_if.advertising = mdio_read(dev, phy, MII_ADVERTISE);
797  				printk(KERN_INFO "%s: MII PHY found at address %d, status "
798  					   "%#4.4x advertising %#4.4x.\n",
799  					   dev->name, phy, mii_status, np->mii_if.advertising);
800  				/* there can be only one PHY on-board */
801  				break;
802  			}
803  		}
804  		np->phy_cnt = phy_idx;
805  		if (np->phy_cnt > 0)
806  			np->mii_if.phy_id = np->phys[0];
807  		else
808  			memset(&np->mii_if, 0, sizeof(np->mii_if));
809  	}
810  
811  	printk(KERN_INFO "%s: scatter-gather and hardware TCP cksumming %s.\n",
812  	       dev->name, enable_hw_cksum ? "enabled" : "disabled");
813  	return 0;
814  
815  err_out_cleardev:
816  	iounmap(base);
817  err_out_free_res:
818  	pci_release_regions (pdev);
819  err_out_free_netdev:
820  	free_netdev(dev);
821  	return -ENODEV;
822  }
823  
824  
825  /* Read the MII Management Data I/O (MDIO) interfaces. */
826  static int mdio_read(struct net_device *dev, int phy_id, int location)
827  {
828  	struct netdev_private *np = netdev_priv(dev);
829  	void __iomem *mdio_addr = np->base + MIICtrl + (phy_id<<7) + (location<<2);
830  	int result, boguscnt=1000;
831  	/* ??? Should we add a busy-wait here? */
832  	do {
833  		result = readl(mdio_addr);
834  	} while ((result & 0xC0000000) != 0x80000000 && --boguscnt > 0);
835  	if (boguscnt == 0)
836  		return 0;
837  	if ((result & 0xffff) == 0xffff)
838  		return 0;
839  	return result & 0xffff;
840  }
841  
842  
843  static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
844  {
845  	struct netdev_private *np = netdev_priv(dev);
846  	void __iomem *mdio_addr = np->base + MIICtrl + (phy_id<<7) + (location<<2);
847  	writel(value, mdio_addr);
848  	/* The busy-wait will occur before a read. */
849  }
850  
851  
852  static int netdev_open(struct net_device *dev)
853  {
854  	const struct firmware *fw_rx, *fw_tx;
855  	const __be32 *fw_rx_data, *fw_tx_data;
856  	struct netdev_private *np = netdev_priv(dev);
857  	void __iomem *ioaddr = np->base;
858  	const int irq = np->pci_dev->irq;
859  	int i, retval;
860  	size_t tx_size, rx_size;
861  	size_t tx_done_q_size, rx_done_q_size, tx_ring_size, rx_ring_size;
862  
863  	/* Do we ever need to reset the chip??? */
864  
865  	retval = request_irq(irq, intr_handler, IRQF_SHARED, dev->name, dev);
866  	if (retval)
867  		return retval;
868  
869  	/* Disable the Rx and Tx, and reset the chip. */
870  	writel(0, ioaddr + GenCtrl);
871  	writel(1, ioaddr + PCIDeviceConfig);
872  	if (debug > 1)
873  		printk(KERN_DEBUG "%s: netdev_open() irq %d.\n",
874  		       dev->name, irq);
875  
876  	/* Allocate the various queues. */
877  	if (!np->queue_mem) {
878  		tx_done_q_size = ((sizeof(struct tx_done_desc) * DONE_Q_SIZE + QUEUE_ALIGN - 1) / QUEUE_ALIGN) * QUEUE_ALIGN;
879  		rx_done_q_size = ((sizeof(rx_done_desc) * DONE_Q_SIZE + QUEUE_ALIGN - 1) / QUEUE_ALIGN) * QUEUE_ALIGN;
880  		tx_ring_size = ((sizeof(starfire_tx_desc) * TX_RING_SIZE + QUEUE_ALIGN - 1) / QUEUE_ALIGN) * QUEUE_ALIGN;
881  		rx_ring_size = sizeof(struct starfire_rx_desc) * RX_RING_SIZE;
882  		np->queue_mem_size = tx_done_q_size + rx_done_q_size + tx_ring_size + rx_ring_size;
883  		np->queue_mem = dma_alloc_coherent(&np->pci_dev->dev,
884  						   np->queue_mem_size,
885  						   &np->queue_mem_dma, GFP_ATOMIC);
886  		if (np->queue_mem == NULL) {
887  			free_irq(irq, dev);
888  			return -ENOMEM;
889  		}
890  
891  		np->tx_done_q     = np->queue_mem;
892  		np->tx_done_q_dma = np->queue_mem_dma;
893  		np->rx_done_q     = (void *) np->tx_done_q + tx_done_q_size;
894  		np->rx_done_q_dma = np->tx_done_q_dma + tx_done_q_size;
895  		np->tx_ring       = (void *) np->rx_done_q + rx_done_q_size;
896  		np->tx_ring_dma   = np->rx_done_q_dma + rx_done_q_size;
897  		np->rx_ring       = (void *) np->tx_ring + tx_ring_size;
898  		np->rx_ring_dma   = np->tx_ring_dma + tx_ring_size;
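		/*
		 * Resulting layout of the single coherent allocation:
		 * [ tx_done_q | rx_done_q | tx_ring | rx_ring ], with each of
		 * the first three regions rounded up to QUEUE_ALIGN bytes.
		 */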
899  	}
900  
901  	/* Start with no carrier, it gets adjusted later */
902  	netif_carrier_off(dev);
903  	init_ring(dev);
904  	/* Set the size of the Rx buffers. */
905  	writel((np->rx_buf_sz << RxBufferLenShift) |
906  	       (0 << RxMinDescrThreshShift) |
907  	       RxPrefetchMode | RxVariableQ |
908  	       RX_Q_ENTRIES |
909  	       RX_DESC_Q_ADDR_SIZE | RX_DESC_ADDR_SIZE |
910  	       RxDescSpace4,
911  	       ioaddr + RxDescQCtrl);
912  
913  	/* Set up the Rx DMA controller. */
914  	writel(RxChecksumIgnore |
915  	       (0 << RxEarlyIntThreshShift) |
916  	       (6 << RxHighPrioThreshShift) |
917  	       ((DMA_BURST_SIZE / 32) << RxBurstSizeShift),
918  	       ioaddr + RxDMACtrl);
919  
920  	/* Set Tx descriptor */
921  	writel((2 << TxHiPriFIFOThreshShift) |
922  	       (0 << TxPadLenShift) |
923  	       ((DMA_BURST_SIZE / 32) << TxDMABurstSizeShift) |
924  	       TX_DESC_Q_ADDR_SIZE |
925  	       TX_DESC_SPACING | TX_DESC_TYPE,
926  	       ioaddr + TxDescCtrl);
927  
928  	writel( (np->queue_mem_dma >> 16) >> 16, ioaddr + RxDescQHiAddr);
929  	writel( (np->queue_mem_dma >> 16) >> 16, ioaddr + TxRingHiAddr);
930  	writel( (np->queue_mem_dma >> 16) >> 16, ioaddr + CompletionHiAddr);
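	/*
	 * The double 16-bit shift yields the upper 32 bits of the queue base
	 * address while remaining valid when dma_addr_t is only 32 bits wide
	 * (a plain ">> 32" would be undefined in that case).
	 */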
931  	writel(np->rx_ring_dma, ioaddr + RxDescQAddr);
932  	writel(np->tx_ring_dma, ioaddr + TxRingPtr);
933  
934  	writel(np->tx_done_q_dma, ioaddr + TxCompletionAddr);
935  	writel(np->rx_done_q_dma |
936  	       RxComplType |
937  	       (0 << RxComplThreshShift),
938  	       ioaddr + RxCompletionAddr);
939  
940  	if (debug > 1)
941  		printk(KERN_DEBUG "%s: Filling in the station address.\n", dev->name);
942  
943  	/* Fill both the Tx SA register and the Rx perfect filter. */
944  	for (i = 0; i < 6; i++)
945  		writeb(dev->dev_addr[i], ioaddr + TxStationAddr + 5 - i);
946  	/* The first entry is special because it bypasses the VLAN filter.
947  	   Don't use it. */
948  	writew(0, ioaddr + PerfFilterTable);
949  	writew(0, ioaddr + PerfFilterTable + 4);
950  	writew(0, ioaddr + PerfFilterTable + 8);
951  	for (i = 1; i < 16; i++) {
952  		const __be16 *eaddrs = (const __be16 *)dev->dev_addr;
953  		void __iomem *setup_frm = ioaddr + PerfFilterTable + i * 16;
954  		writew(be16_to_cpu(eaddrs[2]), setup_frm); setup_frm += 4;
955  		writew(be16_to_cpu(eaddrs[1]), setup_frm); setup_frm += 4;
956  		writew(be16_to_cpu(eaddrs[0]), setup_frm); setup_frm += 8;
957  	}
958  
959  	/* Initialize other registers. */
960  	/* Configure the PCI bus bursts and FIFO thresholds. */
961  	np->tx_mode = TxFlowEnable|RxFlowEnable|PadEnable;	/* modified when link is up. */
962  	writel(MiiSoftReset | np->tx_mode, ioaddr + TxMode);
963  	udelay(1000);
964  	writel(np->tx_mode, ioaddr + TxMode);
965  	np->tx_threshold = 4;
966  	writel(np->tx_threshold, ioaddr + TxThreshold);
967  
968  	writel(np->intr_timer_ctrl, ioaddr + IntrTimerCtrl);
969  
970  	napi_enable(&np->napi);
971  
972  	netif_start_queue(dev);
973  
974  	if (debug > 1)
975  		printk(KERN_DEBUG "%s: Setting the Rx and Tx modes.\n", dev->name);
976  	set_rx_mode(dev);
977  
978  	np->mii_if.advertising = mdio_read(dev, np->phys[0], MII_ADVERTISE);
979  	check_duplex(dev);
980  
981  	/* Enable GPIO interrupts on link change */
982  	writel(0x0f00ff00, ioaddr + GPIOCtrl);
983  
984  	/* Set the interrupt mask */
985  	writel(IntrRxDone | IntrRxEmpty | IntrDMAErr |
986  	       IntrTxDMADone | IntrStatsMax | IntrLinkChange |
987  	       IntrRxGFPDead | IntrNoTxCsum | IntrTxBadID,
988  	       ioaddr + IntrEnable);
989  	/* Enable PCI interrupts. */
990  	writel(0x00800000 | readl(ioaddr + PCIDeviceConfig),
991  	       ioaddr + PCIDeviceConfig);
992  
993  #ifdef VLAN_SUPPORT
994  	/* Set VLAN type to 802.1q */
995  	writel(ETH_P_8021Q, ioaddr + VlanType);
996  #endif /* VLAN_SUPPORT */
997  
998  	retval = request_firmware(&fw_rx, FIRMWARE_RX, &np->pci_dev->dev);
999  	if (retval) {
1000  		printk(KERN_ERR "starfire: Failed to load firmware \"%s\"\n",
1001  		       FIRMWARE_RX);
1002  		goto out_init;
1003  	}
1004  	if (fw_rx->size % 4) {
1005  		printk(KERN_ERR "starfire: bogus length %zu in \"%s\"\n",
1006  		       fw_rx->size, FIRMWARE_RX);
1007  		retval = -EINVAL;
1008  		goto out_rx;
1009  	}
1010  	retval = request_firmware(&fw_tx, FIRMWARE_TX, &np->pci_dev->dev);
1011  	if (retval) {
1012  		printk(KERN_ERR "starfire: Failed to load firmware \"%s\"\n",
1013  		       FIRMWARE_TX);
1014  		goto out_rx;
1015  	}
1016  	if (fw_tx->size % 4) {
1017  		printk(KERN_ERR "starfire: bogus length %zu in \"%s\"\n",
1018  		       fw_tx->size, FIRMWARE_TX);
1019  		retval = -EINVAL;
1020  		goto out_tx;
1021  	}
1022  	fw_rx_data = (const __be32 *)&fw_rx->data[0];
1023  	fw_tx_data = (const __be32 *)&fw_tx->data[0];
1024  	rx_size = fw_rx->size / 4;
1025  	tx_size = fw_tx->size / 4;
1026  
1027  	/* Load Rx/Tx firmware into the frame processors */
1028  	for (i = 0; i < rx_size; i++)
1029  		writel(be32_to_cpup(&fw_rx_data[i]), ioaddr + RxGfpMem + i * 4);
1030  	for (i = 0; i < tx_size; i++)
1031  		writel(be32_to_cpup(&fw_tx_data[i]), ioaddr + TxGfpMem + i * 4);
1032  	if (enable_hw_cksum)
1033  		/* Enable the Rx and Tx units, and the Rx/Tx frame processors. */
1034  		writel(TxEnable|TxGFPEnable|RxEnable|RxGFPEnable, ioaddr + GenCtrl);
1035  	else
1036  		/* Enable the Rx and Tx units only. */
1037  		writel(TxEnable|RxEnable, ioaddr + GenCtrl);
1038  
1039  	if (debug > 1)
1040  		printk(KERN_DEBUG "%s: Done netdev_open().\n",
1041  		       dev->name);
1042  
1043  out_tx:
1044  	release_firmware(fw_tx);
1045  out_rx:
1046  	release_firmware(fw_rx);
1047  out_init:
1048  	if (retval)
1049  		netdev_close(dev);
1050  	return retval;
1051  }
1052  
1053  
1054  static void check_duplex(struct net_device *dev)
1055  {
1056  	struct netdev_private *np = netdev_priv(dev);
1057  	u16 reg0;
1058  	int silly_count = 1000;
1059  
1060  	mdio_write(dev, np->phys[0], MII_ADVERTISE, np->mii_if.advertising);
1061  	mdio_write(dev, np->phys[0], MII_BMCR, BMCR_RESET);
1062  	udelay(500);
1063  	while (--silly_count && mdio_read(dev, np->phys[0], MII_BMCR) & BMCR_RESET)
1064  		/* do nothing */;
1065  	if (!silly_count) {
1066  		printk("%s: MII reset failed!\n", dev->name);
1067  		return;
1068  	}
1069  
1070  	reg0 = mdio_read(dev, np->phys[0], MII_BMCR);
1071  
1072  	if (!np->mii_if.force_media) {
1073  		reg0 |= BMCR_ANENABLE | BMCR_ANRESTART;
1074  	} else {
1075  		reg0 &= ~(BMCR_ANENABLE | BMCR_ANRESTART);
1076  		if (np->speed100)
1077  			reg0 |= BMCR_SPEED100;
1078  		if (np->mii_if.full_duplex)
1079  			reg0 |= BMCR_FULLDPLX;
1080  		printk(KERN_DEBUG "%s: Link forced to %sMbit %s-duplex\n",
1081  		       dev->name,
1082  		       np->speed100 ? "100" : "10",
1083  		       np->mii_if.full_duplex ? "full" : "half");
1084  	}
1085  	mdio_write(dev, np->phys[0], MII_BMCR, reg0);
1086  }
1087  
1088  
1089  static void tx_timeout(struct net_device *dev, unsigned int txqueue)
1090  {
1091  	struct netdev_private *np = netdev_priv(dev);
1092  	void __iomem *ioaddr = np->base;
1093  	int old_debug;
1094  
1095  	printk(KERN_WARNING "%s: Transmit timed out, status %#8.8x, "
1096  	       "resetting...\n", dev->name, (int) readl(ioaddr + IntrStatus));
1097  
1098  	/* Perhaps we should reinitialize the hardware here. */
1099  
1100  	/*
1101  	 * Stop and restart the interface.
1102  	 * Cheat and increase the debug level temporarily.
1103  	 */
1104  	old_debug = debug;
1105  	debug = 2;
1106  	netdev_close(dev);
1107  	netdev_open(dev);
1108  	debug = old_debug;
1109  
1110  	/* Trigger an immediate transmit demand. */
1111  
1112  	netif_trans_update(dev); /* prevent tx timeout */
1113  	dev->stats.tx_errors++;
1114  	netif_wake_queue(dev);
1115  }
1116  
1117  
1118  /* Initialize the Rx and Tx rings, along with various 'dev' bits. */
1119  static void init_ring(struct net_device *dev)
1120  {
1121  	struct netdev_private *np = netdev_priv(dev);
1122  	int i;
1123  
1124  	np->cur_rx = np->cur_tx = np->reap_tx = 0;
1125  	np->dirty_rx = np->dirty_tx = np->rx_done = np->tx_done = 0;
1126  
1127  	np->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
1128  
1129  	/* Fill in the Rx buffers.  Handle allocation failure gracefully. */
1130  	for (i = 0; i < RX_RING_SIZE; i++) {
1131  		struct sk_buff *skb = netdev_alloc_skb(dev, np->rx_buf_sz);
1132  		np->rx_info[i].skb = skb;
1133  		if (skb == NULL)
1134  			break;
1135  		np->rx_info[i].mapping = dma_map_single(&np->pci_dev->dev,
1136  							skb->data,
1137  							np->rx_buf_sz,
1138  							DMA_FROM_DEVICE);
1139  		if (dma_mapping_error(&np->pci_dev->dev, np->rx_info[i].mapping)) {
1140  			dev_kfree_skb(skb);
1141  			np->rx_info[i].skb = NULL;
1142  			break;
1143  		}
1144  		/* Grrr, we cannot offset to correctly align the IP header. */
1145  		np->rx_ring[i].rxaddr = cpu_to_dma(np->rx_info[i].mapping | RxDescValid);
1146  	}
1147  	writew(i - 1, np->base + RxDescQIdx);
1148  	np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
1149  
1150  	/* Clear the remainder of the Rx buffer ring. */
1151  	for (  ; i < RX_RING_SIZE; i++) {
1152  		np->rx_ring[i].rxaddr = 0;
1153  		np->rx_info[i].skb = NULL;
1154  		np->rx_info[i].mapping = 0;
1155  	}
1156  	/* Mark the last entry as wrapping the ring. */
1157  	np->rx_ring[RX_RING_SIZE - 1].rxaddr |= cpu_to_dma(RxDescEndRing);
1158  
1159  	/* Clear the completion rings. */
1160  	for (i = 0; i < DONE_Q_SIZE; i++) {
1161  		np->rx_done_q[i].status = 0;
1162  		np->tx_done_q[i].status = 0;
1163  	}
1164  
1165  	for (i = 0; i < TX_RING_SIZE; i++)
1166  		memset(&np->tx_info[i], 0, sizeof(np->tx_info[i]));
1167  }
1168  
1169  
1170  static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
1171  {
1172  	struct netdev_private *np = netdev_priv(dev);
1173  	unsigned int entry;
1174  	unsigned int prev_tx;
1175  	u32 status;
1176  	int i, j;
1177  
1178  	/*
1179  	 * be cautious here, wrapping the queue has weird semantics
1180  	 * and we may not have enough slots even when it seems we do.
1181  	 */
1182  	if ((np->cur_tx - np->dirty_tx) + skb_num_frags(skb) * 2 > TX_RING_SIZE) {
1183  		netif_stop_queue(dev);
1184  		return NETDEV_TX_BUSY;
1185  	}
1186  
1187  #if defined(ZEROCOPY) && defined(HAS_BROKEN_FIRMWARE)
1188  	if (skb->ip_summed == CHECKSUM_PARTIAL) {
1189  		if (skb_padto(skb, (skb->len + PADDING_MASK) & ~PADDING_MASK))
1190  			return NETDEV_TX_OK;
1191  	}
1192  #endif /* ZEROCOPY && HAS_BROKEN_FIRMWARE */
1193  
1194  	prev_tx = np->cur_tx;
1195  	entry = np->cur_tx % TX_RING_SIZE;
1196  	for (i = 0; i < skb_num_frags(skb); i++) {
1197  		int wrap_ring = 0;
1198  		status = TxDescID;
1199  
1200  		if (i == 0) {
1201  			np->tx_info[entry].skb = skb;
1202  			status |= TxCRCEn;
1203  			if (entry >= TX_RING_SIZE - skb_num_frags(skb)) {
1204  				status |= TxRingWrap;
1205  				wrap_ring = 1;
1206  			}
1207  			if (np->reap_tx) {
1208  				status |= TxDescIntr;
1209  				np->reap_tx = 0;
1210  			}
1211  			if (skb->ip_summed == CHECKSUM_PARTIAL) {
1212  				status |= TxCalTCP;
1213  				dev->stats.tx_compressed++;
1214  			}
1215  			status |= skb_first_frag_len(skb) | (skb_num_frags(skb) << 16);
1216  
1217  			np->tx_info[entry].mapping =
1218  				dma_map_single(&np->pci_dev->dev, skb->data,
1219  					       skb_first_frag_len(skb),
1220  					       DMA_TO_DEVICE);
1221  		} else {
1222  			const skb_frag_t *this_frag = &skb_shinfo(skb)->frags[i - 1];
1223  			status |= skb_frag_size(this_frag);
1224  			np->tx_info[entry].mapping =
1225  				dma_map_single(&np->pci_dev->dev,
1226  					       skb_frag_address(this_frag),
1227  					       skb_frag_size(this_frag),
1228  					       DMA_TO_DEVICE);
1229  		}
1230  		if (dma_mapping_error(&np->pci_dev->dev, np->tx_info[entry].mapping)) {
1231  			dev->stats.tx_dropped++;
1232  			goto err_out;
1233  		}
1234  
1235  		np->tx_ring[entry].addr = cpu_to_dma(np->tx_info[entry].mapping);
1236  		np->tx_ring[entry].status = cpu_to_le32(status);
1237  		if (debug > 3)
1238  			printk(KERN_DEBUG "%s: Tx #%d/#%d slot %d status %#8.8x.\n",
1239  			       dev->name, np->cur_tx, np->dirty_tx,
1240  			       entry, status);
1241  		if (wrap_ring) {
1242  			np->tx_info[entry].used_slots = TX_RING_SIZE - entry;
1243  			np->cur_tx += np->tx_info[entry].used_slots;
1244  			entry = 0;
1245  		} else {
1246  			np->tx_info[entry].used_slots = 1;
1247  			np->cur_tx += np->tx_info[entry].used_slots;
1248  			entry++;
1249  		}
1250  		/* scavenge the tx descriptors twice per TX_RING_SIZE */
1251  		if (np->cur_tx % (TX_RING_SIZE / 2) == 0)
1252  			np->reap_tx = 1;
1253  	}
1254  
1255  	/* Non-x86: explicitly flush descriptor cache lines here. */
1256  	/* Ensure all descriptors are written back before the transmit is
1257  	   initiated. - Jes */
1258  	wmb();
1259  
1260  	/* Update the producer index. */
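	/* The index is expressed in 8-byte units, so a 16-byte type 2
	   descriptor advances it by 2 per slot -- hence the sizeof()/8. */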
1261  	writel(entry * (sizeof(starfire_tx_desc) / 8), np->base + TxProducerIdx);
1262  
1263  	/* 4 is arbitrary, but should be ok */
1264  	if ((np->cur_tx - np->dirty_tx) + 4 > TX_RING_SIZE)
1265  		netif_stop_queue(dev);
1266  
1267  	return NETDEV_TX_OK;
1268  
1269  err_out:
1270  	entry = prev_tx % TX_RING_SIZE;
1271  	np->tx_info[entry].skb = NULL;
1272  	if (i > 0) {
1273  		dma_unmap_single(&np->pci_dev->dev,
1274  				 np->tx_info[entry].mapping,
1275  				 skb_first_frag_len(skb), DMA_TO_DEVICE);
1276  		np->tx_info[entry].mapping = 0;
1277  		entry = (entry + np->tx_info[entry].used_slots) % TX_RING_SIZE;
1278  		for (j = 1; j < i; j++) {
1279  			dma_unmap_single(&np->pci_dev->dev,
1280  					 np->tx_info[entry].mapping,
1281  					 skb_frag_size(&skb_shinfo(skb)->frags[j - 1]),
1282  					 DMA_TO_DEVICE);
1283  			entry++;
1284  		}
1285  	}
1286  	dev_kfree_skb_any(skb);
1287  	np->cur_tx = prev_tx;
1288  	return NETDEV_TX_OK;
1289  }
1290  
1291  /* The interrupt handler does all of the Rx thread work and cleans up
1292     after the Tx thread. */
1293  static irqreturn_t intr_handler(int irq, void *dev_instance)
1294  {
1295  	struct net_device *dev = dev_instance;
1296  	struct netdev_private *np = netdev_priv(dev);
1297  	void __iomem *ioaddr = np->base;
1298  	int boguscnt = max_interrupt_work;
1299  	int consumer;
1300  	int tx_status;
1301  	int handled = 0;
1302  
1303  	do {
1304  		u32 intr_status = readl(ioaddr + IntrClear);
1305  
1306  		if (debug > 4)
1307  			printk(KERN_DEBUG "%s: Interrupt status %#8.8x.\n",
1308  			       dev->name, intr_status);
1309  
1310  		if (intr_status == 0 || intr_status == (u32) -1)
1311  			break;
1312  
1313  		handled = 1;
1314  
1315  		if (intr_status & (IntrRxDone | IntrRxEmpty)) {
1316  			u32 enable;
1317  
1318  			if (likely(napi_schedule_prep(&np->napi))) {
1319  				__napi_schedule(&np->napi);
1320  				enable = readl(ioaddr + IntrEnable);
1321  				enable &= ~(IntrRxDone | IntrRxEmpty);
1322  				writel(enable, ioaddr + IntrEnable);
1323  				/* flush PCI posting buffers */
1324  				readl(ioaddr + IntrEnable);
1325  			} else {
1326  				/* Paranoia check */
1327  				enable = readl(ioaddr + IntrEnable);
1328  				if (enable & (IntrRxDone | IntrRxEmpty)) {
1329  					printk(KERN_INFO
1330  					       "%s: interrupt while in poll!\n",
1331  					       dev->name);
1332  					enable &= ~(IntrRxDone | IntrRxEmpty);
1333  					writel(enable, ioaddr + IntrEnable);
1334  				}
1335  			}
1336  		}
1337  
1338  		/* Scavenge the skbuff list based on the Tx-done queue.
1339  		   There are redundant checks here that may be cleaned up
1340  		   after the driver has proven to be reliable. */
1341  		consumer = readl(ioaddr + TxConsumerIdx);
1342  		if (debug > 3)
1343  			printk(KERN_DEBUG "%s: Tx Consumer index is %d.\n",
1344  			       dev->name, consumer);
1345  
1346  		while ((tx_status = le32_to_cpu(np->tx_done_q[np->tx_done].status)) != 0) {
1347  			if (debug > 3)
1348  				printk(KERN_DEBUG "%s: Tx completion #%d entry %d is %#8.8x.\n",
1349  				       dev->name, np->dirty_tx, np->tx_done, tx_status);
1350  			if ((tx_status & 0xe0000000) == 0xa0000000) {
1351  				dev->stats.tx_packets++;
1352  			} else if ((tx_status & 0xe0000000) == 0x80000000) {
1353  				u16 entry = (tx_status & 0x7fff) / sizeof(starfire_tx_desc);
1354  				struct sk_buff *skb = np->tx_info[entry].skb;
1355  				np->tx_info[entry].skb = NULL;
1356  				dma_unmap_single(&np->pci_dev->dev,
1357  						 np->tx_info[entry].mapping,
1358  						 skb_first_frag_len(skb),
1359  						 DMA_TO_DEVICE);
1360  				np->tx_info[entry].mapping = 0;
1361  				np->dirty_tx += np->tx_info[entry].used_slots;
1362  				entry = (entry + np->tx_info[entry].used_slots) % TX_RING_SIZE;
1363  				{
1364  					int i;
1365  					for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1366  						dma_unmap_single(&np->pci_dev->dev,
1367  								 np->tx_info[entry].mapping,
1368  								 skb_frag_size(&skb_shinfo(skb)->frags[i]),
1369  								 DMA_TO_DEVICE);
1370  						np->dirty_tx++;
1371  						entry++;
1372  					}
1373  				}
1374  
1375  				dev_consume_skb_irq(skb);
1376  			}
1377  			np->tx_done_q[np->tx_done].status = 0;
1378  			np->tx_done = (np->tx_done + 1) % DONE_Q_SIZE;
1379  		}
1380  		writew(np->tx_done, ioaddr + CompletionQConsumerIdx + 2);
1381  
1382  		if (netif_queue_stopped(dev) &&
1383  		    (np->cur_tx - np->dirty_tx + 4 < TX_RING_SIZE)) {
1384  			/* The ring is no longer full, wake the queue. */
1385  			netif_wake_queue(dev);
1386  		}
1387  
1388  		/* Stats overflow */
1389  		if (intr_status & IntrStatsMax)
1390  			get_stats(dev);
1391  
1392  		/* Media change interrupt. */
1393  		if (intr_status & IntrLinkChange)
1394  			netdev_media_change(dev);
1395  
1396  		/* Abnormal error summary/uncommon events handlers. */
1397  		if (intr_status & IntrAbnormalSummary)
1398  			netdev_error(dev, intr_status);
1399  
1400  		if (--boguscnt < 0) {
1401  			if (debug > 1)
1402  				printk(KERN_WARNING "%s: Too much work at interrupt, "
1403  				       "status=%#8.8x.\n",
1404  				       dev->name, intr_status);
1405  			break;
1406  		}
1407  	} while (1);
1408  
1409  	if (debug > 4)
1410  		printk(KERN_DEBUG "%s: exiting interrupt, status=%#8.8x.\n",
1411  		       dev->name, (int) readl(ioaddr + IntrStatus));
1412  	return IRQ_RETVAL(handled);
1413  }
1414  
1415  
1416  /*
1417   * This routine is logically part of the interrupt/poll handler, but separated
1418   * for clarity and better register allocation.
1419   */
1420  static int __netdev_rx(struct net_device *dev, int *quota)
1421  {
1422  	struct netdev_private *np = netdev_priv(dev);
1423  	u32 desc_status;
1424  	int retcode = 0;
1425  
1426  	/* If EOP is set on the next entry, it's a new packet. Send it up. */
1427  	while ((desc_status = le32_to_cpu(np->rx_done_q[np->rx_done].status)) != 0) {
1428  		struct sk_buff *skb;
1429  		u16 pkt_len;
1430  		int entry;
1431  		rx_done_desc *desc = &np->rx_done_q[np->rx_done];
1432  
1433  		if (debug > 4)
1434  			printk(KERN_DEBUG "  netdev_rx() status of %d was %#8.8x.\n", np->rx_done, desc_status);
1435  		if (!(desc_status & RxOK)) {
1436  			/* There was an error. */
1437  			if (debug > 2)
1438  				printk(KERN_DEBUG "  netdev_rx() Rx error was %#8.8x.\n", desc_status);
1439  			dev->stats.rx_errors++;
1440  			if (desc_status & RxFIFOErr)
1441  				dev->stats.rx_fifo_errors++;
1442  			goto next_rx;
1443  		}
1444  
1445  		if (*quota <= 0) {	/* out of rx quota */
1446  			retcode = 1;
1447  			goto out;
1448  		}
1449  		(*quota)--;
1450  
1451  		pkt_len = desc_status;	/* Implicitly Truncate */
1452  		entry = (desc_status >> 16) & 0x7ff;
1453  
1454  		if (debug > 4)
1455  			printk(KERN_DEBUG "  netdev_rx() normal Rx pkt length %d, quota %d.\n", pkt_len, *quota);
1456  		/* Check if the packet is long enough to accept without copying
1457  		   to a minimally-sized skbuff. */
1458  		if (pkt_len < rx_copybreak &&
1459  		    (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
1460  			skb_reserve(skb, 2);	/* 16 byte align the IP header */
1461  			dma_sync_single_for_cpu(&np->pci_dev->dev,
1462  						np->rx_info[entry].mapping,
1463  						pkt_len, DMA_FROM_DEVICE);
1464  			skb_copy_to_linear_data(skb, np->rx_info[entry].skb->data, pkt_len);
1465  			dma_sync_single_for_device(&np->pci_dev->dev,
1466  						   np->rx_info[entry].mapping,
1467  						   pkt_len, DMA_FROM_DEVICE);
1468  			skb_put(skb, pkt_len);
1469  		} else {
1470  			dma_unmap_single(&np->pci_dev->dev,
1471  					 np->rx_info[entry].mapping,
1472  					 np->rx_buf_sz, DMA_FROM_DEVICE);
1473  			skb = np->rx_info[entry].skb;
1474  			skb_put(skb, pkt_len);
1475  			np->rx_info[entry].skb = NULL;
1476  			np->rx_info[entry].mapping = 0;
1477  		}
1478  #ifndef final_version			/* Remove after testing. */
1479  		/* You will want this info for the initial debug. */
1480  		if (debug > 5) {
1481  			printk(KERN_DEBUG "  Rx data %pM %pM %2.2x%2.2x.\n",
1482  			       skb->data, skb->data + 6,
1483  			       skb->data[12], skb->data[13]);
1484  		}
1485  #endif
1486  
1487  		skb->protocol = eth_type_trans(skb, dev);
1488  #ifdef VLAN_SUPPORT
1489  		if (debug > 4)
1490  			printk(KERN_DEBUG "  netdev_rx() status2 of %d was %#4.4x.\n", np->rx_done, le16_to_cpu(desc->status2));
1491  #endif
1492  		if (le16_to_cpu(desc->status2) & 0x0100) {
1493  			skb->ip_summed = CHECKSUM_UNNECESSARY;
1494  			dev->stats.rx_compressed++;
1495  		}
1496  		/*
1497  		 * This feature doesn't seem to be working, at least
1498  		 * with the two firmware versions I have. If the GFP sees
1499  		 * an IP fragment, it either ignores it completely, or reports
1500  		 * "bad checksum" on it.
1501  		 *
1502  		 * Maybe I missed something -- corrections are welcome.
1503  		 * Until then, the printk stays. :-) -Ion
1504  		 */
1505  		else if (le16_to_cpu(desc->status2) & 0x0040) {
1506  			skb->ip_summed = CHECKSUM_COMPLETE;
1507  			skb->csum = le16_to_cpu(desc->csum);
1508  			printk(KERN_DEBUG "%s: checksum_hw, status2 = %#x\n", dev->name, le16_to_cpu(desc->status2));
1509  		}
1510  #ifdef VLAN_SUPPORT
1511  		if (le16_to_cpu(desc->status2) & 0x0200) {
1512  			u16 vlid = le16_to_cpu(desc->vlanid);
1513  
1514  			if (debug > 4) {
1515  				printk(KERN_DEBUG "  netdev_rx() vlanid = %d\n",
1516  				       vlid);
1517  			}
1518  			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlid);
1519  		}
1520  #endif /* VLAN_SUPPORT */
1521  		netif_receive_skb(skb);
1522  		dev->stats.rx_packets++;
1523  
1524  	next_rx:
1525  		np->cur_rx++;
1526  		desc->status = 0;
1527  		np->rx_done = (np->rx_done + 1) % DONE_Q_SIZE;
1528  	}
1529  
1530  	if (*quota == 0) {	/* out of rx quota */
1531  		retcode = 1;
1532  		goto out;
1533  	}
1534  	writew(np->rx_done, np->base + CompletionQConsumerIdx);
1535  
1536   out:
1537  	refill_rx_ring(dev);
1538  	if (debug > 5)
1539  		printk(KERN_DEBUG "  exiting netdev_rx(): %d, status of %d was %#8.8x.\n",
1540  		       retcode, np->rx_done, desc_status);
1541  	return retcode;
1542  }
1543  
1544  static int netdev_poll(struct napi_struct *napi, int budget)
1545  {
1546  	struct netdev_private *np = container_of(napi, struct netdev_private, napi);
1547  	struct net_device *dev = np->dev;
1548  	u32 intr_status;
1549  	void __iomem *ioaddr = np->base;
1550  	int quota = budget;
1551  
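	/* Typical NAPI poll: acknowledge the Rx interrupt causes, drain the
	   completion queue within the budget, and only re-enable Rx
	   interrupts (below) once no work is pending and the budget was not
	   exhausted. */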
1552  	do {
1553  		writel(IntrRxDone | IntrRxEmpty, ioaddr + IntrClear);
1554  
1555  		if (__netdev_rx(dev, &quota))
1556  			goto out;
1557  
1558  		intr_status = readl(ioaddr + IntrStatus);
1559  	} while (intr_status & (IntrRxDone | IntrRxEmpty));
1560  
1561  	napi_complete(napi);
1562  	intr_status = readl(ioaddr + IntrEnable);
1563  	intr_status |= IntrRxDone | IntrRxEmpty;
1564  	writel(intr_status, ioaddr + IntrEnable);
1565  
1566   out:
1567  	if (debug > 5)
1568  		printk(KERN_DEBUG "  exiting netdev_poll(): %d.\n",
1569  		       budget - quota);
1570  
1571  	/* Restart Rx engine if stopped. */
1572  	return budget - quota;
1573  }
1574  
1575  static void refill_rx_ring(struct net_device *dev)
1576  {
1577  	struct netdev_private *np = netdev_priv(dev);
1578  	struct sk_buff *skb;
1579  	int entry = -1;
1580  
1581  	/* Refill the Rx ring buffers. */
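	/* Slots between dirty_rx and cur_rx were consumed by the receive path
	   and need fresh buffers; an allocation failure leaves the slot empty
	   to be retried on the next call. */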
1582  	for (; np->cur_rx - np->dirty_rx > 0; np->dirty_rx++) {
1583  		entry = np->dirty_rx % RX_RING_SIZE;
1584  		if (np->rx_info[entry].skb == NULL) {
1585  			skb = netdev_alloc_skb(dev, np->rx_buf_sz);
1586  			np->rx_info[entry].skb = skb;
1587  			if (skb == NULL)
1588  				break;	/* Better luck next round. */
1589  			np->rx_info[entry].mapping =
1590  				dma_map_single(&np->pci_dev->dev, skb->data,
1591  					       np->rx_buf_sz, DMA_FROM_DEVICE);
1592  			if (dma_mapping_error(&np->pci_dev->dev, np->rx_info[entry].mapping)) {
1593  				dev_kfree_skb(skb);
1594  				np->rx_info[entry].skb = NULL;
1595  				break;
1596  			}
1597  			np->rx_ring[entry].rxaddr =
1598  				cpu_to_dma(np->rx_info[entry].mapping | RxDescValid);
1599  		}
1600  		if (entry == RX_RING_SIZE - 1)
1601  			np->rx_ring[entry].rxaddr |= cpu_to_dma(RxDescEndRing);
1602  	}
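	/* The last ring slot carries RxDescEndRing so the chip wraps back to
	   the start; the write below tells the chip (via RxDescQIdx) how far
	   the ring has been refilled. */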
1603  	if (entry >= 0)
1604  		writew(entry, np->base + RxDescQIdx);
1605  }
1606  
1607  
1608  static void netdev_media_change(struct net_device *dev)
1609  {
1610  	struct netdev_private *np = netdev_priv(dev);
1611  	void __iomem *ioaddr = np->base;
1612  	u16 reg0, reg1, reg4, reg5;
1613  	u32 new_tx_mode;
1614  	u32 new_intr_timer_ctrl;
1615  
1616  	/* reset status first */
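	/* The BMSR latches link-down events, so it is read twice: the first
	   read clears any stale latched status and the second reflects the
	   current link state. */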
1617  	mdio_read(dev, np->phys[0], MII_BMCR);
1618  	mdio_read(dev, np->phys[0], MII_BMSR);
1619  
1620  	reg0 = mdio_read(dev, np->phys[0], MII_BMCR);
1621  	reg1 = mdio_read(dev, np->phys[0], MII_BMSR);
1622  
1623  	if (reg1 & BMSR_LSTATUS) {
1624  		/* link is up */
1625  		if (reg0 & BMCR_ANENABLE) {
1626  			/* autonegotiation is enabled */
1627  			reg4 = mdio_read(dev, np->phys[0], MII_ADVERTISE);
1628  			reg5 = mdio_read(dev, np->phys[0], MII_LPA);
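			/* Pick the best mode advertised by both ends, in the
			   order 100/full, 100/half, 10/full, 10/half. */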
1629  			if (reg4 & ADVERTISE_100FULL && reg5 & LPA_100FULL) {
1630  				np->speed100 = 1;
1631  				np->mii_if.full_duplex = 1;
1632  			} else if (reg4 & ADVERTISE_100HALF && reg5 & LPA_100HALF) {
1633  				np->speed100 = 1;
1634  				np->mii_if.full_duplex = 0;
1635  			} else if (reg4 & ADVERTISE_10FULL && reg5 & LPA_10FULL) {
1636  				np->speed100 = 0;
1637  				np->mii_if.full_duplex = 1;
1638  			} else {
1639  				np->speed100 = 0;
1640  				np->mii_if.full_duplex = 0;
1641  			}
1642  		} else {
1643  			/* autonegotiation is disabled */
1644  			if (reg0 & BMCR_SPEED100)
1645  				np->speed100 = 1;
1646  			else
1647  				np->speed100 = 0;
1648  			if (reg0 & BMCR_FULLDPLX)
1649  				np->mii_if.full_duplex = 1;
1650  			else
1651  				np->mii_if.full_duplex = 0;
1652  		}
1653  		netif_carrier_on(dev);
1654  		printk(KERN_DEBUG "%s: Link is up, running at %sMbit %s-duplex\n",
1655  		       dev->name,
1656  		       np->speed100 ? "100" : "10",
1657  		       np->mii_if.full_duplex ? "full" : "half");
1658  
1659  		new_tx_mode = np->tx_mode & ~FullDuplex;	/* duplex setting */
1660  		if (np->mii_if.full_duplex)
1661  			new_tx_mode |= FullDuplex;
1662  		if (np->tx_mode != new_tx_mode) {
1663  			np->tx_mode = new_tx_mode;
1664  			writel(np->tx_mode | MiiSoftReset, ioaddr + TxMode);
1665  			udelay(1000);
1666  			writel(np->tx_mode, ioaddr + TxMode);
1667  		}
1668  
1669  		new_intr_timer_ctrl = np->intr_timer_ctrl & ~Timer10X;
1670  		if (np->speed100)
1671  			new_intr_timer_ctrl |= Timer10X;
1672  		if (np->intr_timer_ctrl != new_intr_timer_ctrl) {
1673  			np->intr_timer_ctrl = new_intr_timer_ctrl;
1674  			writel(new_intr_timer_ctrl, ioaddr + IntrTimerCtrl);
1675  		}
1676  	} else {
1677  		netif_carrier_off(dev);
1678  		printk(KERN_DEBUG "%s: Link is down\n", dev->name);
1679  	}
1680  }
1681  
1682  
1683  static void netdev_error(struct net_device *dev, int intr_status)
1684  {
1685  	struct netdev_private *np = netdev_priv(dev);
1686  
1687  	/* Came close to underrunning the Tx FIFO, increase threshold. */
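	/* The Tx threshold register counts 16-byte units, hence the cap of
	   PKT_BUF_SZ / 16 and the "* 16" in the message below. */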
1688  	if (intr_status & IntrTxDataLow) {
1689  		if (np->tx_threshold <= PKT_BUF_SZ / 16) {
1690  			writel(++np->tx_threshold, np->base + TxThreshold);
1691  			printk(KERN_NOTICE "%s: PCI bus congestion, increasing Tx FIFO threshold to %d bytes\n",
1692  			       dev->name, np->tx_threshold * 16);
1693  		} else
1694  			printk(KERN_WARNING "%s: PCI Tx underflow -- adapter is probably malfunctioning\n", dev->name);
1695  	}
1696  	if (intr_status & IntrRxGFPDead) {
1697  		dev->stats.rx_fifo_errors++;
1698  		dev->stats.rx_errors++;
1699  	}
1700  	if (intr_status & (IntrNoTxCsum | IntrDMAErr)) {
1701  		dev->stats.tx_fifo_errors++;
1702  		dev->stats.tx_errors++;
1703  	}
1704  	if ((intr_status & ~(IntrNormalMask | IntrAbnormalSummary | IntrLinkChange | IntrStatsMax | IntrTxDataLow | IntrRxGFPDead | IntrNoTxCsum | IntrPCIPad)) && debug)
1705  		printk(KERN_ERR "%s: Something Wicked happened! %#8.8x.\n",
1706  		       dev->name, intr_status);
1707  }
1708  
1709  
1710  static struct net_device_stats *get_stats(struct net_device *dev)
1711  {
1712  	struct netdev_private *np = netdev_priv(dev);
1713  	void __iomem *ioaddr = np->base;
1714  
1715  	/* This adapter architecture needs no SMP locks. */
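	/* The offsets below index the chip's on-board statistics counters,
	   which are copied straight into the generic netdev stats. */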
1716  	dev->stats.tx_bytes = readl(ioaddr + 0x57010);
1717  	dev->stats.rx_bytes = readl(ioaddr + 0x57044);
1718  	dev->stats.tx_packets = readl(ioaddr + 0x57000);
1719  	dev->stats.tx_aborted_errors =
1720  		readl(ioaddr + 0x57024) + readl(ioaddr + 0x57028);
1721  	dev->stats.tx_window_errors = readl(ioaddr + 0x57018);
1722  	dev->stats.collisions =
1723  		readl(ioaddr + 0x57004) + readl(ioaddr + 0x57008);
1724  
1725  	/* The chip only needs to report frames it silently dropped. */
1726  	dev->stats.rx_dropped += readw(ioaddr + RxDMAStatus);
1727  	writew(0, ioaddr + RxDMAStatus);
1728  	dev->stats.rx_crc_errors = readl(ioaddr + 0x5703C);
1729  	dev->stats.rx_frame_errors = readl(ioaddr + 0x57040);
1730  	dev->stats.rx_length_errors = readl(ioaddr + 0x57058);
1731  	dev->stats.rx_missed_errors = readl(ioaddr + 0x5707C);
1732  
1733  	return &dev->stats;
1734  }
1735  
1736  #ifdef VLAN_SUPPORT
1737  static u32 set_vlan_mode(struct netdev_private *np)
1738  {
1739  	u32 ret = VlanMode;
1740  	u16 vid;
1741  	void __iomem *filter_addr = np->base + HashTable + 8;
1742  	int vlan_count = 0;
1743  
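	/* The perfect VLAN filter holds up to 32 IDs, one 16-bit ID per
	   16-byte slot starting at HashTable + 8; PerfectFilterVlan is only
	   enabled once all 32 slots are in use. */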
1744  	for_each_set_bit(vid, np->active_vlans, VLAN_N_VID) {
1745  		if (vlan_count == 32)
1746  			break;
1747  		writew(vid, filter_addr);
1748  		filter_addr += 16;
1749  		vlan_count++;
1750  	}
1751  	if (vlan_count == 32) {
1752  		ret |= PerfectFilterVlan;
1753  		while (vlan_count < 32) {
1754  			writew(0, filter_addr);
1755  			filter_addr += 16;
1756  			vlan_count++;
1757  		}
1758  	}
1759  	return ret;
1760  }
1761  #endif /* VLAN_SUPPORT */
1762  
1763  static void set_rx_mode(struct net_device *dev)
1764  {
1765  	struct netdev_private *np = netdev_priv(dev);
1766  	void __iomem *ioaddr = np->base;
1767  	u32 rx_mode = MinVLANPrio;
1768  	struct netdev_hw_addr *ha;
1769  	int i;
1770  
1771  #ifdef VLAN_SUPPORT
1772  	rx_mode |= set_vlan_mode(np);
1773  #endif /* VLAN_SUPPORT */
1774  
1775  	if (dev->flags & IFF_PROMISC) {	/* Set promiscuous. */
1776  		rx_mode |= AcceptAll;
1777  	} else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
1778  		   (dev->flags & IFF_ALLMULTI)) {
1779  		/* Too many to match, or accept all multicasts. */
1780  		rx_mode |= AcceptBroadcast|AcceptAllMulticast|PerfectFilter;
1781  	} else if (netdev_mc_count(dev) <= 14) {
1782  		/* Use the 16 element perfect filter, skip first two entries. */
1783  		void __iomem *filter_addr = ioaddr + PerfFilterTable + 2 * 16;
1784  		const __be16 *eaddrs;
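		/* Each perfect-filter entry occupies 16 bytes; the address is
		   written as three 16-bit words at offsets 0, 4 and 8 of the
		   entry. */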
1785  		netdev_for_each_mc_addr(ha, dev) {
1786  			eaddrs = (__be16 *) ha->addr;
1787  			writew(be16_to_cpu(eaddrs[2]), filter_addr); filter_addr += 4;
1788  			writew(be16_to_cpu(eaddrs[1]), filter_addr); filter_addr += 4;
1789  			writew(be16_to_cpu(eaddrs[0]), filter_addr); filter_addr += 8;
1790  		}
1791  		eaddrs = (const __be16 *)dev->dev_addr;
1792  		i = netdev_mc_count(dev) + 2;
1793  		while (i++ < 16) {
1794  			writew(be16_to_cpu(eaddrs[0]), filter_addr); filter_addr += 4;
1795  			writew(be16_to_cpu(eaddrs[1]), filter_addr); filter_addr += 4;
1796  			writew(be16_to_cpu(eaddrs[2]), filter_addr); filter_addr += 8;
1797  		}
1798  		rx_mode |= AcceptBroadcast|PerfectFilter;
1799  	} else {
1800  		/* Must use a multicast hash table. */
1801  		void __iomem *filter_addr;
1802  		const __be16 *eaddrs;
1803  		__le16 mc_filter[32] __attribute__ ((aligned(sizeof(long))));	/* Multicast hash filter */
1804  
1805  		memset(mc_filter, 0, sizeof(mc_filter));
1806  		netdev_for_each_mc_addr(ha, dev) {
1807  			/* The chip uses the upper 9 CRC bits
1808  			   as index into the hash table */
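			/* Worked example: a CRC of 0x804Axxxx gives
			   bit_nr = 0x804Axxxx >> 23 = 256, i.e. 16-bit word
			   256 / 16 = 16 of mc_filter, bit 256 % 16 = 0. The
			   code below sets that same bit through an aligned
			   __le32 read-modify-write covering words 16 and 17. */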
1809  			int bit_nr = ether_crc_le(ETH_ALEN, ha->addr) >> 23;
1810  			__le32 *fptr = (__le32 *) &mc_filter[(bit_nr >> 4) & ~1];
1811  
1812  			*fptr |= cpu_to_le32(1 << (bit_nr & 31));
1813  		}
1814  		/* Clear the perfect filter list, skip first two entries. */
1815  		filter_addr = ioaddr + PerfFilterTable + 2 * 16;
1816  		eaddrs = (const __be16 *)dev->dev_addr;
1817  		for (i = 2; i < 16; i++) {
1818  			writew(be16_to_cpu(eaddrs[0]), filter_addr); filter_addr += 4;
1819  			writew(be16_to_cpu(eaddrs[1]), filter_addr); filter_addr += 4;
1820  			writew(be16_to_cpu(eaddrs[2]), filter_addr); filter_addr += 8;
1821  		}
1822  		for (filter_addr = ioaddr + HashTable, i = 0; i < 32; filter_addr += 16, i++)
1823  			writew(mc_filter[i], filter_addr);
1824  		rx_mode |= AcceptBroadcast|PerfectFilter|HashFilter;
1825  	}
1826  	writel(rx_mode, ioaddr + RxFilterMode);
1827  }
1828  
1829  static int check_if_running(struct net_device *dev)
1830  {
1831  	if (!netif_running(dev))
1832  		return -EINVAL;
1833  	return 0;
1834  }
1835  
1836  static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1837  {
1838  	struct netdev_private *np = netdev_priv(dev);
1839  	strscpy(info->driver, DRV_NAME, sizeof(info->driver));
1840  	strscpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
1841  }
1842  
1843  static int get_link_ksettings(struct net_device *dev,
1844  			      struct ethtool_link_ksettings *cmd)
1845  {
1846  	struct netdev_private *np = netdev_priv(dev);
1847  	spin_lock_irq(&np->lock);
1848  	mii_ethtool_get_link_ksettings(&np->mii_if, cmd);
1849  	spin_unlock_irq(&np->lock);
1850  	return 0;
1851  }
1852  
1853  static int set_link_ksettings(struct net_device *dev,
1854  			      const struct ethtool_link_ksettings *cmd)
1855  {
1856  	struct netdev_private *np = netdev_priv(dev);
1857  	int res;
1858  	spin_lock_irq(&np->lock);
1859  	res = mii_ethtool_set_link_ksettings(&np->mii_if, cmd);
1860  	spin_unlock_irq(&np->lock);
1861  	check_duplex(dev);
1862  	return res;
1863  }
1864  
1865  static int nway_reset(struct net_device *dev)
1866  {
1867  	struct netdev_private *np = netdev_priv(dev);
1868  	return mii_nway_restart(&np->mii_if);
1869  }
1870  
1871  static u32 get_link(struct net_device *dev)
1872  {
1873  	struct netdev_private *np = netdev_priv(dev);
1874  	return mii_link_ok(&np->mii_if);
1875  }
1876  
1877  static u32 get_msglevel(struct net_device *dev)
1878  {
1879  	return debug;
1880  }
1881  
1882  static void set_msglevel(struct net_device *dev, u32 val)
1883  {
1884  	debug = val;
1885  }
1886  
1887  static const struct ethtool_ops ethtool_ops = {
1888  	.begin = check_if_running,
1889  	.get_drvinfo = get_drvinfo,
1890  	.nway_reset = nway_reset,
1891  	.get_link = get_link,
1892  	.get_msglevel = get_msglevel,
1893  	.set_msglevel = set_msglevel,
1894  	.get_link_ksettings = get_link_ksettings,
1895  	.set_link_ksettings = set_link_ksettings,
1896  };
1897  
1898  static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1899  {
1900  	struct netdev_private *np = netdev_priv(dev);
1901  	struct mii_ioctl_data *data = if_mii(rq);
1902  	int rc;
1903  
1904  	if (!netif_running(dev))
1905  		return -EINVAL;
1906  
1907  	spin_lock_irq(&np->lock);
1908  	rc = generic_mii_ioctl(&np->mii_if, data, cmd, NULL);
1909  	spin_unlock_irq(&np->lock);
1910  
1911  	if ((cmd == SIOCSMIIREG) && (data->phy_id == np->phys[0]))
1912  		check_duplex(dev);
1913  
1914  	return rc;
1915  }
1916  
1917  static int netdev_close(struct net_device *dev)
1918  {
1919  	struct netdev_private *np = netdev_priv(dev);
1920  	void __iomem *ioaddr = np->base;
1921  	int i;
1922  
1923  	netif_stop_queue(dev);
1924  
1925  	napi_disable(&np->napi);
1926  
1927  	if (debug > 1) {
1928  		printk(KERN_DEBUG "%s: Shutting down ethercard, Intr status %#8.8x.\n",
1929  			   dev->name, (int) readl(ioaddr + IntrStatus));
1930  		printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d, Rx %d / %d.\n",
1931  		       dev->name, np->cur_tx, np->dirty_tx,
1932  		       np->cur_rx, np->dirty_rx);
1933  	}
1934  
1935  	/* Disable interrupts by clearing the interrupt mask. */
1936  	writel(0, ioaddr + IntrEnable);
1937  
1938  	/* Stop the chip's Tx and Rx processes. */
1939  	writel(0, ioaddr + GenCtrl);
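	/* Read GenCtrl back to flush the posted PCI write so the stop takes
	   effect before the rings are torn down. */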
1940  	readl(ioaddr + GenCtrl);
1941  
1942  	if (debug > 5) {
1943  		printk(KERN_DEBUG "  Tx ring at %#llx:\n",
1944  		       (long long) np->tx_ring_dma);
1945  		for (i = 0; i < 8 /* TX_RING_SIZE is huge! */; i++)
1946  			printk(KERN_DEBUG " #%d desc. %#8.8x %#llx -> %#8.8x.\n",
1947  			       i, le32_to_cpu(np->tx_ring[i].status),
1948  			       (long long) dma_to_cpu(np->tx_ring[i].addr),
1949  			       le32_to_cpu(np->tx_done_q[i].status));
1950  		printk(KERN_DEBUG "  Rx ring at %#llx -> %p:\n",
1951  		       (long long) np->rx_ring_dma, np->rx_done_q);
1952  		if (np->rx_done_q)
1953  			for (i = 0; i < 8 /* RX_RING_SIZE */; i++) {
1954  				printk(KERN_DEBUG " #%d desc. %#llx -> %#8.8x\n",
1955  				       i, (long long) dma_to_cpu(np->rx_ring[i].rxaddr), le32_to_cpu(np->rx_done_q[i].status));
1956  			}
1957  	}
1958  
1959  	free_irq(np->pci_dev->irq, dev);
1960  
1961  	/* Free all the skbuffs in the Rx queue. */
1962  	for (i = 0; i < RX_RING_SIZE; i++) {
1963  		np->rx_ring[i].rxaddr = cpu_to_dma(0xBADF00D0); /* An invalid address. */
1964  		if (np->rx_info[i].skb != NULL) {
1965  			dma_unmap_single(&np->pci_dev->dev,
1966  					 np->rx_info[i].mapping,
1967  					 np->rx_buf_sz, DMA_FROM_DEVICE);
1968  			dev_kfree_skb(np->rx_info[i].skb);
1969  		}
1970  		np->rx_info[i].skb = NULL;
1971  		np->rx_info[i].mapping = 0;
1972  	}
1973  	for (i = 0; i < TX_RING_SIZE; i++) {
1974  		struct sk_buff *skb = np->tx_info[i].skb;
1975  		if (skb == NULL)
1976  			continue;
1977  		dma_unmap_single(&np->pci_dev->dev, np->tx_info[i].mapping,
1978  				 skb_first_frag_len(skb), DMA_TO_DEVICE);
1979  		np->tx_info[i].mapping = 0;
1980  		dev_kfree_skb(skb);
1981  		np->tx_info[i].skb = NULL;
1982  	}
1983  
1984  	return 0;
1985  }
1986  
1987  static int __maybe_unused starfire_suspend(struct device *dev_d)
1988  {
1989  	struct net_device *dev = dev_get_drvdata(dev_d);
1990  
1991  	if (netif_running(dev)) {
1992  		netif_device_detach(dev);
1993  		netdev_close(dev);
1994  	}
1995  
1996  	return 0;
1997  }
1998  
1999  static int __maybe_unused starfire_resume(struct device *dev_d)
2000  {
2001  	struct net_device *dev = dev_get_drvdata(dev_d);
2002  
2003  	if (netif_running(dev)) {
2004  		netdev_open(dev);
2005  		netif_device_attach(dev);
2006  	}
2007  
2008  	return 0;
2009  }
2010  
2011  static void starfire_remove_one(struct pci_dev *pdev)
2012  {
2013  	struct net_device *dev = pci_get_drvdata(pdev);
2014  	struct netdev_private *np = netdev_priv(dev);
2015  
2016  	BUG_ON(!dev);
2017  
2018  	unregister_netdev(dev);
2019  
2020  	if (np->queue_mem)
2021  		dma_free_coherent(&pdev->dev, np->queue_mem_size,
2022  				  np->queue_mem, np->queue_mem_dma);
2023  
2024  
2025  	/* XXX: add wakeup code -- requires firmware for MagicPacket */
2026  	pci_set_power_state(pdev, PCI_D3hot);	/* go to sleep in D3 mode */
2027  	pci_disable_device(pdev);
2028  
2029  	iounmap(np->base);
2030  	pci_release_regions(pdev);
2031  
2032  	free_netdev(dev);			/* Will also free np!! */
2033  }
2034  
2035  static SIMPLE_DEV_PM_OPS(starfire_pm_ops, starfire_suspend, starfire_resume);
2036  
2037  static struct pci_driver starfire_driver = {
2038  	.name		= DRV_NAME,
2039  	.probe		= starfire_init_one,
2040  	.remove		= starfire_remove_one,
2041  	.driver.pm	= &starfire_pm_ops,
2042  	.id_table	= starfire_pci_tbl,
2043  };
2044  
2045  
2046  static int __init starfire_init (void)
2047  {
2048  /* When built as a module, this is printed whether or not devices are found in probe. */
2049  #ifdef MODULE
2050  	printk(KERN_INFO DRV_NAME ": polling (NAPI) enabled\n");
2051  #endif
2052  
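	/* netdrv_addr_t is the driver's descriptor address type; the build
	   fails here if its width ever disagrees with dma_addr_t. */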
2053  	BUILD_BUG_ON(sizeof(dma_addr_t) != sizeof(netdrv_addr_t));
2054  
2055  	return pci_register_driver(&starfire_driver);
2056  }
2057  
2058  
2059  static void __exit starfire_cleanup (void)
2060  {
2061  	pci_unregister_driver (&starfire_driver);
2062  }
2063  
2064  
2065  module_init(starfire_init);
2066  module_exit(starfire_cleanup);
2067