1 /*	tulip_core.c: A DEC 21x4x-family ethernet driver for Linux.
2 
3 	Copyright 2000,2001  The Linux Kernel Team
4 	Written/copyright 1994-2001 by Donald Becker.
5 
6 	This software may be used and distributed according to the terms
7 	of the GNU General Public License, incorporated herein by reference.
8 
9 	Please submit bugs to http://bugzilla.kernel.org/ .
10 */
11 
12 #define pr_fmt(fmt) "tulip: " fmt
13 
14 #define DRV_NAME	"tulip"
15 #ifdef CONFIG_TULIP_NAPI
16 #define DRV_VERSION    "1.1.15-NAPI" /* Keep at least for test */
17 #else
18 #define DRV_VERSION	"1.1.15"
19 #endif
20 #define DRV_RELDATE	"Feb 27, 2007"
21 
22 
23 #include <linux/module.h>
24 #include <linux/pci.h>
25 #include <linux/slab.h>
26 #include "tulip.h"
27 #include <linux/init.h>
28 #include <linux/interrupt.h>
29 #include <linux/etherdevice.h>
30 #include <linux/delay.h>
31 #include <linux/mii.h>
32 #include <linux/crc32.h>
33 #include <asm/unaligned.h>
34 #include <asm/uaccess.h>
35 
36 #ifdef CONFIG_SPARC
37 #include <asm/prom.h>
38 #endif
39 
40 static char version[] __devinitdata =
41 	"Linux Tulip driver version " DRV_VERSION " (" DRV_RELDATE ")\n";
42 
43 /* A few user-configurable values. */
44 
45 /* Maximum events (Rx packets, etc.) to handle at each interrupt. */
46 static unsigned int max_interrupt_work = 25;
47 
48 #define MAX_UNITS 8
49 /* Used to pass the full-duplex flag, etc. */
50 static int full_duplex[MAX_UNITS];
51 static int options[MAX_UNITS];
52 static int mtu[MAX_UNITS];			/* Jumbo MTU for interfaces. */
53 
54 /*  The possible media types that can be set in options[] are: */
55 const char * const medianame[32] = {
56 	"10baseT", "10base2", "AUI", "100baseTx",
57 	"10baseT-FDX", "100baseTx-FDX", "100baseT4", "100baseFx",
58 	"100baseFx-FDX", "MII 10baseT", "MII 10baseT-FDX", "MII",
59 	"10baseT(forced)", "MII 100baseTx", "MII 100baseTx-FDX", "MII 100baseT4",
60 	"MII 100baseFx-HDX", "MII 100baseFx-FDX", "Home-PNA 1Mbps", "Invalid-19",
61 	"","","","", "","","","",  "","","","Transceiver reset",
62 };
63 
64 /* Set the copy breakpoint for the copy-only-tiny-buffer Rx structure. */
65 #if defined(__alpha__) || defined(__arm__) || defined(__hppa__) || \
66 	defined(CONFIG_SPARC) || defined(__ia64__) || \
67 	defined(__sh__) || defined(__mips__)
68 static int rx_copybreak = 1518;
69 #else
70 static int rx_copybreak = 100;
71 #endif
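/* Frames shorter than rx_copybreak are copied into a freshly allocated skb so
 * the full-size Rx buffer can be recycled; 1518 (a full Ethernet frame)
 * effectively means "always copy", which the architectures above prefer. */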
72 
73 /*
74   Set the bus performance register.
75 	Typical: Set 16 longword cache alignment, no burst limit.
76 	Cache alignment, CSR0 bits 15:14:
77 		0000 = no alignment, 4000 = 8 longwords, 8000 = 16 longwords, C000 = 32 longwords
78 	Burst length, CSR0 bits 13:8:
79 		0000 = unlimited, 0100 = 1, 0200 = 2, 0400 = 4,
80 		0800 = 8, 1000 = 16, 2000 = 32 longwords
81 	Warning: many older 486 systems are broken and require setting 0x00A04800
82 	   8 longword cache alignment, 8 longword burst.
83 	ToDo: Non-Intel setting could be better.
84 */
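/* Example decode of the x86 default below: 0x01A00000 turns on the PCI
 * write-and-invalidate, read-line and read-multiple bits (the MWI/MRL/MRM
 * values from tulip.h), and 0x8000 selects 16-longword cache alignment with
 * an unlimited burst -- the "typical" setting described above. */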
85 
86 #if defined(__alpha__) || defined(__ia64__)
87 static int csr0 = 0x01A00000 | 0xE000;
88 #elif defined(__i386__) || defined(__powerpc__) || defined(__x86_64__)
89 static int csr0 = 0x01A00000 | 0x8000;
90 #elif defined(CONFIG_SPARC) || defined(__hppa__)
91 /* The UltraSparc PCI controllers will disconnect at every 64-byte
92  * crossing anyway, so it makes no sense to tell Tulip to burst
93  * any more than that.
94  */
95 static int csr0 = 0x01A00000 | 0x9000;
96 #elif defined(__arm__) || defined(__sh__)
97 static int csr0 = 0x01A00000 | 0x4800;
98 #elif defined(__mips__)
99 static int csr0 = 0x00200000 | 0x4000;
100 #else
101 #warning Processor architecture undefined!
102 static int csr0 = 0x00A00000 | 0x4800;
103 #endif
104 
105 /* Operational parameters that usually are not changed. */
106 /* Time in jiffies before concluding the transmitter is hung. */
107 #define TX_TIMEOUT  (4*HZ)
108 
109 
110 MODULE_AUTHOR("The Linux Kernel Team");
111 MODULE_DESCRIPTION("Digital 21*4* Tulip ethernet driver");
112 MODULE_LICENSE("GPL");
113 MODULE_VERSION(DRV_VERSION);
114 module_param(tulip_debug, int, 0);
115 module_param(max_interrupt_work, int, 0);
116 module_param(rx_copybreak, int, 0);
117 module_param(csr0, int, 0);
118 module_param_array(options, int, NULL, 0);
119 module_param_array(full_duplex, int, NULL, 0);
120 
121 #ifdef TULIP_DEBUG
122 int tulip_debug = TULIP_DEBUG;
123 #else
124 int tulip_debug = 1;
125 #endif
126 
127 static void tulip_timer(unsigned long data)
128 {
129 	struct net_device *dev = (struct net_device *)data;
130 	struct tulip_private *tp = netdev_priv(dev);
131 
132 	if (netif_running(dev))
133 		schedule_work(&tp->media_work);
134 }
135 
136 /*
137  * This table is used during operation for capabilities and the media timer.
138  *
139  * It is indexed via the values in 'enum chips'
140  */
141 
142 struct tulip_chip_table tulip_tbl[] = {
143   { }, /* placeholder for array, slot unused currently */
144   { }, /* placeholder for array, slot unused currently */
145 
146   /* DC21140 */
147   { "Digital DS21140 Tulip", 128, 0x0001ebef,
148 	HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM | HAS_PCI_MWI, tulip_timer,
149 	tulip_media_task },
150 
151   /* DC21142, DC21143 */
152   { "Digital DS21142/43 Tulip", 128, 0x0801fbff,
153 	HAS_MII | HAS_MEDIA_TABLE | ALWAYS_CHECK_MII | HAS_ACPI | HAS_NWAY
154 	| HAS_INTR_MITIGATION | HAS_PCI_MWI, tulip_timer, t21142_media_task },
155 
156   /* LC82C168 */
157   { "Lite-On 82c168 PNIC", 256, 0x0001fbef,
158 	HAS_MII | HAS_PNICNWAY, pnic_timer, },
159 
160   /* MX98713 */
161   { "Macronix 98713 PMAC", 128, 0x0001ebef,
162 	HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM, mxic_timer, },
163 
164   /* MX98715 */
165   { "Macronix 98715 PMAC", 256, 0x0001ebef,
166 	HAS_MEDIA_TABLE, mxic_timer, },
167 
168   /* MX98725 */
169   { "Macronix 98725 PMAC", 256, 0x0001ebef,
170 	HAS_MEDIA_TABLE, mxic_timer, },
171 
172   /* AX88140 */
173   { "ASIX AX88140", 128, 0x0001fbff,
174 	HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM | MC_HASH_ONLY
175 	| IS_ASIX, tulip_timer, tulip_media_task },
176 
177   /* PNIC2 */
178   { "Lite-On PNIC-II", 256, 0x0801fbff,
179 	HAS_MII | HAS_NWAY | HAS_8023X | HAS_PCI_MWI, pnic2_timer, },
180 
181   /* COMET */
182   { "ADMtek Comet", 256, 0x0001abef,
183 	HAS_MII | MC_HASH_ONLY | COMET_MAC_ADDR, comet_timer, },
184 
185   /* COMPEX9881 */
186   { "Compex 9881 PMAC", 128, 0x0001ebef,
187 	HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM, mxic_timer, },
188 
189   /* I21145 */
190   { "Intel DS21145 Tulip", 128, 0x0801fbff,
191 	HAS_MII | HAS_MEDIA_TABLE | ALWAYS_CHECK_MII | HAS_ACPI
192 	| HAS_NWAY | HAS_PCI_MWI, tulip_timer, tulip_media_task },
193 
194   /* DM910X */
195 #ifdef CONFIG_TULIP_DM910X
196   { "Davicom DM9102/DM9102A", 128, 0x0001ebef,
197 	HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM | HAS_ACPI,
198 	tulip_timer, tulip_media_task },
199 #else
200   { NULL },
201 #endif
202 
203   /* RS7112 */
204   { "Conexant LANfinity", 256, 0x0001ebef,
205 	HAS_MII | HAS_ACPI, tulip_timer, tulip_media_task },
206 
207 };
208 
209 
210 static DEFINE_PCI_DEVICE_TABLE(tulip_pci_tbl) = {
211 	{ 0x1011, 0x0009, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DC21140 },
212 	{ 0x1011, 0x0019, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DC21143 },
213 	{ 0x11AD, 0x0002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, LC82C168 },
214 	{ 0x10d9, 0x0512, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MX98713 },
215 	{ 0x10d9, 0x0531, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MX98715 },
216 /*	{ 0x10d9, 0x0531, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MX98725 },*/
217 	{ 0x125B, 0x1400, PCI_ANY_ID, PCI_ANY_ID, 0, 0, AX88140 },
218 	{ 0x11AD, 0xc115, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PNIC2 },
219 	{ 0x1317, 0x0981, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
220 	{ 0x1317, 0x0985, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
221 	{ 0x1317, 0x1985, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
222 	{ 0x1317, 0x9511, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
223 	{ 0x13D1, 0xAB02, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
224 	{ 0x13D1, 0xAB03, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
225 	{ 0x13D1, 0xAB08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
226 	{ 0x104A, 0x0981, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
227 	{ 0x104A, 0x2774, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
228 	{ 0x1259, 0xa120, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
229 	{ 0x11F6, 0x9881, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMPEX9881 },
230 	{ 0x8086, 0x0039, PCI_ANY_ID, PCI_ANY_ID, 0, 0, I21145 },
231 #ifdef CONFIG_TULIP_DM910X
232 	{ 0x1282, 0x9100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DM910X },
233 	{ 0x1282, 0x9102, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DM910X },
234 #endif
235 	{ 0x1113, 0x1216, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
236 	{ 0x1113, 0x1217, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MX98715 },
237 	{ 0x1113, 0x9511, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
238 	{ 0x1186, 0x1541, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
239 	{ 0x1186, 0x1561, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
240 	{ 0x1186, 0x1591, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
241 	{ 0x14f1, 0x1803, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CONEXANT },
242 	{ 0x1626, 0x8410, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
243 	{ 0x1737, 0xAB09, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
244 	{ 0x1737, 0xAB08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
245 	{ 0x17B3, 0xAB08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
246 	{ 0x10b7, 0x9300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, /* 3Com 3CSOHO100B-TX */
247 	{ 0x14ea, 0xab08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, /* Planex FNW-3602-TX */
248 	{ 0x1414, 0x0001, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, /* Microsoft MN-120 */
249 	{ 0x1414, 0x0002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
250 	{ } /* terminate list */
251 };
252 MODULE_DEVICE_TABLE(pci, tulip_pci_tbl);
253 
254 
255 /* A full-duplex map for media types. */
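/* Each entry is a capability bitmask (the MediaIs* flags in tulip.h):
 * full-duplex, always-full-duplex, MII, fiber (Fx) and 100 Mbit bits,
 * indexed by the same media numbers as medianame[] above. */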
256 const char tulip_media_cap[32] =
257 {0,0,0,16,  3,19,16,24,  27,4,7,5, 0,20,23,20,  28,31,0,0, };
258 
259 static void tulip_tx_timeout(struct net_device *dev);
260 static void tulip_init_ring(struct net_device *dev);
261 static void tulip_free_ring(struct net_device *dev);
262 static netdev_tx_t tulip_start_xmit(struct sk_buff *skb,
263 					  struct net_device *dev);
264 static int tulip_open(struct net_device *dev);
265 static int tulip_close(struct net_device *dev);
266 static void tulip_up(struct net_device *dev);
267 static void tulip_down(struct net_device *dev);
268 static struct net_device_stats *tulip_get_stats(struct net_device *dev);
269 static int private_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
270 static void set_rx_mode(struct net_device *dev);
271 static void tulip_set_wolopts(struct pci_dev *pdev, u32 wolopts);
272 #ifdef CONFIG_NET_POLL_CONTROLLER
273 static void poll_tulip(struct net_device *dev);
274 #endif
275 
276 static void tulip_set_power_state (struct tulip_private *tp,
277 				   int sleep, int snooze)
278 {
279 	if (tp->flags & HAS_ACPI) {
280 		u32 tmp, newtmp;
281 		pci_read_config_dword (tp->pdev, CFDD, &tmp);
282 		newtmp = tmp & ~(CFDD_Sleep | CFDD_Snooze);
283 		if (sleep)
284 			newtmp |= CFDD_Sleep;
285 		else if (snooze)
286 			newtmp |= CFDD_Snooze;
287 		if (tmp != newtmp)
288 			pci_write_config_dword (tp->pdev, CFDD, newtmp);
289 	}
290 
291 }
292 
293 
294 static void tulip_up(struct net_device *dev)
295 {
296 	struct tulip_private *tp = netdev_priv(dev);
297 	void __iomem *ioaddr = tp->base_addr;
298 	int next_tick = 3*HZ;
299 	u32 reg;
300 	int i;
301 
302 #ifdef CONFIG_TULIP_NAPI
303 	napi_enable(&tp->napi);
304 #endif
305 
306 	/* Wake the chip from sleep/snooze mode. */
307 	tulip_set_power_state (tp, 0, 0);
308 
309 	/* Disable all WOL events */
310 	pci_enable_wake(tp->pdev, PCI_D3hot, 0);
311 	pci_enable_wake(tp->pdev, PCI_D3cold, 0);
312 	tulip_set_wolopts(tp->pdev, 0);
313 
314 	/* On some chip revs we must set the MII/SYM port before the reset!? */
315 	if (tp->mii_cnt  ||  (tp->mtable  &&  tp->mtable->has_mii))
316 		iowrite32(0x00040000, ioaddr + CSR6);
317 
318 	/* Reset the chip, holding bit 0 set at least 50 PCI cycles. */
319 	iowrite32(0x00000001, ioaddr + CSR0);
320 	pci_read_config_dword(tp->pdev, PCI_COMMAND, &reg);  /* flush write */
321 	udelay(100);
322 
323 	/* Deassert reset.
324 	   Wait the specified 50 PCI cycles after a reset by initializing
325 	   Tx and Rx queues and the address filter list. */
326 	iowrite32(tp->csr0, ioaddr + CSR0);
327 	pci_read_config_dword(tp->pdev, PCI_COMMAND, &reg);  /* flush write */
328 	udelay(100);
329 
330 	if (tulip_debug > 1)
331 		netdev_dbg(dev, "tulip_up(), irq==%d\n", dev->irq);
332 
333 	iowrite32(tp->rx_ring_dma, ioaddr + CSR3);
334 	iowrite32(tp->tx_ring_dma, ioaddr + CSR4);
335 	tp->cur_rx = tp->cur_tx = 0;
336 	tp->dirty_rx = tp->dirty_tx = 0;
337 
338 	if (tp->flags & MC_HASH_ONLY) {
339 		u32 addr_low = get_unaligned_le32(dev->dev_addr);
340 		u32 addr_high = get_unaligned_le16(dev->dev_addr + 4);
341 		if (tp->chip_id == AX88140) {
342 			iowrite32(0, ioaddr + CSR13);
343 			iowrite32(addr_low,  ioaddr + CSR14);
344 			iowrite32(1, ioaddr + CSR13);
345 			iowrite32(addr_high, ioaddr + CSR14);
346 		} else if (tp->flags & COMET_MAC_ADDR) {
347 			iowrite32(addr_low,  ioaddr + 0xA4);
348 			iowrite32(addr_high, ioaddr + 0xA8);
349 			iowrite32(0, ioaddr + CSR27);
350 			iowrite32(0, ioaddr + CSR28);
351 		}
352 	} else {
353 		/* This is set_rx_mode(), but without starting the transmitter. */
354 		u16 *eaddrs = (u16 *)dev->dev_addr;
355 		u16 *setup_frm = &tp->setup_frame[15*6];
356 		dma_addr_t mapping;
357 
358 		/* 21140 bug: you must add the broadcast address. */
359 		memset(tp->setup_frame, 0xff, sizeof(tp->setup_frame));
360 		/* Fill the final entry of the table with our physical address. */
361 		*setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0];
362 		*setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1];
363 		*setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2];
364 
365 		mapping = pci_map_single(tp->pdev, tp->setup_frame,
366 					 sizeof(tp->setup_frame),
367 					 PCI_DMA_TODEVICE);
368 		tp->tx_buffers[tp->cur_tx].skb = NULL;
369 		tp->tx_buffers[tp->cur_tx].mapping = mapping;
370 
371 		/* Put the setup frame on the Tx list. */
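		/* 0x08000000 is the TDES1 "setup packet" bit; 192 is the fixed
		 * length of a 21x4x setup buffer. */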
372 		tp->tx_ring[tp->cur_tx].length = cpu_to_le32(0x08000000 | 192);
373 		tp->tx_ring[tp->cur_tx].buffer1 = cpu_to_le32(mapping);
374 		tp->tx_ring[tp->cur_tx].status = cpu_to_le32(DescOwned);
375 
376 		tp->cur_tx++;
377 	}
378 
379 	tp->saved_if_port = dev->if_port;
380 	if (dev->if_port == 0)
381 		dev->if_port = tp->default_port;
382 
383 	/* Allow selecting a default media. */
384 	i = 0;
385 	if (tp->mtable == NULL)
386 		goto media_picked;
387 	if (dev->if_port) {
388 		int looking_for = tulip_media_cap[dev->if_port] & MediaIsMII ? 11 :
389 			(dev->if_port == 12 ? 0 : dev->if_port);
390 		for (i = 0; i < tp->mtable->leafcount; i++)
391 			if (tp->mtable->mleaf[i].media == looking_for) {
392 				dev_info(&dev->dev,
393 					 "Using user-specified media %s\n",
394 					 medianame[dev->if_port]);
395 				goto media_picked;
396 			}
397 	}
398 	if ((tp->mtable->defaultmedia & 0x0800) == 0) {
399 		int looking_for = tp->mtable->defaultmedia & MEDIA_MASK;
400 		for (i = 0; i < tp->mtable->leafcount; i++)
401 			if (tp->mtable->mleaf[i].media == looking_for) {
402 				dev_info(&dev->dev,
403 					 "Using EEPROM-set media %s\n",
404 					 medianame[looking_for]);
405 				goto media_picked;
406 			}
407 	}
408 	/* Start sensing first non-full-duplex media. */
409 	for (i = tp->mtable->leafcount - 1;
410 		 (tulip_media_cap[tp->mtable->mleaf[i].media] & MediaAlwaysFD) && i > 0; i--)
411 		;
412 media_picked:
413 
414 	tp->csr6 = 0;
415 	tp->cur_index = i;
416 	tp->nwayset = 0;
417 
418 	if (dev->if_port) {
419 		if (tp->chip_id == DC21143  &&
420 		    (tulip_media_cap[dev->if_port] & MediaIsMII)) {
421 			/* We must reset the media CSRs when we force-select MII mode. */
422 			iowrite32(0x0000, ioaddr + CSR13);
423 			iowrite32(0x0000, ioaddr + CSR14);
424 			iowrite32(0x0008, ioaddr + CSR15);
425 		}
426 		tulip_select_media(dev, 1);
427 	} else if (tp->chip_id == DC21142) {
428 		if (tp->mii_cnt) {
429 			tulip_select_media(dev, 1);
430 			if (tulip_debug > 1)
431 				dev_info(&dev->dev,
432 					 "Using MII transceiver %d, status %04x\n",
433 					 tp->phys[0],
434 					 tulip_mdio_read(dev, tp->phys[0], 1));
435 			iowrite32(csr6_mask_defstate, ioaddr + CSR6);
436 			tp->csr6 = csr6_mask_hdcap;
437 			dev->if_port = 11;
438 			iowrite32(0x0000, ioaddr + CSR13);
439 			iowrite32(0x0000, ioaddr + CSR14);
440 		} else
441 			t21142_start_nway(dev);
442 	} else if (tp->chip_id == PNIC2) {
443 		/* for initial startup advertise 10/100 Full and Half */
444 		tp->sym_advertise = 0x01E0;
445 		/* enable autonegotiate end interrupt */
446 		iowrite32(ioread32(ioaddr + CSR5) | 0x00008010, ioaddr + CSR5);
447 		iowrite32(ioread32(ioaddr + CSR7) | 0x00008010, ioaddr + CSR7);
448 		pnic2_start_nway(dev);
449 	} else if (tp->chip_id == LC82C168  &&  ! tp->medialock) {
450 		if (tp->mii_cnt) {
451 			dev->if_port = 11;
452 			tp->csr6 = 0x814C0000 | (tp->full_duplex ? 0x0200 : 0);
453 			iowrite32(0x0001, ioaddr + CSR15);
454 		} else if (ioread32(ioaddr + CSR5) & TPLnkPass)
455 			pnic_do_nway(dev);
456 		else {
457 			/* Start with 10mbps to do autonegotiation. */
458 			iowrite32(0x32, ioaddr + CSR12);
459 			tp->csr6 = 0x00420000;
460 			iowrite32(0x0001B078, ioaddr + 0xB8);
461 			iowrite32(0x0201B078, ioaddr + 0xB8);
462 			next_tick = 1*HZ;
463 		}
464 	} else if ((tp->chip_id == MX98713 || tp->chip_id == COMPEX9881) &&
465 		   ! tp->medialock) {
466 		dev->if_port = 0;
467 		tp->csr6 = 0x01880000 | (tp->full_duplex ? 0x0200 : 0);
468 		iowrite32(0x0f370000 | ioread16(ioaddr + 0x80), ioaddr + 0x80);
469 	} else if (tp->chip_id == MX98715 || tp->chip_id == MX98725) {
470 		/* Provided by BOLO, Macronix - 12/10/1998. */
471 		dev->if_port = 0;
472 		tp->csr6 = 0x01a80200;
473 		iowrite32(0x0f370000 | ioread16(ioaddr + 0x80), ioaddr + 0x80);
474 		iowrite32(0x11000 | ioread16(ioaddr + 0xa0), ioaddr + 0xa0);
475 	} else if (tp->chip_id == COMET || tp->chip_id == CONEXANT) {
476 		/* Enable automatic Tx underrun recovery. */
477 		iowrite32(ioread32(ioaddr + 0x88) | 1, ioaddr + 0x88);
478 		dev->if_port = tp->mii_cnt ? 11 : 0;
479 		tp->csr6 = 0x00040000;
480 	} else if (tp->chip_id == AX88140) {
481 		tp->csr6 = tp->mii_cnt ? 0x00040100 : 0x00000100;
482 	} else
483 		tulip_select_media(dev, 1);
484 
485 	/* Start the chip's Tx to process setup frame. */
486 	tulip_stop_rxtx(tp);
487 	barrier();
488 	udelay(5);
489 	iowrite32(tp->csr6 | TxOn, ioaddr + CSR6);
490 
491 	/* Enable interrupts by setting the interrupt mask. */
492 	iowrite32(tulip_tbl[tp->chip_id].valid_intrs, ioaddr + CSR5);
493 	iowrite32(tulip_tbl[tp->chip_id].valid_intrs, ioaddr + CSR7);
494 	tulip_start_rxtx(tp);
495 	iowrite32(0, ioaddr + CSR2);		/* Rx poll demand */
496 
497 	if (tulip_debug > 2) {
498 		netdev_dbg(dev, "Done tulip_up(), CSR0 %08x, CSR5 %08x CSR6 %08x\n",
499 			   ioread32(ioaddr + CSR0),
500 			   ioread32(ioaddr + CSR5),
501 			   ioread32(ioaddr + CSR6));
502 	}
503 
504 	/* Set the timer to check for link beat and perhaps switch
505 	   to an alternate media type. */
506 	tp->timer.expires = RUN_AT(next_tick);
507 	add_timer(&tp->timer);
508 #ifdef CONFIG_TULIP_NAPI
509 	init_timer(&tp->oom_timer);
510 	tp->oom_timer.data = (unsigned long)dev;
511 	tp->oom_timer.function = oom_timer;
512 #endif
513 }
514 
515 static int
516 tulip_open(struct net_device *dev)
517 {
518 	int retval;
519 
520 	tulip_init_ring (dev);
521 
522 	retval = request_irq(dev->irq, tulip_interrupt, IRQF_SHARED, dev->name, dev);
523 	if (retval)
524 		goto free_ring;
525 
526 	tulip_up (dev);
527 
528 	netif_start_queue (dev);
529 
530 	return 0;
531 
532 free_ring:
533 	tulip_free_ring (dev);
534 	return retval;
535 }
536 
537 
538 static void tulip_tx_timeout(struct net_device *dev)
539 {
540 	struct tulip_private *tp = netdev_priv(dev);
541 	void __iomem *ioaddr = tp->base_addr;
542 	unsigned long flags;
543 
544 	spin_lock_irqsave (&tp->lock, flags);
545 
546 	if (tulip_media_cap[dev->if_port] & MediaIsMII) {
547 		/* Do nothing -- the media monitor should handle this. */
548 		if (tulip_debug > 1)
549 			dev_warn(&dev->dev,
550 				 "Transmit timeout using MII device\n");
551 	} else if (tp->chip_id == DC21140 || tp->chip_id == DC21142 ||
552 		   tp->chip_id == MX98713 || tp->chip_id == COMPEX9881 ||
553 		   tp->chip_id == DM910X) {
554 		dev_warn(&dev->dev,
555 			 "21140 transmit timed out, status %08x, SIA %08x %08x %08x %08x, resetting...\n",
556 			 ioread32(ioaddr + CSR5), ioread32(ioaddr + CSR12),
557 			 ioread32(ioaddr + CSR13), ioread32(ioaddr + CSR14),
558 			 ioread32(ioaddr + CSR15));
559 		tp->timeout_recovery = 1;
560 		schedule_work(&tp->media_work);
561 		goto out_unlock;
562 	} else if (tp->chip_id == PNIC2) {
563 		dev_warn(&dev->dev,
564 			 "PNIC2 transmit timed out, status %08x, CSR6/7 %08x / %08x CSR12 %08x, resetting...\n",
565 			 (int)ioread32(ioaddr + CSR5),
566 			 (int)ioread32(ioaddr + CSR6),
567 			 (int)ioread32(ioaddr + CSR7),
568 			 (int)ioread32(ioaddr + CSR12));
569 	} else {
570 		dev_warn(&dev->dev,
571 			 "Transmit timed out, status %08x, CSR12 %08x, resetting...\n",
572 			 ioread32(ioaddr + CSR5), ioread32(ioaddr + CSR12));
573 		dev->if_port = 0;
574 	}
575 
576 #if defined(way_too_many_messages)
577 	if (tulip_debug > 3) {
578 		int i;
579 		for (i = 0; i < RX_RING_SIZE; i++) {
580 			u8 *buf = (u8 *)(tp->rx_ring[i].buffer1);
581 			int j;
582 			printk(KERN_DEBUG
583 			       "%2d: %08x %08x %08x %08x  %02x %02x %02x\n",
584 			       i,
585 			       (unsigned int)tp->rx_ring[i].status,
586 			       (unsigned int)tp->rx_ring[i].length,
587 			       (unsigned int)tp->rx_ring[i].buffer1,
588 			       (unsigned int)tp->rx_ring[i].buffer2,
589 			       buf[0], buf[1], buf[2]);
590 			for (j = 0; buf[j] != 0xee && j < 1600; j++)
591 				if (j < 100)
592 					pr_cont(" %02x", buf[j]);
593 			pr_cont(" j=%d\n", j);
594 		}
595 		printk(KERN_DEBUG "  Rx ring %p: ", tp->rx_ring);
596 		for (i = 0; i < RX_RING_SIZE; i++)
597 			pr_cont(" %08x", (unsigned int)tp->rx_ring[i].status);
598 		printk(KERN_DEBUG "  Tx ring %p: ", tp->tx_ring);
599 		for (i = 0; i < TX_RING_SIZE; i++)
600 			pr_cont(" %08x", (unsigned int)tp->tx_ring[i].status);
601 		pr_cont("\n");
602 	}
603 #endif
604 
605 	tulip_tx_timeout_complete(tp, ioaddr);
606 
607 out_unlock:
608 	spin_unlock_irqrestore (&tp->lock, flags);
609 	dev->trans_start = jiffies; /* prevent tx timeout */
610 	netif_wake_queue (dev);
611 }
612 
613 
614 /* Initialize the Rx and Tx rings, along with various 'dev' bits. */
615 static void tulip_init_ring(struct net_device *dev)
616 {
617 	struct tulip_private *tp = netdev_priv(dev);
618 	int i;
619 
620 	tp->susp_rx = 0;
621 	tp->ttimer = 0;
622 	tp->nir = 0;
623 
624 	for (i = 0; i < RX_RING_SIZE; i++) {
625 		tp->rx_ring[i].status = 0x00000000;
626 		tp->rx_ring[i].length = cpu_to_le32(PKT_BUF_SZ);
627 		tp->rx_ring[i].buffer2 = cpu_to_le32(tp->rx_ring_dma + sizeof(struct tulip_rx_desc) * (i + 1));
628 		tp->rx_buffers[i].skb = NULL;
629 		tp->rx_buffers[i].mapping = 0;
630 	}
631 	/* Mark the last entry as wrapping the ring. */
632 	tp->rx_ring[i-1].length = cpu_to_le32(PKT_BUF_SZ | DESC_RING_WRAP);
633 	tp->rx_ring[i-1].buffer2 = cpu_to_le32(tp->rx_ring_dma);
634 
635 	for (i = 0; i < RX_RING_SIZE; i++) {
636 		dma_addr_t mapping;
637 
638 		/* Note the receive buffer must be longword aligned.
639 		   dev_alloc_skb() provides 16 byte alignment.  But do *not*
640 		   use skb_reserve() to align the IP header! */
641 		struct sk_buff *skb = dev_alloc_skb(PKT_BUF_SZ);
642 		tp->rx_buffers[i].skb = skb;
643 		if (skb == NULL)
644 			break;
645 		mapping = pci_map_single(tp->pdev, skb->data,
646 					 PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
647 		tp->rx_buffers[i].mapping = mapping;
648 		skb->dev = dev;			/* Mark as being used by this device. */
649 		tp->rx_ring[i].status = cpu_to_le32(DescOwned);	/* Owned by Tulip chip */
650 		tp->rx_ring[i].buffer1 = cpu_to_le32(mapping);
651 	}
652 	tp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
653 
654 	/* The Tx buffer descriptor is filled in as needed, but we
655 	   do need to clear the ownership bit. */
656 	for (i = 0; i < TX_RING_SIZE; i++) {
657 		tp->tx_buffers[i].skb = NULL;
658 		tp->tx_buffers[i].mapping = 0;
659 		tp->tx_ring[i].status = 0x00000000;
660 		tp->tx_ring[i].buffer2 = cpu_to_le32(tp->tx_ring_dma + sizeof(struct tulip_tx_desc) * (i + 1));
661 	}
662 	tp->tx_ring[i-1].buffer2 = cpu_to_le32(tp->tx_ring_dma);
663 }
664 
665 static netdev_tx_t
666 tulip_start_xmit(struct sk_buff *skb, struct net_device *dev)
667 {
668 	struct tulip_private *tp = netdev_priv(dev);
669 	int entry;
670 	u32 flag;
671 	dma_addr_t mapping;
672 	unsigned long flags;
673 
674 	spin_lock_irqsave(&tp->lock, flags);
675 
676 	/* Calculate the next Tx descriptor entry. */
677 	entry = tp->cur_tx % TX_RING_SIZE;
678 
679 	tp->tx_buffers[entry].skb = skb;
680 	mapping = pci_map_single(tp->pdev, skb->data,
681 				 skb->len, PCI_DMA_TODEVICE);
682 	tp->tx_buffers[entry].mapping = mapping;
683 	tp->tx_ring[entry].buffer1 = cpu_to_le32(mapping);
684 
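	/* TDES1 control bits: 0x60000000 = first + last segment, no completion
	 * interrupt; 0xe0000000 also requests a Tx-done interrupt, so interrupts
	 * are generated only about every TX_RING_SIZE/2 packets or when the ring
	 * is nearly full. */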
685 	if (tp->cur_tx - tp->dirty_tx < TX_RING_SIZE/2) {/* Typical path */
686 		flag = 0x60000000; /* No interrupt */
687 	} else if (tp->cur_tx - tp->dirty_tx == TX_RING_SIZE/2) {
688 		flag = 0xe0000000; /* Tx-done intr. */
689 	} else if (tp->cur_tx - tp->dirty_tx < TX_RING_SIZE - 2) {
690 		flag = 0x60000000; /* No Tx-done intr. */
691 	} else {		/* Leave room for set_rx_mode() to fill entries. */
692 		flag = 0xe0000000; /* Tx-done intr. */
693 		netif_stop_queue(dev);
694 	}
695 	if (entry == TX_RING_SIZE-1)
696 		flag = 0xe0000000 | DESC_RING_WRAP;
697 
698 	tp->tx_ring[entry].length = cpu_to_le32(skb->len | flag);
699 	/* if we were using Transmit Automatic Polling, we would need a
700 	 * wmb() here. */
701 	tp->tx_ring[entry].status = cpu_to_le32(DescOwned);
702 	wmb();
703 
704 	tp->cur_tx++;
705 
706 	/* Trigger an immediate transmit demand. */
707 	iowrite32(0, tp->base_addr + CSR1);
708 
709 	spin_unlock_irqrestore(&tp->lock, flags);
710 
711 	return NETDEV_TX_OK;
712 }
713 
714 static void tulip_clean_tx_ring(struct tulip_private *tp)
715 {
716 	unsigned int dirty_tx;
717 
718 	for (dirty_tx = tp->dirty_tx ; tp->cur_tx - dirty_tx > 0;
719 		dirty_tx++) {
720 		int entry = dirty_tx % TX_RING_SIZE;
721 		int status = le32_to_cpu(tp->tx_ring[entry].status);
722 
723 		if (status < 0) {
724 			tp->dev->stats.tx_errors++;	/* It wasn't Txed */
725 			tp->tx_ring[entry].status = 0;
726 		}
727 
728 		/* Check for Tx filter setup frames. */
729 		if (tp->tx_buffers[entry].skb == NULL) {
730 			/* dummy entries are not DMA-mapped, so only unmap real setup frames */
731 			if (tp->tx_buffers[entry].mapping)
732 				pci_unmap_single(tp->pdev,
733 					tp->tx_buffers[entry].mapping,
734 					sizeof(tp->setup_frame),
735 					PCI_DMA_TODEVICE);
736 			continue;
737 		}
738 
739 		pci_unmap_single(tp->pdev, tp->tx_buffers[entry].mapping,
740 				tp->tx_buffers[entry].skb->len,
741 				PCI_DMA_TODEVICE);
742 
743 		/* Free the original skb. */
744 		dev_kfree_skb_irq(tp->tx_buffers[entry].skb);
745 		tp->tx_buffers[entry].skb = NULL;
746 		tp->tx_buffers[entry].mapping = 0;
747 	}
748 }
749 
750 static void tulip_down (struct net_device *dev)
751 {
752 	struct tulip_private *tp = netdev_priv(dev);
753 	void __iomem *ioaddr = tp->base_addr;
754 	unsigned long flags;
755 
756 	cancel_work_sync(&tp->media_work);
757 
758 #ifdef CONFIG_TULIP_NAPI
759 	napi_disable(&tp->napi);
760 #endif
761 
762 	del_timer_sync (&tp->timer);
763 #ifdef CONFIG_TULIP_NAPI
764 	del_timer_sync (&tp->oom_timer);
765 #endif
766 	spin_lock_irqsave (&tp->lock, flags);
767 
768 	/* Disable interrupts by clearing the interrupt mask. */
769 	iowrite32 (0x00000000, ioaddr + CSR7);
770 
771 	/* Stop the Tx and Rx processes. */
772 	tulip_stop_rxtx(tp);
773 
774 	/* prepare receive buffers */
775 	tulip_refill_rx(dev);
776 
777 	/* release any unconsumed transmit buffers */
778 	tulip_clean_tx_ring(tp);
779 
780 	if (ioread32(ioaddr + CSR6) != 0xffffffff)
781 		dev->stats.rx_missed_errors += ioread32(ioaddr + CSR8) & 0xffff;
782 
783 	spin_unlock_irqrestore (&tp->lock, flags);
784 
785 	init_timer(&tp->timer);
786 	tp->timer.data = (unsigned long)dev;
787 	tp->timer.function = tulip_tbl[tp->chip_id].media_timer;
788 
789 	dev->if_port = tp->saved_if_port;
790 
791 	/* Leave the driver in snooze, not sleep, mode. */
792 	tulip_set_power_state (tp, 0, 1);
793 }
794 
795 static void tulip_free_ring (struct net_device *dev)
796 {
797 	struct tulip_private *tp = netdev_priv(dev);
798 	int i;
799 
800 	/* Free all the skbuffs in the Rx queue. */
801 	for (i = 0; i < RX_RING_SIZE; i++) {
802 		struct sk_buff *skb = tp->rx_buffers[i].skb;
803 		dma_addr_t mapping = tp->rx_buffers[i].mapping;
804 
805 		tp->rx_buffers[i].skb = NULL;
806 		tp->rx_buffers[i].mapping = 0;
807 
808 		tp->rx_ring[i].status = 0;	/* Not owned by Tulip chip. */
809 		tp->rx_ring[i].length = 0;
810 		/* An invalid address. */
811 		tp->rx_ring[i].buffer1 = cpu_to_le32(0xBADF00D0);
812 		if (skb) {
813 			pci_unmap_single(tp->pdev, mapping, PKT_BUF_SZ,
814 					 PCI_DMA_FROMDEVICE);
815 			dev_kfree_skb (skb);
816 		}
817 	}
818 
819 	for (i = 0; i < TX_RING_SIZE; i++) {
820 		struct sk_buff *skb = tp->tx_buffers[i].skb;
821 
822 		if (skb != NULL) {
823 			pci_unmap_single(tp->pdev, tp->tx_buffers[i].mapping,
824 					 skb->len, PCI_DMA_TODEVICE);
825 			dev_kfree_skb (skb);
826 		}
827 		tp->tx_buffers[i].skb = NULL;
828 		tp->tx_buffers[i].mapping = 0;
829 	}
830 }
831 
832 static int tulip_close (struct net_device *dev)
833 {
834 	struct tulip_private *tp = netdev_priv(dev);
835 	void __iomem *ioaddr = tp->base_addr;
836 
837 	netif_stop_queue (dev);
838 
839 	tulip_down (dev);
840 
841 	if (tulip_debug > 1)
842 		netdev_dbg(dev, "Shutting down ethercard, status was %02x\n",
843 			   ioread32 (ioaddr + CSR5));
844 
845 	free_irq (dev->irq, dev);
846 
847 	tulip_free_ring (dev);
848 
849 	return 0;
850 }
851 
852 static struct net_device_stats *tulip_get_stats(struct net_device *dev)
853 {
854 	struct tulip_private *tp = netdev_priv(dev);
855 	void __iomem *ioaddr = tp->base_addr;
856 
857 	if (netif_running(dev)) {
858 		unsigned long flags;
859 
860 		spin_lock_irqsave (&tp->lock, flags);
861 
862 		dev->stats.rx_missed_errors += ioread32(ioaddr + CSR8) & 0xffff;
863 
864 		spin_unlock_irqrestore(&tp->lock, flags);
865 	}
866 
867 	return &dev->stats;
868 }
869 
870 
871 static void tulip_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
872 {
873 	struct tulip_private *np = netdev_priv(dev);
874 	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
875 	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
876 	strlcpy(info->bus_info, pci_name(np->pdev), sizeof(info->bus_info));
877 }
878 
879 
880 static int tulip_ethtool_set_wol(struct net_device *dev,
881 				 struct ethtool_wolinfo *wolinfo)
882 {
883 	struct tulip_private *tp = netdev_priv(dev);
884 
885 	if (wolinfo->wolopts & (~tp->wolinfo.supported))
886 		   return -EOPNOTSUPP;
887 
888 	tp->wolinfo.wolopts = wolinfo->wolopts;
889 	device_set_wakeup_enable(&tp->pdev->dev, tp->wolinfo.wolopts);
890 	return 0;
891 }
892 
893 static void tulip_ethtool_get_wol(struct net_device *dev,
894 				  struct ethtool_wolinfo *wolinfo)
895 {
896 	struct tulip_private *tp = netdev_priv(dev);
897 
898 	wolinfo->supported = tp->wolinfo.supported;
899 	wolinfo->wolopts = tp->wolinfo.wolopts;
900 	return;
901 }
902 
903 
904 static const struct ethtool_ops ops = {
905 	.get_drvinfo = tulip_get_drvinfo,
906 	.set_wol     = tulip_ethtool_set_wol,
907 	.get_wol     = tulip_ethtool_get_wol,
908 };
909 
910 /* Provide ioctl() calls to examine the MII xcvr state. */
911 static int private_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
912 {
913 	struct tulip_private *tp = netdev_priv(dev);
914 	void __iomem *ioaddr = tp->base_addr;
915 	struct mii_ioctl_data *data = if_mii(rq);
916 	const unsigned int phy_idx = 0;
917 	int phy = tp->phys[phy_idx] & 0x1f;
918 	unsigned int regnum = data->reg_num;
919 
920 	switch (cmd) {
921 	case SIOCGMIIPHY:		/* Get address of MII PHY in use. */
922 		if (tp->mii_cnt)
923 			data->phy_id = phy;
924 		else if (tp->flags & HAS_NWAY)
925 			data->phy_id = 32;
926 		else if (tp->chip_id == COMET)
927 			data->phy_id = 1;
928 		else
929 			return -ENODEV;
930 
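		/* no break: deliberately fall through to SIOCGMIIREG so the
		 * requested register is read back in the same call */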
931 	case SIOCGMIIREG:		/* Read MII PHY register. */
932 		if (data->phy_id == 32 && (tp->flags & HAS_NWAY)) {
933 			int csr12 = ioread32 (ioaddr + CSR12);
934 			int csr14 = ioread32 (ioaddr + CSR14);
935 			switch (regnum) {
936 			case 0:
937 				if (((csr14 << 5) & 0x1000) ||
938 				    (dev->if_port == 5 && tp->nwayset))
939 					data->val_out = 0x1000;
940 				else
941 					data->val_out = (tulip_media_cap[dev->if_port] & MediaIs100 ? 0x2000 : 0) |
942 						(tulip_media_cap[dev->if_port] & MediaIsFD ? 0x0100 : 0);
943 				break;
944 			case 1:
945 				data->val_out =
946 					0x1848 +
947 					((csr12 & 0x7000) == 0x5000 ? 0x20 : 0) +
948 					((csr12 & 0x06) == 6 ? 0 : 4);
949 				data->val_out |= 0x6048;
950 				break;
951 			case 4:
952 				/* Advertised value, bogus 10baseTx-FD value from CSR6. */
953 				data->val_out =
954 					((ioread32(ioaddr + CSR6) >> 3) & 0x0040) +
955 					((csr14 >> 1) & 0x20) + 1;
956 				data->val_out |= ((csr14 >> 9) & 0x03C0);
957 				break;
958 			case 5: data->val_out = tp->lpar; break;
959 			default: data->val_out = 0; break;
960 			}
961 		} else {
962 			data->val_out = tulip_mdio_read (dev, data->phy_id & 0x1f, regnum);
963 		}
964 		return 0;
965 
966 	case SIOCSMIIREG:		/* Write MII PHY register. */
967 		if (regnum & ~0x1f)
968 			return -EINVAL;
969 		if (data->phy_id == phy) {
970 			u16 value = data->val_in;
971 			switch (regnum) {
972 			case 0:	/* Check for autonegotiation on or reset. */
973 				tp->full_duplex_lock = (value & 0x9000) ? 0 : 1;
974 				if (tp->full_duplex_lock)
975 					tp->full_duplex = (value & 0x0100) ? 1 : 0;
976 				break;
977 			case 4:
978 				tp->advertising[phy_idx] =
979 				tp->mii_advertise = data->val_in;
980 				break;
981 			}
982 		}
983 		if (data->phy_id == 32 && (tp->flags & HAS_NWAY)) {
984 			u16 value = data->val_in;
985 			if (regnum == 0) {
986 				if ((value & 0x1200) == 0x1200) {
987 					if (tp->chip_id == PNIC2) {
988 						pnic2_start_nway (dev);
989 					} else {
990 						t21142_start_nway (dev);
991 					}
992 				}
993 			} else if (regnum == 4)
994 				tp->sym_advertise = value;
995 		} else {
996 			tulip_mdio_write (dev, data->phy_id & 0x1f, regnum, data->val_in);
997 		}
998 		return 0;
999 	default:
1000 		return -EOPNOTSUPP;
1001 	}
1002 
1003 	return -EOPNOTSUPP;
1004 }
1005 
1006 
1007 /* Set or clear the multicast filter for this adaptor.
1008    Note that we only use exclusion around actually queueing the
1009    new frame, not around filling tp->setup_frame.  This is non-deterministic
1010    when re-entered but still correct. */
1011 
1012 #undef set_bit_le
1013 #define set_bit_le(i,p) do { ((char *)(p))[(i)/8] |= (1<<((i)%8)); } while(0)
1014 
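/* A 21x4x setup frame is 192 bytes: 16 filter entries of 12 bytes, with only
 * the low 16 bits of each longword significant -- hence every address word is
 * stored twice below.  In hash-filtering mode the first 32 longwords hold a
 * 512-bit hash table and our own address goes in entry 13 (setup_frame[13*6]). */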
1015 static void build_setup_frame_hash(u16 *setup_frm, struct net_device *dev)
1016 {
1017 	struct tulip_private *tp = netdev_priv(dev);
1018 	u16 hash_table[32];
1019 	struct netdev_hw_addr *ha;
1020 	int i;
1021 	u16 *eaddrs;
1022 
1023 	memset(hash_table, 0, sizeof(hash_table));
1024 	set_bit_le(255, hash_table); 			/* Broadcast entry */
1025 	/* This should work on big-endian machines as well. */
1026 	netdev_for_each_mc_addr(ha, dev) {
1027 		int index = ether_crc_le(ETH_ALEN, ha->addr) & 0x1ff;
1028 
1029 		set_bit_le(index, hash_table);
1030 	}
1031 	for (i = 0; i < 32; i++) {
1032 		*setup_frm++ = hash_table[i];
1033 		*setup_frm++ = hash_table[i];
1034 	}
1035 	setup_frm = &tp->setup_frame[13*6];
1036 
1037 	/* Fill the final entry with our physical address. */
1038 	eaddrs = (u16 *)dev->dev_addr;
1039 	*setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0];
1040 	*setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1];
1041 	*setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2];
1042 }
1043 
1044 static void build_setup_frame_perfect(u16 *setup_frm, struct net_device *dev)
1045 {
1046 	struct tulip_private *tp = netdev_priv(dev);
1047 	struct netdev_hw_addr *ha;
1048 	u16 *eaddrs;
1049 
1050 	/* We have <= 14 addresses so we can use the wonderful
1051 	   16 address perfect filtering of the Tulip. */
1052 	netdev_for_each_mc_addr(ha, dev) {
1053 		eaddrs = (u16 *) ha->addr;
1054 		*setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
1055 		*setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
1056 		*setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
1057 	}
1058 	/* Fill the unused entries with the broadcast address. */
1059 	memset(setup_frm, 0xff, (15 - netdev_mc_count(dev)) * 12);
1060 	setup_frm = &tp->setup_frame[15*6];
1061 
1062 	/* Fill the final entry with our physical address. */
1063 	eaddrs = (u16 *)dev->dev_addr;
1064 	*setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0];
1065 	*setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1];
1066 	*setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2];
1067 }
1068 
1069 
1070 static void set_rx_mode(struct net_device *dev)
1071 {
1072 	struct tulip_private *tp = netdev_priv(dev);
1073 	void __iomem *ioaddr = tp->base_addr;
1074 	int csr6;
1075 
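	/* 0x00D5 covers the CSR6 receive-filter mode bits (hash/perfect, hash-only,
	 * inverse filtering, promiscuous and pass-all-multicast); clear them here
	 * and set the wanted mode below. */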
1076 	csr6 = ioread32(ioaddr + CSR6) & ~0x00D5;
1077 
1078 	tp->csr6 &= ~0x00D5;
1079 	if (dev->flags & IFF_PROMISC) {			/* Set promiscuous. */
1080 		tp->csr6 |= AcceptAllMulticast | AcceptAllPhys;
1081 		csr6 |= AcceptAllMulticast | AcceptAllPhys;
1082 	} else if ((netdev_mc_count(dev) > 1000) ||
1083 		   (dev->flags & IFF_ALLMULTI)) {
1084 		/* Too many to filter well -- accept all multicasts. */
1085 		tp->csr6 |= AcceptAllMulticast;
1086 		csr6 |= AcceptAllMulticast;
1087 	} else	if (tp->flags & MC_HASH_ONLY) {
1088 		/* Some work-alikes have only a 64-entry hash filter table. */
1089 		/* Should verify correctness on big-endian/__powerpc__ */
1090 		struct netdev_hw_addr *ha;
1091 		if (netdev_mc_count(dev) > 64) {
1092 			/* Arbitrary non-effective limit. */
1093 			tp->csr6 |= AcceptAllMulticast;
1094 			csr6 |= AcceptAllMulticast;
1095 		} else {
1096 			u32 mc_filter[2] = {0, 0};		 /* Multicast hash filter */
1097 			int filterbit;
1098 			netdev_for_each_mc_addr(ha, dev) {
1099 				if (tp->flags & COMET_MAC_ADDR)
1100 					filterbit = ether_crc_le(ETH_ALEN,
1101 								 ha->addr);
1102 				else
1103 					filterbit = ether_crc(ETH_ALEN,
1104 							      ha->addr) >> 26;
1105 				filterbit &= 0x3f;
1106 				mc_filter[filterbit >> 5] |= 1 << (filterbit & 31);
1107 				if (tulip_debug > 2)
1108 					dev_info(&dev->dev,
1109 						 "Added filter for %pM  %08x bit %d\n",
1110 						 ha->addr,
1111 						 ether_crc(ETH_ALEN, ha->addr),
1112 						 filterbit);
1113 			}
1114 			if (mc_filter[0] == tp->mc_filter[0]  &&
1115 				mc_filter[1] == tp->mc_filter[1])
1116 				;				/* No change. */
1117 			else if (tp->flags & IS_ASIX) {
1118 				iowrite32(2, ioaddr + CSR13);
1119 				iowrite32(mc_filter[0], ioaddr + CSR14);
1120 				iowrite32(3, ioaddr + CSR13);
1121 				iowrite32(mc_filter[1], ioaddr + CSR14);
1122 			} else if (tp->flags & COMET_MAC_ADDR) {
1123 				iowrite32(mc_filter[0], ioaddr + CSR27);
1124 				iowrite32(mc_filter[1], ioaddr + CSR28);
1125 			}
1126 			tp->mc_filter[0] = mc_filter[0];
1127 			tp->mc_filter[1] = mc_filter[1];
1128 		}
1129 	} else {
1130 		unsigned long flags;
1131 		u32 tx_flags = 0x08000000 | 192;
1132 
1133 		/* Note that only the low-address shortword of setup_frame is valid!
1134 		   The values are doubled for big-endian architectures. */
1135 		if (netdev_mc_count(dev) > 14) {
1136 			/* Must use a multicast hash table. */
1137 			build_setup_frame_hash(tp->setup_frame, dev);
1138 			tx_flags = 0x08400000 | 192;
1139 		} else {
1140 			build_setup_frame_perfect(tp->setup_frame, dev);
1141 		}
1142 
1143 		spin_lock_irqsave(&tp->lock, flags);
1144 
1145 		if (tp->cur_tx - tp->dirty_tx > TX_RING_SIZE - 2) {
1146 			/* Same setup recently queued, we need not add it. */
1147 		} else {
1148 			unsigned int entry;
1149 			int dummy = -1;
1150 
1151 			/* Now add this frame to the Tx list. */
1152 
1153 			entry = tp->cur_tx++ % TX_RING_SIZE;
1154 
1155 			if (entry != 0) {
1156 				/* Avoid a chip errata by prefixing a dummy entry. */
1157 				tp->tx_buffers[entry].skb = NULL;
1158 				tp->tx_buffers[entry].mapping = 0;
1159 				tp->tx_ring[entry].length =
1160 					(entry == TX_RING_SIZE-1) ? cpu_to_le32(DESC_RING_WRAP) : 0;
1161 				tp->tx_ring[entry].buffer1 = 0;
1162 				/* Must set DescOwned later to avoid race with chip */
1163 				dummy = entry;
1164 				entry = tp->cur_tx++ % TX_RING_SIZE;
1165 
1166 			}
1167 
1168 			tp->tx_buffers[entry].skb = NULL;
1169 			tp->tx_buffers[entry].mapping =
1170 				pci_map_single(tp->pdev, tp->setup_frame,
1171 					       sizeof(tp->setup_frame),
1172 					       PCI_DMA_TODEVICE);
1173 			/* Put the setup frame on the Tx list. */
1174 			if (entry == TX_RING_SIZE-1)
1175 				tx_flags |= DESC_RING_WRAP;		/* Wrap ring. */
1176 			tp->tx_ring[entry].length = cpu_to_le32(tx_flags);
1177 			tp->tx_ring[entry].buffer1 =
1178 				cpu_to_le32(tp->tx_buffers[entry].mapping);
1179 			tp->tx_ring[entry].status = cpu_to_le32(DescOwned);
1180 			if (dummy >= 0)
1181 				tp->tx_ring[dummy].status = cpu_to_le32(DescOwned);
1182 			if (tp->cur_tx - tp->dirty_tx >= TX_RING_SIZE - 2)
1183 				netif_stop_queue(dev);
1184 
1185 			/* Trigger an immediate transmit demand. */
1186 			iowrite32(0, ioaddr + CSR1);
1187 		}
1188 
1189 		spin_unlock_irqrestore(&tp->lock, flags);
1190 	}
1191 
1192 	iowrite32(csr6, ioaddr + CSR6);
1193 }
1194 
1195 #ifdef CONFIG_TULIP_MWI
1196 static void __devinit tulip_mwi_config (struct pci_dev *pdev,
1197 					struct net_device *dev)
1198 {
1199 	struct tulip_private *tp = netdev_priv(dev);
1200 	u8 cache;
1201 	u16 pci_command;
1202 	u32 csr0;
1203 
1204 	if (tulip_debug > 3)
1205 		netdev_dbg(dev, "tulip_mwi_config()\n");
1206 
1207 	tp->csr0 = csr0 = 0;
1208 
1209 	/* if we have any cache line size at all, we can do MRM and MWI */
1210 	csr0 |= MRM | MWI;
1211 
1212 	/* Enable MWI in the standard PCI command bit.
1213 	 * Check for the case where MWI is desired but not available
1214 	 */
1215 	pci_try_set_mwi(pdev);
1216 
1217 	/* read result from hardware (in case bit refused to enable) */
1218 	pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
1219 	if ((csr0 & MWI) && (!(pci_command & PCI_COMMAND_INVALIDATE)))
1220 		csr0 &= ~MWI;
1221 
1222 	/* if cache line size hardwired to zero, no MWI */
1223 	pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &cache);
1224 	if ((csr0 & MWI) && (cache == 0)) {
1225 		csr0 &= ~MWI;
1226 		pci_clear_mwi(pdev);
1227 	}
1228 
1229 	/* assign per-cacheline-size cache alignment and
1230 	 * burst length values
1231 	 */
1232 	switch (cache) {
1233 	case 8:
1234 		csr0 |= MRL | (1 << CALShift) | (16 << BurstLenShift);
1235 		break;
1236 	case 16:
1237 		csr0 |= MRL | (2 << CALShift) | (16 << BurstLenShift);
1238 		break;
1239 	case 32:
1240 		csr0 |= MRL | (3 << CALShift) | (32 << BurstLenShift);
1241 		break;
1242 	default:
1243 		cache = 0;
1244 		break;
1245 	}
1246 
1247 	/* if we have a good cache line size, we by now have a good
1248 	 * csr0, so save it and exit
1249 	 */
1250 	if (cache)
1251 		goto out;
1252 
1253 	/* we don't have a good csr0 or cache line size, disable MWI */
1254 	if (csr0 & MWI) {
1255 		pci_clear_mwi(pdev);
1256 		csr0 &= ~MWI;
1257 	}
1258 
1259 	/* sane defaults for burst length and cache alignment
1260 	 * originally from de4x5 driver
1261 	 */
1262 	csr0 |= (8 << BurstLenShift) | (1 << CALShift);
1263 
1264 out:
1265 	tp->csr0 = csr0;
1266 	if (tulip_debug > 2)
1267 		netdev_dbg(dev, "MWI config cacheline=%d, csr0=%08x\n",
1268 			   cache, csr0);
1269 }
1270 #endif
1271 
1272 /*
1273  *	Chips that have the MRM/reserved bit quirk and the burst quirk. That
1274  *	is the DM910X and the on chip ULi devices
1275  */
1276 
1277 static int tulip_uli_dm_quirk(struct pci_dev *pdev)
1278 {
1279 	if (pdev->vendor == 0x1282 && pdev->device == 0x9102)
1280 		return 1;
1281 	return 0;
1282 }
1283 
1284 static const struct net_device_ops tulip_netdev_ops = {
1285 	.ndo_open		= tulip_open,
1286 	.ndo_start_xmit		= tulip_start_xmit,
1287 	.ndo_tx_timeout		= tulip_tx_timeout,
1288 	.ndo_stop		= tulip_close,
1289 	.ndo_get_stats		= tulip_get_stats,
1290 	.ndo_do_ioctl 		= private_ioctl,
1291 	.ndo_set_rx_mode	= set_rx_mode,
1292 	.ndo_change_mtu		= eth_change_mtu,
1293 	.ndo_set_mac_address	= eth_mac_addr,
1294 	.ndo_validate_addr	= eth_validate_addr,
1295 #ifdef CONFIG_NET_POLL_CONTROLLER
1296 	.ndo_poll_controller	 = poll_tulip,
1297 #endif
1298 };
1299 
1300 DEFINE_PCI_DEVICE_TABLE(early_486_chipsets) = {
1301 	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82424) },
1302 	{ PCI_DEVICE(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_496) },
1303 	{ },
1304 };
1305 
1306 static int __devinit tulip_init_one (struct pci_dev *pdev,
1307 				     const struct pci_device_id *ent)
1308 {
1309 	struct tulip_private *tp;
1310 	/* See note below on the multiport cards. */
1311 	static unsigned char last_phys_addr[6] = {0x00, 'L', 'i', 'n', 'u', 'x'};
1312 	static int last_irq;
1313 	static int multiport_cnt;	/* For four-port boards w/one EEPROM */
1314 	int i, irq;
1315 	unsigned short sum;
1316 	unsigned char *ee_data;
1317 	struct net_device *dev;
1318 	void __iomem *ioaddr;
1319 	static int board_idx = -1;
1320 	int chip_idx = ent->driver_data;
1321 	const char *chip_name = tulip_tbl[chip_idx].chip_name;
1322 	unsigned int eeprom_missing = 0;
1323 	unsigned int force_csr0 = 0;
1324 
1325 #ifndef MODULE
1326 	if (tulip_debug > 0)
1327 		printk_once(KERN_INFO "%s", version);
1328 #endif
1329 
1330 	board_idx++;
1331 
1332 	/*
1333 	 *	LanMedia boards wire a tulip chip to a WAN interface and need a
1334 	 *	very different driver (the lmc driver)
1335 	 */
1336 
1337 	if (pdev->subsystem_vendor == PCI_VENDOR_ID_LMC) {
1338 		pr_err("skipping LMC card\n");
1339 		return -ENODEV;
1340 	} else if (pdev->subsystem_vendor == PCI_VENDOR_ID_SBE &&
1341 		   (pdev->subsystem_device == PCI_SUBDEVICE_ID_SBE_T3E3 ||
1342 		    pdev->subsystem_device == PCI_SUBDEVICE_ID_SBE_2T3E3_P0 ||
1343 		    pdev->subsystem_device == PCI_SUBDEVICE_ID_SBE_2T3E3_P1)) {
1344 		pr_err("skipping SBE T3E3 port\n");
1345 		return -ENODEV;
1346 	}
1347 
1348 	/*
1349 	 *	DM910x chips should be handled by the dmfe driver, except
1350 	 *	on-board chips on SPARC systems.  Also, early DM9100s need
1351 	 *	software CRC which only the dmfe driver supports.
1352 	 */
1353 
1354 #ifdef CONFIG_TULIP_DM910X
1355 	if (chip_idx == DM910X) {
1356 		struct device_node *dp;
1357 
1358 		if (pdev->vendor == 0x1282 && pdev->device == 0x9100 &&
1359 		    pdev->revision < 0x30) {
1360 			pr_info("skipping early DM9100 with Crc bug (use dmfe)\n");
1361 			return -ENODEV;
1362 		}
1363 
1364 		dp = pci_device_to_OF_node(pdev);
1365 		if (!(dp && of_get_property(dp, "local-mac-address", NULL))) {
1366 			pr_info("skipping DM910x expansion card (use dmfe)\n");
1367 			return -ENODEV;
1368 		}
1369 	}
1370 #endif
1371 
1372 	/*
1373 	 *	Looks for early PCI chipsets where people report hangs
1374 	 *	without the workarounds being on.
1375 	 */
1376 
1377 	/* 1. Intel Saturn. Switch to 8 long words burst, 8 long word cache
1378 	      aligned.  Aries might need this too. The Saturn errata are not
1379 	      pretty reading but thankfully it's an old 486 chipset.
1380 
1381 	   2. The dreaded SiS496 486 chipset. Same workaround as Intel
1382 	      Saturn.
1383 	*/
1384 
1385 	if (pci_dev_present(early_486_chipsets)) {
1386 		csr0 = MRL | MRM | (8 << BurstLenShift) | (1 << CALShift);
1387 		force_csr0 = 1;
1388 	}
1389 
1390 	/* bugfix: the ASIX must have a burst limit or horrible things happen. */
1391 	if (chip_idx == AX88140) {
1392 		if ((csr0 & 0x3f00) == 0)
1393 			csr0 |= 0x2000;
1394 	}
1395 
1396 	/* PNIC doesn't have MWI/MRL/MRM... */
1397 	if (chip_idx == LC82C168)
1398 		csr0 &= ~0xfff10000; /* zero reserved bits 31:20, 16 */
1399 
1400 	/* DM9102A has troubles with MRM & clear reserved bits 24:22, 20, 16, 7:1 */
1401 	if (tulip_uli_dm_quirk(pdev)) {
1402 		csr0 &= ~0x01f100ff;
1403 #if defined(CONFIG_SPARC)
1404 		csr0 = (csr0 & ~0xff00) | 0xe000;
1405 #endif
1406 	}
1407 	/*
1408 	 *	And back to business
1409 	 */
1410 
1411 	i = pci_enable_device(pdev);
1412 	if (i) {
1413 		pr_err("Cannot enable tulip board #%d, aborting\n", board_idx);
1414 		return i;
1415 	}
1416 
1417 	/* The chip will fail to enter a low-power state later unless
1418 	 * first explicitly commanded into D0 */
1419 	if (pci_set_power_state(pdev, PCI_D0)) {
1420 		pr_notice("Failed to set power state to D0\n");
1421 	}
1422 
1423 	irq = pdev->irq;
1424 
1425 	/* alloc_etherdev ensures aligned and zeroed private structures */
1426 	dev = alloc_etherdev (sizeof (*tp));
1427 	if (!dev) {
1428 		pr_err("ether device alloc failed, aborting\n");
1429 		return -ENOMEM;
1430 	}
1431 
1432 	SET_NETDEV_DEV(dev, &pdev->dev);
1433 	if (pci_resource_len (pdev, 0) < tulip_tbl[chip_idx].io_size) {
1434 		pr_err("%s: I/O region (0x%llx@0x%llx) too small, aborting\n",
1435 		       pci_name(pdev),
1436 		       (unsigned long long)pci_resource_len (pdev, 0),
1437 		       (unsigned long long)pci_resource_start (pdev, 0));
1438 		goto err_out_free_netdev;
1439 	}
1440 
1441 	/* grab all resources from both PIO and MMIO regions, as we
1442 	 * don't want anyone else messing around with our hardware */
1443 	if (pci_request_regions (pdev, DRV_NAME))
1444 		goto err_out_free_netdev;
1445 
1446 	ioaddr =  pci_iomap(pdev, TULIP_BAR, tulip_tbl[chip_idx].io_size);
1447 
1448 	if (!ioaddr)
1449 		goto err_out_free_res;
1450 
1451 	/*
1452 	 * initialize private data structure 'tp'
1453 	 * it is zeroed and aligned in alloc_etherdev
1454 	 */
1455 	tp = netdev_priv(dev);
1456 	tp->dev = dev;
1457 
1458 	tp->rx_ring = pci_alloc_consistent(pdev,
1459 					   sizeof(struct tulip_rx_desc) * RX_RING_SIZE +
1460 					   sizeof(struct tulip_tx_desc) * TX_RING_SIZE,
1461 					   &tp->rx_ring_dma);
1462 	if (!tp->rx_ring)
1463 		goto err_out_mtable;
1464 	tp->tx_ring = (struct tulip_tx_desc *)(tp->rx_ring + RX_RING_SIZE);
1465 	tp->tx_ring_dma = tp->rx_ring_dma + sizeof(struct tulip_rx_desc) * RX_RING_SIZE;
1466 
1467 	tp->chip_id = chip_idx;
1468 	tp->flags = tulip_tbl[chip_idx].flags;
1469 
1470 	tp->wolinfo.supported = 0;
1471 	tp->wolinfo.wolopts = 0;
1472 	/* COMET: Enable power management only for AN983B */
1473 	if (chip_idx == COMET ) {
1474 		u32 sig;
1475 		pci_read_config_dword (pdev, 0x80, &sig);
1476 		if (sig == 0x09811317) {
1477 			tp->flags |= COMET_PM;
1478 			tp->wolinfo.supported = WAKE_PHY | WAKE_MAGIC;
1479 			pr_info("%s: Enabled WOL support for AN983B\n",
1480 				__func__);
1481 		}
1482 	}
1483 	tp->pdev = pdev;
1484 	tp->base_addr = ioaddr;
1485 	tp->revision = pdev->revision;
1486 	tp->csr0 = csr0;
1487 	spin_lock_init(&tp->lock);
1488 	spin_lock_init(&tp->mii_lock);
1489 	init_timer(&tp->timer);
1490 	tp->timer.data = (unsigned long)dev;
1491 	tp->timer.function = tulip_tbl[tp->chip_id].media_timer;
1492 
1493 	INIT_WORK(&tp->media_work, tulip_tbl[tp->chip_id].media_task);
1494 
1495 	dev->base_addr = (unsigned long)ioaddr;
1496 
1497 #ifdef CONFIG_TULIP_MWI
1498 	if (!force_csr0 && (tp->flags & HAS_PCI_MWI))
1499 		tulip_mwi_config (pdev, dev);
1500 #endif
1501 
1502 	/* Stop the chip's Tx and Rx processes. */
1503 	tulip_stop_rxtx(tp);
1504 
1505 	pci_set_master(pdev);
1506 
1507 #ifdef CONFIG_GSC
1508 	if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP) {
1509 		switch (pdev->subsystem_device) {
1510 		default:
1511 			break;
1512 		case 0x1061:
1513 		case 0x1062:
1514 		case 0x1063:
1515 		case 0x1098:
1516 		case 0x1099:
1517 		case 0x10EE:
1518 			tp->flags |= HAS_SWAPPED_SEEPROM | NEEDS_FAKE_MEDIA_TABLE;
1519 			chip_name = "GSC DS21140 Tulip";
1520 		}
1521 	}
1522 #endif
1523 
1524 	/* Clear the missed-packet counter. */
1525 	ioread32(ioaddr + CSR8);
1526 
1527 	/* The station address ROM is read byte serially.  The register must
1528 	   be polled, waiting for the value to be read bit serially from the
1529 	   EEPROM.
1530 	   */
1531 	ee_data = tp->eeprom;
1532 	memset(ee_data, 0, sizeof(tp->eeprom));
1533 	sum = 0;
1534 	if (chip_idx == LC82C168) {
1535 		for (i = 0; i < 3; i++) {
1536 			int value, boguscnt = 100000;
1537 			iowrite32(0x600 | i, ioaddr + 0x98);
1538 			do {
1539 				value = ioread32(ioaddr + CSR9);
1540 			} while (value < 0  && --boguscnt > 0);
1541 			put_unaligned_le16(value, ((__le16 *)dev->dev_addr) + i);
1542 			sum += value & 0xffff;
1543 		}
1544 	} else if (chip_idx == COMET) {
1545 		/* No need to read the EEPROM. */
1546 		put_unaligned_le32(ioread32(ioaddr + 0xA4), dev->dev_addr);
1547 		put_unaligned_le16(ioread32(ioaddr + 0xA8), dev->dev_addr + 4);
1548 		for (i = 0; i < 6; i ++)
1549 			sum += dev->dev_addr[i];
1550 	} else {
1551 		/* A serial EEPROM interface, we read now and sort it out later. */
1552 		int sa_offset = 0;
1553 		int ee_addr_size = tulip_read_eeprom(dev, 0xff, 8) & 0x40000 ? 8 : 6;
1554 		int ee_max_addr = ((1 << ee_addr_size) - 1) * sizeof(u16);
1555 
1556 		if (ee_max_addr > sizeof(tp->eeprom))
1557 			ee_max_addr = sizeof(tp->eeprom);
1558 
1559 		for (i = 0; i < ee_max_addr ; i += sizeof(u16)) {
1560 			u16 data = tulip_read_eeprom(dev, i/2, ee_addr_size);
1561 			ee_data[i] = data & 0xff;
1562 			ee_data[i + 1] = data >> 8;
1563 		}
1564 
1565 		/* DEC now has a specification (see Notes) but early board makers
1566 		   just put the address in the first EEPROM locations. */
1567 		/* This does  memcmp(ee_data, ee_data+16, 8) */
1568 		for (i = 0; i < 8; i ++)
1569 			if (ee_data[i] != ee_data[16+i])
1570 				sa_offset = 20;
1571 		if (chip_idx == CONEXANT) {
1572 			/* Check that the tuple type and length is correct. */
1573 			if (ee_data[0x198] == 0x04  &&  ee_data[0x199] == 6)
1574 				sa_offset = 0x19A;
1575 		} else if (ee_data[0] == 0xff  &&  ee_data[1] == 0xff &&
1576 				   ee_data[2] == 0) {
1577 			sa_offset = 2;		/* Grrr, damn Matrox boards. */
1578 			multiport_cnt = 4;
1579 		}
1580 #ifdef CONFIG_MIPS_COBALT
1581 		if ((pdev->bus->number == 0) &&
1582 		    ((PCI_SLOT(pdev->devfn) == 7) ||
1583 		     (PCI_SLOT(pdev->devfn) == 12))) {
1584 			/* Cobalt MAC address in first EEPROM locations. */
1585 			sa_offset = 0;
1586 			/* Ensure our media table fixup gets applied */
1587 			memcpy(ee_data + 16, ee_data, 8);
1588 		}
1589 #endif
1590 #ifdef CONFIG_GSC
1591 		/* Check to see if we have a broken srom */
1592 		if (ee_data[0] == 0x61 && ee_data[1] == 0x10) {
1593 			/* pci_vendor_id and subsystem_id are swapped */
1594 			ee_data[0] = ee_data[2];
1595 			ee_data[1] = ee_data[3];
1596 			ee_data[2] = 0x61;
1597 			ee_data[3] = 0x10;
1598 
1599 			/* HSC-PCI boards need to be byte-swapped and shifted
1600 			 * up 1 word.  This shift needs to happen at the end
1601 			 * of the MAC first because of the 2 byte overlap.
1602 			 */
1603 			for (i = 4; i >= 0; i -= 2) {
1604 				ee_data[17 + i + 3] = ee_data[17 + i];
1605 				ee_data[16 + i + 5] = ee_data[16 + i];
1606 			}
1607 		}
1608 #endif
1609 
1610 		for (i = 0; i < 6; i ++) {
1611 			dev->dev_addr[i] = ee_data[i + sa_offset];
1612 			sum += ee_data[i + sa_offset];
1613 		}
1614 	}
1615 	/* Lite-On boards have the address byte-swapped. */
1616 	if ((dev->dev_addr[0] == 0xA0 ||
1617 	     dev->dev_addr[0] == 0xC0 ||
1618 	     dev->dev_addr[0] == 0x02) &&
1619 	    dev->dev_addr[1] == 0x00)
1620 		for (i = 0; i < 6; i+=2) {
1621 			char tmp = dev->dev_addr[i];
1622 			dev->dev_addr[i] = dev->dev_addr[i+1];
1623 			dev->dev_addr[i+1] = tmp;
1624 		}
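	/* Example: an address whose true first octets are 00:A0 (a Lite-On
	   OUI) reads back with those two octets reversed; swapping each
	   16-bit pair, as above, restores the whole address. */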
1625 	/* On the Zynx 315 Etherarray and other multiport boards only the
1626 	   first Tulip has an EEPROM.
1627 	   On Sparc systems the mac address is held in the OBP property
1628 	   "local-mac-address".
1629 	   The addresses of the subsequent ports are derived from the first.
1630 	   Many PCI BIOSes also incorrectly report the IRQ line, so we correct
1631 	   that here as well. */
1632 	if (sum == 0  || sum == 6*0xff) {
1633 #if defined(CONFIG_SPARC)
1634 		struct device_node *dp = pci_device_to_OF_node(pdev);
1635 		const unsigned char *addr;
1636 		int len;
1637 #endif
1638 		eeprom_missing = 1;
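		/* Fall back to the previous port's address with the last octet
		   bumped by one; on multiport boards (see above) only the
		   first Tulip carries an EEPROM. */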
1639 		for (i = 0; i < 5; i++)
1640 			dev->dev_addr[i] = last_phys_addr[i];
1641 		dev->dev_addr[i] = last_phys_addr[i] + 1;
1642 #if defined(CONFIG_SPARC)
1643 		addr = of_get_property(dp, "local-mac-address", &len);
1644 		if (addr && len == 6)
1645 			memcpy(dev->dev_addr, addr, 6);
1646 #endif
1647 #if defined(__i386__) || defined(__x86_64__)	/* Patch up x86 BIOS bug. */
1648 		if (last_irq)
1649 			irq = last_irq;
1650 #endif
1651 	}
1652 
1653 	for (i = 0; i < 6; i++)
1654 		last_phys_addr[i] = dev->dev_addr[i];
1655 	last_irq = irq;
1656 	dev->irq = irq;
1657 
1658 	/* The lower four bits are the media type. */
1659 	if (board_idx >= 0  &&  board_idx < MAX_UNITS) {
1660 		if (options[board_idx] & MEDIA_MASK)
1661 			tp->default_port = options[board_idx] & MEDIA_MASK;
1662 		if ((options[board_idx] & FullDuplex) || full_duplex[board_idx] > 0)
1663 			tp->full_duplex = 1;
1664 		if (mtu[board_idx] > 0)
1665 			dev->mtu = mtu[board_idx];
1666 	}
1667 	if (dev->mem_start & MEDIA_MASK)
1668 		tp->default_port = dev->mem_start & MEDIA_MASK;
1669 	if (tp->default_port) {
1670 		pr_info(DRV_NAME "%d: Transceiver selection forced to %s\n",
1671 			board_idx, medianame[tp->default_port & MEDIA_MASK]);
1672 		tp->medialock = 1;
1673 		if (tulip_media_cap[tp->default_port] & MediaAlwaysFD)
1674 			tp->full_duplex = 1;
1675 	}
1676 	if (tp->full_duplex)
1677 		tp->full_duplex_lock = 1;
1678 
1679 	if (tulip_media_cap[tp->default_port] & MediaIsMII) {
1680 		static const u16 media2advert[] = {
1681 			0x20, 0x40, 0x03e0, 0x60, 0x80, 0x100, 0x200
1682 		};
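		/* media2advert[] maps the forced MII default_port values (hence
		   the "- 9" below) onto standard MII advertise-register bits:
		   0x20 = 10baseT half, 0x40 = 10baseT full, 0x80 = 100baseTx
		   half, 0x100 = 100baseTx full, 0x200 = 100baseT4, and 0x03e0
		   advertises all of the above for a plain MII selection. */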
1683 		tp->mii_advertise = media2advert[tp->default_port - 9];
1684 		tp->mii_advertise |= (tp->flags & HAS_8023X); /* Matching bits! */
1685 	}
1686 
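	/* tulip_parse_eeprom() reports through dev->name, which is still the
	   "eth%d" template at this point, so borrow a readable "tulip<N>" name
	   for the duration of the parse and then restore the template. */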
1687 	if (tp->flags & HAS_MEDIA_TABLE) {
1688 		sprintf(dev->name, DRV_NAME "%d", board_idx);	/* hack */
1689 		tulip_parse_eeprom(dev);
1690 		strcpy(dev->name, "eth%d");			/* un-hack */
1691 	}
1692 
1693 	if ((tp->flags & ALWAYS_CHECK_MII) ||
1694 		(tp->mtable  &&  tp->mtable->has_mii) ||
1695 		( ! tp->mtable  &&  (tp->flags & HAS_MII))) {
1696 		if (tp->mtable  &&  tp->mtable->has_mii) {
1697 			for (i = 0; i < tp->mtable->leafcount; i++)
1698 				if (tp->mtable->mleaf[i].media == 11) {
1699 					tp->cur_index = i;
1700 					tp->saved_if_port = dev->if_port;
1701 					tulip_select_media(dev, 2);
1702 					dev->if_port = tp->saved_if_port;
1703 					break;
1704 				}
1705 		}
1706 
1707 		/* Find the connected MII xcvrs.
1708 		   Doing this in open() would allow detecting external xcvrs
1709 		   later, but it takes considerable time. */
1710 		tulip_find_mii (dev, board_idx);
1711 	}
1712 
1713 	/* The Tulip-specific entries in the device structure. */
1714 	dev->netdev_ops = &tulip_netdev_ops;
1715 	dev->watchdog_timeo = TX_TIMEOUT;
1716 #ifdef CONFIG_TULIP_NAPI
1717 	netif_napi_add(dev, &tp->napi, tulip_poll, 16);
1718 #endif
1719 	SET_ETHTOOL_OPS(dev, &ops);
1720 
1721 	if (register_netdev(dev))
1722 		goto err_out_free_ring;
1723 
1724 	pci_set_drvdata(pdev, dev);
1725 
1726 	dev_info(&dev->dev,
1727 #ifdef CONFIG_TULIP_MMIO
1728 		 "%s rev %d at MMIO %#llx,%s %pM, IRQ %d\n",
1729 #else
1730 		 "%s rev %d at Port %#llx,%s %pM, IRQ %d\n",
1731 #endif
1732 		 chip_name, pdev->revision,
1733 		 (unsigned long long)pci_resource_start(pdev, TULIP_BAR),
1734 		 eeprom_missing ? " EEPROM not present," : "",
1735 		 dev->dev_addr, irq);
1736 
1737 	if (tp->chip_id == PNIC2)
1738 		tp->link_change = pnic2_lnk_change;
1739 	else if (tp->flags & HAS_NWAY)
1740 		tp->link_change = t21142_lnk_change;
1741 	else if (tp->flags & HAS_PNICNWAY)
1742 		tp->link_change = pnic_lnk_change;
1743 
1744 	/* Reset the xcvr interface and turn on heartbeat. */
1745 	switch (chip_idx) {
1746 	case DC21140:
1747 	case DM910X:
1748 	default:
1749 		if (tp->mtable)
1750 			iowrite32(tp->mtable->csr12dir | 0x100, ioaddr + CSR12);
1751 		break;
1752 	case DC21142:
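		/* With a usable MII transceiver (or an MII default port) drive
		   the chip through the MII path; otherwise fall back to the
		   21142/21143 internal NWay autonegotiation engine. */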
1753 		if (tp->mii_cnt  ||  tulip_media_cap[dev->if_port] & MediaIsMII) {
1754 			iowrite32(csr6_mask_defstate, ioaddr + CSR6);
1755 			iowrite32(0x0000, ioaddr + CSR13);
1756 			iowrite32(0x0000, ioaddr + CSR14);
1757 			iowrite32(csr6_mask_hdcap, ioaddr + CSR6);
1758 		} else
1759 			t21142_start_nway(dev);
1760 		break;
1761 	case PNIC2:
1762 		/* just do a reset for sanity's sake */
1763 		iowrite32(0x0000, ioaddr + CSR13);
1764 		iowrite32(0x0000, ioaddr + CSR14);
1765 		break;
1766 	case LC82C168:
1767 		if ( ! tp->mii_cnt) {
1768 			tp->nway = 1;
1769 			tp->nwayset = 0;
1770 			iowrite32(csr6_ttm | csr6_ca, ioaddr + CSR6);
1771 			iowrite32(0x30, ioaddr + CSR12);
1772 			iowrite32(0x0001F078, ioaddr + CSR6);
1773 			iowrite32(0x0201F078, ioaddr + CSR6); /* Turn on autonegotiation. */
1774 		}
1775 		break;
1776 	case MX98713:
1777 	case COMPEX9881:
1778 		iowrite32(0x00000000, ioaddr + CSR6);
1779 		iowrite32(0x000711C0, ioaddr + CSR14); /* Turn on NWay. */
1780 		iowrite32(0x00000001, ioaddr + CSR13);
1781 		break;
1782 	case MX98715:
1783 	case MX98725:
1784 		iowrite32(0x01a80000, ioaddr + CSR6);
1785 		iowrite32(0xFFFFFFFF, ioaddr + CSR14);
1786 		iowrite32(0x00001000, ioaddr + CSR12);
1787 		break;
1788 	case COMET:
1789 		/* No initialization necessary. */
1790 		break;
1791 	}
1792 
1793 	/* put the chip in snooze mode until opened */
1794 	tulip_set_power_state (tp, 0, 1);
1795 
1796 	return 0;
1797 
1798 err_out_free_ring:
1799 	pci_free_consistent (pdev,
1800 			     sizeof (struct tulip_rx_desc) * RX_RING_SIZE +
1801 			     sizeof (struct tulip_tx_desc) * TX_RING_SIZE,
1802 			     tp->rx_ring, tp->rx_ring_dma);
1803 
1804 err_out_mtable:
1805 	kfree (tp->mtable);
1806 	pci_iounmap(pdev, ioaddr);
1807 
1808 err_out_free_res:
1809 	pci_release_regions (pdev);
1810 
1811 err_out_free_netdev:
1812 	free_netdev (dev);
1813 	return -ENODEV;
1814 }
1815 
1816 
1817 /* set the registers according to the given wolopts */
1818 static void tulip_set_wolopts (struct pci_dev *pdev, u32 wolopts)
1819 {
1820 	struct net_device *dev = pci_get_drvdata(pdev);
1821 	struct tulip_private *tp = netdev_priv(dev);
1822 	void __iomem *ioaddr = tp->base_addr;
1823 
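	/* Only the ADMtek Comet parts expose a power-management block this
	   driver knows how to program (COMET_PM); on other chips Wake-on-LAN
	   is simply left untouched. */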
1824 	if (tp->flags & COMET_PM) {
1825 
1826 		unsigned int tmp;
1827 
1828 		tmp = ioread32(ioaddr + CSR18);
1829 		tmp &= ~(comet_csr18_pmes_sticky | comet_csr18_apm_mode | comet_csr18_d3a);
1830 		tmp |= comet_csr18_pm_mode;
1831 		iowrite32(tmp, ioaddr + CSR18);
1832 
1833 		/* Set the Wake-up Control/Status Register to the given WOL options */
1834 		tmp = ioread32(ioaddr + CSR13);
1835 		tmp &= ~(comet_csr13_linkoffe | comet_csr13_linkone | comet_csr13_wfre | comet_csr13_lsce | comet_csr13_mpre);
1836 		if (wolopts & WAKE_MAGIC)
1837 			tmp |= comet_csr13_mpre;
1838 		if (wolopts & WAKE_PHY)
1839 			tmp |= comet_csr13_linkoffe | comet_csr13_linkone | comet_csr13_lsce;
1840 		/* Clear the event flags */
1841 		tmp |= comet_csr13_wfr | comet_csr13_mpr | comet_csr13_lsc;
1842 		iowrite32(tmp, ioaddr + CSR13);
1843 	}
1844 }
1845 
1846 #ifdef CONFIG_PM
1847 
1848 
1849 static int tulip_suspend (struct pci_dev *pdev, pm_message_t state)
1850 {
1851 	pci_power_t pstate;
1852 	struct net_device *dev = pci_get_drvdata(pdev);
1853 	struct tulip_private *tp = netdev_priv(dev);
1854 
1855 	if (!dev)
1856 		return -EINVAL;
1857 
1858 	if (!netif_running(dev))
1859 		goto save_state;
1860 
1861 	tulip_down(dev);
1862 
1863 	netif_device_detach(dev);
1864 	free_irq(dev->irq, dev);
1865 
1866 save_state:
1867 	pci_save_state(pdev);
1868 	pci_disable_device(pdev);
1869 	pstate = pci_choose_state(pdev, state);
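	/* Arm wake-up sources only for a genuine suspend into a low-power
	   state; tp->wolinfo.wolopts carries the WOL modes requested through
	   ethtool (presumably via this driver's set_wol handler). */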
1870 	if (state.event == PM_EVENT_SUSPEND && pstate != PCI_D0) {
1871 		int rc;
1872 
1873 		tulip_set_wolopts(pdev, tp->wolinfo.wolopts);
1874 		rc = pci_enable_wake(pdev, pstate, tp->wolinfo.wolopts);
1875 		if (rc)
1876 			pr_err("pci_enable_wake failed (%d)\n", rc);
1877 	}
1878 	pci_set_power_state(pdev, pstate);
1879 
1880 	return 0;
1881 }
1882 
1883 
1884 static int tulip_resume(struct pci_dev *pdev)
1885 {
1886 	struct net_device *dev = pci_get_drvdata(pdev);
1887 	struct tulip_private *tp = netdev_priv(dev);
1888 	void __iomem *ioaddr = tp->base_addr;
1889 	int retval;
1890 	unsigned int tmp;
1891 
1892 	if (!dev)
1893 		return -EINVAL;
1894 
1895 	pci_set_power_state(pdev, PCI_D0);
1896 	pci_restore_state(pdev);
1897 
1898 	if (!netif_running(dev))
1899 		return 0;
1900 
1901 	if ((retval = pci_enable_device(pdev))) {
1902 		pr_err("pci_enable_device failed in resume\n");
1903 		return retval;
1904 	}
1905 
1906 	if ((retval = request_irq(dev->irq, tulip_interrupt, IRQF_SHARED, dev->name, dev))) {
1907 		pr_err("request_irq failed in resume\n");
1908 		return retval;
1909 	}
1910 
1911 	if (tp->flags & COMET_PM) {
1912 		pci_enable_wake(pdev, PCI_D3hot, 0);
1913 		pci_enable_wake(pdev, PCI_D3cold, 0);
1914 
1915 		/* Clear the PMES flag */
1916 		tmp = ioread32(ioaddr + CSR20);
1917 		tmp |= comet_csr20_pmes;
1918 		iowrite32(tmp, ioaddr + CSR20);
1919 
1920 		/* Disable all wake-up events */
1921 		tulip_set_wolopts(pdev, 0);
1922 	}
1923 	netif_device_attach(dev);
1924 
1925 	if (netif_running(dev))
1926 		tulip_up(dev);
1927 
1928 	return 0;
1929 }
1930 
1931 #endif /* CONFIG_PM */
1932 
1933 
1934 static void __devexit tulip_remove_one (struct pci_dev *pdev)
1935 {
1936 	struct net_device *dev = pci_get_drvdata (pdev);
1937 	struct tulip_private *tp;
1938 
1939 	if (!dev)
1940 		return;
1941 
1942 	tp = netdev_priv(dev);
1943 	unregister_netdev(dev);
1944 	pci_free_consistent (pdev,
1945 			     sizeof (struct tulip_rx_desc) * RX_RING_SIZE +
1946 			     sizeof (struct tulip_tx_desc) * TX_RING_SIZE,
1947 			     tp->rx_ring, tp->rx_ring_dma);
1948 	kfree (tp->mtable);
1949 	pci_iounmap(pdev, tp->base_addr);
1950 	free_netdev (dev);
1951 	pci_release_regions (pdev);
1952 	pci_set_drvdata (pdev, NULL);
1953 
1954 	/* pci_power_off (pdev, -1); */
1955 }
1956 
1957 #ifdef CONFIG_NET_POLL_CONTROLLER
1958 /*
1959  * Polling 'interrupt' - used by things like netconsole to send skbs
1960  * without having to re-enable interrupts. It's not called while
1961  * the interrupt routine is executing.
1962  */
1963 
1964 static void poll_tulip (struct net_device *dev)
1965 {
1966 	/* disable_irq here is not very nice, but with the lockless
1967 	   interrupt handler we have no other choice. */
1968 	disable_irq(dev->irq);
1969 	tulip_interrupt (dev->irq, dev);
1970 	enable_irq(dev->irq);
1971 }
1972 #endif
1973 
1974 static struct pci_driver tulip_driver = {
1975 	.name		= DRV_NAME,
1976 	.id_table	= tulip_pci_tbl,
1977 	.probe		= tulip_init_one,
1978 	.remove		= __devexit_p(tulip_remove_one),
1979 #ifdef CONFIG_PM
1980 	.suspend	= tulip_suspend,
1981 	.resume		= tulip_resume,
1982 #endif /* CONFIG_PM */
1983 };
1984 
1985 
1986 static int __init tulip_init (void)
1987 {
1988 #ifdef MODULE
1989 	pr_info("%s", version);
1990 #endif
1991 
1992 	/* copy module parms into globals */
1993 	tulip_rx_copybreak = rx_copybreak;
1994 	tulip_max_interrupt_work = max_interrupt_work;
1995 
1996 	/* probe for and init boards */
1997 	return pci_register_driver(&tulip_driver);
1998 }
1999 
2000 
2001 static void __exit tulip_cleanup (void)
2002 {
2003 	pci_unregister_driver (&tulip_driver);
2004 }
2005 
2006 
2007 module_init(tulip_init);
2008 module_exit(tulip_cleanup);
2009