1 /* 8139cp.c: A Linux PCI Ethernet driver for the RealTek 8139C+ chips. */
2 /*
3 	Copyright 2001-2004 Jeff Garzik <jgarzik@pobox.com>
4 
5 	Copyright (C) 2001, 2002 David S. Miller (davem@redhat.com) [tg3.c]
6 	Copyright (C) 2000, 2001 David S. Miller (davem@redhat.com) [sungem.c]
7 	Copyright 2001 Manfred Spraul				    [natsemi.c]
8 	Copyright 1999-2001 by Donald Becker.			    [natsemi.c]
9        	Written 1997-2001 by Donald Becker.			    [8139too.c]
10 	Copyright 1998-2001 by Jes Sorensen, <jes@trained-monkey.org>. [acenic.c]
11 
12 	This software may be used and distributed according to the terms of
13 	the GNU General Public License (GPL), incorporated herein by reference.
14 	Drivers based on or derived from this code fall under the GPL and must
15 	retain the authorship, copyright and license notice.  This file is not
16 	a complete program and may only be used when the entire operating
17 	system is licensed under the GPL.
18 
19 	See the file COPYING in this distribution for more information.
20 
21 	Contributors:
22 
23 		Wake-on-LAN support - Felipe Damasio <felipewd@terra.com.br>
24 		PCI suspend/resume  - Felipe Damasio <felipewd@terra.com.br>
25 		LinkChg interrupt   - Felipe Damasio <felipewd@terra.com.br>
26 
27 	TODO:
28 	* Test Tx checksumming thoroughly
29 
30 	Low priority TODO:
31 	* Complete reset on PciErr
32 	* Consider Rx interrupt mitigation using TimerIntr
33 	* Investigate using skb->priority with h/w VLAN priority
34 	* Investigate using High Priority Tx Queue with skb->priority
35 	* Adjust Rx FIFO threshold and Max Rx DMA burst on Rx FIFO error
36 	* Adjust Tx FIFO threshold and Max Tx DMA burst on Tx FIFO error
37 	* Implement Tx software interrupt mitigation via
38 	  Tx descriptor bit
39 	* The real minimum of CP_MIN_MTU is 4 bytes.  However,
40 	  for this to be supported, one must(?) turn on packet padding.
41 	* Support external MII transceivers (patch available)
42 
43 	NOTES:
44 	* TX checksumming is considered experimental.  It is off by
45 	  default; use ethtool to turn it on.
46 
47  */
48 
49 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
50 
51 #define DRV_NAME		"8139cp"
52 #define DRV_VERSION		"1.3"
53 #define DRV_RELDATE		"Mar 22, 2004"
54 
55 
56 #include <linux/module.h>
57 #include <linux/moduleparam.h>
58 #include <linux/kernel.h>
59 #include <linux/compiler.h>
60 #include <linux/netdevice.h>
61 #include <linux/etherdevice.h>
62 #include <linux/init.h>
63 #include <linux/interrupt.h>
64 #include <linux/pci.h>
65 #include <linux/dma-mapping.h>
66 #include <linux/delay.h>
67 #include <linux/ethtool.h>
68 #include <linux/gfp.h>
69 #include <linux/mii.h>
70 #include <linux/if_vlan.h>
71 #include <linux/crc32.h>
72 #include <linux/in.h>
73 #include <linux/ip.h>
74 #include <linux/tcp.h>
75 #include <linux/udp.h>
76 #include <linux/cache.h>
77 #include <asm/io.h>
78 #include <asm/irq.h>
79 #include <asm/uaccess.h>
80 
81 /* These identify the driver base version and may not be removed. */
82 static char version[] =
83 DRV_NAME ": 10/100 PCI Ethernet driver v" DRV_VERSION " (" DRV_RELDATE ")\n";
84 
85 MODULE_AUTHOR("Jeff Garzik <jgarzik@pobox.com>");
86 MODULE_DESCRIPTION("RealTek RTL-8139C+ series 10/100 PCI Ethernet driver");
87 MODULE_VERSION(DRV_VERSION);
88 MODULE_LICENSE("GPL");
89 
90 static int debug = -1;
91 module_param(debug, int, 0);
92 MODULE_PARM_DESC (debug, "8139cp: bitmapped message enable number");
93 
94 /* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
95    The RTL chips use a 64 element hash table based on the Ethernet CRC.  */
96 static int multicast_filter_limit = 32;
97 module_param(multicast_filter_limit, int, 0);
98 MODULE_PARM_DESC (multicast_filter_limit, "8139cp: maximum number of filtered multicast addresses");
99 
100 #define CP_DEF_MSG_ENABLE	(NETIF_MSG_DRV		| \
101 				 NETIF_MSG_PROBE 	| \
102 				 NETIF_MSG_LINK)
103 #define CP_NUM_STATS		14	/* struct cp_dma_stats, plus one */
104 #define CP_STATS_SIZE		64	/* size in bytes of DMA stats block */
105 #define CP_REGS_SIZE		(0xff + 1)
106 #define CP_REGS_VER		1		/* version 1 */
107 #define CP_RX_RING_SIZE		64
108 #define CP_TX_RING_SIZE		64
109 #define CP_RING_BYTES		\
110 		((sizeof(struct cp_desc) * CP_RX_RING_SIZE) +	\
111 		 (sizeof(struct cp_desc) * CP_TX_RING_SIZE) +	\
112 		 CP_STATS_SIZE)
113 #define NEXT_TX(N)		(((N) + 1) & (CP_TX_RING_SIZE - 1))
114 #define NEXT_RX(N)		(((N) + 1) & (CP_RX_RING_SIZE - 1))
115 #define TX_BUFFS_AVAIL(CP)					\
116 	(((CP)->tx_tail <= (CP)->tx_head) ?			\
117 	  (CP)->tx_tail + (CP_TX_RING_SIZE - 1) - (CP)->tx_head :	\
118 	  (CP)->tx_tail - (CP)->tx_head - 1)
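/*
 * Worked example of the ring accounting above (one slot is always left
 * unused so that head == tail can only mean "ring empty"):
 *
 *	CP_TX_RING_SIZE == 64
 *	tx_head == tx_tail == 0		->  0 + 63 -  0 == 63 slots free
 *	tx_head == 10, tx_tail == 5	->  5 + 63 - 10 == 58 slots free
 *	tx_head == 5,  tx_tail == 10	-> 10 -  5 -  1 ==  4 slots free
 */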
119 
120 #define PKT_BUF_SZ		1536	/* Size of each temporary Rx buffer.*/
121 #define CP_INTERNAL_PHY		32
122 
123 /* The following settings are log_2(bytes)-4:  0 == 16 bytes .. 6==1024, 7==end of packet. */
124 #define RX_FIFO_THRESH		5	/* Rx buffer level before first PCI xfer.  */
125 #define RX_DMA_BURST		4	/* Maximum PCI burst, '4' is 256 */
126 #define TX_DMA_BURST		6	/* Maximum PCI burst, '6' is 1024 */
127 #define TX_EARLY_THRESH		256	/* Early Tx threshold, in bytes */
128 
129 /* Time in jiffies before concluding the transmitter is hung. */
130 #define TX_TIMEOUT		(6*HZ)
131 
132 /* hardware minimum and maximum for a single frame's data payload */
133 #define CP_MIN_MTU		60	/* TODO: allow lower, but pad */
134 #define CP_MAX_MTU		4096
135 
136 enum {
137 	/* NIC register offsets */
138 	MAC0		= 0x00,	/* Ethernet hardware address. */
139 	MAR0		= 0x08,	/* Multicast filter. */
140 	StatsAddr	= 0x10,	/* 64-bit start addr of 64-byte DMA stats blk */
141 	TxRingAddr	= 0x20, /* 64-bit start addr of Tx ring */
142 	HiTxRingAddr	= 0x28, /* 64-bit start addr of high priority Tx ring */
143 	Cmd		= 0x37, /* Command register */
144 	IntrMask	= 0x3C, /* Interrupt mask */
145 	IntrStatus	= 0x3E, /* Interrupt status */
146 	TxConfig	= 0x40, /* Tx configuration */
147 	ChipVersion	= 0x43, /* 8-bit chip version, inside TxConfig */
148 	RxConfig	= 0x44, /* Rx configuration */
149 	RxMissed	= 0x4C,	/* 24 bits valid, write clears */
150 	Cfg9346		= 0x50, /* EEPROM select/control; Cfg reg [un]lock */
151 	Config1		= 0x52, /* Config1 */
152 	Config3		= 0x59, /* Config3 */
153 	Config4		= 0x5A, /* Config4 */
154 	MultiIntr	= 0x5C, /* Multiple interrupt select */
155 	BasicModeCtrl	= 0x62,	/* MII BMCR */
156 	BasicModeStatus	= 0x64, /* MII BMSR */
157 	NWayAdvert	= 0x66, /* MII ADVERTISE */
158 	NWayLPAR	= 0x68, /* MII LPA */
159 	NWayExpansion	= 0x6A, /* MII Expansion */
160 	Config5		= 0xD8,	/* Config5 */
161 	TxPoll		= 0xD9,	/* Tell chip to check Tx descriptors for work */
162 	RxMaxSize	= 0xDA, /* Max size of an Rx packet (8169 only) */
163 	CpCmd		= 0xE0, /* C+ Command register (C+ mode only) */
164 	IntrMitigate	= 0xE2,	/* rx/tx interrupt mitigation control */
165 	RxRingAddr	= 0xE4, /* 64-bit start addr of Rx ring */
166 	TxThresh	= 0xEC, /* Early Tx threshold */
167 	OldRxBufAddr	= 0x30, /* DMA address of Rx ring buffer (C mode) */
168 	OldTSD0		= 0x10, /* DMA address of first Tx desc (C mode) */
169 
170 	/* Tx and Rx status descriptors */
171 	DescOwn		= (1 << 31), /* Descriptor is owned by NIC */
172 	RingEnd		= (1 << 30), /* End of descriptor ring */
173 	FirstFrag	= (1 << 29), /* First segment of a packet */
174 	LastFrag	= (1 << 28), /* Final segment of a packet */
175 	LargeSend	= (1 << 27), /* TCP Large Send Offload (TSO) */
176 	MSSShift	= 16,	     /* MSS value position */
177 	MSSMask		= 0xfff,     /* MSS value: 11 bits */
178 	TxError		= (1 << 23), /* Tx error summary */
179 	RxError		= (1 << 20), /* Rx error summary */
180 	IPCS		= (1 << 18), /* Calculate IP checksum */
181 	UDPCS		= (1 << 17), /* Calculate UDP/IP checksum */
182 	TCPCS		= (1 << 16), /* Calculate TCP/IP checksum */
183 	TxVlanTag	= (1 << 17), /* Add VLAN tag */
184 	RxVlanTagged	= (1 << 16), /* Rx VLAN tag available */
185 	IPFail		= (1 << 15), /* IP checksum failed */
186 	UDPFail		= (1 << 14), /* UDP/IP checksum failed */
187 	TCPFail		= (1 << 13), /* TCP/IP checksum failed */
188 	NormalTxPoll	= (1 << 6),  /* One or more normal Tx packets to send */
189 	PID1		= (1 << 17), /* 2 protocol id bits:  0==non-IP, */
190 	PID0		= (1 << 16), /* 1==UDP/IP, 2==TCP/IP, 3==IP */
191 	RxProtoTCP	= 1,
192 	RxProtoUDP	= 2,
193 	RxProtoIP	= 3,
194 	TxFIFOUnder	= (1 << 25), /* Tx FIFO underrun */
195 	TxOWC		= (1 << 22), /* Tx Out-of-window collision */
196 	TxLinkFail	= (1 << 21), /* Link failed during Tx of packet */
197 	TxMaxCol	= (1 << 20), /* Tx aborted due to excessive collisions */
198 	TxColCntShift	= 16,	     /* Shift, to get 4-bit Tx collision cnt */
199 	TxColCntMask	= 0x01 | 0x02 | 0x04 | 0x08, /* 4-bit collision count */
200 	RxErrFrame	= (1 << 27), /* Rx frame alignment error */
201 	RxMcast		= (1 << 26), /* Rx multicast packet rcv'd */
202 	RxErrCRC	= (1 << 18), /* Rx CRC error */
203 	RxErrRunt	= (1 << 19), /* Rx error, packet < 64 bytes */
204 	RxErrLong	= (1 << 21), /* Rx error, packet > 4096 bytes */
205 	RxErrFIFO	= (1 << 22), /* Rx error, FIFO overflowed, pkt bad */
206 
207 	/* StatsAddr register */
208 	DumpStats	= (1 << 3),  /* Begin stats dump */
209 
210 	/* RxConfig register */
211 	RxCfgFIFOShift	= 13,	     /* Shift, to get Rx FIFO thresh value */
212 	RxCfgDMAShift	= 8,	     /* Shift, to get Rx Max DMA value */
213 	AcceptErr	= 0x20,	     /* Accept packets with CRC errors */
214 	AcceptRunt	= 0x10,	     /* Accept runt (<64 bytes) packets */
215 	AcceptBroadcast	= 0x08,	     /* Accept broadcast packets */
216 	AcceptMulticast	= 0x04,	     /* Accept multicast packets */
217 	AcceptMyPhys	= 0x02,	     /* Accept pkts with our MAC as dest */
218 	AcceptAllPhys	= 0x01,	     /* Accept all pkts w/ physical dest */
219 
220 	/* IntrMask / IntrStatus registers */
221 	PciErr		= (1 << 15), /* System error on the PCI bus */
222 	TimerIntr	= (1 << 14), /* Asserted when TCTR reaches TimerInt value */
223 	LenChg		= (1 << 13), /* Cable length change */
224 	SWInt		= (1 << 8),  /* Software-requested interrupt */
225 	TxEmpty		= (1 << 7),  /* No Tx descriptors available */
226 	RxFIFOOvr	= (1 << 6),  /* Rx FIFO Overflow */
227 	LinkChg		= (1 << 5),  /* Packet underrun, or link change */
228 	RxEmpty		= (1 << 4),  /* No Rx descriptors available */
229 	TxErr		= (1 << 3),  /* Tx error */
230 	TxOK		= (1 << 2),  /* Tx packet sent */
231 	RxErr		= (1 << 1),  /* Rx error */
232 	RxOK		= (1 << 0),  /* Rx packet received */
233 	IntrResvd	= (1 << 10), /* reserved, according to RealTek engineers,
234 					but hardware likes to raise it */
235 
236 	IntrAll		= PciErr | TimerIntr | LenChg | SWInt | TxEmpty |
237 			  RxFIFOOvr | LinkChg | RxEmpty | TxErr | TxOK |
238 			  RxErr | RxOK | IntrResvd,
239 
240 	/* C mode command register */
241 	CmdReset	= (1 << 4),  /* Enable to reset; self-clearing */
242 	RxOn		= (1 << 3),  /* Rx mode enable */
243 	TxOn		= (1 << 2),  /* Tx mode enable */
244 
245 	/* C+ mode command register */
246 	RxVlanOn	= (1 << 6),  /* Rx VLAN de-tagging enable */
247 	RxChkSum	= (1 << 5),  /* Rx checksum offload enable */
248 	PCIDAC		= (1 << 4),  /* PCI Dual Address Cycle (64-bit PCI) */
249 	PCIMulRW	= (1 << 3),  /* Enable PCI read/write multiple */
250 	CpRxOn		= (1 << 1),  /* Rx mode enable */
251 	CpTxOn		= (1 << 0),  /* Tx mode enable */
252 
253 	/* Cfg9346 EEPROM control register */
254 	Cfg9346_Lock	= 0x00,	     /* Lock ConfigX/MII register access */
255 	Cfg9346_Unlock	= 0xC0,	     /* Unlock ConfigX/MII register access */
256 
257 	/* TxConfig register */
258 	IFG		= (1 << 25) | (1 << 24), /* standard IEEE interframe gap */
259 	TxDMAShift	= 8,	     /* DMA burst value (0-7) is shift this many bits */
260 
261 	/* Early Tx Threshold register */
262 	TxThreshMask	= 0x3f,	     /* Mask bits 5-0 */
263 	TxThreshMax	= 2048,	     /* Max early Tx threshold */
264 
265 	/* Config1 register */
266 	DriverLoaded	= (1 << 5),  /* Software marker, driver is loaded */
267 	LWACT           = (1 << 4),  /* LWAKE active mode */
268 	PMEnable	= (1 << 0),  /* Enable various PM features of chip */
269 
270 	/* Config3 register */
271 	PARMEnable	= (1 << 6),  /* Enable auto-loading of PHY parms */
272 	MagicPacket     = (1 << 5),  /* Wake up when receives a Magic Packet */
273 	LinkUp          = (1 << 4),  /* Wake up when the cable connection is re-established */
274 
275 	/* Config4 register */
276 	LWPTN           = (1 << 1),  /* LWAKE Pattern */
277 	LWPME           = (1 << 4),  /* LANWAKE vs PMEB */
278 
279 	/* Config5 register */
280 	BWF             = (1 << 6),  /* Accept Broadcast wakeup frame */
281 	MWF             = (1 << 5),  /* Accept Multicast wakeup frame */
282 	UWF             = (1 << 4),  /* Accept Unicast wakeup frame */
283 	LANWake         = (1 << 1),  /* Enable LANWake signal */
284 	PMEStatus	= (1 << 0),  /* PME status can be reset by PCI RST# */
285 
286 	cp_norx_intr_mask = PciErr | LinkChg | TxOK | TxErr | TxEmpty,
287 	cp_rx_intr_mask = RxOK | RxErr | RxEmpty | RxFIFOOvr,
288 	cp_intr_mask = cp_rx_intr_mask | cp_norx_intr_mask,
289 };
290 
291 static const unsigned int cp_rx_config =
292 	  (RX_FIFO_THRESH << RxCfgFIFOShift) |
293 	  (RX_DMA_BURST << RxCfgDMAShift);
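/*
 * With the defaults above this evaluates to:
 *	RX_FIFO_THRESH == 5  ->  2^(5+4) == 512-byte Rx FIFO threshold
 *	RX_DMA_BURST   == 4  ->  2^(4+4) == 256-byte max Rx DMA burst
 *	cp_rx_config   == (5 << 13) | (4 << 8) == 0xa400
 * before the Accept* receive-mode bits are OR'd in by __cp_set_rx_mode().
 */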
294 
295 struct cp_desc {
296 	__le32		opts1;
297 	__le32		opts2;
298 	__le64		addr;
299 };
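/*
 * The descriptor above is shared by the Rx and Tx rings:
 *	opts1 - DescOwn/RingEnd/FirstFrag/LastFrag, status bits and the
 *		buffer or frame length in the low bits
 *	opts2 - VLAN tag to insert (Tx) or received VLAN tag (Rx)
 *	addr  - 64-bit bus address of the data buffer
 *
 * For example, an empty Rx slot is handed to the NIC as
 *	desc->addr  = cpu_to_le64(mapping);
 *	desc->opts1 = cpu_to_le32(DescOwn | cp->rx_buf_sz);
 * (plus RingEnd on the last slot), exactly as cp_refill_rx() and
 * cp_rx_poll() do.
 */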
300 
301 struct cp_dma_stats {
302 	__le64			tx_ok;
303 	__le64			rx_ok;
304 	__le64			tx_err;
305 	__le32			rx_err;
306 	__le16			rx_fifo;
307 	__le16			frame_align;
308 	__le32			tx_ok_1col;
309 	__le32			tx_ok_mcol;
310 	__le64			rx_ok_phys;
311 	__le64			rx_ok_bcast;
312 	__le32			rx_ok_mcast;
313 	__le16			tx_abort;
314 	__le16			tx_underrun;
315 } __packed;
316 
317 struct cp_extra_stats {
318 	unsigned long		rx_frags;
319 };
320 
321 struct cp_private {
322 	void			__iomem *regs;
323 	struct net_device	*dev;
324 	spinlock_t		lock;
325 	u32			msg_enable;
326 
327 	struct napi_struct	napi;
328 
329 	struct pci_dev		*pdev;
330 	u32			rx_config;
331 	u16			cpcmd;
332 
333 	struct cp_extra_stats	cp_stats;
334 
335 	unsigned		rx_head		____cacheline_aligned;
336 	unsigned		rx_tail;
337 	struct cp_desc		*rx_ring;
338 	struct sk_buff		*rx_skb[CP_RX_RING_SIZE];
339 
340 	unsigned		tx_head		____cacheline_aligned;
341 	unsigned		tx_tail;
342 	struct cp_desc		*tx_ring;
343 	struct sk_buff		*tx_skb[CP_TX_RING_SIZE];
344 
345 	unsigned		rx_buf_sz;
346 	unsigned		wol_enabled : 1; /* Is Wake-on-LAN enabled? */
347 
348 	dma_addr_t		ring_dma;
349 
350 	struct mii_if_info	mii_if;
351 };
352 
353 #define cpr8(reg)	readb(cp->regs + (reg))
354 #define cpr16(reg)	readw(cp->regs + (reg))
355 #define cpr32(reg)	readl(cp->regs + (reg))
356 #define cpw8(reg,val)	writeb((val), cp->regs + (reg))
357 #define cpw16(reg,val)	writew((val), cp->regs + (reg))
358 #define cpw32(reg,val)	writel((val), cp->regs + (reg))
359 #define cpw8_f(reg,val) do {			\
360 	writeb((val), cp->regs + (reg));	\
361 	readb(cp->regs + (reg));		\
362 	} while (0)
363 #define cpw16_f(reg,val) do {			\
364 	writew((val), cp->regs + (reg));	\
365 	readw(cp->regs + (reg));		\
366 	} while (0)
367 #define cpw32_f(reg,val) do {			\
368 	writel((val), cp->regs + (reg));	\
369 	readl(cp->regs + (reg));		\
370 	} while (0)
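/*
 * The *_f ("flush") variants read the register straight back after writing
 * it; the dummy read forces any posted PCI write out to the chip before the
 * caller continues.  Typical ordering-sensitive use:
 *
 *	cpw16_f(IntrMask, 0);	- the mask is guaranteed to be in effect
 *				  before the next statement runs
 */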
371 
372 
373 static void __cp_set_rx_mode (struct net_device *dev);
374 static void cp_tx (struct cp_private *cp);
375 static void cp_clean_rings (struct cp_private *cp);
376 #ifdef CONFIG_NET_POLL_CONTROLLER
377 static void cp_poll_controller(struct net_device *dev);
378 #endif
379 static int cp_get_eeprom_len(struct net_device *dev);
380 static int cp_get_eeprom(struct net_device *dev,
381 			 struct ethtool_eeprom *eeprom, u8 *data);
382 static int cp_set_eeprom(struct net_device *dev,
383 			 struct ethtool_eeprom *eeprom, u8 *data);
384 
385 static DEFINE_PCI_DEVICE_TABLE(cp_pci_tbl) = {
386 	{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK,	PCI_DEVICE_ID_REALTEK_8139), },
387 	{ PCI_DEVICE(PCI_VENDOR_ID_TTTECH,	PCI_DEVICE_ID_TTTECH_MC322), },
388 	{ },
389 };
390 MODULE_DEVICE_TABLE(pci, cp_pci_tbl);
391 
392 static struct {
393 	const char str[ETH_GSTRING_LEN];
394 } ethtool_stats_keys[] = {
395 	{ "tx_ok" },
396 	{ "rx_ok" },
397 	{ "tx_err" },
398 	{ "rx_err" },
399 	{ "rx_fifo" },
400 	{ "frame_align" },
401 	{ "tx_ok_1col" },
402 	{ "tx_ok_mcol" },
403 	{ "rx_ok_phys" },
404 	{ "rx_ok_bcast" },
405 	{ "rx_ok_mcast" },
406 	{ "tx_abort" },
407 	{ "tx_underrun" },
408 	{ "rx_frags" },
409 };
410 
411 
412 static inline void cp_set_rxbufsize (struct cp_private *cp)
413 {
414 	unsigned int mtu = cp->dev->mtu;
415 
416 	if (mtu > ETH_DATA_LEN)
417 		/* MTU + ethernet header + FCS + optional VLAN tag */
418 		cp->rx_buf_sz = mtu + ETH_HLEN + 8;
419 	else
420 		cp->rx_buf_sz = PKT_BUF_SZ;
421 }
422 
423 static inline void cp_rx_skb (struct cp_private *cp, struct sk_buff *skb,
424 			      struct cp_desc *desc)
425 {
426 	u32 opts2 = le32_to_cpu(desc->opts2);
427 
428 	skb->protocol = eth_type_trans (skb, cp->dev);
429 
430 	cp->dev->stats.rx_packets++;
431 	cp->dev->stats.rx_bytes += skb->len;
432 
433 	if (opts2 & RxVlanTagged)
434 		__vlan_hwaccel_put_tag(skb, swab16(opts2 & 0xffff));
435 
436 	napi_gro_receive(&cp->napi, skb);
437 }
438 
439 static void cp_rx_err_acct (struct cp_private *cp, unsigned rx_tail,
440 			    u32 status, u32 len)
441 {
442 	netif_dbg(cp, rx_err, cp->dev, "rx err, slot %d status 0x%x len %d\n",
443 		  rx_tail, status, len);
444 	cp->dev->stats.rx_errors++;
445 	if (status & RxErrFrame)
446 		cp->dev->stats.rx_frame_errors++;
447 	if (status & RxErrCRC)
448 		cp->dev->stats.rx_crc_errors++;
449 	if ((status & RxErrRunt) || (status & RxErrLong))
450 		cp->dev->stats.rx_length_errors++;
451 	if ((status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag))
452 		cp->dev->stats.rx_length_errors++;
453 	if (status & RxErrFIFO)
454 		cp->dev->stats.rx_fifo_errors++;
455 }
456 
457 static inline unsigned int cp_rx_csum_ok (u32 status)
458 {
459 	unsigned int protocol = (status >> 16) & 0x3;
460 
461 	if (((protocol == RxProtoTCP) && !(status & TCPFail)) ||
462 	    ((protocol == RxProtoUDP) && !(status & UDPFail)))
463 		return 1;
464 	else
465 		return 0;
466 }
467 
468 static int cp_rx_poll(struct napi_struct *napi, int budget)
469 {
470 	struct cp_private *cp = container_of(napi, struct cp_private, napi);
471 	struct net_device *dev = cp->dev;
472 	unsigned int rx_tail = cp->rx_tail;
473 	int rx;
474 
475 rx_status_loop:
476 	rx = 0;
477 	cpw16(IntrStatus, cp_rx_intr_mask);
478 
479 	while (1) {
480 		u32 status, len;
481 		dma_addr_t mapping;
482 		struct sk_buff *skb, *new_skb;
483 		struct cp_desc *desc;
484 		const unsigned buflen = cp->rx_buf_sz;
485 
486 		skb = cp->rx_skb[rx_tail];
487 		BUG_ON(!skb);
488 
489 		desc = &cp->rx_ring[rx_tail];
490 		status = le32_to_cpu(desc->opts1);
491 		if (status & DescOwn)
492 			break;
493 
494 		len = (status & 0x1fff) - 4;
495 		mapping = le64_to_cpu(desc->addr);
496 
497 		if ((status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag)) {
498 			/* we don't support incoming fragmented frames.
499 			 * instead, we attempt to ensure that the
500 			 * pre-allocated RX skbs are properly sized such
501 			 * that RX fragments are never encountered
502 			 */
503 			cp_rx_err_acct(cp, rx_tail, status, len);
504 			dev->stats.rx_dropped++;
505 			cp->cp_stats.rx_frags++;
506 			goto rx_next;
507 		}
508 
509 		if (status & (RxError | RxErrFIFO)) {
510 			cp_rx_err_acct(cp, rx_tail, status, len);
511 			goto rx_next;
512 		}
513 
514 		netif_dbg(cp, rx_status, dev, "rx slot %d status 0x%x len %d\n",
515 			  rx_tail, status, len);
516 
517 		new_skb = netdev_alloc_skb_ip_align(dev, buflen);
518 		if (!new_skb) {
519 			dev->stats.rx_dropped++;
520 			goto rx_next;
521 		}
522 
523 		dma_unmap_single(&cp->pdev->dev, mapping,
524 				 buflen, PCI_DMA_FROMDEVICE);
525 
526 		/* Handle checksum offloading for incoming packets. */
527 		if (cp_rx_csum_ok(status))
528 			skb->ip_summed = CHECKSUM_UNNECESSARY;
529 		else
530 			skb_checksum_none_assert(skb);
531 
532 		skb_put(skb, len);
533 
534 		mapping = dma_map_single(&cp->pdev->dev, new_skb->data, buflen,
535 					 PCI_DMA_FROMDEVICE);
536 		cp->rx_skb[rx_tail] = new_skb;
537 
538 		cp_rx_skb(cp, skb, desc);
539 		rx++;
540 
541 rx_next:
542 		cp->rx_ring[rx_tail].opts2 = 0;
543 		cp->rx_ring[rx_tail].addr = cpu_to_le64(mapping);
544 		if (rx_tail == (CP_RX_RING_SIZE - 1))
545 			desc->opts1 = cpu_to_le32(DescOwn | RingEnd |
546 						  cp->rx_buf_sz);
547 		else
548 			desc->opts1 = cpu_to_le32(DescOwn | cp->rx_buf_sz);
549 		rx_tail = NEXT_RX(rx_tail);
550 
551 		if (rx >= budget)
552 			break;
553 	}
554 
555 	cp->rx_tail = rx_tail;
556 
557 	/* if we did not reach work limit, then we're done with
558 	 * this round of polling
559 	 */
560 	if (rx < budget) {
561 		unsigned long flags;
562 
563 		if (cpr16(IntrStatus) & cp_rx_intr_mask)
564 			goto rx_status_loop;
565 
566 		napi_gro_flush(napi);
567 		spin_lock_irqsave(&cp->lock, flags);
568 		__napi_complete(napi);
569 		cpw16_f(IntrMask, cp_intr_mask);
570 		spin_unlock_irqrestore(&cp->lock, flags);
571 	}
572 
573 	return rx;
574 }
575 
576 static irqreturn_t cp_interrupt (int irq, void *dev_instance)
577 {
578 	struct net_device *dev = dev_instance;
579 	struct cp_private *cp;
580 	u16 status;
581 
582 	if (unlikely(dev == NULL))
583 		return IRQ_NONE;
584 	cp = netdev_priv(dev);
585 
586 	status = cpr16(IntrStatus);
587 	if (!status || (status == 0xFFFF))
588 		return IRQ_NONE;
589 
590 	netif_dbg(cp, intr, dev, "intr, status %04x cmd %02x cpcmd %04x\n",
591 		  status, cpr8(Cmd), cpr16(CpCmd));
592 
593 	cpw16(IntrStatus, status & ~cp_rx_intr_mask);
594 
595 	spin_lock(&cp->lock);
596 
597 	/* close possible races with dev_close */
598 	if (unlikely(!netif_running(dev))) {
599 		cpw16(IntrMask, 0);
600 		spin_unlock(&cp->lock);
601 		return IRQ_HANDLED;
602 	}
603 
604 	if (status & (RxOK | RxErr | RxEmpty | RxFIFOOvr))
605 		if (napi_schedule_prep(&cp->napi)) {
606 			cpw16_f(IntrMask, cp_norx_intr_mask);
607 			__napi_schedule(&cp->napi);
608 		}
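	/*
	 * From here until cp_rx_poll() finishes, only the non-Rx interrupt
	 * sources (cp_norx_intr_mask) remain enabled; the poll routine
	 * re-arms the full cp_intr_mask when it completes NAPI polling.
	 * This is the usual interrupt/NAPI hand-off.
	 */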
609 
610 	if (status & (TxOK | TxErr | TxEmpty | SWInt))
611 		cp_tx(cp);
612 	if (status & LinkChg)
613 		mii_check_media(&cp->mii_if, netif_msg_link(cp), false);
614 
615 	spin_unlock(&cp->lock);
616 
617 	if (status & PciErr) {
618 		u16 pci_status;
619 
620 		pci_read_config_word(cp->pdev, PCI_STATUS, &pci_status);
621 		pci_write_config_word(cp->pdev, PCI_STATUS, pci_status);
622 		netdev_err(dev, "PCI bus error, status=%04x, PCI status=%04x\n",
623 			   status, pci_status);
624 
625 		/* TODO: reset hardware */
626 	}
627 
628 	return IRQ_HANDLED;
629 }
630 
631 #ifdef CONFIG_NET_POLL_CONTROLLER
632 /*
633  * Polling receive - used by netconsole and other diagnostic tools
634  * to allow network i/o with interrupts disabled.
635  */
636 static void cp_poll_controller(struct net_device *dev)
637 {
638 	disable_irq(dev->irq);
639 	cp_interrupt(dev->irq, dev);
640 	enable_irq(dev->irq);
641 }
642 #endif
643 
644 static void cp_tx (struct cp_private *cp)
645 {
646 	unsigned tx_head = cp->tx_head;
647 	unsigned tx_tail = cp->tx_tail;
648 
649 	while (tx_tail != tx_head) {
650 		struct cp_desc *txd = cp->tx_ring + tx_tail;
651 		struct sk_buff *skb;
652 		u32 status;
653 
654 		rmb();
655 		status = le32_to_cpu(txd->opts1);
656 		if (status & DescOwn)
657 			break;
658 
659 		skb = cp->tx_skb[tx_tail];
660 		BUG_ON(!skb);
661 
662 		dma_unmap_single(&cp->pdev->dev, le64_to_cpu(txd->addr),
663 				 le32_to_cpu(txd->opts1) & 0xffff,
664 				 PCI_DMA_TODEVICE);
665 
666 		if (status & LastFrag) {
667 			if (status & (TxError | TxFIFOUnder)) {
668 				netif_dbg(cp, tx_err, cp->dev,
669 					  "tx err, status 0x%x\n", status);
670 				cp->dev->stats.tx_errors++;
671 				if (status & TxOWC)
672 					cp->dev->stats.tx_window_errors++;
673 				if (status & TxMaxCol)
674 					cp->dev->stats.tx_aborted_errors++;
675 				if (status & TxLinkFail)
676 					cp->dev->stats.tx_carrier_errors++;
677 				if (status & TxFIFOUnder)
678 					cp->dev->stats.tx_fifo_errors++;
679 			} else {
680 				cp->dev->stats.collisions +=
681 					((status >> TxColCntShift) & TxColCntMask);
682 				cp->dev->stats.tx_packets++;
683 				cp->dev->stats.tx_bytes += skb->len;
684 				netif_dbg(cp, tx_done, cp->dev,
685 					  "tx done, slot %d\n", tx_tail);
686 			}
687 			dev_kfree_skb_irq(skb);
688 		}
689 
690 		cp->tx_skb[tx_tail] = NULL;
691 
692 		tx_tail = NEXT_TX(tx_tail);
693 	}
694 
695 	cp->tx_tail = tx_tail;
696 
697 	if (TX_BUFFS_AVAIL(cp) > (MAX_SKB_FRAGS + 1))
698 		netif_wake_queue(cp->dev);
699 }
700 
701 static inline u32 cp_tx_vlan_tag(struct sk_buff *skb)
702 {
703 	return vlan_tx_tag_present(skb) ?
704 		TxVlanTag | swab16(vlan_tx_tag_get(skb)) : 0x00;
705 }
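/*
 * The 802.1Q tag is byte-swapped before being placed in opts2; the chip
 * apparently wants it in network byte order.  E.g. VLAN id 5 (0x0005) is
 * written as 0x0500, and cp_rx_skb() swabs the received tag back with
 * swab16() on the Rx side.
 */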
706 
707 static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
708 					struct net_device *dev)
709 {
710 	struct cp_private *cp = netdev_priv(dev);
711 	unsigned entry;
712 	u32 eor, flags;
713 	unsigned long intr_flags;
714 	__le32 opts2;
715 	int mss = 0;
716 
717 	spin_lock_irqsave(&cp->lock, intr_flags);
718 
719 	/* This is a hard error, log it. */
720 	if (TX_BUFFS_AVAIL(cp) <= (skb_shinfo(skb)->nr_frags + 1)) {
721 		netif_stop_queue(dev);
722 		spin_unlock_irqrestore(&cp->lock, intr_flags);
723 		netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
724 		return NETDEV_TX_BUSY;
725 	}
726 
727 	entry = cp->tx_head;
728 	eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0;
729 	mss = skb_shinfo(skb)->gso_size;
730 
731 	opts2 = cpu_to_le32(cp_tx_vlan_tag(skb));
732 
733 	if (skb_shinfo(skb)->nr_frags == 0) {
734 		struct cp_desc *txd = &cp->tx_ring[entry];
735 		u32 len;
736 		dma_addr_t mapping;
737 
738 		len = skb->len;
739 		mapping = dma_map_single(&cp->pdev->dev, skb->data, len, PCI_DMA_TODEVICE);
740 		txd->opts2 = opts2;
741 		txd->addr = cpu_to_le64(mapping);
742 		wmb();
743 
744 		flags = eor | len | DescOwn | FirstFrag | LastFrag;
745 
746 		if (mss)
747 			flags |= LargeSend | ((mss & MSSMask) << MSSShift);
748 		else if (skb->ip_summed == CHECKSUM_PARTIAL) {
749 			const struct iphdr *ip = ip_hdr(skb);
750 			if (ip->protocol == IPPROTO_TCP)
751 				flags |= IPCS | TCPCS;
752 			else if (ip->protocol == IPPROTO_UDP)
753 				flags |= IPCS | UDPCS;
754 			else
755 				WARN_ON(1);	/* we need a WARN() */
756 		}
757 
758 		txd->opts1 = cpu_to_le32(flags);
759 		wmb();
760 
761 		cp->tx_skb[entry] = skb;
762 		entry = NEXT_TX(entry);
763 	} else {
764 		struct cp_desc *txd;
765 		u32 first_len, first_eor;
766 		dma_addr_t first_mapping;
767 		int frag, first_entry = entry;
768 		const struct iphdr *ip = ip_hdr(skb);
769 
770 		/* We must give this initial chunk to the device last.
771 		 * Otherwise we could race with the device.
772 		 */
773 		first_eor = eor;
774 		first_len = skb_headlen(skb);
775 		first_mapping = dma_map_single(&cp->pdev->dev, skb->data,
776 					       first_len, PCI_DMA_TODEVICE);
777 		cp->tx_skb[entry] = skb;
778 		entry = NEXT_TX(entry);
779 
780 		for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
781 			const skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag];
782 			u32 len;
783 			u32 ctrl;
784 			dma_addr_t mapping;
785 
786 			len = skb_frag_size(this_frag);
787 			mapping = dma_map_single(&cp->pdev->dev,
788 						 skb_frag_address(this_frag),
789 						 len, PCI_DMA_TODEVICE);
790 			eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0;
791 
792 			ctrl = eor | len | DescOwn;
793 
794 			if (mss)
795 				ctrl |= LargeSend |
796 					((mss & MSSMask) << MSSShift);
797 			else if (skb->ip_summed == CHECKSUM_PARTIAL) {
798 				if (ip->protocol == IPPROTO_TCP)
799 					ctrl |= IPCS | TCPCS;
800 				else if (ip->protocol == IPPROTO_UDP)
801 					ctrl |= IPCS | UDPCS;
802 				else
803 					BUG();
804 			}
805 
806 			if (frag == skb_shinfo(skb)->nr_frags - 1)
807 				ctrl |= LastFrag;
808 
809 			txd = &cp->tx_ring[entry];
810 			txd->opts2 = opts2;
811 			txd->addr = cpu_to_le64(mapping);
812 			wmb();
813 
814 			txd->opts1 = cpu_to_le32(ctrl);
815 			wmb();
816 
817 			cp->tx_skb[entry] = skb;
818 			entry = NEXT_TX(entry);
819 		}
820 
821 		txd = &cp->tx_ring[first_entry];
822 		txd->opts2 = opts2;
823 		txd->addr = cpu_to_le64(first_mapping);
824 		wmb();
825 
826 		if (skb->ip_summed == CHECKSUM_PARTIAL) {
827 			if (ip->protocol == IPPROTO_TCP)
828 				txd->opts1 = cpu_to_le32(first_eor | first_len |
829 							 FirstFrag | DescOwn |
830 							 IPCS | TCPCS);
831 			else if (ip->protocol == IPPROTO_UDP)
832 				txd->opts1 = cpu_to_le32(first_eor | first_len |
833 							 FirstFrag | DescOwn |
834 							 IPCS | UDPCS);
835 			else
836 				BUG();
837 		} else
838 			txd->opts1 = cpu_to_le32(first_eor | first_len |
839 						 FirstFrag | DescOwn);
840 		wmb();
841 	}
842 	cp->tx_head = entry;
843 	netif_dbg(cp, tx_queued, cp->dev, "tx queued, slot %d, skblen %d\n",
844 		  entry, skb->len);
845 	if (TX_BUFFS_AVAIL(cp) <= (MAX_SKB_FRAGS + 1))
846 		netif_stop_queue(dev);
847 
848 	spin_unlock_irqrestore(&cp->lock, intr_flags);
849 
850 	cpw8(TxPoll, NormalTxPoll);
851 
852 	return NETDEV_TX_OK;
853 }
854 
855 /* Set or clear the multicast filter for this adaptor.
856    This routine is not state sensitive and need not be SMP locked. */
857 
858 static void __cp_set_rx_mode (struct net_device *dev)
859 {
860 	struct cp_private *cp = netdev_priv(dev);
861 	u32 mc_filter[2];	/* Multicast hash filter */
862 	int rx_mode;
863 
864 	/* Note: do not reorder, GCC is clever about common statements. */
865 	if (dev->flags & IFF_PROMISC) {
866 		/* Unconditionally log net taps. */
867 		rx_mode =
868 		    AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
869 		    AcceptAllPhys;
870 		mc_filter[1] = mc_filter[0] = 0xffffffff;
871 	} else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
872 		   (dev->flags & IFF_ALLMULTI)) {
873 		/* Too many to filter perfectly -- accept all multicasts. */
874 		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
875 		mc_filter[1] = mc_filter[0] = 0xffffffff;
876 	} else {
877 		struct netdev_hw_addr *ha;
878 		rx_mode = AcceptBroadcast | AcceptMyPhys;
879 		mc_filter[1] = mc_filter[0] = 0;
880 		netdev_for_each_mc_addr(ha, dev) {
881 			int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
882 
883 			mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
884 			rx_mode |= AcceptMulticast;
885 		}
886 	}
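	/*
	 * Hash filter example: ether_crc() >> 26 keeps the top six CRC bits,
	 * a value 0..63 indexing the 64-bit MAR filter.  A result of, say,
	 * 37 sets bit 5 of mc_filter[1] (37 >> 5 == 1, 37 & 31 == 5), which
	 * is written to the MAR0+4 register below.
	 */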
887 
888 	/* We can safely update without stopping the chip. */
889 	cp->rx_config = cp_rx_config | rx_mode;
890 	cpw32_f(RxConfig, cp->rx_config);
891 
892 	cpw32_f (MAR0 + 0, mc_filter[0]);
893 	cpw32_f (MAR0 + 4, mc_filter[1]);
894 }
895 
896 static void cp_set_rx_mode (struct net_device *dev)
897 {
898 	unsigned long flags;
899 	struct cp_private *cp = netdev_priv(dev);
900 
901 	spin_lock_irqsave (&cp->lock, flags);
902 	__cp_set_rx_mode(dev);
903 	spin_unlock_irqrestore (&cp->lock, flags);
904 }
905 
906 static void __cp_get_stats(struct cp_private *cp)
907 {
908 	/* only lower 24 bits valid; write any value to clear */
909 	cp->dev->stats.rx_missed_errors += (cpr32 (RxMissed) & 0xffffff);
910 	cpw32 (RxMissed, 0);
911 }
912 
913 static struct net_device_stats *cp_get_stats(struct net_device *dev)
914 {
915 	struct cp_private *cp = netdev_priv(dev);
916 	unsigned long flags;
917 
918 	/* The chip only needs to report frames it silently dropped. */
919 	spin_lock_irqsave(&cp->lock, flags);
920  	if (netif_running(dev) && netif_device_present(dev))
921  		__cp_get_stats(cp);
922 	spin_unlock_irqrestore(&cp->lock, flags);
923 
924 	return &dev->stats;
925 }
926 
927 static void cp_stop_hw (struct cp_private *cp)
928 {
929 	cpw16(IntrStatus, ~(cpr16(IntrStatus)));
930 	cpw16_f(IntrMask, 0);
931 	cpw8(Cmd, 0);
932 	cpw16_f(CpCmd, 0);
933 	cpw16_f(IntrStatus, ~(cpr16(IntrStatus)));
934 
935 	cp->rx_tail = 0;
936 	cp->tx_head = cp->tx_tail = 0;
937 }
938 
939 static void cp_reset_hw (struct cp_private *cp)
940 {
941 	unsigned work = 1000;
942 
943 	cpw8(Cmd, CmdReset);
944 
945 	while (work--) {
946 		if (!(cpr8(Cmd) & CmdReset))
947 			return;
948 
949 		schedule_timeout_uninterruptible(10);
950 	}
951 
952 	netdev_err(cp->dev, "hardware reset timeout\n");
953 }
954 
955 static inline void cp_start_hw (struct cp_private *cp)
956 {
957 	cpw16(CpCmd, cp->cpcmd);
958 	cpw8(Cmd, RxOn | TxOn);
959 }
960 
961 static void cp_init_hw (struct cp_private *cp)
962 {
963 	struct net_device *dev = cp->dev;
964 	dma_addr_t ring_dma;
965 
966 	cp_reset_hw(cp);
967 
968 	cpw8_f (Cfg9346, Cfg9346_Unlock);
969 
970 	/* Restore our idea of the MAC address. */
971 	cpw32_f (MAC0 + 0, le32_to_cpu (*(__le32 *) (dev->dev_addr + 0)));
972 	cpw32_f (MAC0 + 4, le32_to_cpu (*(__le32 *) (dev->dev_addr + 4)));
973 
974 	cp_start_hw(cp);
975 	cpw8(TxThresh, 0x06); /* XXX convert magic num to a constant */
976 
977 	__cp_set_rx_mode(dev);
978 	cpw32_f (TxConfig, IFG | (TX_DMA_BURST << TxDMAShift));
979 
980 	cpw8(Config1, cpr8(Config1) | DriverLoaded | PMEnable);
981 	/* Disable Wake-on-LAN. Can be turned on with ETHTOOL_SWOL */
982 	cpw8(Config3, PARMEnable);
983 	cp->wol_enabled = 0;
984 
985 	cpw8(Config5, cpr8(Config5) & PMEStatus);
986 
987 	cpw32_f(HiTxRingAddr, 0);
988 	cpw32_f(HiTxRingAddr + 4, 0);
989 
990 	ring_dma = cp->ring_dma;
991 	cpw32_f(RxRingAddr, ring_dma & 0xffffffff);
992 	cpw32_f(RxRingAddr + 4, (ring_dma >> 16) >> 16);
993 
994 	ring_dma += sizeof(struct cp_desc) * CP_RX_RING_SIZE;
995 	cpw32_f(TxRingAddr, ring_dma & 0xffffffff);
996 	cpw32_f(TxRingAddr + 4, (ring_dma >> 16) >> 16);
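	/*
	 * The high halves are written as (ring_dma >> 16) >> 16 rather than
	 * ">> 32" so the expression stays well-defined when dma_addr_t is
	 * only 32 bits wide (shifting a 32-bit value by 32 bits is undefined
	 * in C); on such systems the upper register simply gets zero.
	 */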
997 
998 	cpw16(MultiIntr, 0);
999 
1000 	cpw16_f(IntrMask, cp_intr_mask);
1001 
1002 	cpw8_f(Cfg9346, Cfg9346_Lock);
1003 }
1004 
1005 static int cp_refill_rx(struct cp_private *cp)
1006 {
1007 	struct net_device *dev = cp->dev;
1008 	unsigned i;
1009 
1010 	for (i = 0; i < CP_RX_RING_SIZE; i++) {
1011 		struct sk_buff *skb;
1012 		dma_addr_t mapping;
1013 
1014 		skb = netdev_alloc_skb_ip_align(dev, cp->rx_buf_sz);
1015 		if (!skb)
1016 			goto err_out;
1017 
1018 		mapping = dma_map_single(&cp->pdev->dev, skb->data,
1019 					 cp->rx_buf_sz, PCI_DMA_FROMDEVICE);
1020 		cp->rx_skb[i] = skb;
1021 
1022 		cp->rx_ring[i].opts2 = 0;
1023 		cp->rx_ring[i].addr = cpu_to_le64(mapping);
1024 		if (i == (CP_RX_RING_SIZE - 1))
1025 			cp->rx_ring[i].opts1 =
1026 				cpu_to_le32(DescOwn | RingEnd | cp->rx_buf_sz);
1027 		else
1028 			cp->rx_ring[i].opts1 =
1029 				cpu_to_le32(DescOwn | cp->rx_buf_sz);
1030 	}
1031 
1032 	return 0;
1033 
1034 err_out:
1035 	cp_clean_rings(cp);
1036 	return -ENOMEM;
1037 }
1038 
1039 static void cp_init_rings_index (struct cp_private *cp)
1040 {
1041 	cp->rx_tail = 0;
1042 	cp->tx_head = cp->tx_tail = 0;
1043 }
1044 
1045 static int cp_init_rings (struct cp_private *cp)
1046 {
1047 	memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE);
1048 	cp->tx_ring[CP_TX_RING_SIZE - 1].opts1 = cpu_to_le32(RingEnd);
1049 
1050 	cp_init_rings_index(cp);
1051 
1052 	return cp_refill_rx (cp);
1053 }
1054 
1055 static int cp_alloc_rings (struct cp_private *cp)
1056 {
1057 	void *mem;
1058 
1059 	mem = dma_alloc_coherent(&cp->pdev->dev, CP_RING_BYTES,
1060 				 &cp->ring_dma, GFP_KERNEL);
1061 	if (!mem)
1062 		return -ENOMEM;
1063 
1064 	cp->rx_ring = mem;
1065 	cp->tx_ring = &cp->rx_ring[CP_RX_RING_SIZE];
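	/*
	 * A single coherent allocation of CP_RING_BYTES, laid out as
	 * [CP_RX_RING_SIZE Rx descriptors][CP_TX_RING_SIZE Tx descriptors]
	 * [CP_STATS_SIZE bytes reserved for the DMA stats block];
	 * cp_init_hw() derives the Tx ring bus address from ring_dma with
	 * the same offset arithmetic.
	 */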
1066 
1067 	return cp_init_rings(cp);
1068 }
1069 
1070 static void cp_clean_rings (struct cp_private *cp)
1071 {
1072 	struct cp_desc *desc;
1073 	unsigned i;
1074 
1075 	for (i = 0; i < CP_RX_RING_SIZE; i++) {
1076 		if (cp->rx_skb[i]) {
1077 			desc = cp->rx_ring + i;
1078 			dma_unmap_single(&cp->pdev->dev,le64_to_cpu(desc->addr),
1079 					 cp->rx_buf_sz, PCI_DMA_FROMDEVICE);
1080 			dev_kfree_skb(cp->rx_skb[i]);
1081 		}
1082 	}
1083 
1084 	for (i = 0; i < CP_TX_RING_SIZE; i++) {
1085 		if (cp->tx_skb[i]) {
1086 			struct sk_buff *skb = cp->tx_skb[i];
1087 
1088 			desc = cp->tx_ring + i;
1089 			dma_unmap_single(&cp->pdev->dev,le64_to_cpu(desc->addr),
1090 					 le32_to_cpu(desc->opts1) & 0xffff,
1091 					 PCI_DMA_TODEVICE);
1092 			if (le32_to_cpu(desc->opts1) & LastFrag)
1093 				dev_kfree_skb(skb);
1094 			cp->dev->stats.tx_dropped++;
1095 		}
1096 	}
1097 
1098 	memset(cp->rx_ring, 0, sizeof(struct cp_desc) * CP_RX_RING_SIZE);
1099 	memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE);
1100 
1101 	memset(cp->rx_skb, 0, sizeof(struct sk_buff *) * CP_RX_RING_SIZE);
1102 	memset(cp->tx_skb, 0, sizeof(struct sk_buff *) * CP_TX_RING_SIZE);
1103 }
1104 
1105 static void cp_free_rings (struct cp_private *cp)
1106 {
1107 	cp_clean_rings(cp);
1108 	dma_free_coherent(&cp->pdev->dev, CP_RING_BYTES, cp->rx_ring,
1109 			  cp->ring_dma);
1110 	cp->rx_ring = NULL;
1111 	cp->tx_ring = NULL;
1112 }
1113 
1114 static int cp_open (struct net_device *dev)
1115 {
1116 	struct cp_private *cp = netdev_priv(dev);
1117 	int rc;
1118 
1119 	netif_dbg(cp, ifup, dev, "enabling interface\n");
1120 
1121 	rc = cp_alloc_rings(cp);
1122 	if (rc)
1123 		return rc;
1124 
1125 	napi_enable(&cp->napi);
1126 
1127 	cp_init_hw(cp);
1128 
1129 	rc = request_irq(dev->irq, cp_interrupt, IRQF_SHARED, dev->name, dev);
1130 	if (rc)
1131 		goto err_out_hw;
1132 
1133 	netif_carrier_off(dev);
1134 	mii_check_media(&cp->mii_if, netif_msg_link(cp), true);
1135 	netif_start_queue(dev);
1136 
1137 	return 0;
1138 
1139 err_out_hw:
1140 	napi_disable(&cp->napi);
1141 	cp_stop_hw(cp);
1142 	cp_free_rings(cp);
1143 	return rc;
1144 }
1145 
1146 static int cp_close (struct net_device *dev)
1147 {
1148 	struct cp_private *cp = netdev_priv(dev);
1149 	unsigned long flags;
1150 
1151 	napi_disable(&cp->napi);
1152 
1153 	netif_dbg(cp, ifdown, dev, "disabling interface\n");
1154 
1155 	spin_lock_irqsave(&cp->lock, flags);
1156 
1157 	netif_stop_queue(dev);
1158 	netif_carrier_off(dev);
1159 
1160 	cp_stop_hw(cp);
1161 
1162 	spin_unlock_irqrestore(&cp->lock, flags);
1163 
1164 	free_irq(dev->irq, dev);
1165 
1166 	cp_free_rings(cp);
1167 	return 0;
1168 }
1169 
1170 static void cp_tx_timeout(struct net_device *dev)
1171 {
1172 	struct cp_private *cp = netdev_priv(dev);
1173 	unsigned long flags;
1174 	int rc;
1175 
1176 	netdev_warn(dev, "Transmit timeout, status %2x %4x %4x %4x\n",
1177 		    cpr8(Cmd), cpr16(CpCmd),
1178 		    cpr16(IntrStatus), cpr16(IntrMask));
1179 
1180 	spin_lock_irqsave(&cp->lock, flags);
1181 
1182 	cp_stop_hw(cp);
1183 	cp_clean_rings(cp);
1184 	rc = cp_init_rings(cp);
1185 	cp_start_hw(cp);
1186 
1187 	netif_wake_queue(dev);
1188 
1189 	spin_unlock_irqrestore(&cp->lock, flags);
1190 }
1191 
1192 #ifdef BROKEN
1193 static int cp_change_mtu(struct net_device *dev, int new_mtu)
1194 {
1195 	struct cp_private *cp = netdev_priv(dev);
1196 	int rc;
1197 	unsigned long flags;
1198 
1199 	/* check for invalid MTU, according to hardware limits */
1200 	if (new_mtu < CP_MIN_MTU || new_mtu > CP_MAX_MTU)
1201 		return -EINVAL;
1202 
1203 	/* if network interface not up, no need for complexity */
1204 	if (!netif_running(dev)) {
1205 		dev->mtu = new_mtu;
1206 		cp_set_rxbufsize(cp);	/* set new rx buf size */
1207 		return 0;
1208 	}
1209 
1210 	spin_lock_irqsave(&cp->lock, flags);
1211 
1212 	cp_stop_hw(cp);			/* stop h/w and free rings */
1213 	cp_clean_rings(cp);
1214 
1215 	dev->mtu = new_mtu;
1216 	cp_set_rxbufsize(cp);		/* set new rx buf size */
1217 
1218 	rc = cp_init_rings(cp);		/* realloc and restart h/w */
1219 	cp_start_hw(cp);
1220 
1221 	spin_unlock_irqrestore(&cp->lock, flags);
1222 
1223 	return rc;
1224 }
1225 #endif /* BROKEN */
1226 
1227 static const char mii_2_8139_map[8] = {
1228 	BasicModeCtrl,
1229 	BasicModeStatus,
1230 	0,
1231 	0,
1232 	NWayAdvert,
1233 	NWayLPAR,
1234 	NWayExpansion,
1235 	0
1236 };
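/*
 * The map above translates generic MII register numbers into the chip's
 * MII-mirror registers, so e.g. mdio_read(dev, CP_INTERNAL_PHY, MII_BMCR)
 * reads BasicModeCtrl at 0x62.  Registers without a mirror (2, 3 and 7)
 * simply read back as 0.
 */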
1237 
1238 static int mdio_read(struct net_device *dev, int phy_id, int location)
1239 {
1240 	struct cp_private *cp = netdev_priv(dev);
1241 
1242 	return location < 8 && mii_2_8139_map[location] ?
1243 	       readw(cp->regs + mii_2_8139_map[location]) : 0;
1244 }
1245 
1246 
1247 static void mdio_write(struct net_device *dev, int phy_id, int location,
1248 		       int value)
1249 {
1250 	struct cp_private *cp = netdev_priv(dev);
1251 
1252 	if (location == 0) {
1253 		cpw8(Cfg9346, Cfg9346_Unlock);
1254 		cpw16(BasicModeCtrl, value);
1255 		cpw8(Cfg9346, Cfg9346_Lock);
1256 	} else if (location < 8 && mii_2_8139_map[location])
1257 		cpw16(mii_2_8139_map[location], value);
1258 }
1259 
1260 /* Set the ethtool Wake-on-LAN settings */
1261 static int netdev_set_wol (struct cp_private *cp,
1262 			   const struct ethtool_wolinfo *wol)
1263 {
1264 	u8 options;
1265 
1266 	options = cpr8 (Config3) & ~(LinkUp | MagicPacket);
1267 	/* If WOL is being disabled, no need for complexity */
1268 	if (wol->wolopts) {
1269 		if (wol->wolopts & WAKE_PHY)	options |= LinkUp;
1270 		if (wol->wolopts & WAKE_MAGIC)	options |= MagicPacket;
1271 	}
1272 
1273 	cpw8 (Cfg9346, Cfg9346_Unlock);
1274 	cpw8 (Config3, options);
1275 	cpw8 (Cfg9346, Cfg9346_Lock);
1276 
1277 	options = 0; /* Paranoia setting */
1278 	options = cpr8 (Config5) & ~(UWF | MWF | BWF);
1279 	/* If WOL is being disabled, no need for complexity */
1280 	if (wol->wolopts) {
1281 		if (wol->wolopts & WAKE_UCAST)  options |= UWF;
1282 		if (wol->wolopts & WAKE_BCAST)	options |= BWF;
1283 		if (wol->wolopts & WAKE_MCAST)	options |= MWF;
1284 	}
1285 
1286 	cpw8 (Config5, options);
1287 
1288 	cp->wol_enabled = (wol->wolopts) ? 1 : 0;
1289 
1290 	return 0;
1291 }
1292 
1293 /* Get the ethtool Wake-on-LAN settings */
1294 static void netdev_get_wol (struct cp_private *cp,
1295 	             struct ethtool_wolinfo *wol)
1296 {
1297 	u8 options;
1298 
1299 	wol->wolopts   = 0; /* Start from scratch */
1300 	wol->supported = WAKE_PHY   | WAKE_BCAST | WAKE_MAGIC |
1301 		         WAKE_MCAST | WAKE_UCAST;
1302 	/* We don't need to go on if WOL is disabled */
1303 	if (!cp->wol_enabled) return;
1304 
1305 	options        = cpr8 (Config3);
1306 	if (options & LinkUp)        wol->wolopts |= WAKE_PHY;
1307 	if (options & MagicPacket)   wol->wolopts |= WAKE_MAGIC;
1308 
1309 	options        = 0; /* Paranoia setting */
1310 	options        = cpr8 (Config5);
1311 	if (options & UWF)           wol->wolopts |= WAKE_UCAST;
1312 	if (options & BWF)           wol->wolopts |= WAKE_BCAST;
1313 	if (options & MWF)           wol->wolopts |= WAKE_MCAST;
1314 }
1315 
1316 static void cp_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info)
1317 {
1318 	struct cp_private *cp = netdev_priv(dev);
1319 
1320 	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
1321 	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
1322 	strlcpy(info->bus_info, pci_name(cp->pdev), sizeof(info->bus_info));
1323 }
1324 
1325 static void cp_get_ringparam(struct net_device *dev,
1326 				struct ethtool_ringparam *ring)
1327 {
1328 	ring->rx_max_pending = CP_RX_RING_SIZE;
1329 	ring->tx_max_pending = CP_TX_RING_SIZE;
1330 	ring->rx_pending = CP_RX_RING_SIZE;
1331 	ring->tx_pending = CP_TX_RING_SIZE;
1332 }
1333 
1334 static int cp_get_regs_len(struct net_device *dev)
1335 {
1336 	return CP_REGS_SIZE;
1337 }
1338 
1339 static int cp_get_sset_count (struct net_device *dev, int sset)
1340 {
1341 	switch (sset) {
1342 	case ETH_SS_STATS:
1343 		return CP_NUM_STATS;
1344 	default:
1345 		return -EOPNOTSUPP;
1346 	}
1347 }
1348 
1349 static int cp_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1350 {
1351 	struct cp_private *cp = netdev_priv(dev);
1352 	int rc;
1353 	unsigned long flags;
1354 
1355 	spin_lock_irqsave(&cp->lock, flags);
1356 	rc = mii_ethtool_gset(&cp->mii_if, cmd);
1357 	spin_unlock_irqrestore(&cp->lock, flags);
1358 
1359 	return rc;
1360 }
1361 
1362 static int cp_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1363 {
1364 	struct cp_private *cp = netdev_priv(dev);
1365 	int rc;
1366 	unsigned long flags;
1367 
1368 	spin_lock_irqsave(&cp->lock, flags);
1369 	rc = mii_ethtool_sset(&cp->mii_if, cmd);
1370 	spin_unlock_irqrestore(&cp->lock, flags);
1371 
1372 	return rc;
1373 }
1374 
1375 static int cp_nway_reset(struct net_device *dev)
1376 {
1377 	struct cp_private *cp = netdev_priv(dev);
1378 	return mii_nway_restart(&cp->mii_if);
1379 }
1380 
1381 static u32 cp_get_msglevel(struct net_device *dev)
1382 {
1383 	struct cp_private *cp = netdev_priv(dev);
1384 	return cp->msg_enable;
1385 }
1386 
1387 static void cp_set_msglevel(struct net_device *dev, u32 value)
1388 {
1389 	struct cp_private *cp = netdev_priv(dev);
1390 	cp->msg_enable = value;
1391 }
1392 
1393 static int cp_set_features(struct net_device *dev, netdev_features_t features)
1394 {
1395 	struct cp_private *cp = netdev_priv(dev);
1396 	unsigned long flags;
1397 
1398 	if (!((dev->features ^ features) & NETIF_F_RXCSUM))
1399 		return 0;
1400 
1401 	spin_lock_irqsave(&cp->lock, flags);
1402 
1403 	if (features & NETIF_F_RXCSUM)
1404 		cp->cpcmd |= RxChkSum;
1405 	else
1406 		cp->cpcmd &= ~RxChkSum;
1407 
1408 	if (features & NETIF_F_HW_VLAN_RX)
1409 		cp->cpcmd |= RxVlanOn;
1410 	else
1411 		cp->cpcmd &= ~RxVlanOn;
1412 
1413 	cpw16_f(CpCmd, cp->cpcmd);
1414 	spin_unlock_irqrestore(&cp->lock, flags);
1415 
1416 	return 0;
1417 }
1418 
1419 static void cp_get_regs(struct net_device *dev, struct ethtool_regs *regs,
1420 		        void *p)
1421 {
1422 	struct cp_private *cp = netdev_priv(dev);
1423 	unsigned long flags;
1424 
1425 	if (regs->len < CP_REGS_SIZE)
1426 		return /* -EINVAL */;
1427 
1428 	regs->version = CP_REGS_VER;
1429 
1430 	spin_lock_irqsave(&cp->lock, flags);
1431 	memcpy_fromio(p, cp->regs, CP_REGS_SIZE);
1432 	spin_unlock_irqrestore(&cp->lock, flags);
1433 }
1434 
1435 static void cp_get_wol (struct net_device *dev, struct ethtool_wolinfo *wol)
1436 {
1437 	struct cp_private *cp = netdev_priv(dev);
1438 	unsigned long flags;
1439 
1440 	spin_lock_irqsave (&cp->lock, flags);
1441 	netdev_get_wol (cp, wol);
1442 	spin_unlock_irqrestore (&cp->lock, flags);
1443 }
1444 
1445 static int cp_set_wol (struct net_device *dev, struct ethtool_wolinfo *wol)
1446 {
1447 	struct cp_private *cp = netdev_priv(dev);
1448 	unsigned long flags;
1449 	int rc;
1450 
1451 	spin_lock_irqsave (&cp->lock, flags);
1452 	rc = netdev_set_wol (cp, wol);
1453 	spin_unlock_irqrestore (&cp->lock, flags);
1454 
1455 	return rc;
1456 }
1457 
1458 static void cp_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
1459 {
1460 	switch (stringset) {
1461 	case ETH_SS_STATS:
1462 		memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
1463 		break;
1464 	default:
1465 		BUG();
1466 		break;
1467 	}
1468 }
1469 
1470 static void cp_get_ethtool_stats (struct net_device *dev,
1471 				  struct ethtool_stats *estats, u64 *tmp_stats)
1472 {
1473 	struct cp_private *cp = netdev_priv(dev);
1474 	struct cp_dma_stats *nic_stats;
1475 	dma_addr_t dma;
1476 	int i;
1477 
1478 	nic_stats = dma_alloc_coherent(&cp->pdev->dev, sizeof(*nic_stats),
1479 				       &dma, GFP_KERNEL);
1480 	if (!nic_stats)
1481 		return;
1482 
1483 	/* begin NIC statistics dump */
1484 	cpw32(StatsAddr + 4, (u64)dma >> 32);
1485 	cpw32(StatsAddr, ((u64)dma & DMA_BIT_MASK(32)) | DumpStats);
1486 	cpr32(StatsAddr);
1487 
1488 	for (i = 0; i < 1000; i++) {
1489 		if ((cpr32(StatsAddr) & DumpStats) == 0)
1490 			break;
1491 		udelay(10);
1492 	}
1493 	cpw32(StatsAddr, 0);
1494 	cpw32(StatsAddr + 4, 0);
1495 	cpr32(StatsAddr);
1496 
1497 	i = 0;
1498 	tmp_stats[i++] = le64_to_cpu(nic_stats->tx_ok);
1499 	tmp_stats[i++] = le64_to_cpu(nic_stats->rx_ok);
1500 	tmp_stats[i++] = le64_to_cpu(nic_stats->tx_err);
1501 	tmp_stats[i++] = le32_to_cpu(nic_stats->rx_err);
1502 	tmp_stats[i++] = le16_to_cpu(nic_stats->rx_fifo);
1503 	tmp_stats[i++] = le16_to_cpu(nic_stats->frame_align);
1504 	tmp_stats[i++] = le32_to_cpu(nic_stats->tx_ok_1col);
1505 	tmp_stats[i++] = le32_to_cpu(nic_stats->tx_ok_mcol);
1506 	tmp_stats[i++] = le64_to_cpu(nic_stats->rx_ok_phys);
1507 	tmp_stats[i++] = le64_to_cpu(nic_stats->rx_ok_bcast);
1508 	tmp_stats[i++] = le32_to_cpu(nic_stats->rx_ok_mcast);
1509 	tmp_stats[i++] = le16_to_cpu(nic_stats->tx_abort);
1510 	tmp_stats[i++] = le16_to_cpu(nic_stats->tx_underrun);
1511 	tmp_stats[i++] = cp->cp_stats.rx_frags;
1512 	BUG_ON(i != CP_NUM_STATS);
1513 
1514 	dma_free_coherent(&cp->pdev->dev, sizeof(*nic_stats), nic_stats, dma);
1515 }
1516 
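/*
 * These hooks back the usual ethtool(8) operations, e.g. "ethtool -S"
 * (NIC statistics via cp_get_ethtool_stats), "ethtool -e"/"-E" (EEPROM
 * read/write) and "ethtool -s ethX wol g" (Wake-on-LAN via cp_set_wol).
 */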
1517 static const struct ethtool_ops cp_ethtool_ops = {
1518 	.get_drvinfo		= cp_get_drvinfo,
1519 	.get_regs_len		= cp_get_regs_len,
1520 	.get_sset_count		= cp_get_sset_count,
1521 	.get_settings		= cp_get_settings,
1522 	.set_settings		= cp_set_settings,
1523 	.nway_reset		= cp_nway_reset,
1524 	.get_link		= ethtool_op_get_link,
1525 	.get_msglevel		= cp_get_msglevel,
1526 	.set_msglevel		= cp_set_msglevel,
1527 	.get_regs		= cp_get_regs,
1528 	.get_wol		= cp_get_wol,
1529 	.set_wol		= cp_set_wol,
1530 	.get_strings		= cp_get_strings,
1531 	.get_ethtool_stats	= cp_get_ethtool_stats,
1532 	.get_eeprom_len		= cp_get_eeprom_len,
1533 	.get_eeprom		= cp_get_eeprom,
1534 	.set_eeprom		= cp_set_eeprom,
1535 	.get_ringparam		= cp_get_ringparam,
1536 };
1537 
1538 static int cp_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
1539 {
1540 	struct cp_private *cp = netdev_priv(dev);
1541 	int rc;
1542 	unsigned long flags;
1543 
1544 	if (!netif_running(dev))
1545 		return -EINVAL;
1546 
1547 	spin_lock_irqsave(&cp->lock, flags);
1548 	rc = generic_mii_ioctl(&cp->mii_if, if_mii(rq), cmd, NULL);
1549 	spin_unlock_irqrestore(&cp->lock, flags);
1550 	return rc;
1551 }
1552 
1553 static int cp_set_mac_address(struct net_device *dev, void *p)
1554 {
1555 	struct cp_private *cp = netdev_priv(dev);
1556 	struct sockaddr *addr = p;
1557 
1558 	if (!is_valid_ether_addr(addr->sa_data))
1559 		return -EADDRNOTAVAIL;
1560 
1561 	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
1562 
1563 	spin_lock_irq(&cp->lock);
1564 
1565 	cpw8_f(Cfg9346, Cfg9346_Unlock);
1566 	cpw32_f(MAC0 + 0, le32_to_cpu (*(__le32 *) (dev->dev_addr + 0)));
1567 	cpw32_f(MAC0 + 4, le32_to_cpu (*(__le32 *) (dev->dev_addr + 4)));
1568 	cpw8_f(Cfg9346, Cfg9346_Lock);
1569 
1570 	spin_unlock_irq(&cp->lock);
1571 
1572 	return 0;
1573 }
1574 
1575 /* Serial EEPROM section. */
1576 
1577 /*  EEPROM_Ctrl bits. */
1578 #define EE_SHIFT_CLK	0x04	/* EEPROM shift clock. */
1579 #define EE_CS			0x08	/* EEPROM chip select. */
1580 #define EE_DATA_WRITE	0x02	/* EEPROM chip data in. */
1581 #define EE_WRITE_0		0x00
1582 #define EE_WRITE_1		0x02
1583 #define EE_DATA_READ	0x01	/* EEPROM chip data out. */
1584 #define EE_ENB			(0x80 | EE_CS)
1585 
1586 /* Delay between EEPROM clock transitions.
1587    No extra delay is needed with 33 MHz PCI, but 66 MHz may change this.
1588  */
1589 
1590 #define eeprom_delay()	readb(ee_addr)
1591 
1592 /* The EEPROM commands include the always-set leading bit. */
1593 #define EE_EXTEND_CMD	(4)
1594 #define EE_WRITE_CMD	(5)
1595 #define EE_READ_CMD		(6)
1596 #define EE_ERASE_CMD	(7)
1597 
1598 #define EE_EWDS_ADDR	(0)
1599 #define EE_WRAL_ADDR	(1)
1600 #define EE_ERAL_ADDR	(2)
1601 #define EE_EWEN_ADDR	(3)
1602 
1603 #define CP_EEPROM_MAGIC PCI_DEVICE_ID_REALTEK_8139
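/*
 * 93C46/93C56-style serial framing: a start bit (folded into the opcode
 * values above), the opcode and the address are clocked out MSB-first,
 * followed by 16 data bits in or out.  For instance, with a 6-bit address
 * a read of word 7 shifts out
 *	read_cmd = 7 | (EE_READ_CMD << 6) == 0x187
 * over 3 + 6 clocks before the 16 data clocks; see read_eeprom().
 */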
1604 
1605 static void eeprom_cmd_start(void __iomem *ee_addr)
1606 {
1607 	writeb (EE_ENB & ~EE_CS, ee_addr);
1608 	writeb (EE_ENB, ee_addr);
1609 	eeprom_delay ();
1610 }
1611 
1612 static void eeprom_cmd(void __iomem *ee_addr, int cmd, int cmd_len)
1613 {
1614 	int i;
1615 
1616 	/* Shift the command bits out. */
1617 	for (i = cmd_len - 1; i >= 0; i--) {
1618 		int dataval = (cmd & (1 << i)) ? EE_DATA_WRITE : 0;
1619 		writeb (EE_ENB | dataval, ee_addr);
1620 		eeprom_delay ();
1621 		writeb (EE_ENB | dataval | EE_SHIFT_CLK, ee_addr);
1622 		eeprom_delay ();
1623 	}
1624 	writeb (EE_ENB, ee_addr);
1625 	eeprom_delay ();
1626 }
1627 
1628 static void eeprom_cmd_end(void __iomem *ee_addr)
1629 {
1630 	writeb (~EE_CS, ee_addr);
1631 	eeprom_delay ();
1632 }
1633 
1634 static void eeprom_extend_cmd(void __iomem *ee_addr, int extend_cmd,
1635 			      int addr_len)
1636 {
1637 	int cmd = (EE_EXTEND_CMD << addr_len) | (extend_cmd << (addr_len - 2));
1638 
1639 	eeprom_cmd_start(ee_addr);
1640 	eeprom_cmd(ee_addr, cmd, 3 + addr_len);
1641 	eeprom_cmd_end(ee_addr);
1642 }
1643 
1644 static u16 read_eeprom (void __iomem *ioaddr, int location, int addr_len)
1645 {
1646 	int i;
1647 	u16 retval = 0;
1648 	void __iomem *ee_addr = ioaddr + Cfg9346;
1649 	int read_cmd = location | (EE_READ_CMD << addr_len);
1650 
1651 	eeprom_cmd_start(ee_addr);
1652 	eeprom_cmd(ee_addr, read_cmd, 3 + addr_len);
1653 
1654 	for (i = 16; i > 0; i--) {
1655 		writeb (EE_ENB | EE_SHIFT_CLK, ee_addr);
1656 		eeprom_delay ();
1657 		retval =
1658 		    (retval << 1) | ((readb (ee_addr) & EE_DATA_READ) ? 1 :
1659 				     0);
1660 		writeb (EE_ENB, ee_addr);
1661 		eeprom_delay ();
1662 	}
1663 
1664 	eeprom_cmd_end(ee_addr);
1665 
1666 	return retval;
1667 }
1668 
1669 static void write_eeprom(void __iomem *ioaddr, int location, u16 val,
1670 			 int addr_len)
1671 {
1672 	int i;
1673 	void __iomem *ee_addr = ioaddr + Cfg9346;
1674 	int write_cmd = location | (EE_WRITE_CMD << addr_len);
1675 
1676 	eeprom_extend_cmd(ee_addr, EE_EWEN_ADDR, addr_len);
1677 
1678 	eeprom_cmd_start(ee_addr);
1679 	eeprom_cmd(ee_addr, write_cmd, 3 + addr_len);
1680 	eeprom_cmd(ee_addr, val, 16);
1681 	eeprom_cmd_end(ee_addr);
1682 
1683 	eeprom_cmd_start(ee_addr);
1684 	for (i = 0; i < 20000; i++)
1685 		if (readb(ee_addr) & EE_DATA_READ)
1686 			break;
1687 	eeprom_cmd_end(ee_addr);
1688 
1689 	eeprom_extend_cmd(ee_addr, EE_EWDS_ADDR, addr_len);
1690 }
1691 
1692 static int cp_get_eeprom_len(struct net_device *dev)
1693 {
1694 	struct cp_private *cp = netdev_priv(dev);
1695 	int size;
1696 
1697 	spin_lock_irq(&cp->lock);
1698 	size = read_eeprom(cp->regs, 0, 8) == 0x8129 ? 256 : 128;
1699 	spin_unlock_irq(&cp->lock);
1700 
1701 	return size;
1702 }
1703 
1704 static int cp_get_eeprom(struct net_device *dev,
1705 			 struct ethtool_eeprom *eeprom, u8 *data)
1706 {
1707 	struct cp_private *cp = netdev_priv(dev);
1708 	unsigned int addr_len;
1709 	u16 val;
1710 	u32 offset = eeprom->offset >> 1;
1711 	u32 len = eeprom->len;
1712 	u32 i = 0;
1713 
1714 	eeprom->magic = CP_EEPROM_MAGIC;
1715 
1716 	spin_lock_irq(&cp->lock);
1717 
1718 	addr_len = read_eeprom(cp->regs, 0, 8) == 0x8129 ? 8 : 6;
1719 
1720 	if (eeprom->offset & 1) {
1721 		val = read_eeprom(cp->regs, offset, addr_len);
1722 		data[i++] = (u8)(val >> 8);
1723 		offset++;
1724 	}
1725 
1726 	while (i < len - 1) {
1727 		val = read_eeprom(cp->regs, offset, addr_len);
1728 		data[i++] = (u8)val;
1729 		data[i++] = (u8)(val >> 8);
1730 		offset++;
1731 	}
1732 
1733 	if (i < len) {
1734 		val = read_eeprom(cp->regs, offset, addr_len);
1735 		data[i] = (u8)val;
1736 	}
1737 
1738 	spin_unlock_irq(&cp->lock);
1739 	return 0;
1740 }
1741 
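/* Writes follow the same word-alignment rules: unaligned leading and
 * trailing bytes are merged into the existing contents with a
 * read-modify-write, and the caller must present the magic value that
 * cp_get_eeprom reported or the request is rejected.
 */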
1742 static int cp_set_eeprom(struct net_device *dev,
1743 			 struct ethtool_eeprom *eeprom, u8 *data)
1744 {
1745 	struct cp_private *cp = netdev_priv(dev);
1746 	unsigned int addr_len;
1747 	u16 val;
1748 	u32 offset = eeprom->offset >> 1;
1749 	u32 len = eeprom->len;
1750 	u32 i = 0;
1751 
1752 	if (eeprom->magic != CP_EEPROM_MAGIC)
1753 		return -EINVAL;
1754 
1755 	spin_lock_irq(&cp->lock);
1756 
1757 	addr_len = read_eeprom(cp->regs, 0, 8) == 0x8129 ? 8 : 6;
1758 
1759 	if (eeprom->offset & 1) {
1760 		val = read_eeprom(cp->regs, offset, addr_len) & 0xff;
1761 		val |= (u16)data[i++] << 8;
1762 		write_eeprom(cp->regs, offset, val, addr_len);
1763 		offset++;
1764 	}
1765 
1766 	while (i < len - 1) {
1767 		val = (u16)data[i++];
1768 		val |= (u16)data[i++] << 8;
1769 		write_eeprom(cp->regs, offset, val, addr_len);
1770 		offset++;
1771 	}
1772 
1773 	if (i < len) {
1774 		val = read_eeprom(cp->regs, offset, addr_len) & 0xff00;
1775 		val |= (u16)data[i];
1776 		write_eeprom(cp->regs, offset, val, addr_len);
1777 	}
1778 
1779 	spin_unlock_irq(&cp->lock);
1780 	return 0;
1781 }
1782 
1783 /* Put the board into a low-power D3 state and wait for the WakeUp signal */
1784 static void cp_set_d3_state (struct cp_private *cp)
1785 {
1786 	pci_enable_wake (cp->pdev, 0, 1); /* Enable PME# generation */
1787 	pci_set_power_state (cp->pdev, PCI_D3hot);
1788 }
1789 
1790 static const struct net_device_ops cp_netdev_ops = {
1791 	.ndo_open		= cp_open,
1792 	.ndo_stop		= cp_close,
1793 	.ndo_validate_addr	= eth_validate_addr,
1794 	.ndo_set_mac_address 	= cp_set_mac_address,
1795 	.ndo_set_rx_mode	= cp_set_rx_mode,
1796 	.ndo_get_stats		= cp_get_stats,
1797 	.ndo_do_ioctl		= cp_ioctl,
1798 	.ndo_start_xmit		= cp_start_xmit,
1799 	.ndo_tx_timeout		= cp_tx_timeout,
1800 	.ndo_set_features	= cp_set_features,
1801 #ifdef BROKEN
1802 	.ndo_change_mtu		= cp_change_mtu,
1803 #endif
1804 
1805 #ifdef CONFIG_NET_POLL_CONTROLLER
1806 	.ndo_poll_controller	= cp_poll_controller,
1807 #endif
1808 };
1809 
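/* PCI probe.  Early revisions (< 0x20) of the plain RTL-8139 share this
 * PCI ID but are not C+ compatible, so they are left to the 8139too
 * driver.  The rest is the usual probe sequence: allocate the
 * net_device, enable the PCI device, MWI and the MMIO region, pick a
 * 64- or 32-bit DMA mask, map BAR 1, read the MAC address from EEPROM
 * words 7-9 and register the interface.
 */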
1810 static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
1811 {
1812 	struct net_device *dev;
1813 	struct cp_private *cp;
1814 	int rc;
1815 	void __iomem *regs;
1816 	resource_size_t pciaddr;
1817 	unsigned int addr_len, i, pci_using_dac;
1818 
1819 #ifndef MODULE
1820 	static int version_printed;
1821 	if (version_printed++ == 0)
1822 		pr_info("%s", version);
1823 #endif
1824 
1825 	if (pdev->vendor == PCI_VENDOR_ID_REALTEK &&
1826 	    pdev->device == PCI_DEVICE_ID_REALTEK_8139 && pdev->revision < 0x20) {
1827 		dev_info(&pdev->dev,
1828 			 "This (id %04x:%04x rev %02x) is not an 8139C+ compatible chip, use 8139too\n",
1829 			 pdev->vendor, pdev->device, pdev->revision);
1830 		return -ENODEV;
1831 	}
1832 
1833 	dev = alloc_etherdev(sizeof(struct cp_private));
1834 	if (!dev)
1835 		return -ENOMEM;
1836 	SET_NETDEV_DEV(dev, &pdev->dev);
1837 
1838 	cp = netdev_priv(dev);
1839 	cp->pdev = pdev;
1840 	cp->dev = dev;
1841 	cp->msg_enable = (debug < 0 ? CP_DEF_MSG_ENABLE : debug);
1842 	spin_lock_init (&cp->lock);
1843 	cp->mii_if.dev = dev;
1844 	cp->mii_if.mdio_read = mdio_read;
1845 	cp->mii_if.mdio_write = mdio_write;
1846 	cp->mii_if.phy_id = CP_INTERNAL_PHY;
1847 	cp->mii_if.phy_id_mask = 0x1f;
1848 	cp->mii_if.reg_num_mask = 0x1f;
1849 	cp_set_rxbufsize(cp);
1850 
1851 	rc = pci_enable_device(pdev);
1852 	if (rc)
1853 		goto err_out_free;
1854 
1855 	rc = pci_set_mwi(pdev);
1856 	if (rc)
1857 		goto err_out_disable;
1858 
1859 	rc = pci_request_regions(pdev, DRV_NAME);
1860 	if (rc)
1861 		goto err_out_mwi;
1862 
1863 	pciaddr = pci_resource_start(pdev, 1);
1864 	if (!pciaddr) {
1865 		rc = -EIO;
1866 		dev_err(&pdev->dev, "no MMIO resource\n");
1867 		goto err_out_res;
1868 	}
1869 	if (pci_resource_len(pdev, 1) < CP_REGS_SIZE) {
1870 		rc = -EIO;
1871 		dev_err(&pdev->dev, "MMIO resource (%llx) too small\n",
1872 		       (unsigned long long)pci_resource_len(pdev, 1));
1873 		goto err_out_res;
1874 	}
1875 
1876 	/* Configure DMA attributes. */
1877 	if ((sizeof(dma_addr_t) > 4) &&
1878 	    !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) &&
1879 	    !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
1880 		pci_using_dac = 1;
1881 	} else {
1882 		pci_using_dac = 0;
1883 
1884 		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
1885 		if (rc) {
1886 			dev_err(&pdev->dev,
1887 				"No usable DMA configuration, aborting\n");
1888 			goto err_out_res;
1889 		}
1890 		rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
1891 		if (rc) {
1892 			dev_err(&pdev->dev,
1893 				"No usable consistent DMA configuration, aborting\n");
1894 			goto err_out_res;
1895 		}
1896 	}
1897 
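	/* Initial C+ Command register value: enable the C+ receiver and
	 * transmitter, Rx checksum offload and PCI multiple read/write,
	 * plus dual-address-cycle (64-bit) DMA when the 64-bit mask was
	 * accepted above.  The value is cached here and written to the
	 * chip when the hardware is initialised.
	 */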
1898 	cp->cpcmd = (pci_using_dac ? PCIDAC : 0) |
1899 		    PCIMulRW | RxChkSum | CpRxOn | CpTxOn;
1900 
1901 	dev->features |= NETIF_F_RXCSUM;
1902 	dev->hw_features |= NETIF_F_RXCSUM;
1903 
1904 	regs = ioremap(pciaddr, CP_REGS_SIZE);
1905 	if (!regs) {
1906 		rc = -EIO;
1907 		dev_err(&pdev->dev, "Cannot map PCI MMIO (%Lx@%Lx)\n",
1908 			(unsigned long long)pci_resource_len(pdev, 1),
1909 		       (unsigned long long)pciaddr);
1910 		goto err_out_res;
1911 	}
1912 	dev->base_addr = (unsigned long) regs;
1913 	cp->regs = regs;
1914 
1915 	cp_stop_hw(cp);
1916 
1917 	/* read MAC address from EEPROM */
1918 	addr_len = read_eeprom (regs, 0, 8) == 0x8129 ? 8 : 6;
1919 	for (i = 0; i < 3; i++)
1920 		((__le16 *) (dev->dev_addr))[i] =
1921 		    cpu_to_le16(read_eeprom (regs, i + 7, addr_len));
1922 	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
1923 
1924 	dev->netdev_ops = &cp_netdev_ops;
1925 	netif_napi_add(dev, &cp->napi, cp_rx_poll, 16);
1926 	dev->ethtool_ops = &cp_ethtool_ops;
1927 	dev->watchdog_timeo = TX_TIMEOUT;
1928 
1929 	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
1930 
1931 	if (pci_using_dac)
1932 		dev->features |= NETIF_F_HIGHDMA;
1933 
1934 	/* disabled by default until verified */
1935 	dev->hw_features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
1936 		NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
1937 	dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
1938 		NETIF_F_HIGHDMA;
1939 
1940 	dev->irq = pdev->irq;
1941 
1942 	rc = register_netdev(dev);
1943 	if (rc)
1944 		goto err_out_iomap;
1945 
1946 	netdev_info(dev, "RTL-8139C+ at 0x%lx, %pM, IRQ %d\n",
1947 		    dev->base_addr, dev->dev_addr, dev->irq);
1948 
1949 	pci_set_drvdata(pdev, dev);
1950 
1951 	/* enable busmastering and memory-write-invalidate */
1952 	pci_set_master(pdev);
1953 
1954 	if (cp->wol_enabled)
1955 		cp_set_d3_state (cp);
1956 
1957 	return 0;
1958 
1959 err_out_iomap:
1960 	iounmap(regs);
1961 err_out_res:
1962 	pci_release_regions(pdev);
1963 err_out_mwi:
1964 	pci_clear_mwi(pdev);
1965 err_out_disable:
1966 	pci_disable_device(pdev);
1967 err_out_free:
1968 	free_netdev(dev);
1969 	return rc;
1970 }
1971 
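/* Teardown mirrors probe in reverse; if Wake-on-LAN has left the chip
 * in a D3 state it is brought back to D0 before the MMIO mapping and
 * PCI resources are released.
 */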
1972 static void cp_remove_one (struct pci_dev *pdev)
1973 {
1974 	struct net_device *dev = pci_get_drvdata(pdev);
1975 	struct cp_private *cp = netdev_priv(dev);
1976 
1977 	unregister_netdev(dev);
1978 	iounmap(cp->regs);
1979 	if (cp->wol_enabled)
1980 		pci_set_power_state (pdev, PCI_D0);
1981 	pci_release_regions(pdev);
1982 	pci_clear_mwi(pdev);
1983 	pci_disable_device(pdev);
1984 	pci_set_drvdata(pdev, NULL);
1985 	free_netdev(dev);
1986 }
1987 
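/* Legacy PCI power-management hooks.  Suspend detaches the interface,
 * masks interrupts, stops the receiver and transmitter, saves PCI
 * config space and arms wake-up when Wake-on-LAN is enabled before
 * entering the requested sleep state; resume restores D0, then
 * re-initialises the rings and hardware and re-checks the link.
 */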
1988 #ifdef CONFIG_PM
1989 static int cp_suspend (struct pci_dev *pdev, pm_message_t state)
1990 {
1991 	struct net_device *dev = pci_get_drvdata(pdev);
1992 	struct cp_private *cp = netdev_priv(dev);
1993 	unsigned long flags;
1994 
1995 	if (!netif_running(dev))
1996 		return 0;
1997 
1998 	netif_device_detach (dev);
1999 	netif_stop_queue (dev);
2000 
2001 	spin_lock_irqsave (&cp->lock, flags);
2002 
2003 	/* Disable Rx and Tx */
2004 	cpw16 (IntrMask, 0);
2005 	cpw8  (Cmd, cpr8 (Cmd) & ~(RxOn | TxOn));
2006 
2007 	spin_unlock_irqrestore (&cp->lock, flags);
2008 
2009 	pci_save_state(pdev);
2010 	pci_enable_wake(pdev, pci_choose_state(pdev, state), cp->wol_enabled);
2011 	pci_set_power_state(pdev, pci_choose_state(pdev, state));
2012 
2013 	return 0;
2014 }
2015 
2016 static int cp_resume (struct pci_dev *pdev)
2017 {
2018 	struct net_device *dev = pci_get_drvdata (pdev);
2019 	struct cp_private *cp = netdev_priv(dev);
2020 	unsigned long flags;
2021 
2022 	if (!netif_running(dev))
2023 		return 0;
2024 
2025 	netif_device_attach (dev);
2026 
2027 	pci_set_power_state(pdev, PCI_D0);
2028 	pci_restore_state(pdev);
2029 	pci_enable_wake(pdev, PCI_D0, 0);
2030 
2031 	/* FIXME: bad things may happen if the Rx ring buffer is depleted */
2032 	cp_init_rings_index (cp);
2033 	cp_init_hw (cp);
2034 	netif_start_queue (dev);
2035 
2036 	spin_lock_irqsave (&cp->lock, flags);
2037 
2038 	mii_check_media(&cp->mii_if, netif_msg_link(cp), false);
2039 
2040 	spin_unlock_irqrestore (&cp->lock, flags);
2041 
2042 	return 0;
2043 }
2044 #endif /* CONFIG_PM */
2045 
2046 static struct pci_driver cp_driver = {
2047 	.name         = DRV_NAME,
2048 	.id_table     = cp_pci_tbl,
2049 	.probe        =	cp_init_one,
2050 	.remove       = cp_remove_one,
2051 #ifdef CONFIG_PM
2052 	.resume       = cp_resume,
2053 	.suspend      = cp_suspend,
2054 #endif
2055 };
2056 
2057 static int __init cp_init (void)
2058 {
2059 #ifdef MODULE
2060 	pr_info("%s", version);
2061 #endif
2062 	return pci_register_driver(&cp_driver);
2063 }
2064 
2065 static void __exit cp_exit (void)
2066 {
2067 	pci_unregister_driver (&cp_driver);
2068 }
2069 
2070 module_init(cp_init);
2071 module_exit(cp_exit);
2072