1 /*
2  * Agere Systems Inc.
3  * 10/100/1000 Base-T Ethernet Driver for the ET1310 and ET131x series MACs
4  *
5  * Copyright © 2005 Agere Systems Inc.
6  * All rights reserved.
7  *   http://www.agere.com
8  *
9  * Copyright (c) 2011 Mark Einon <mark.einon@gmail.com>
10  *
11  *------------------------------------------------------------------------------
12  *
13  * SOFTWARE LICENSE
14  *
15  * This software is provided subject to the following terms and conditions,
16  * which you should read carefully before using the software.  Using this
17  * software indicates your acceptance of these terms and conditions.  If you do
18  * not agree with these terms and conditions, do not use the software.
19  *
20  * Copyright © 2005 Agere Systems Inc.
21  * All rights reserved.
22  *
23  * Redistribution and use in source or binary forms, with or without
24  * modifications, are permitted provided that the following conditions are met:
25  *
26  * . Redistributions of source code must retain the above copyright notice, this
27  *    list of conditions and the following Disclaimer as comments in the code as
28  *    well as in the documentation and/or other materials provided with the
29  *    distribution.
30  *
31  * . Redistributions in binary form must reproduce the above copyright notice,
32  *    this list of conditions and the following Disclaimer in the documentation
33  *    and/or other materials provided with the distribution.
34  *
35  * . Neither the name of Agere Systems Inc. nor the names of the contributors
36  *    may be used to endorse or promote products derived from this software
37  *    without specific prior written permission.
38  *
39  * Disclaimer
40  *
41  * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
42  * INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
43  * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  ANY
44  * USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
45  * RISK. IN NO EVENT SHALL AGERE SYSTEMS INC. OR CONTRIBUTORS BE LIABLE FOR ANY
46  * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
47  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
48  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
49  * ON ANY THEORY OF LIABILITY, INCLUDING, BUT NOT LIMITED TO, CONTRACT, STRICT
50  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
51  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
52  * DAMAGE.
53  *
54  */
55 
56 #include <linux/pci.h>
57 #include <linux/init.h>
58 #include <linux/module.h>
59 #include <linux/types.h>
60 #include <linux/kernel.h>
61 
62 #include <linux/sched.h>
63 #include <linux/ptrace.h>
64 #include <linux/slab.h>
65 #include <linux/ctype.h>
66 #include <linux/string.h>
67 #include <linux/timer.h>
68 #include <linux/interrupt.h>
69 #include <linux/in.h>
70 #include <linux/delay.h>
71 #include <linux/bitops.h>
72 #include <linux/io.h>
73 #include <asm/system.h>
74 
75 #include <linux/netdevice.h>
76 #include <linux/etherdevice.h>
77 #include <linux/skbuff.h>
78 #include <linux/if_arp.h>
79 #include <linux/ioport.h>
80 #include <linux/crc32.h>
81 #include <linux/random.h>
82 #include <linux/phy.h>
83 
84 #include "et131x.h"
85 
86 MODULE_AUTHOR("Victor Soriano <vjsoriano@agere.com>");
87 MODULE_AUTHOR("Mark Einon <mark.einon@gmail.com>");
88 MODULE_LICENSE("Dual BSD/GPL");
89 MODULE_DESCRIPTION("10/100/1000 Base-T Ethernet Driver "
90 		   "for the ET1310 by Agere Systems");
91 
92 /* EEPROM defines */
93 #define MAX_NUM_REGISTER_POLLS          1000
94 #define MAX_NUM_WRITE_RETRIES           2
95 
96 /* MAC defines */
97 #define COUNTER_WRAP_16_BIT 0x10000
98 #define COUNTER_WRAP_12_BIT 0x1000
99 
100 /* PCI defines */
101 #define INTERNAL_MEM_SIZE       0x400	/* 1024 of internal memory */
102 #define INTERNAL_MEM_RX_OFFSET  0x1FF	/* 50%   Tx, 50%   Rx */
103 
104 /* ISR defines */
105 /*
106  * For interrupts, normal running is:
107  *       rxdma_xfr_done, phy_interrupt, mac_stat_interrupt,
108  *       watchdog_interrupt & txdma_xfer_done
109  *
110  * In both cases, when flow control is enabled for either Tx or bi-direction,
111  * we additionally enable rx_fbr0_low and rx_fbr1_low, so we know when the
112  * buffer rings are running low.
113  */
114 #define INT_MASK_DISABLE            0xffffffff
115 
116 /* NOTE: Masking out MAC_STAT Interrupt for now...
117  * #define INT_MASK_ENABLE             0xfff6bf17
118  * #define INT_MASK_ENABLE_NO_FLOW     0xfff6bfd7
119  */
120 #define INT_MASK_ENABLE             0xfffebf17
121 #define INT_MASK_ENABLE_NO_FLOW     0xfffebfd7
122 
123 /* General defines */
124 /* Packet and header sizes */
125 #define NIC_MIN_PACKET_SIZE	60
126 
127 /* Multicast list size */
128 #define NIC_MAX_MCAST_LIST	128
129 
130 /* Supported Filters */
131 #define ET131X_PACKET_TYPE_DIRECTED		0x0001
132 #define ET131X_PACKET_TYPE_MULTICAST		0x0002
133 #define ET131X_PACKET_TYPE_BROADCAST		0x0004
134 #define ET131X_PACKET_TYPE_PROMISCUOUS		0x0008
135 #define ET131X_PACKET_TYPE_ALL_MULTICAST	0x0010
136 
137 /* Tx Timeout */
138 #define ET131X_TX_TIMEOUT	(1 * HZ)
139 #define NIC_SEND_HANG_THRESHOLD	0
140 
141 /* MP_TCB flags */
142 #define fMP_DEST_MULTI			0x00000001
143 #define fMP_DEST_BROAD			0x00000002
144 
145 /* MP_ADAPTER flags */
146 #define fMP_ADAPTER_RECV_LOOKASIDE	0x00000004
147 #define fMP_ADAPTER_INTERRUPT_IN_USE	0x00000008
148 
149 /* MP_SHARED flags */
150 #define fMP_ADAPTER_LOWER_POWER		0x00200000
151 
152 #define fMP_ADAPTER_NON_RECOVER_ERROR	0x00800000
153 #define fMP_ADAPTER_HARDWARE_ERROR	0x04000000
154 
155 #define fMP_ADAPTER_FAIL_SEND_MASK	0x3ff00000
156 
157 /* Some offsets in PCI config space that are actually used. */
158 #define ET1310_PCI_MAC_ADDRESS		0xA4
159 #define ET1310_PCI_EEPROM_STATUS	0xB2
160 #define ET1310_PCI_ACK_NACK		0xC0
161 #define ET1310_PCI_REPLAY		0xC2
162 #define ET1310_PCI_L0L1LATENCY		0xCF
163 
164 /* PCI Product IDs */
165 #define ET131X_PCI_DEVICE_ID_GIG	0xED00	/* ET1310 1000 Base-T 8 */
166 #define ET131X_PCI_DEVICE_ID_FAST	0xED01	/* ET1310 100  Base-T */
167 
168 /* Define order of magnitude converter */
169 #define NANO_IN_A_MICRO	1000
170 
171 #define PARM_RX_NUM_BUFS_DEF    4
172 #define PARM_RX_TIME_INT_DEF    10
173 #define PARM_RX_MEM_END_DEF     0x2bc
174 #define PARM_TX_TIME_INT_DEF    40
175 #define PARM_TX_NUM_BUFS_DEF    4
176 #define PARM_DMA_CACHE_DEF      0
177 
178 /* RX defines */
179 #define USE_FBR0 1
180 #define FBR_CHUNKS 32
181 #define MAX_DESC_PER_RING_RX         1024
182 
183 /* number of RFDs - default and min */
184 #ifdef USE_FBR0
185 #define RFD_LOW_WATER_MARK	40
186 #define NIC_DEFAULT_NUM_RFD	1024
187 #define NUM_FBRS		2
188 #else
189 #define RFD_LOW_WATER_MARK	20
190 #define NIC_DEFAULT_NUM_RFD	256
191 #define NUM_FBRS		1
192 #endif
193 
194 #define NIC_MIN_NUM_RFD		64
195 #define NUM_PACKETS_HANDLED	256
196 
197 #define ALCATEL_MULTICAST_PKT	0x01000000
198 #define ALCATEL_BROADCAST_PKT	0x02000000
199 
200 /* typedefs for Free Buffer Descriptors */
201 struct fbr_desc {
202 	u32 addr_lo;
203 	u32 addr_hi;
204 	u32 word2;		/* Bits 10-31 reserved, 0-9 descriptor */
205 };
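/* For illustration only (not part of the driver): addr_lo/addr_hi hold the
 * low and high 32 bits of the buffer's DMA address, e.g. (assuming a
 * dma_addr_t 'pa'):
 *
 *	desc->addr_lo = lower_32_bits(pa);
 *	desc->addr_hi = upper_32_bits(pa);
 */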
206 
207 /* Packet Status Ring Descriptors
208  *
209  * Word 0:
210  *
211  * top 16 bits are from the Alcatel Status Word as enumerated in
212  * PE-MCXMAC Data Sheet IPD DS54 0210-1 (also IPD-DS80 0205-2)
213  *
214  * 0: hp			hash pass
215  * 1: ipa			IP checksum assist
216  * 2: ipp			IP checksum pass
217  * 3: tcpa			TCP checksum assist
218  * 4: tcpp			TCP checksum pass
219  * 5: wol			WOL Event
220  * 6: rxmac_error		RXMAC Error Indicator
221  * 7: drop			Drop packet
222  * 8: ft			Frame Truncated
223  * 9: jp			Jumbo Packet
224  * 10: vp			VLAN Packet
225  * 11-15: unused
226  * 16: asw_prev_pkt_dropped	e.g. IFG too small on previous
227  * 17: asw_RX_DV_event		short receive event detected
228  * 18: asw_false_carrier_event	bad carrier since last good packet
229  * 19: asw_code_err		one or more nibbles signalled as errors
230  * 20: asw_CRC_err		CRC error
231  * 21: asw_len_chk_err		frame length field incorrect
232  * 22: asw_too_long		frame length > 1518 bytes
233  * 23: asw_OK			valid CRC + no code error
234  * 24: asw_multicast		has a multicast address
235  * 25: asw_broadcast		has a broadcast address
236  * 26: asw_dribble_nibble	spurious bits after EOP
237  * 27: asw_control_frame	is a control frame
238  * 28: asw_pause_frame		is a pause frame
239  * 29: asw_unsupported_op	unsupported OP code
240  * 30: asw_VLAN_tag		VLAN tag detected
241  * 31: asw_long_evt		Rx long event
242  *
243  * Word 1:
244  * 0-15: length			length in bytes
245  * 16-25: bi			Buffer Index
246  * 26-27: ri			Ring Index
247  * 28-31: reserved
248  */
249 
250 struct pkt_stat_desc {
251 	u32 word0;
252 	u32 word1;
253 };
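/* For illustration only -- not used by this driver.  A sketch of how the
 * word1 fields documented above could be unpacked; the helper names and
 * masks below are assumptions derived from that bit-layout description.
 */
static inline u32 psr_desc_len(const struct pkt_stat_desc *desc)
{
	return desc->word1 & 0xFFFF;		/* bits 0-15: length in bytes */
}

static inline u32 psr_desc_buffer_index(const struct pkt_stat_desc *desc)
{
	return (desc->word1 >> 16) & 0x3FF;	/* bits 16-25: buffer index */
}

static inline u32 psr_desc_ring_index(const struct pkt_stat_desc *desc)
{
	return (desc->word1 >> 26) & 0x3;	/* bits 26-27: ring index */
}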
254 
255 /* Typedefs for the RX DMA status word */
256 
257 /*
258  * rx status word 0 holds part of the status bits of the Rx DMA engine
259  * that get copied out to memory by the ET-1310.  Word 0 is a 32 bit word
260  * which contains the Free Buffer ring 0 and 1 available offset.
261  *
262  * bit 0-9 FBR1 offset
263  * bit 10 Wrap flag for FBR1
264  * bit 16-25 FBR0 offset
265  * bit 26 Wrap flag for FBR0
266  */
267 
268 /*
269  * RXSTAT_WORD1_t structure holds part of the status bits of the Rx DMA engine
270  * that get copied out to memory by the ET-1310.  Word 3 is a 32 bit word
271  * which contains the Packet Status Ring available offset.
272  *
273  * bit 0-15 reserved
274  * bit 16-27 PSRoffset
275  * bit 28 PSRwrap
276  * bit 29-31 unused
277  */
278 
279 /*
280  * struct rx_status_block is a structure representing the status of the Rx
281  * DMA engine; it sits in free memory, and is pointed to by 0x101c / 0x1020
282  */
283 struct rx_status_block {
284 	u32 word0;
285 	u32 word1;
286 };
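/* For illustration only -- not used by this driver.  A sketch of how the
 * status words described above could be decoded; the helper names and
 * masks are assumptions based on those bit-layout comments.
 */
static inline u32 rx_status_fbr1_offset(const struct rx_status_block *stat)
{
	return stat->word0 & 0x3FF;		/* word0 bits 0-9: FBR1 offset */
}

static inline u32 rx_status_fbr0_offset(const struct rx_status_block *stat)
{
	return (stat->word0 >> 16) & 0x3FF;	/* word0 bits 16-25: FBR0 offset */
}

static inline u32 rx_status_psr_offset(const struct rx_status_block *stat)
{
	return (stat->word1 >> 16) & 0xFFF;	/* word1 bits 16-27: PSR offset */
}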
287 
288 /*
289  * Structure for look-up table holding free buffer ring pointers, addresses
290  * and state.
291  */
292 struct fbr_lookup {
293 	void		*virt[MAX_DESC_PER_RING_RX];
294 	void		*buffer1[MAX_DESC_PER_RING_RX];
295 	void		*buffer2[MAX_DESC_PER_RING_RX];
296 	u32		 bus_high[MAX_DESC_PER_RING_RX];
297 	u32		 bus_low[MAX_DESC_PER_RING_RX];
298 	void		*ring_virtaddr;
299 	dma_addr_t	 ring_physaddr;
300 	void		*mem_virtaddrs[MAX_DESC_PER_RING_RX / FBR_CHUNKS];
301 	dma_addr_t	 mem_physaddrs[MAX_DESC_PER_RING_RX / FBR_CHUNKS];
302 	u64		 real_physaddr;
303 	u64		 offset;
304 	u32		 local_full;
305 	u32		 num_entries;
306 	u32		 buffsize;
307 };
308 
309 /*
310  * struct rx_ring is the structure representing the adapter's local
311  * reference(s) to the rings
312  *
313  ******************************************************************************
314  * IMPORTANT NOTE :- fbr_lookup *fbr[NUM_FBRS] uses index 0 to refer to FBR1
315  *			and index 1 to refer to FBR0
316  ******************************************************************************
317  */
318 struct rx_ring {
319 	struct fbr_lookup *fbr[NUM_FBRS];
320 	void *ps_ring_virtaddr;
321 	dma_addr_t ps_ring_physaddr;
322 	u32 local_psr_full;
323 	u32 psr_num_entries;
324 
325 	struct rx_status_block *rx_status_block;
326 	dma_addr_t rx_status_bus;
327 
328 	/* RECV */
329 	struct list_head recv_list;
330 	u32 num_ready_recv;
331 
332 	u32 num_rfd;
333 
334 	bool unfinished_receives;
335 
336 	/* lookaside lists */
337 	struct kmem_cache *recv_lookaside;
338 };
339 
340 /* TX defines */
341 /*
342  * word 2 of the control bits in the Tx Descriptor ring for the ET-1310
343  *
344  * 0-15: length of packet
345  * 16-27: VLAN tag
346  * 28: VLAN CFI
347  * 29-31: VLAN priority
348  *
349  * word 3 of the control bits in the Tx Descriptor ring for the ET-1310
350  *
351  * 0: last packet in the sequence
352  * 1: first packet in the sequence
353  * 2: interrupt the processor when this pkt sent
354  * 3: Control word - no packet data
355  * 4: Issue half-duplex backpressure : XON/XOFF
356  * 5: send pause frame
357  * 6: Tx frame has error
358  * 7: append CRC
359  * 8: MAC override
360  * 9: pad packet
361  * 10: Packet is a Huge packet
362  * 11: append VLAN tag
363  * 12: IP checksum assist
364  * 13: TCP checksum assist
365  * 14: UDP checksum assist
366  */
367 
368 /* struct tx_desc represents each descriptor on the ring */
369 struct tx_desc {
370 	u32 addr_hi;
371 	u32 addr_lo;
372 	u32 len_vlan;	/* control words how to xmit the */
373 	u32 flags;	/* data (detailed above) */
374 };
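/* For illustration only -- not used by this driver.  A sketch of how the
 * len_vlan and flags words might be filled for a simple, single-fragment,
 * untagged frame, following the word 2 / word 3 layouts above.  The bit
 * values are assumptions taken from those comments, not from the hardware
 * manual.
 */
static inline void tx_desc_fill_sketch(struct tx_desc *desc, u32 len)
{
	desc->len_vlan = len & 0xFFFF;	/* bits 0-15: length of packet */
	desc->flags = 0x7;		/* last + first in sequence, IRQ when sent */
}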
375 
376 /*
377  * The status of the Tx DMA engine; it sits in free memory, and is pointed to
378  * by 0x101c / 0x1020. This is a DMA10 type
379  */
380 
381 /* TCB (Transmit Control Block: Host Side) */
382 struct tcb {
383 	struct tcb *next;	/* Next entry in ring */
384 	u32 flags;		/* Our flags for the packet */
385 	u32 count;		/* Used to spot stuck/lost packets */
386 	u32 stale;		/* Used to spot stuck/lost packets */
387 	struct sk_buff *skb;	/* Network skb we are tied to */
388 	u32 index;		/* Ring indexes */
389 	u32 index_start;
390 };
391 
392 /* Structure representing our local reference(s) to the ring */
393 struct tx_ring {
394 	/* TCB (Transmit Control Block) memory and lists */
395 	struct tcb *tcb_ring;
396 
397 	/* List of TCBs that are ready to be used */
398 	struct tcb *tcb_qhead;
399 	struct tcb *tcb_qtail;
400 
401 	/* list of TCBs that are currently being sent.  NOTE that access to all
402 	 * three of these (including used) is controlled via the
403 	 * TCBSendQLock.  This lock should be secured prior to incrementing /
404 	 * decrementing used, or any queue manipulation on send_head /
405 	 * tail
406 	 */
407 	struct tcb *send_head;
408 	struct tcb *send_tail;
409 	int used;
410 
411 	/* The actual descriptor ring */
412 	struct tx_desc *tx_desc_ring;
413 	dma_addr_t tx_desc_ring_pa;
414 
415 	/* send_idx indicates where we last wrote to in the descriptor ring. */
416 	u32 send_idx;
417 
418 	/* The location of the write-back status block */
419 	u32 *tx_status;
420 	dma_addr_t tx_status_pa;
421 
422 	/* Packets since the last IRQ: used for interrupt coalescing */
423 	int since_irq;
424 };
425 
426 /*
427  * Do not change these values: if changed, then change also in respective
428  * TXdma and Rxdma engines
429  */
430 #define NUM_DESC_PER_RING_TX         512    /* TX Do not change these values */
431 #define NUM_TCB                      64
432 
433 /*
434  * These values are all superseded by registry entries to facilitate tuning.
435  * Once the desired performance has been achieved, the optimal registry values
436  * should be re-populated to these #defines:
437  */
438 #define TX_ERROR_PERIOD             1000
439 
440 #define LO_MARK_PERCENT_FOR_PSR     15
441 #define LO_MARK_PERCENT_FOR_RX      15
442 
443 /* RFD (Receive Frame Descriptor) */
444 struct rfd {
445 	struct list_head list_node;
446 	struct sk_buff *skb;
447 	u32 len;	/* total size of receive frame */
448 	u16 bufferindex;
449 	u8 ringindex;
450 };
451 
452 /* Flow Control */
453 #define FLOW_BOTH	0
454 #define FLOW_TXONLY	1
455 #define FLOW_RXONLY	2
456 #define FLOW_NONE	3
457 
458 /* Struct to define some device statistics */
459 struct ce_stats {
460 	/* MIB II variables
461 	 *
462 	 * NOTE: atomic_t types are only guaranteed to store 24-bits; if we
463 	 * MUST have 32, then we'll need another way to perform atomic
464 	 * operations
465 	 */
466 	u32		unicast_pkts_rcvd;
467 	atomic_t	unicast_pkts_xmtd;
468 	u32		multicast_pkts_rcvd;
469 	atomic_t	multicast_pkts_xmtd;
470 	u32		broadcast_pkts_rcvd;
471 	atomic_t	broadcast_pkts_xmtd;
472 	u32		rcvd_pkts_dropped;
473 
474 	/* Tx Statistics. */
475 	u32		tx_underflows;
476 
477 	u32		tx_collisions;
478 	u32		tx_excessive_collisions;
479 	u32		tx_first_collisions;
480 	u32		tx_late_collisions;
481 	u32		tx_max_pkt_errs;
482 	u32		tx_deferred;
483 
484 	/* Rx Statistics. */
485 	u32		rx_overflows;
486 
487 	u32		rx_length_errs;
488 	u32		rx_align_errs;
489 	u32		rx_crc_errs;
490 	u32		rx_code_violations;
491 	u32		rx_other_errs;
492 
493 	u32		synchronous_iterations;
494 	u32		interrupt_status;
495 };
496 
497 /* The private adapter structure */
498 struct et131x_adapter {
499 	struct net_device *netdev;
500 	struct pci_dev *pdev;
501 	struct mii_bus *mii_bus;
502 	struct phy_device *phydev;
503 	struct work_struct task;
504 
505 	/* Flags that indicate current state of the adapter */
506 	u32 flags;
507 
508 	/* local link state, to determine if a state change has occurred */
509 	int link;
510 
511 	/* Configuration  */
512 	u8 rom_addr[ETH_ALEN];
513 	u8 addr[ETH_ALEN];
514 	bool has_eeprom;
515 	u8 eeprom_data[2];
516 
517 	/* Spinlocks */
518 	spinlock_t lock;
519 
520 	spinlock_t tcb_send_qlock;
521 	spinlock_t tcb_ready_qlock;
522 	spinlock_t send_hw_lock;
523 
524 	spinlock_t rcv_lock;
525 	spinlock_t rcv_pend_lock;
526 	spinlock_t fbr_lock;
527 
528 	spinlock_t phy_lock;
529 
530 	/* Packet Filter and look ahead size */
531 	u32 packet_filter;
532 
533 	/* multicast list */
534 	u32 multicast_addr_count;
535 	u8 multicast_list[NIC_MAX_MCAST_LIST][ETH_ALEN];
536 
537 	/* Pointer to the device's PCI register space */
538 	struct address_map __iomem *regs;
539 
540 	/* Registry parameters */
541 	u8 wanted_flow;		/* Flow we want for 802.3x flow control */
542 	u32 registry_jumbo_packet;	/* Max supported ethernet packet size */
543 
544 	/* Derived from the registry: */
545 	u8 flowcontrol;		/* flow control validated by the far-end */
546 
547 	/* Minimize init-time */
548 	struct timer_list error_timer;
549 
550 	/* variable used to put the phy into coma mode when booting up with no
551 	 * cable plugged in after 5 seconds
552 	 */
553 	u8 boot_coma;
554 
555 	/* Next two used to save power information at power down. This
556 	 * information will be used during power up to set up parts of Power
557 	 * Management in JAGCore
558 	 */
559 	u16 pdown_speed;
560 	u8 pdown_duplex;
561 
562 	/* Tx Memory Variables */
563 	struct tx_ring tx_ring;
564 
565 	/* Rx Memory Variables */
566 	struct rx_ring rx_ring;
567 
568 	/* Stats */
569 	struct ce_stats stats;
570 
571 	struct net_device_stats net_stats;
572 };
573 
574 static int eeprom_wait_ready(struct pci_dev *pdev, u32 *status)
575 {
576 	u32 reg;
577 	int i;
578 
579 	/*
580 	 * 1. Check LBCIF Status Register for bits 6 & 3:2 all equal to 0 and
581 	 *    bits 7,1:0 both equal to 1, at least once after reset.
582 	 *    Subsequent operations need only to check that bits 1:0 are equal
583 	 *    to 1 prior to starting a single byte read/write
584 	 */
585 
586 	for (i = 0; i < MAX_NUM_REGISTER_POLLS; i++) {
587 		/* Read registers grouped in DWORD1 */
588 		if (pci_read_config_dword(pdev, LBCIF_DWORD1_GROUP, &reg))
589 			return -EIO;
590 
591 		/* I2C idle and Phy Queue Avail both true */
592 		if ((reg & 0x3000) == 0x3000) {
593 			if (status)
594 				*status = reg;
595 			return reg & 0xFF;
596 		}
597 	}
598 	return -ETIMEDOUT;
599 }
600 
601 
602 /**
603  * eeprom_write - Write a byte to the ET1310's EEPROM
604  * @adapter: pointer to our private adapter structure
605  * @addr: the address to write
606  * @data: the value to write
607  *
608  * Returns 0 on success, otherwise an error code.
609  */
610 static int eeprom_write(struct et131x_adapter *adapter, u32 addr, u8 data)
611 {
612 	struct pci_dev *pdev = adapter->pdev;
613 	int index = 0;
614 	int retries;
615 	int err = 0;
616 	int i2c_wack = 0;
617 	int writeok = 0;
618 	u32 status;
619 	u32 val = 0;
620 
621 	/*
622 	 * For an EEPROM, an I2C single byte write is defined as a START
623 	 * condition followed by the device address, EEPROM address, one byte
624 	 * of data and a STOP condition.  The STOP condition will trigger the
625 	 * EEPROM's internally timed write cycle to the nonvolatile memory.
626 	 * All inputs are disabled during this write cycle and the EEPROM will
627 	 * not respond to any access until the internal write is complete.
628 	 */
629 
630 	err = eeprom_wait_ready(pdev, NULL);
631 	if (err)
632 		return err;
633 
634 	 /*
635 	 * 2. Write to the LBCIF Control Register:  bit 7=1, bit 6=1, bit 3=0,
636 	 *    and bits 1:0 both =0.  Bit 5 should be set according to the
637 	 *    type of EEPROM being accessed (1=two byte addressing, 0=one
638 	 *    byte addressing).
639 	 */
640 	if (pci_write_config_byte(pdev, LBCIF_CONTROL_REGISTER,
641 			LBCIF_CONTROL_LBCIF_ENABLE | LBCIF_CONTROL_I2C_WRITE))
642 		return -EIO;
643 
644 	i2c_wack = 1;
645 
646 	/* Prepare EEPROM address for Step 3 */
647 
648 	for (retries = 0; retries < MAX_NUM_WRITE_RETRIES; retries++) {
649 		/* Write the address to the LBCIF Address Register */
650 		if (pci_write_config_dword(pdev, LBCIF_ADDRESS_REGISTER, addr))
651 			break;
652 		/*
653 		 * Write the data to the LBCIF Data Register (the I2C write
654 		 * will begin).
655 		 */
656 		if (pci_write_config_byte(pdev, LBCIF_DATA_REGISTER, data))
657 			break;
658 		/*
659 		 * Monitor bit 1:0 of the LBCIF Status Register.  When bits
660 		 * 1:0 are both equal to 1, the I2C write has completed and the
661 		 * internal write cycle of the EEPROM is about to start.
662 		 * (bits 1:0 = 01 is a legal state while waiting from both
663 		 * equal to 1, but bits 1:0 = 10 is invalid and implies that
664 		 * something is broken).
665 		 */
666 		err = eeprom_wait_ready(pdev, &status);
667 		if (err < 0)
668 			return 0;
669 
670 		/*
671 		 * Check bit 3 of the LBCIF Status Register.  If equal to 1,
672 		 * an error has occurred.  Don't break here if we are revision
673 		 * 1; this is so we do a blind write to work around a load bug.
674 		 */
675 		if ((status & LBCIF_STATUS_GENERAL_ERROR)
676 			&& adapter->pdev->revision == 0)
677 			break;
678 
679 		/*
680 		 * Check bit 2 of the LBCIF Status Register.  If equal to 1 an
681 		 * ACK error has occurred on the address phase of the write.
682 		 * This could be due to an actual hardware failure or the
683 		 * EEPROM may still be in its internal write cycle from a
684 		 * previous write. This write operation was ignored and must be
685 		 * repeated later.
686 		 */
687 		if (status & LBCIF_STATUS_ACK_ERROR) {
688 			/*
689 			 * This could be due to an actual hardware failure
690 			 * or the EEPROM may still be in its internal write
691 			 * cycle from a previous write. This write operation
692 			 * was ignored and must be repeated later.
693 			 */
694 			udelay(10);
695 			continue;
696 		}
697 
698 		writeok = 1;
699 		break;
700 	}
701 
702 	/*
703 	 * Set bit 6 of the LBCIF Control Register = 0.
704 	 */
705 	udelay(10);
706 
707 	while (i2c_wack) {
708 		if (pci_write_config_byte(pdev, LBCIF_CONTROL_REGISTER,
709 			LBCIF_CONTROL_LBCIF_ENABLE))
710 			writeok = 0;
711 
712 		/* Do read until internal ACK_ERROR goes away meaning write
713 		 * completed
714 		 */
715 		do {
716 			pci_write_config_dword(pdev,
717 					       LBCIF_ADDRESS_REGISTER,
718 					       addr);
719 			do {
720 				pci_read_config_dword(pdev,
721 					LBCIF_DATA_REGISTER, &val);
722 			} while ((val & 0x00010000) == 0);
723 		} while (val & 0x00040000);
724 
725 		if ((val & 0xFF00) != 0xC000 || index == 10000)
726 			break;
727 		index++;
728 	}
729 	return writeok ? 0 : -EIO;
730 }
731 
732 /**
733  * eeprom_read - Read a byte from the ET1310's EEPROM
734  * @adapter: pointer to our private adapter structure
735  * @addr: the address from which to read
736  * @pdata: a pointer to a byte in which to store the value of the read
739  *
740  * Returns 0 on success, otherwise an error code.
741  */
742 static int eeprom_read(struct et131x_adapter *adapter, u32 addr, u8 *pdata)
743 {
744 	struct pci_dev *pdev = adapter->pdev;
745 	int err;
746 	u32 status;
747 
748 	/*
749 	 * A single byte read is similar to the single byte write, with the
750 	 * exception of the data flow:
751 	 */
752 
753 	err = eeprom_wait_ready(pdev, NULL);
754 	if (err)
755 		return err;
756 	/*
757 	 * Write to the LBCIF Control Register:  bit 7=1, bit 6=0, bit 3=0,
758 	 * and bits 1:0 both =0.  Bit 5 should be set according to the type
759 	 * of EEPROM being accessed (1=two byte addressing, 0=one byte
760 	 * addressing).
761 	 */
762 	if (pci_write_config_byte(pdev, LBCIF_CONTROL_REGISTER,
763 				  LBCIF_CONTROL_LBCIF_ENABLE))
764 		return -EIO;
765 	/*
766 	 * Write the address to the LBCIF Address Register (I2C read will
767 	 * begin).
768 	 */
769 	if (pci_write_config_dword(pdev, LBCIF_ADDRESS_REGISTER, addr))
770 		return -EIO;
771 	/*
772 	 * Monitor bit 0 of the LBCIF Status Register.  When = 1, I2C read
773 	 * is complete. (if bit 1 =1 and bit 0 stays = 0, a hardware failure
774 	 * has occurred).
775 	 */
776 	err = eeprom_wait_ready(pdev, &status);
777 	if (err < 0)
778 		return err;
779 	/*
780 	 * Regardless of error status, read data byte from LBCIF Data
781 	 * Register.
782 	 */
783 	*pdata = err;
784 	/*
785 	 * Check bit 2 of the LBCIF Status Register.  If = 1,
786 	 * then an error has occurred.
787 	 */
788 	return (status & LBCIF_STATUS_ACK_ERROR) ? -EIO : 0;
789 }
790 
791 static int et131x_init_eeprom(struct et131x_adapter *adapter)
792 {
793 	struct pci_dev *pdev = adapter->pdev;
794 	u8 eestatus;
795 
796 	/* We first need to check the EEPROM Status code located at offset
797 	 * 0xB2 of config space
798 	 */
799 	pci_read_config_byte(pdev, ET1310_PCI_EEPROM_STATUS,
800 				      &eestatus);
801 
802 	/* THIS IS A WORKAROUND:
803 	 * I need to call this function twice to get my card in a
804 	 * LG M1 Express Dual running. I also tried an msleep before this
805 	 * function, because I thought there could be some timing conditions,
806 	 * but it didn't work. Calling the whole function twice also works.
807 	 */
808 	if (pci_read_config_byte(pdev, ET1310_PCI_EEPROM_STATUS, &eestatus)) {
809 		dev_err(&pdev->dev,
810 		       "Could not read PCI config space for EEPROM Status\n");
811 		return -EIO;
812 	}
813 
814 	/* Determine if the error(s) we care about are present. If they are
815 	 * present we need to fail.
816 	 */
817 	if (eestatus & 0x4C) {
818 		int write_failed = 0;
819 		if (pdev->revision == 0x01) {
820 			int	i;
821 			static const u8 eedata[4] = { 0xFE, 0x13, 0x10, 0xFF };
822 
823 			/* Re-write the first 4 bytes if we have an eeprom
824 			 * present and the revision id is 1, this fixes the
825 			 * corruption seen with 1310 B Silicon
826 			 */
827 			for (i = 0; i < 3; i++)
828 				if (eeprom_write(adapter, i, eedata[i]) < 0)
829 					write_failed = 1;
830 		}
831 		if (pdev->revision  != 0x01 || write_failed) {
832 			dev_err(&pdev->dev,
833 			    "Fatal EEPROM Status Error - 0x%04x\n", eestatus);
834 
835 			/* This error could mean that there was an error
836 			 * reading the eeprom or that the eeprom doesn't exist.
837 			 * We will treat each case the same and not try to
838 			 * gather additional information that normally would
839 			 * come from the eeprom, like MAC Address
840 			 */
841 			adapter->has_eeprom = 0;
842 			return -EIO;
843 		}
844 	}
845 	adapter->has_eeprom = 1;
846 
847 	/* Read the EEPROM for information regarding LED behavior. Refer to
848 	 * ET1310_phy.c, et131x_xcvr_init(), for its use.
849 	 */
850 	eeprom_read(adapter, 0x70, &adapter->eeprom_data[0]);
851 	eeprom_read(adapter, 0x71, &adapter->eeprom_data[1]);
852 
853 	if (adapter->eeprom_data[0] != 0xcd)
854 		/* Disable all optional features */
855 		adapter->eeprom_data[1] = 0x00;
856 
857 	return 0;
858 }
859 
860 /**
861  * et131x_rx_dma_enable - re-start of Rx_DMA on the ET1310.
862  * @adapter: pointer to our adapter structure
863  */
864 static void et131x_rx_dma_enable(struct et131x_adapter *adapter)
865 {
866 	/* Setup the receive dma configuration register for normal operation */
867 	u32 csr =  0x2000;	/* FBR1 enable */
868 
869 	if (adapter->rx_ring.fbr[0]->buffsize == 4096)
870 		csr |= 0x0800;
871 	else if (adapter->rx_ring.fbr[0]->buffsize == 8192)
872 		csr |= 0x1000;
873 	else if (adapter->rx_ring.fbr[0]->buffsize == 16384)
874 		csr |= 0x1800;
875 #ifdef USE_FBR0
876 	csr |= 0x0400;		/* FBR0 enable */
877 	if (adapter->rx_ring.fbr[1]->buffsize == 256)
878 		csr |= 0x0100;
879 	else if (adapter->rx_ring.fbr[1]->buffsize == 512)
880 		csr |= 0x0200;
881 	else if (adapter->rx_ring.fbr[1]->buffsize == 1024)
882 		csr |= 0x0300;
883 #endif
884 	writel(csr, &adapter->regs->rxdma.csr);
885 
886 	csr = readl(&adapter->regs->rxdma.csr);
887 	if ((csr & 0x00020000) != 0) {
888 		udelay(5);
889 		csr = readl(&adapter->regs->rxdma.csr);
890 		if ((csr & 0x00020000) != 0) {
891 			dev_err(&adapter->pdev->dev,
892 			    "RX Dma failed to exit halt state.  CSR 0x%08x\n",
893 				csr);
894 		}
895 	}
896 }
897 
898 /**
899  * et131x_rx_dma_disable - Stop of Rx_DMA on the ET1310
900  * @adapter: pointer to our adapter structure
901  */
902 static void et131x_rx_dma_disable(struct et131x_adapter *adapter)
903 {
904 	u32 csr;
905 	/* Setup the receive dma configuration register */
906 	writel(0x00002001, &adapter->regs->rxdma.csr);
907 	csr = readl(&adapter->regs->rxdma.csr);
908 	if ((csr & 0x00020000) == 0) {	/* Check halt status (bit 17) */
909 		udelay(5);
910 		csr = readl(&adapter->regs->rxdma.csr);
911 		if ((csr & 0x00020000) == 0)
912 			dev_err(&adapter->pdev->dev,
913 			"RX Dma failed to enter halt state. CSR 0x%08x\n",
914 				csr);
915 	}
916 }
917 
918 /**
919  * et131x_tx_dma_enable - re-start of Tx_DMA on the ET1310.
920  * @adapter: pointer to our adapter structure
921  *
922  * Mainly used after a return to the D0 (full-power) state from a lower state.
923  */
924 static void et131x_tx_dma_enable(struct et131x_adapter *adapter)
925 {
926 	/* Setup the transmit dma configuration register for normal
927 	 * operation
928 	 */
929 	writel(ET_TXDMA_SNGL_EPKT|(PARM_DMA_CACHE_DEF << ET_TXDMA_CACHE_SHIFT),
930 					&adapter->regs->txdma.csr);
931 }
932 
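/*
 * add_10bit/add_12bit advance a DMA ring index that carries a wrap flag in
 * a bit above the index field: the sum is masked back down to the 10- or
 * 12-bit index (INDEX10/INDEX12 from et131x.h) and the caller's existing
 * wrap bit (ET_DMA10_WRAP/ET_DMA12_WRAP) is OR'd back in unchanged.
 */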
933 static inline void add_10bit(u32 *v, int n)
934 {
935 	*v = INDEX10(*v + n) | (*v & ET_DMA10_WRAP);
936 }
937 
938 static inline void add_12bit(u32 *v, int n)
939 {
940 	*v = INDEX12(*v + n) | (*v & ET_DMA12_WRAP);
941 }
942 
943 /**
944  * et1310_config_mac_regs1 - Initialize the first part of MAC regs
945  * @adapter: pointer to our adapter structure
946  */
947 static void et1310_config_mac_regs1(struct et131x_adapter *adapter)
948 {
949 	struct mac_regs __iomem *macregs = &adapter->regs->mac;
950 	u32 station1;
951 	u32 station2;
952 	u32 ipg;
953 
954 	/* First we need to reset everything.  Write to MAC configuration
955 	 * register 1 to perform reset.
956 	 */
957 	writel(0xC00F0000, &macregs->cfg1);
958 
959 	/* Next let's configure the MAC Inter-packet gap register */
960 	ipg = 0x38005860;		/* IPG1 0x38 IPG2 0x58 B2B 0x60 */
961 	ipg |= 0x50 << 8;		/* ifg enforce 0x50 */
962 	writel(ipg, &macregs->ipg);
963 
964 	/* Next let's configure the MAC Half Duplex register */
965 	/* BEB trunc 0xA, Ex Defer, Rexmit 0xF Coll 0x37 */
966 	writel(0x00A1F037, &macregs->hfdp);
967 
968 	/* Next let's configure the MAC Interface Control register */
969 	writel(0, &macregs->if_ctrl);
970 
971 	/* Let's move on to setting up the mii management configuration */
972 	writel(0x07, &macregs->mii_mgmt_cfg);	/* Clock reset 0x7 */
973 
974 	/* Next let's configure the MAC Station Address register.  These
975 	 * values are read from the EEPROM during initialization and stored
976 	 * in the adapter structure.  We write what is stored in the adapter
977 	 * structure to the MAC Station Address registers high and low.  This
978 	 * station address is used for generating and checking pause control
979 	 * packets.
980 	 */
981 	station2 = (adapter->addr[1] << ET_MAC_STATION_ADDR2_OC2_SHIFT) |
982 		   (adapter->addr[0] << ET_MAC_STATION_ADDR2_OC1_SHIFT);
983 	station1 = (adapter->addr[5] << ET_MAC_STATION_ADDR1_OC6_SHIFT) |
984 		   (adapter->addr[4] << ET_MAC_STATION_ADDR1_OC5_SHIFT) |
985 		   (adapter->addr[3] << ET_MAC_STATION_ADDR1_OC4_SHIFT) |
986 		    adapter->addr[2];
987 	writel(station1, &macregs->station_addr_1);
988 	writel(station2, &macregs->station_addr_2);
989 
990 	/* Max ethernet packet in bytes that will be passed by the mac without
991 	 * being truncated.  Allow the MAC to pass 4 more than our max packet
992 	 * size.  This is 4 for the Ethernet CRC.
993 	 *
994 	 * Packets larger than (registry_jumbo_packet) that do not contain a
995 	 * VLAN ID will be dropped by the Rx function.
996 	 */
997 	writel(adapter->registry_jumbo_packet + 4, &macregs->max_fm_len);
998 
999 	/* clear out MAC config reset */
1000 	writel(0, &macregs->cfg1);
1001 }
1002 
1003 /**
1004  * et1310_config_mac_regs2 - Initialize the second part of MAC regs
1005  * @adapter: pointer to our adapter structure
1006  */
1007 static void et1310_config_mac_regs2(struct et131x_adapter *adapter)
1008 {
1009 	int32_t delay = 0;
1010 	struct mac_regs __iomem *mac = &adapter->regs->mac;
1011 	struct phy_device *phydev = adapter->phydev;
1012 	u32 cfg1;
1013 	u32 cfg2;
1014 	u32 ifctrl;
1015 	u32 ctl;
1016 
1017 	ctl = readl(&adapter->regs->txmac.ctl);
1018 	cfg1 = readl(&mac->cfg1);
1019 	cfg2 = readl(&mac->cfg2);
1020 	ifctrl = readl(&mac->if_ctrl);
1021 
1022 	/* Set up the if mode bits */
1023 	cfg2 &= ~0x300;
1024 	if (phydev && phydev->speed == SPEED_1000) {
1025 		cfg2 |= 0x200;
1026 		/* Phy mode bit */
1027 		ifctrl &= ~(1 << 24);
1028 	} else {
1029 		cfg2 |= 0x100;
1030 		ifctrl |= (1 << 24);
1031 	}
1032 
1033 	/* We need to enable Rx/Tx */
1034 	cfg1 |= CFG1_RX_ENABLE | CFG1_TX_ENABLE | CFG1_TX_FLOW;
1035 	/* Initialize loop back to off */
1036 	cfg1 &= ~(CFG1_LOOPBACK | CFG1_RX_FLOW);
1037 	if (adapter->flowcontrol == FLOW_RXONLY ||
1038 				adapter->flowcontrol == FLOW_BOTH)
1039 		cfg1 |= CFG1_RX_FLOW;
1040 	writel(cfg1, &mac->cfg1);
1041 
1042 	/* Now we need to initialize the MAC Configuration 2 register */
1043 	/* preamble 7, check length, huge frame off, pad crc, crc enable
1044 	   full duplex off */
1045 	cfg2 |= 0x7016;
1046 	cfg2 &= ~0x0021;
1047 
1048 	/* Turn on duplex if needed */
1049 	if (phydev && phydev->duplex == DUPLEX_FULL)
1050 		cfg2 |= 0x01;
1051 
1052 	ifctrl &= ~(1 << 26);
1053 	if (phydev && phydev->duplex == DUPLEX_HALF)
1054 		ifctrl |= (1<<26);	/* Enable ghd */
1055 
1056 	writel(ifctrl, &mac->if_ctrl);
1057 	writel(cfg2, &mac->cfg2);
1058 
1059 	do {
1060 		udelay(10);
1061 		delay++;
1062 		cfg1 = readl(&mac->cfg1);
1063 	} while ((cfg1 & CFG1_WAIT) != CFG1_WAIT && delay < 100);
1064 
1065 	if (delay == 100) {
1066 		dev_warn(&adapter->pdev->dev,
1067 		    "Syncd bits did not respond correctly cfg1 word 0x%08x\n",
1068 			cfg1);
1069 	}
1070 
1071 	/* Enable txmac */
1072 	ctl |= 0x09;	/* TX mac enable, FC disable */
1073 	writel(ctl, &adapter->regs->txmac.ctl);
1074 
1075 	/* Ready to start the RXDMA/TXDMA engine */
1076 	if (adapter->flags & fMP_ADAPTER_LOWER_POWER) {
1077 		et131x_rx_dma_enable(adapter);
1078 		et131x_tx_dma_enable(adapter);
1079 	}
1080 }
1081 
1082 /**
1083  * et1310_in_phy_coma - check if the device is in phy coma
1084  * @adapter: pointer to our adapter structure
1085  *
1086  * Returns 0 if the device is not in phy coma, 1 if it is in phy coma
1087  */
1088 static int et1310_in_phy_coma(struct et131x_adapter *adapter)
1089 {
1090 	u32 pmcsr;
1091 
1092 	pmcsr = readl(&adapter->regs->global.pm_csr);
1093 
1094 	return ET_PM_PHY_SW_COMA & pmcsr ? 1 : 0;
1095 }
1096 
1097 static void et1310_setup_device_for_multicast(struct et131x_adapter *adapter)
1098 {
1099 	struct rxmac_regs __iomem *rxmac = &adapter->regs->rxmac;
1100 	u32 hash1 = 0;
1101 	u32 hash2 = 0;
1102 	u32 hash3 = 0;
1103 	u32 hash4 = 0;
1104 	u32 pm_csr;
1105 
1106 	/* If ET131X_PACKET_TYPE_MULTICAST is specified, then we provision
1107 	 * the multi-cast LIST.  If it is NOT specified, (and "ALL" is not
1108 	 * specified) then we should pass NO multi-cast addresses to the
1109 	 * driver.
1110 	 */
1111 	if (adapter->packet_filter & ET131X_PACKET_TYPE_MULTICAST) {
1112 		int i;
1113 
1114 		/* Loop through our multicast array and set up the device */
1115 		for (i = 0; i < adapter->multicast_addr_count; i++) {
1116 			u32 result;
1117 
1118 			result = ether_crc(6, adapter->multicast_list[i]);
1119 
1120 			result = (result & 0x3F800000) >> 23;
1121 
1122 			if (result < 32) {
1123 				hash1 |= (1 << result);
1124 			} else if ((31 < result) && (result < 64)) {
1125 				result -= 32;
1126 				hash2 |= (1 << result);
1127 			} else if ((63 < result) && (result < 96)) {
1128 				result -= 64;
1129 				hash3 |= (1 << result);
1130 			} else {
1131 				result -= 96;
1132 				hash4 |= (1 << result);
1133 			}
1134 		}
1135 	}
1136 
1137 	/* Write out the new hash to the device */
1138 	pm_csr = readl(&adapter->regs->global.pm_csr);
1139 	if (!et1310_in_phy_coma(adapter)) {
1140 		writel(hash1, &rxmac->multi_hash1);
1141 		writel(hash2, &rxmac->multi_hash2);
1142 		writel(hash3, &rxmac->multi_hash3);
1143 		writel(hash4, &rxmac->multi_hash4);
1144 	}
1145 }
1146 
1147 static void et1310_setup_device_for_unicast(struct et131x_adapter *adapter)
1148 {
1149 	struct rxmac_regs __iomem *rxmac = &adapter->regs->rxmac;
1150 	u32 uni_pf1;
1151 	u32 uni_pf2;
1152 	u32 uni_pf3;
1153 	u32 pm_csr;
1154 
1155 	/* Set up unicast packet filter reg 3 to be the first two octets of
1156 	 * the MAC address for both addresses
1157 	 *
1158 	 * Set up unicast packet filter reg 2 to be the octets 2 - 5 of the
1159 	 * MAC address for second address
1160 	 *
1161 	 * Set up unicast packet filter reg 1 to be the octets 2 - 5 of the
1162 	 * MAC address for first address
1163 	 */
1164 	uni_pf3 = (adapter->addr[0] << ET_UNI_PF_ADDR2_1_SHIFT) |
1165 		  (adapter->addr[1] << ET_UNI_PF_ADDR2_2_SHIFT) |
1166 		  (adapter->addr[0] << ET_UNI_PF_ADDR1_1_SHIFT) |
1167 		   adapter->addr[1];
1168 
1169 	uni_pf2 = (adapter->addr[2] << ET_UNI_PF_ADDR2_3_SHIFT) |
1170 		  (adapter->addr[3] << ET_UNI_PF_ADDR2_4_SHIFT) |
1171 		  (adapter->addr[4] << ET_UNI_PF_ADDR2_5_SHIFT) |
1172 		   adapter->addr[5];
1173 
1174 	uni_pf1 = (adapter->addr[2] << ET_UNI_PF_ADDR1_3_SHIFT) |
1175 		  (adapter->addr[3] << ET_UNI_PF_ADDR1_4_SHIFT) |
1176 		  (adapter->addr[4] << ET_UNI_PF_ADDR1_5_SHIFT) |
1177 		   adapter->addr[5];
1178 
1179 	pm_csr = readl(&adapter->regs->global.pm_csr);
1180 	if (!et1310_in_phy_coma(adapter)) {
1181 		writel(uni_pf1, &rxmac->uni_pf_addr1);
1182 		writel(uni_pf2, &rxmac->uni_pf_addr2);
1183 		writel(uni_pf3, &rxmac->uni_pf_addr3);
1184 	}
1185 }
1186 
1187 static void et1310_config_rxmac_regs(struct et131x_adapter *adapter)
1188 {
1189 	struct rxmac_regs __iomem *rxmac = &adapter->regs->rxmac;
1190 	struct phy_device *phydev = adapter->phydev;
1191 	u32 sa_lo;
1192 	u32 sa_hi = 0;
1193 	u32 pf_ctrl = 0;
1194 
1195 	/* Disable the MAC while it is being configured (also disable WOL) */
1196 	writel(0x8, &rxmac->ctrl);
1197 
1198 	/* Initialize WOL to disabled. */
1199 	writel(0, &rxmac->crc0);
1200 	writel(0, &rxmac->crc12);
1201 	writel(0, &rxmac->crc34);
1202 
1203 	/* We need to set the WOL mask0 - mask4 next.  We initialize it to
1204 	 * its default values of 0x00000000 because there are no WOL masks
1205 	 * as of this time.
1206 	 */
1207 	writel(0, &rxmac->mask0_word0);
1208 	writel(0, &rxmac->mask0_word1);
1209 	writel(0, &rxmac->mask0_word2);
1210 	writel(0, &rxmac->mask0_word3);
1211 
1212 	writel(0, &rxmac->mask1_word0);
1213 	writel(0, &rxmac->mask1_word1);
1214 	writel(0, &rxmac->mask1_word2);
1215 	writel(0, &rxmac->mask1_word3);
1216 
1217 	writel(0, &rxmac->mask2_word0);
1218 	writel(0, &rxmac->mask2_word1);
1219 	writel(0, &rxmac->mask2_word2);
1220 	writel(0, &rxmac->mask2_word3);
1221 
1222 	writel(0, &rxmac->mask3_word0);
1223 	writel(0, &rxmac->mask3_word1);
1224 	writel(0, &rxmac->mask3_word2);
1225 	writel(0, &rxmac->mask3_word3);
1226 
1227 	writel(0, &rxmac->mask4_word0);
1228 	writel(0, &rxmac->mask4_word1);
1229 	writel(0, &rxmac->mask4_word2);
1230 	writel(0, &rxmac->mask4_word3);
1231 
1232 	/* Let's set up the WOL Source Address */
1233 	sa_lo = (adapter->addr[2] << ET_WOL_LO_SA3_SHIFT) |
1234 		(adapter->addr[3] << ET_WOL_LO_SA4_SHIFT) |
1235 		(adapter->addr[4] << ET_WOL_LO_SA5_SHIFT) |
1236 		 adapter->addr[5];
1237 	writel(sa_lo, &rxmac->sa_lo);
1238 
1239 	sa_hi = (u32) (adapter->addr[0] << ET_WOL_HI_SA1_SHIFT) |
1240 		       adapter->addr[1];
1241 	writel(sa_hi, &rxmac->sa_hi);
1242 
1243 	/* Disable all Packet Filtering */
1244 	writel(0, &rxmac->pf_ctrl);
1245 
1246 	/* Let's initialize the Unicast Packet filtering address */
1247 	if (adapter->packet_filter & ET131X_PACKET_TYPE_DIRECTED) {
1248 		et1310_setup_device_for_unicast(adapter);
1249 		pf_ctrl |= 4;	/* Unicast filter */
1250 	} else {
1251 		writel(0, &rxmac->uni_pf_addr1);
1252 		writel(0, &rxmac->uni_pf_addr2);
1253 		writel(0, &rxmac->uni_pf_addr3);
1254 	}
1255 
1256 	/* Let's initialize the Multicast hash */
1257 	if (!(adapter->packet_filter & ET131X_PACKET_TYPE_ALL_MULTICAST)) {
1258 		pf_ctrl |= 2;	/* Multicast filter */
1259 		et1310_setup_device_for_multicast(adapter);
1260 	}
1261 
1262 	/* Runt packet filtering.  Didn't work in version A silicon. */
1263 	pf_ctrl |= (NIC_MIN_PACKET_SIZE + 4) << 16;
1264 	pf_ctrl |= 8;	/* Fragment filter */
1265 
1266 	if (adapter->registry_jumbo_packet > 8192)
1267 		/* In order to transmit jumbo packets greater than 8k, the
1268 		 * FIFO between RxMAC and RxDMA needs to be reduced in size
1269 		 * to (16k - Jumbo packet size).  In order to implement this,
1270 		 * we must use "cut through" mode in the RxMAC, which chops
1271 		 * packets down into segments which are (max_size * 16).  In
1272 		 * this case we selected 256 bytes, since this is the size of
1273 		 * the PCI-Express TLP's that the 1310 uses.
1274 		 *
1275 		 * seg_en on, fc_en off, size 0x10
1276 		 */
1277 		writel(0x41, &rxmac->mcif_ctrl_max_seg);
1278 	else
1279 		writel(0, &rxmac->mcif_ctrl_max_seg);
1280 
1281 	/* Initialize the MCIF water marks */
1282 	writel(0, &rxmac->mcif_water_mark);
1283 
1284 	/*  Initialize the MIF control */
1285 	writel(0, &rxmac->mif_ctrl);
1286 
1287 	/* Initialize the Space Available Register */
1288 	writel(0, &rxmac->space_avail);
1289 
1290 	/* Initialize the mif_ctrl register
1291 	 * bit 3:  Receive code error. One or more nibbles were signaled as
1292 	 *	   errors  during the reception of the packet.  Clear this
1293 	 *	   bit in Gigabit, set it in 100Mbit.  This was derived
1294 	 *	   experimentally at UNH.
1295 	 * bit 4:  Receive CRC error. The packet's CRC did not match the
1296 	 *	   internally generated CRC.
1297 	 * bit 5:  Receive length check error. Indicates that frame length
1298 	 *	   field value in the packet does not match the actual data
1299 	 *	   byte length and is not a type field.
1300 	 * bit 16: Receive frame truncated.
1301 	 * bit 17: Drop packet enable
1302 	 */
1303 	if (phydev && phydev->speed == SPEED_100)
1304 		writel(0x30038, &rxmac->mif_ctrl);
1305 	else
1306 		writel(0x30030, &rxmac->mif_ctrl);
1307 
1308 	/* Finally we initialize RxMac to be enabled & WOL disabled.  Packet
1309 	 * filter is always enabled since it is where the runt packets are
1310 	 * supposed to be dropped.  For version A silicon, runt packet
1311 	 * dropping doesn't work, so it is disabled in the pf_ctrl register,
1312 	 * but we still leave the packet filter on.
1313 	 */
1314 	writel(pf_ctrl, &rxmac->pf_ctrl);
1315 	writel(0x9, &rxmac->ctrl);
1316 }
1317 
1318 static void et1310_config_txmac_regs(struct et131x_adapter *adapter)
1319 {
1320 	struct txmac_regs __iomem *txmac = &adapter->regs->txmac;
1321 
1322 	/* We need to update the Control Frame Parameters
1323 	 * cfpt - control frame pause timer set to 64 (0x40)
1324 	 * cfep - control frame extended pause timer set to 0x0
1325 	 */
1326 	if (adapter->flowcontrol == FLOW_NONE)
1327 		writel(0, &txmac->cf_param);
1328 	else
1329 		writel(0x40, &txmac->cf_param);
1330 }
1331 
1332 static void et1310_config_macstat_regs(struct et131x_adapter *adapter)
1333 {
1334 	struct macstat_regs __iomem *macstat =
1335 		&adapter->regs->macstat;
1336 
1337 	/* Next we need to initialize all the macstat registers to zero on
1338 	 * the device.
1339 	 */
1340 	writel(0, &macstat->txrx_0_64_byte_frames);
1341 	writel(0, &macstat->txrx_65_127_byte_frames);
1342 	writel(0, &macstat->txrx_128_255_byte_frames);
1343 	writel(0, &macstat->txrx_256_511_byte_frames);
1344 	writel(0, &macstat->txrx_512_1023_byte_frames);
1345 	writel(0, &macstat->txrx_1024_1518_byte_frames);
1346 	writel(0, &macstat->txrx_1519_1522_gvln_frames);
1347 
1348 	writel(0, &macstat->rx_bytes);
1349 	writel(0, &macstat->rx_packets);
1350 	writel(0, &macstat->rx_fcs_errs);
1351 	writel(0, &macstat->rx_multicast_packets);
1352 	writel(0, &macstat->rx_broadcast_packets);
1353 	writel(0, &macstat->rx_control_frames);
1354 	writel(0, &macstat->rx_pause_frames);
1355 	writel(0, &macstat->rx_unknown_opcodes);
1356 	writel(0, &macstat->rx_align_errs);
1357 	writel(0, &macstat->rx_frame_len_errs);
1358 	writel(0, &macstat->rx_code_errs);
1359 	writel(0, &macstat->rx_carrier_sense_errs);
1360 	writel(0, &macstat->rx_undersize_packets);
1361 	writel(0, &macstat->rx_oversize_packets);
1362 	writel(0, &macstat->rx_fragment_packets);
1363 	writel(0, &macstat->rx_jabbers);
1364 	writel(0, &macstat->rx_drops);
1365 
1366 	writel(0, &macstat->tx_bytes);
1367 	writel(0, &macstat->tx_packets);
1368 	writel(0, &macstat->tx_multicast_packets);
1369 	writel(0, &macstat->tx_broadcast_packets);
1370 	writel(0, &macstat->tx_pause_frames);
1371 	writel(0, &macstat->tx_deferred);
1372 	writel(0, &macstat->tx_excessive_deferred);
1373 	writel(0, &macstat->tx_single_collisions);
1374 	writel(0, &macstat->tx_multiple_collisions);
1375 	writel(0, &macstat->tx_late_collisions);
1376 	writel(0, &macstat->tx_excessive_collisions);
1377 	writel(0, &macstat->tx_total_collisions);
1378 	writel(0, &macstat->tx_pause_honored_frames);
1379 	writel(0, &macstat->tx_drops);
1380 	writel(0, &macstat->tx_jabbers);
1381 	writel(0, &macstat->tx_fcs_errs);
1382 	writel(0, &macstat->tx_control_frames);
1383 	writel(0, &macstat->tx_oversize_frames);
1384 	writel(0, &macstat->tx_undersize_frames);
1385 	writel(0, &macstat->tx_fragments);
1386 	writel(0, &macstat->carry_reg1);
1387 	writel(0, &macstat->carry_reg2);
1388 
1389 	/* Unmask any counters that we want to track the overflow of.
1390 	 * Initially this will be all counters.  It may become clear later
1391 	 * that we do not need to track all counters.
1392 	 */
1393 	writel(0xFFFFBE32, &macstat->carry_reg1_mask);
1394 	writel(0xFFFE7E8B, &macstat->carry_reg2_mask);
1395 }
1396 
1397 /**
1398  * et131x_phy_mii_read - Read from the PHY through the MII Interface on the MAC
1399  * @adapter: pointer to our private adapter structure
1400  * @addr: the address of the transceiver
1401  * @reg: the register to read
1402  * @value: pointer to a 16-bit value in which the value will be stored
1403  *
1404  * Returns 0 on success, errno on failure (as defined in errno.h)
1405  */
1406 static int et131x_phy_mii_read(struct et131x_adapter *adapter, u8 addr,
1407 	      u8 reg, u16 *value)
1408 {
1409 	struct mac_regs __iomem *mac = &adapter->regs->mac;
1410 	int status = 0;
1411 	u32 delay = 0;
1412 	u32 mii_addr;
1413 	u32 mii_cmd;
1414 	u32 mii_indicator;
1415 
1416 	/* Save a local copy of the registers we are dealing with so we can
1417 	 * set them back
1418 	 */
1419 	mii_addr = readl(&mac->mii_mgmt_addr);
1420 	mii_cmd = readl(&mac->mii_mgmt_cmd);
1421 
1422 	/* Stop the current operation */
1423 	writel(0, &mac->mii_mgmt_cmd);
1424 
1425 	/* Set up the register we need to read from on the correct PHY */
1426 	writel(MII_ADDR(addr, reg), &mac->mii_mgmt_addr);
1427 
1428 	writel(0x1, &mac->mii_mgmt_cmd);
1429 
1430 	do {
1431 		udelay(50);
1432 		delay++;
1433 		mii_indicator = readl(&mac->mii_mgmt_indicator);
1434 	} while ((mii_indicator & MGMT_WAIT) && delay < 50);
1435 
1436 	/* If we hit the max delay, we could not read the register */
1437 	if (delay == 50) {
1438 		dev_warn(&adapter->pdev->dev,
1439 			    "reg 0x%08x could not be read\n", reg);
1440 		dev_warn(&adapter->pdev->dev, "status is  0x%08x\n",
1441 			    mii_indicator);
1442 
1443 		status = -EIO;
1444 	}
1445 
1446 	/* If we hit here we were able to read the register and we need to
1447 	 * return the value to the caller */
1448 	*value = readl(&mac->mii_mgmt_stat) & 0xFFFF;
1449 
1450 	/* Stop the read operation */
1451 	writel(0, &mac->mii_mgmt_cmd);
1452 
1453 	/* set the registers we touched back to the state at which we entered
1454 	 * this function
1455 	 */
1456 	writel(mii_addr, &mac->mii_mgmt_addr);
1457 	writel(mii_cmd, &mac->mii_mgmt_cmd);
1458 
1459 	return status;
1460 }
1461 
1462 static int et131x_mii_read(struct et131x_adapter *adapter, u8 reg, u16 *value)
1463 {
1464 	struct phy_device *phydev = adapter->phydev;
1465 
1466 	if (!phydev)
1467 		return -EIO;
1468 
1469 	return et131x_phy_mii_read(adapter, phydev->addr, reg, value);
1470 }
1471 
1472 /**
1473  * et131x_mii_write - Write to a PHY register through the MII interface of the MAC
1474  * @adapter: pointer to our private adapter structure
1475  * @reg: the register to write to
1476  * @value: 16-bit value to write
1477  *
1478  * FIXME: one caller in netdev still
1479  *
1480  * Return 0 on success, errno on failure (as defined in errno.h)
1481  */
1482 static int et131x_mii_write(struct et131x_adapter *adapter, u8 reg, u16 value)
1483 {
1484 	struct mac_regs __iomem *mac = &adapter->regs->mac;
1485 	struct phy_device *phydev = adapter->phydev;
1486 	int status = 0;
1487 	u8 addr;
1488 	u32 delay = 0;
1489 	u32 mii_addr;
1490 	u32 mii_cmd;
1491 	u32 mii_indicator;
1492 
1493 	if (!phydev)
1494 		return -EIO;
1495 
1496 	addr = phydev->addr;
1497 
1498 	/* Save a local copy of the registers we are dealing with so we can
1499 	 * set them back
1500 	 */
1501 	mii_addr = readl(&mac->mii_mgmt_addr);
1502 	mii_cmd = readl(&mac->mii_mgmt_cmd);
1503 
1504 	/* Stop the current operation */
1505 	writel(0, &mac->mii_mgmt_cmd);
1506 
1507 	/* Set up the register we need to write to on the correct PHY */
1508 	writel(MII_ADDR(addr, reg), &mac->mii_mgmt_addr);
1509 
1510 	/* Add the value to write to the registers to the mac */
1511 	writel(value, &mac->mii_mgmt_ctrl);
1512 
1513 	do {
1514 		udelay(50);
1515 		delay++;
1516 		mii_indicator = readl(&mac->mii_mgmt_indicator);
1517 	} while ((mii_indicator & MGMT_BUSY) && delay < 100);
1518 
1519 	/* If we hit the max delay, we could not write the register */
1520 	if (delay == 100) {
1521 		u16 tmp;
1522 
1523 		dev_warn(&adapter->pdev->dev,
1524 		    "reg 0x%08x could not be written", reg);
1525 		dev_warn(&adapter->pdev->dev, "status is  0x%08x\n",
1526 			    mii_indicator);
1527 		dev_warn(&adapter->pdev->dev, "command is  0x%08x\n",
1528 			    readl(&mac->mii_mgmt_cmd));
1529 
1530 		et131x_mii_read(adapter, reg, &tmp);
1531 
1532 		status = -EIO;
1533 	}
1534 	/* Stop the write operation */
1535 	writel(0, &mac->mii_mgmt_cmd);
1536 
1537 	/*
1538 	 * set the registers we touched back to the state at which we entered
1539 	 * this function
1540 	 */
1541 	writel(mii_addr, &mac->mii_mgmt_addr);
1542 	writel(mii_cmd, &mac->mii_mgmt_cmd);
1543 
1544 	return status;
1545 }
1546 
1547 /* Still used from _mac for BIT_READ */
1548 static void et1310_phy_access_mii_bit(struct et131x_adapter *adapter,
1549 				      u16 action, u16 regnum, u16 bitnum,
1550 				      u8 *value)
1551 {
1552 	u16 reg;
1553 	u16 mask = 0x0001 << bitnum;
1554 
1555 	/* Read the requested register */
1556 	et131x_mii_read(adapter, regnum, &reg);
1557 
1558 	switch (action) {
1559 	case TRUEPHY_BIT_READ:
1560 		*value = (reg & mask) >> bitnum;
1561 		break;
1562 
1563 	case TRUEPHY_BIT_SET:
1564 		et131x_mii_write(adapter, regnum, reg | mask);
1565 		break;
1566 
1567 	case TRUEPHY_BIT_CLEAR:
1568 		et131x_mii_write(adapter, regnum, reg & ~mask);
1569 		break;
1570 
1571 	default:
1572 		break;
1573 	}
1574 }
1575 
1576 static void et1310_config_flow_control(struct et131x_adapter *adapter)
1577 {
1578 	struct phy_device *phydev = adapter->phydev;
1579 
1580 	if (phydev->duplex == DUPLEX_HALF) {
1581 		adapter->flowcontrol = FLOW_NONE;
1582 	} else {
1583 		char remote_pause, remote_async_pause;
1584 
1585 		et1310_phy_access_mii_bit(adapter,
1586 				TRUEPHY_BIT_READ, 5, 10, &remote_pause);
1587 		et1310_phy_access_mii_bit(adapter,
1588 				TRUEPHY_BIT_READ, 5, 11,
1589 				&remote_async_pause);
1590 
1591 		if ((remote_pause == TRUEPHY_BIT_SET) &&
1592 		    (remote_async_pause == TRUEPHY_BIT_SET)) {
1593 			adapter->flowcontrol = adapter->wanted_flow;
1594 		} else if ((remote_pause == TRUEPHY_BIT_SET) &&
1595 			   (remote_async_pause == TRUEPHY_BIT_CLEAR)) {
1596 			if (adapter->wanted_flow == FLOW_BOTH)
1597 				adapter->flowcontrol = FLOW_BOTH;
1598 			else
1599 				adapter->flowcontrol = FLOW_NONE;
1600 		} else if ((remote_pause == TRUEPHY_BIT_CLEAR) &&
1601 			   (remote_async_pause == TRUEPHY_BIT_CLEAR)) {
1602 			adapter->flowcontrol = FLOW_NONE;
1603 		} else {/* if (remote_pause == TRUEPHY_BIT_CLEAR &&
1604 			       remote_async_pause == TRUEPHY_BIT_SET) */
1605 			if (adapter->wanted_flow == FLOW_BOTH)
1606 				adapter->flowcontrol = FLOW_RXONLY;
1607 			else
1608 				adapter->flowcontrol = FLOW_NONE;
1609 		}
1610 	}
1611 }
1612 
1613 /**
1614  * et1310_update_macstat_host_counters - Update the local copy of the statistics
1615  * @adapter: pointer to the adapter structure
1616  */
1617 static void et1310_update_macstat_host_counters(struct et131x_adapter *adapter)
1618 {
1619 	struct ce_stats *stats = &adapter->stats;
1620 	struct macstat_regs __iomem *macstat =
1621 		&adapter->regs->macstat;
1622 
1623 	stats->tx_collisions	       += readl(&macstat->tx_total_collisions);
1624 	stats->tx_first_collisions     += readl(&macstat->tx_single_collisions);
1625 	stats->tx_deferred	       += readl(&macstat->tx_deferred);
1626 	stats->tx_excessive_collisions +=
1627 				readl(&macstat->tx_multiple_collisions);
1628 	stats->tx_late_collisions      += readl(&macstat->tx_late_collisions);
1629 	stats->tx_underflows	       += readl(&macstat->tx_undersize_frames);
1630 	stats->tx_max_pkt_errs	       += readl(&macstat->tx_oversize_frames);
1631 
1632 	stats->rx_align_errs        += readl(&macstat->rx_align_errs);
1633 	stats->rx_crc_errs          += readl(&macstat->rx_code_errs);
1634 	stats->rcvd_pkts_dropped    += readl(&macstat->rx_drops);
1635 	stats->rx_overflows         += readl(&macstat->rx_oversize_packets);
1636 	stats->rx_code_violations   += readl(&macstat->rx_fcs_errs);
1637 	stats->rx_length_errs       += readl(&macstat->rx_frame_len_errs);
1638 	stats->rx_other_errs        += readl(&macstat->rx_fragment_packets);
1639 }
1640 
1641 /**
1642  * et1310_handle_macstat_interrupt
1643  * @adapter: pointer to the adapter structure
1644  *
1645  * One of the MACSTAT counters has wrapped.  Update the local copy of
1646  * the statistics held in the adapter structure, checking the "wrap"
1647  * bit for each counter.
1648  */
1649 static void et1310_handle_macstat_interrupt(struct et131x_adapter *adapter)
1650 {
1651 	u32 carry_reg1;
1652 	u32 carry_reg2;
1653 
1654 	/* Read the interrupt bits from the register(s).  These are Clear On
1655 	 * Write.
1656 	 */
1657 	carry_reg1 = readl(&adapter->regs->macstat.carry_reg1);
1658 	carry_reg2 = readl(&adapter->regs->macstat.carry_reg2);
1659 
1660 	writel(carry_reg1, &adapter->regs->macstat.carry_reg1);
1661 	writel(carry_reg2, &adapter->regs->macstat.carry_reg2);
1662 
1663 	/* We need to update the host copy of all the MAC_STAT counters.
1664 	 * For each counter, check its overflow bit.  If the overflow bit is
1665 	 * set, then increment the host version of the count by one complete
1666 	 * revolution of the counter.  This routine is called when the counter
1667 	 * block indicates that one of the counters has wrapped.
1668 	 */
1669 	if (carry_reg1 & (1 << 14))
1670 		adapter->stats.rx_code_violations	+= COUNTER_WRAP_16_BIT;
1671 	if (carry_reg1 & (1 << 8))
1672 		adapter->stats.rx_align_errs	+= COUNTER_WRAP_12_BIT;
1673 	if (carry_reg1 & (1 << 7))
1674 		adapter->stats.rx_length_errs	+= COUNTER_WRAP_16_BIT;
1675 	if (carry_reg1 & (1 << 2))
1676 		adapter->stats.rx_other_errs	+= COUNTER_WRAP_16_BIT;
1677 	if (carry_reg1 & (1 << 6))
1678 		adapter->stats.rx_crc_errs	+= COUNTER_WRAP_16_BIT;
1679 	if (carry_reg1 & (1 << 3))
1680 		adapter->stats.rx_overflows	+= COUNTER_WRAP_16_BIT;
1681 	if (carry_reg1 & (1 << 0))
1682 		adapter->stats.rcvd_pkts_dropped	+= COUNTER_WRAP_16_BIT;
1683 	if (carry_reg2 & (1 << 16))
1684 		adapter->stats.tx_max_pkt_errs	+= COUNTER_WRAP_12_BIT;
1685 	if (carry_reg2 & (1 << 15))
1686 		adapter->stats.tx_underflows	+= COUNTER_WRAP_12_BIT;
1687 	if (carry_reg2 & (1 << 6))
1688 		adapter->stats.tx_first_collisions += COUNTER_WRAP_12_BIT;
1689 	if (carry_reg2 & (1 << 8))
1690 		adapter->stats.tx_deferred	+= COUNTER_WRAP_12_BIT;
1691 	if (carry_reg2 & (1 << 5))
1692 		adapter->stats.tx_excessive_collisions += COUNTER_WRAP_12_BIT;
1693 	if (carry_reg2 & (1 << 4))
1694 		adapter->stats.tx_late_collisions	+= COUNTER_WRAP_12_BIT;
1695 	if (carry_reg2 & (1 << 2))
1696 		adapter->stats.tx_collisions	+= COUNTER_WRAP_12_BIT;
1697 }
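
/* Illustrative sketch of the carry handling above (the names here are
 * hypothetical, not part of the driver): each hardware counter is only 12 or
 * 16 bits wide, so when it rolls over the corresponding carry bit is set and
 * the wider host-side counter is advanced by one full revolution -
 * COUNTER_WRAP_16_BIT is presumably (1 << 16) and COUNTER_WRAP_12_BIT
 * (1 << 12):
 *
 *	static inline void widen_counter(u32 *host, u32 carry, u32 bit,
 *					 u32 wrap)
 *	{
 *		if (carry & (1 << bit))
 *			*host += wrap;	 // one complete counter revolution
 *	}
 *
 * widen_counter(&stats->rx_crc_errs, carry_reg1, 6, COUNTER_WRAP_16_BIT)
 * would mirror the "carry_reg1 & (1 << 6)" case handled above.
 */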
1698 
1699 static int et131x_mdio_read(struct mii_bus *bus, int phy_addr, int reg)
1700 {
1701 	struct net_device *netdev = bus->priv;
1702 	struct et131x_adapter *adapter = netdev_priv(netdev);
1703 	u16 value;
1704 	int ret;
1705 
1706 	ret = et131x_phy_mii_read(adapter, phy_addr, reg, &value);
1707 
1708 	if (ret < 0)
1709 		return ret;
1710 	else
1711 		return value;
1712 }
1713 
1714 static int et131x_mdio_write(struct mii_bus *bus, int phy_addr, int reg, u16 value)
1715 {
1716 	struct net_device *netdev = bus->priv;
1717 	struct et131x_adapter *adapter = netdev_priv(netdev);
1718 
1719 	return et131x_mii_write(adapter, reg, value);
1720 }
1721 
1722 static int et131x_mdio_reset(struct mii_bus *bus)
1723 {
1724 	struct net_device *netdev = bus->priv;
1725 	struct et131x_adapter *adapter = netdev_priv(netdev);
1726 
1727 	et131x_mii_write(adapter, MII_BMCR, BMCR_RESET);
1728 
1729 	return 0;
1730 }
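
/* The three functions above are standard mii_bus callbacks.  A minimal
 * sketch of how such a bus is typically populated and registered during
 * probe (the bus pointer and error handling here are illustrative only; the
 * actual registration in this driver is done elsewhere):
 *
 *	bus->name  = "et131x_eth_mii";
 *	bus->priv  = netdev;
 *	bus->read  = et131x_mdio_read;
 *	bus->write = et131x_mdio_write;
 *	bus->reset = et131x_mdio_reset;
 *	err = mdiobus_register(bus);
 */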
1731 
1732 /**
1733  *	et1310_phy_power_down	-	PHY power control
1734  *	@adapter: device to control
1735  *	@down: true for off/false for back on
1736  *
1737  *	one hundred, ten, one thousand megs
1738  *	How would you like to have your LAN accessed
1739  *	Can't you see that this code processed
1740  *	Phy power, phy power..
1741  */
1742 static void et1310_phy_power_down(struct et131x_adapter *adapter, bool down)
1743 {
1744 	u16 data;
1745 
1746 	et131x_mii_read(adapter, MII_BMCR, &data);
1747 	data &= ~BMCR_PDOWN;
1748 	if (down)
1749 		data |= BMCR_PDOWN;
1750 	et131x_mii_write(adapter, MII_BMCR, data);
1751 }
1752 
1753 /**
1754  * et131x_xcvr_init - Init the phy if we are setting it into force mode
1755  * @adapter: pointer to our private adapter structure
1756  *
1757  */
1758 static void et131x_xcvr_init(struct et131x_adapter *adapter)
1759 {
1760 	u16 imr;
1761 	u16 isr;
1762 	u16 lcr2;
1763 
1764 	et131x_mii_read(adapter, PHY_INTERRUPT_STATUS, &isr);
1765 	et131x_mii_read(adapter, PHY_INTERRUPT_MASK, &imr);
1766 
1767 	/* Set the link status interrupt only.  Enabling both the link status
1768 	 * and auto-negotiation interrupts causes a nested interrupt problem.
1769 	 */
1770 	imr |= (ET_PHY_INT_MASK_AUTONEGSTAT &
1771 		ET_PHY_INT_MASK_LINKSTAT &
1772 		ET_PHY_INT_MASK_ENABLE);
1773 
1774 	et131x_mii_write(adapter, PHY_INTERRUPT_MASK, imr);
1775 
1776 	/* Set the LED behavior such that LED 1 indicates speed (off =
1777 	 * 10Mbits, blink = 100Mbits, on = 1000Mbits) and LED 2 indicates
1778 	 * link and activity (on for link, blink off for activity).
1779 	 *
1780 	 * NOTE: Some customizations have been added here for specific
1781 	 * vendors; The LED behavior is now determined by vendor data in the
1782 	 * EEPROM. However, the above description is the default.
1783 	 */
1784 	if ((adapter->eeprom_data[1] & 0x4) == 0) {
1785 		et131x_mii_read(adapter, PHY_LED_2, &lcr2);
1786 
1787 		lcr2 &= (ET_LED2_LED_100TX & ET_LED2_LED_1000T);
1788 		lcr2 |= (LED_VAL_LINKON_ACTIVE << LED_LINK_SHIFT);
1789 
1790 		if ((adapter->eeprom_data[1] & 0x8) == 0)
1791 			lcr2 |= (LED_VAL_1000BT_100BTX << LED_TXRX_SHIFT);
1792 		else
1793 			lcr2 |= (LED_VAL_LINKON << LED_TXRX_SHIFT);
1794 
1795 		et131x_mii_write(adapter, PHY_LED_2, lcr2);
1796 	}
1797 }
1798 
1799 /**
1800  * et131x_configure_global_regs	-	configure JAGCore global regs
1801  * @adapter: pointer to our adapter structure
1802  *
1803  * Used to configure the global registers on the JAGCore
1804  */
1805 static void et131x_configure_global_regs(struct et131x_adapter *adapter)
1806 {
1807 	struct global_regs __iomem *regs = &adapter->regs->global;
1808 
1809 	writel(0, &regs->rxq_start_addr);
1810 	writel(INTERNAL_MEM_SIZE - 1, &regs->txq_end_addr);
1811 
1812 	if (adapter->registry_jumbo_packet < 2048) {
1813 		/* Tx / RxDMA and Tx/Rx MAC interfaces have a 1k word
1814 		 * block of RAM that the driver can split between Tx
1815 		 * and Rx as it desires.  Our default is to split it
1816 		 * 50/50:
1817 		 */
1818 		writel(PARM_RX_MEM_END_DEF, &regs->rxq_end_addr);
1819 		writel(PARM_RX_MEM_END_DEF + 1, &regs->txq_start_addr);
1820 	} else if (adapter->registry_jumbo_packet < 8192) {
1821 		/* For jumbo packets > 2k but < 8k, split 50-50. */
1822 		writel(INTERNAL_MEM_RX_OFFSET, &regs->rxq_end_addr);
1823 		writel(INTERNAL_MEM_RX_OFFSET + 1, &regs->txq_start_addr);
1824 	} else {
1825 		/* 9216 is the only packet size greater than 8k that
1826 		 * is available. The Tx buffer has to be big enough
1827 		 * for one whole packet on the Tx side. We'll make
1828 		 * the Tx 9408, and give the rest to Rx
1829 		 */
1830 		writel(0x01b3, &regs->rxq_end_addr);
1831 		writel(0x01b4, &regs->txq_start_addr);
1832 	}
1833 
1834 	/* Initialize the loopback register. Disable all loopbacks. */
1835 	writel(0, &regs->loopback);
1836 
1837 	/* MSI Register */
1838 	writel(0, &regs->msi_config);
1839 
1840 	/* By default, disable the watchdog timer.  It will be enabled when
1841 	 * a packet is queued.
1842 	 */
1843 	writel(0, &regs->watchdog_timer);
1844 }
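
/* A worked example of the jumbo (>= 8192 byte) split above, assuming the
 * internal queue RAM is 1024 sixteen-byte words (i.e. INTERNAL_MEM_SIZE is
 * 0x400 - check et131x.h for the real values): Rx gets words 0x000-0x1b3
 * and Tx gets words 0x1b4-0x3ff, i.e. (0x400 - 0x1b4) * 16 = 9408 bytes,
 * which is enough for one whole 9216 byte packet plus overhead, matching
 * the comment in the else branch above.
 */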
1845 
1846 /**
1847  * et131x_config_rx_dma_regs - Start of Rx_DMA init sequence
1848  * @adapter: pointer to our adapter structure
1849  */
1850 static void et131x_config_rx_dma_regs(struct et131x_adapter *adapter)
1851 {
1852 	struct rxdma_regs __iomem *rx_dma = &adapter->regs->rxdma;
1853 	struct rx_ring *rx_local = &adapter->rx_ring;
1854 	struct fbr_desc *fbr_entry;
1855 	u32 entry;
1856 	u32 psr_num_des;
1857 	unsigned long flags;
1858 
1859 	/* Halt RXDMA to perform the reconfigure.  */
1860 	et131x_rx_dma_disable(adapter);
1861 
1862 	/* Load the completion writeback physical address
1863 	 *
1864 	 * NOTE : dma_alloc_coherent(), used above to alloc DMA regions,
1865 	 * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses
1866 	 * are ever returned, make sure the high part is retrieved here
1867 	 * before storing the adjusted address.
1868 	 */
1869 	writel((u32) ((u64)rx_local->rx_status_bus >> 32),
1870 	       &rx_dma->dma_wb_base_hi);
1871 	writel((u32) rx_local->rx_status_bus, &rx_dma->dma_wb_base_lo);
1872 
1873 	memset(rx_local->rx_status_block, 0, sizeof(struct rx_status_block));
1874 
1875 	/* Set the address and parameters of the packet status ring into the
1876 	 * 1310's registers
1877 	 */
1878 	writel((u32) ((u64)rx_local->ps_ring_physaddr >> 32),
1879 	       &rx_dma->psr_base_hi);
1880 	writel((u32) rx_local->ps_ring_physaddr, &rx_dma->psr_base_lo);
1881 	writel(rx_local->psr_num_entries - 1, &rx_dma->psr_num_des);
1882 	writel(0, &rx_dma->psr_full_offset);
1883 
1884 	psr_num_des = readl(&rx_dma->psr_num_des) & 0xFFF;
1885 	writel((psr_num_des * LO_MARK_PERCENT_FOR_PSR) / 100,
1886 	       &rx_dma->psr_min_des);
1887 
1888 	spin_lock_irqsave(&adapter->rcv_lock, flags);
1889 
1890 	/* These local variables track the PSR in the adapter structure */
1891 	rx_local->local_psr_full = 0;
1892 
1893 	/* Now's the best time to initialize FBR1 contents */
1894 	fbr_entry = (struct fbr_desc *) rx_local->fbr[0]->ring_virtaddr;
1895 	for (entry = 0; entry < rx_local->fbr[0]->num_entries; entry++) {
1896 		fbr_entry->addr_hi = rx_local->fbr[0]->bus_high[entry];
1897 		fbr_entry->addr_lo = rx_local->fbr[0]->bus_low[entry];
1898 		fbr_entry->word2 = entry;
1899 		fbr_entry++;
1900 	}
1901 
1902 	/* Set the address and parameters of Free buffer ring 1 (and 0 if
1903 	 * required) into the 1310's registers
1904 	 */
1905 	writel((u32) (rx_local->fbr[0]->real_physaddr >> 32),
1906 	       &rx_dma->fbr1_base_hi);
1907 	writel((u32) rx_local->fbr[0]->real_physaddr, &rx_dma->fbr1_base_lo);
1908 	writel(rx_local->fbr[0]->num_entries - 1, &rx_dma->fbr1_num_des);
1909 	writel(ET_DMA10_WRAP, &rx_dma->fbr1_full_offset);
1910 
1911 	/* This variable tracks the free buffer ring 1 full position, so it
1912 	 * has to match the above.
1913 	 */
1914 	rx_local->fbr[0]->local_full = ET_DMA10_WRAP;
1915 	writel(
1916 	   ((rx_local->fbr[0]->num_entries * LO_MARK_PERCENT_FOR_RX) / 100) - 1,
1917 	   &rx_dma->fbr1_min_des);
1918 
1919 #ifdef USE_FBR0
1920 	/* Now's the best time to initialize FBR0 contents */
1921 	fbr_entry = (struct fbr_desc *) rx_local->fbr[1]->ring_virtaddr;
1922 	for (entry = 0; entry < rx_local->fbr[1]->num_entries; entry++) {
1923 		fbr_entry->addr_hi = rx_local->fbr[1]->bus_high[entry];
1924 		fbr_entry->addr_lo = rx_local->fbr[1]->bus_low[entry];
1925 		fbr_entry->word2 = entry;
1926 		fbr_entry++;
1927 	}
1928 
1929 	writel((u32) (rx_local->fbr[1]->real_physaddr >> 32),
1930 	       &rx_dma->fbr0_base_hi);
1931 	writel((u32) rx_local->fbr[1]->real_physaddr, &rx_dma->fbr0_base_lo);
1932 	writel(rx_local->fbr[1]->num_entries - 1, &rx_dma->fbr0_num_des);
1933 	writel(ET_DMA10_WRAP, &rx_dma->fbr0_full_offset);
1934 
1935 	/* This variable tracks the free buffer ring 0 full position, so it
1936 	 * has to match the above.
1937 	 */
1938 	rx_local->fbr[1]->local_full = ET_DMA10_WRAP;
1939 	writel(
1940 	   ((rx_local->fbr[1]->num_entries * LO_MARK_PERCENT_FOR_RX) / 100) - 1,
1941 	   &rx_dma->fbr0_min_des);
1942 #endif
1943 
1944 	/* Program the number of packets we will receive before generating an
1945 	 * interrupt.
1946 	 * For version B silicon, this value gets updated once autoneg is
1947 	 * complete.
1948 	 */
1949 	writel(PARM_RX_NUM_BUFS_DEF, &rx_dma->num_pkt_done);
1950 
1951 	/* The "time_done" mechanism does not coalesce interrupts over a given
1952 	 * time period as intended; instead it generates an interrupt
1953 	 * regardless of whether we have received packets.
1954 	 * This value gets updated once autoneg is complete.
1955 	 */
1956 	writel(PARM_RX_TIME_INT_DEF, &rx_dma->max_pkt_time);
1957 
1958 	spin_unlock_irqrestore(&adapter->rcv_lock, flags);
1959 }
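
/* The psr_min_des watermark programmed above is a simple percentage of the
 * ring size.  For example, with psr_num_entries == 1024 (the non-jumbo
 * default further down in this file) and LO_MARK_PERCENT_FOR_PSR of, say,
 * 15 - the real value lives in et131x.h and may differ - the register would
 * be written with (1023 * 15) / 100 = 153 descriptors.
 */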
1960 
1961 /**
1962  * et131x_config_tx_dma_regs - Set up the tx dma section of the JAGCore.
1963  * @adapter: pointer to our private adapter structure
1964  *
1965  * Configure the transmit engine with the ring buffers we have created
1966  * and prepare it for use.
1967  */
1968 static void et131x_config_tx_dma_regs(struct et131x_adapter *adapter)
1969 {
1970 	struct txdma_regs __iomem *txdma = &adapter->regs->txdma;
1971 
1972 	/* Load the hardware with the start of the transmit descriptor ring. */
1973 	writel((u32) ((u64)adapter->tx_ring.tx_desc_ring_pa >> 32),
1974 	       &txdma->pr_base_hi);
1975 	writel((u32) adapter->tx_ring.tx_desc_ring_pa,
1976 	       &txdma->pr_base_lo);
1977 
1978 	/* Initialise the transmit DMA engine */
1979 	writel(NUM_DESC_PER_RING_TX - 1, &txdma->pr_num_des);
1980 
1981 	/* Load the completion writeback physical address */
1982 	writel((u32)((u64)adapter->tx_ring.tx_status_pa >> 32),
1983 						&txdma->dma_wb_base_hi);
1984 	writel((u32)adapter->tx_ring.tx_status_pa, &txdma->dma_wb_base_lo);
1985 
1986 	*adapter->tx_ring.tx_status = 0;
1987 
1988 	writel(0, &txdma->service_request);
1989 	adapter->tx_ring.send_idx = 0;
1990 }
1991 
1992 /**
1993  * et131x_adapter_setup - Set the adapter up as per cassini+ documentation
1994  * @adapter: pointer to our private adapter structure
1995  *
1996  * Configures the JAGCore, MAC, DMA engines and the PHY.
1997  */
1998 static void et131x_adapter_setup(struct et131x_adapter *adapter)
1999 {
2000 	/* Configure the JAGCore */
2001 	et131x_configure_global_regs(adapter);
2002 
2003 	et1310_config_mac_regs1(adapter);
2004 
2005 	/* Configure the MMC registers */
2006 	/* All we need to do is initialize the Memory Control Register */
2007 	writel(ET_MMC_ENABLE, &adapter->regs->mmc.mmc_ctrl);
2008 
2009 	et1310_config_rxmac_regs(adapter);
2010 	et1310_config_txmac_regs(adapter);
2011 
2012 	et131x_config_rx_dma_regs(adapter);
2013 	et131x_config_tx_dma_regs(adapter);
2014 
2015 	et1310_config_macstat_regs(adapter);
2016 
2017 	et1310_phy_power_down(adapter, 0);
2018 	et131x_xcvr_init(adapter);
2019 }
2020 
2021 /**
2022  * et131x_soft_reset - Issue a soft reset to the hardware, complete for ET1310
2023  * @adapter: pointer to our private adapter structure
2024  */
2025 static void et131x_soft_reset(struct et131x_adapter *adapter)
2026 {
2027 	/* Disable MAC Core */
2028 	writel(0xc00f0000, &adapter->regs->mac.cfg1);
2029 
2030 	/* Set everything to a reset value */
2031 	writel(0x7F, &adapter->regs->global.sw_reset);
2032 	writel(0x000f0000, &adapter->regs->mac.cfg1);
2033 	writel(0x00000000, &adapter->regs->mac.cfg1);
2034 }
2035 
2036 /**
2037  *	et131x_enable_interrupts	-	enable interrupt
2038  *	@adapter: et131x device
2039  *
2040  *	Enable the appropriate interrupts on the ET131x according to our
2041  *	configuration
2042  */
2043 static void et131x_enable_interrupts(struct et131x_adapter *adapter)
2044 {
2045 	u32 mask;
2046 
2047 	/* Enable all global interrupts */
2048 	if (adapter->flowcontrol == FLOW_TXONLY ||
2049 			    adapter->flowcontrol == FLOW_BOTH)
2050 		mask = INT_MASK_ENABLE;
2051 	else
2052 		mask = INT_MASK_ENABLE_NO_FLOW;
2053 
2054 	writel(mask, &adapter->regs->global.int_mask);
2055 }
2056 
2057 /**
2058  *	et131x_disable_interrupts	-	interrupt disable
2059  *	@adapter: et131x device
2060  *
2061  *	Block all interrupts from the et131x device at the device itself
2062  */
2063 static void et131x_disable_interrupts(struct et131x_adapter *adapter)
2064 {
2065 	/* Disable all global interrupts */
2066 	writel(INT_MASK_DISABLE, &adapter->regs->global.int_mask);
2067 }
2068 
2069 /**
2070  * et131x_tx_dma_disable - Stop Tx DMA on the ET1310
2071  * @adapter: pointer to our adapter structure
2072  */
2073 static void et131x_tx_dma_disable(struct et131x_adapter *adapter)
2074 {
2075 	/* Setup the transmit dma configuration register */
2076 	writel(ET_TXDMA_CSR_HALT|ET_TXDMA_SNGL_EPKT,
2077 					&adapter->regs->txdma.csr);
2078 }
2079 
2080 /**
2081  * et131x_enable_txrx - Enable tx/rx queues
2082  * @netdev: device to be enabled
2083  */
2084 static void et131x_enable_txrx(struct net_device *netdev)
2085 {
2086 	struct et131x_adapter *adapter = netdev_priv(netdev);
2087 
2088 	/* Enable the Tx and Rx DMA engines (if not already enabled) */
2089 	et131x_rx_dma_enable(adapter);
2090 	et131x_tx_dma_enable(adapter);
2091 
2092 	/* Enable device interrupts */
2093 	if (adapter->flags & fMP_ADAPTER_INTERRUPT_IN_USE)
2094 		et131x_enable_interrupts(adapter);
2095 
2096 	/* We're ready to move some data, so start the queue */
2097 	netif_start_queue(netdev);
2098 }
2099 
2100 /**
2101  * et131x_disable_txrx - Disable tx/rx queues
2102  * @netdev: device to be disabled
2103  */
2104 static void et131x_disable_txrx(struct net_device *netdev)
2105 {
2106 	struct et131x_adapter *adapter = netdev_priv(netdev);
2107 
2108 	/* First thing is to stop the queue */
2109 	netif_stop_queue(netdev);
2110 
2111 	/* Stop the Tx and Rx DMA engines */
2112 	et131x_rx_dma_disable(adapter);
2113 	et131x_tx_dma_disable(adapter);
2114 
2115 	/* Disable device interrupts */
2116 	et131x_disable_interrupts(adapter);
2117 }
2118 
2119 /**
2120  * et131x_init_send - Initialize send data structures
2121  * @adapter: pointer to our private adapter structure
2122  */
2123 static void et131x_init_send(struct et131x_adapter *adapter)
2124 {
2125 	struct tcb *tcb;
2126 	u32 ct;
2127 	struct tx_ring *tx_ring;
2128 
2129 	/* Setup some convenience pointers */
2130 	tx_ring = &adapter->tx_ring;
2131 	tcb = adapter->tx_ring.tcb_ring;
2132 
2133 	tx_ring->tcb_qhead = tcb;
2134 
2135 	memset(tcb, 0, sizeof(struct tcb) * NUM_TCB);
2136 
2137 	/* Go through and set up each TCB */
2138 	for (ct = 0; ct++ < NUM_TCB; tcb++)
2139 		/* Set the link pointer in HW TCB to the next TCB in the
2140 		 * chain
2141 		 */
2142 		tcb->next = tcb + 1;
2143 
2144 	/* Set the  tail pointer */
2145 	tcb--;
2146 	tx_ring->tcb_qtail = tcb;
2147 	tcb->next = NULL;
2148 	/* Curr send queue should now be empty */
2149 	tx_ring->send_head = NULL;
2150 	tx_ring->send_tail = NULL;
2151 }
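
/* After et131x_init_send() the TCB free list is a singly linked chain
 * threaded through the tcb_ring array.  With NUM_TCB == 64 (illustrative
 * only; the real value is in et131x.h) the layout is:
 *
 *	tcb_qhead -> tcb_ring[0] -> tcb_ring[1] -> ... -> tcb_ring[63] -> NULL
 *	tcb_qtail -> tcb_ring[63]
 *
 * while send_head/send_tail (the in-flight queue) start out empty.
 */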
2152 
2153 /**
2154  * et1310_enable_phy_coma - called when network cable is unplugged
2155  * @adapter: pointer to our adapter structure
2156  *
2157  * The driver receives a PHY status change interrupt while in D0 and checks
2158  * that phy_status is down.
2159  *
2160  *          -- gate off JAGCore;
2161  *          -- set gigE PHY in Coma mode
2162  *          -- wake on phy_interrupt; Perform software reset JAGCore,
2163  *             re-initialize jagcore and gigE PHY
2164  *
2165  *      Add D0-ASPM-PhyLinkDown Support:
2166  *          -- while in D0, when there is a phy_interrupt indicating phy link
2167  *             down status, call the MPSetPhyComa routine to enter this active
2168  *             state power saving mode
2169  *          -- while in D0-ASPM-PhyLinkDown mode, when there is a phy_interrupt
2170  *       indicating linkup status, call the MPDisablePhyComa routine to
2171  *             restore JAGCore and gigE PHY
2172  */
2173 static void et1310_enable_phy_coma(struct et131x_adapter *adapter)
2174 {
2175 	unsigned long flags;
2176 	u32 pmcsr;
2177 
2178 	pmcsr = readl(&adapter->regs->global.pm_csr);
2179 
2180 	/* Save the GbE PHY speed and duplex modes. Need to restore this
2181 	 * when cable is plugged back in
2182 	 */
2183 	/*
2184 	 * TODO - when PM is re-enabled, check if we need to
2185 	 * perform a similar task as this -
2186 	 * adapter->pdown_speed = adapter->ai_force_speed;
2187 	 * adapter->pdown_duplex = adapter->ai_force_duplex;
2188 	 */
2189 
2190 	/* Stop sending packets. */
2191 	spin_lock_irqsave(&adapter->send_hw_lock, flags);
2192 	adapter->flags |= fMP_ADAPTER_LOWER_POWER;
2193 	spin_unlock_irqrestore(&adapter->send_hw_lock, flags);
2194 
2195 	/* Wait for outstanding Receive packets */
2196 
2197 	et131x_disable_txrx(adapter->netdev);
2198 
2199 	/* Gate off JAGCore 3 clock domains */
2200 	pmcsr &= ~ET_PMCSR_INIT;
2201 	writel(pmcsr, &adapter->regs->global.pm_csr);
2202 
2203 	/* Program gigE PHY in to Coma mode */
2204 	pmcsr |= ET_PM_PHY_SW_COMA;
2205 	writel(pmcsr, &adapter->regs->global.pm_csr);
2206 }
2207 
2208 /**
2209  * et1310_disable_phy_coma - Disable the Phy Coma Mode
2210  * @adapter: pointer to our adapter structure
2211  */
2212 static void et1310_disable_phy_coma(struct et131x_adapter *adapter)
2213 {
2214 	u32 pmcsr;
2215 
2216 	pmcsr = readl(&adapter->regs->global.pm_csr);
2217 
2218 	/* Disable phy_sw_coma register and re-enable JAGCore clocks */
2219 	pmcsr |= ET_PMCSR_INIT;
2220 	pmcsr &= ~ET_PM_PHY_SW_COMA;
2221 	writel(pmcsr, &adapter->regs->global.pm_csr);
2222 
2223 	/* Restore the GbE PHY speed and duplex modes;
2224 	 * Reset JAGCore; re-configure and initialize JAGCore and gigE PHY
2225 	 */
2226 	/* TODO - when PM is re-enabled, check if we need to
2227 	 * perform a similar task as this -
2228 	 * adapter->ai_force_speed = adapter->pdown_speed;
2229 	 * adapter->ai_force_duplex = adapter->pdown_duplex;
2230 	 */
2231 
2232 	/* Re-initialize the send structures */
2233 	et131x_init_send(adapter);
2234 
2235 	/* Bring the device back to the state it was during init prior to
2236 	 * autonegotiation being complete.  This way, when we get the auto-neg
2237 	 * complete interrupt, we can complete init by calling ConfigMacREGS2.
2238 	 */
2239 	et131x_soft_reset(adapter);
2240 
2241 	/* setup et1310 as per the documentation ?? */
2242 	et131x_adapter_setup(adapter);
2243 
2244 	/* Allow Tx to restart */
2245 	adapter->flags &= ~fMP_ADAPTER_LOWER_POWER;
2246 
2247 	et131x_enable_txrx(adapter->netdev);
2248 }
2249 
2250 static inline u32 bump_free_buff_ring(u32 *free_buff_ring, u32 limit)
2251 {
2252 	u32 tmp_free_buff_ring = *free_buff_ring;
2253 	tmp_free_buff_ring++;
2254 	/* This works for all cases where limit < 1024. The 1023 case
2255 	   works because 1023++ is 1024 which means the if condition is not
2256 	   taken but the carry of the bit into the wrap bit toggles the wrap
2257 	   value correctly */
2258 	if ((tmp_free_buff_ring & ET_DMA10_MASK) > limit) {
2259 		tmp_free_buff_ring &= ~ET_DMA10_MASK;
2260 		tmp_free_buff_ring ^= ET_DMA10_WRAP;
2261 	}
2262 	/* For the 1023 case */
2263 	tmp_free_buff_ring &= (ET_DMA10_MASK|ET_DMA10_WRAP);
2264 	*free_buff_ring = tmp_free_buff_ring;
2265 	return tmp_free_buff_ring;
2266 }
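
/* A worked example of the 10-bit index plus wrap-bit arithmetic above,
 * assuming ET_DMA10_MASK == 0x3ff and ET_DMA10_WRAP == 0x400 (see et131x.h
 * for the authoritative values):
 *
 *	u32 idx = 0x1ff;			// entry 511, wrap bit clear
 *	bump_free_buff_ring(&idx, 511);		// idx is now 0x400:
 *						// entry 0, wrap bit set
 *
 * For a full-size ring (limit == 1023) the "if" is never taken; the
 * increment from 0x3ff to 0x400 carries straight into the wrap bit and the
 * final mask keeps only the low 11 bits, which is why the 1023 case still
 * toggles the wrap value correctly.
 */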
2267 
2268 /**
2269  * et131x_align_allocated_memory - Align allocated memory on a given boundary
2270  * @adapter: pointer to our adapter structure
2271  * @phys_addr: pointer to Physical address
2272  * @offset: pointer to the offset variable
2273  * @mask: correct mask
2274  */
2275 static void et131x_align_allocated_memory(struct et131x_adapter *adapter,
2276 					  u64 *phys_addr, u64 *offset,
2277 					  u64 mask)
2278 {
2279 	u64 new_addr = *phys_addr & ~mask;
2280 
2281 	*offset = 0;
2282 
2283 	if (new_addr != *phys_addr) {
2284 		/* Move to next aligned block */
2285 		new_addr += mask + 1;
2286 		/* Return offset for adjusting virt addr */
2287 		*offset = new_addr - *phys_addr;
2288 		/* Return new physical address */
2289 		*phys_addr = new_addr;
2290 	}
2291 }
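
/* Usage example for the helper above: aligning a DMA region to a 4 KiB
 * boundary (mask == 0x0fff), using a made-up address purely for
 * illustration:
 *
 *	u64 phys = 0x12345678, offset;
 *
 *	et131x_align_allocated_memory(adapter, &phys, &offset, 0x0fff);
 *	// phys   == 0x12346000	(next 4 KiB boundary)
 *	// offset == 0x988	(also added to the virtual address by callers)
 *
 * If the address is already aligned it is returned unchanged and offset is
 * set to 0.
 */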
2292 
2293 /**
2294  * et131x_rx_dma_memory_alloc
2295  * @adapter: pointer to our private adapter structure
2296  *
2297  * Returns 0 on success and errno on failure (as defined in errno.h)
2298  *
2299  * Allocates Free Buffer Ring 1, Free Buffer Ring 0 (if required),
2300  * and the Packet Status Ring.
2301  */
2302 static int et131x_rx_dma_memory_alloc(struct et131x_adapter *adapter)
2303 {
2304 	u32 i, j;
2305 	u32 bufsize;
2306 	u32 pktstat_ringsize, fbr_chunksize;
2307 	struct rx_ring *rx_ring;
2308 
2309 	/* Setup some convenience pointers */
2310 	rx_ring = &adapter->rx_ring;
2311 
2312 	/* Alloc memory for the lookup table */
2313 #ifdef USE_FBR0
2314 	rx_ring->fbr[1] = kmalloc(sizeof(struct fbr_lookup), GFP_KERNEL);
2315 #endif
2316 	rx_ring->fbr[0] = kmalloc(sizeof(struct fbr_lookup), GFP_KERNEL);
2317 
2318 	/* The first thing we will do is configure the sizes of the buffer
2319 	 * rings. These will change based on jumbo packet support.  Larger
2320 	 * jumbo packets increases the size of each entry in FBR0, and the
2321 	 * number of entries in FBR0, while at the same time decreasing the
2322 	 * number of entries in FBR1.
2323 	 *
2324 	 * FBR1 holds "large" frames, FBR0 holds "small" frames.  If FBR1
2325 	 * entries are huge in order to accommodate a "jumbo" frame, then it
2326 	 * will have fewer entries.  Conversely, FBR1 will now be relied upon
2327 	 * to carry more "normal" frames, thus its entry size also increases
2328 	 * and the number of entries goes up too (since it now carries
2329 	 * "small" + "regular" packets).
2330 	 *
2331 	 * In this scheme, we try to maintain 512 entries between the two
2332 	 * rings. Also, FBR1 remains a constant size - when its size doubles
2333 	 * the number of entries halves.  FBR0 increases in size, however.
2334 	 */
2335 
2336 	if (adapter->registry_jumbo_packet < 2048) {
2337 #ifdef USE_FBR0
2338 		rx_ring->fbr[1]->buffsize = 256;
2339 		rx_ring->fbr[1]->num_entries = 512;
2340 #endif
2341 		rx_ring->fbr[0]->buffsize = 2048;
2342 		rx_ring->fbr[0]->num_entries = 512;
2343 	} else if (adapter->registry_jumbo_packet < 4096) {
2344 #ifdef USE_FBR0
2345 		rx_ring->fbr[1]->buffsize = 512;
2346 		rx_ring->fbr[1]->num_entries = 1024;
2347 #endif
2348 		rx_ring->fbr[0]->buffsize = 4096;
2349 		rx_ring->fbr[0]->num_entries = 512;
2350 	} else {
2351 #ifdef USE_FBR0
2352 		rx_ring->fbr[1]->buffsize = 1024;
2353 		rx_ring->fbr[1]->num_entries = 768;
2354 #endif
2355 		rx_ring->fbr[0]->buffsize = 16384;
2356 		rx_ring->fbr[0]->num_entries = 128;
2357 	}
2358 
2359 #ifdef USE_FBR0
2360 	adapter->rx_ring.psr_num_entries =
2361 				adapter->rx_ring.fbr[1]->num_entries +
2362 				adapter->rx_ring.fbr[0]->num_entries;
2363 #else
2364 	adapter->rx_ring.psr_num_entries = adapter->rx_ring.fbr[0]->num_entries;
2365 #endif
2366 
2367 	/* Allocate an area of memory for Free Buffer Ring 1 */
2368 	bufsize = (sizeof(struct fbr_desc) * rx_ring->fbr[0]->num_entries) +
2369 									0xfff;
2370 	rx_ring->fbr[0]->ring_virtaddr = dma_alloc_coherent(&adapter->pdev->dev,
2371 					bufsize,
2372 					&rx_ring->fbr[0]->ring_physaddr,
2373 					GFP_KERNEL);
2374 	if (!rx_ring->fbr[0]->ring_virtaddr) {
2375 		dev_err(&adapter->pdev->dev,
2376 			  "Cannot alloc memory for Free Buffer Ring 1\n");
2377 		return -ENOMEM;
2378 	}
2379 
2380 	/* Save physical address
2381 	 *
2382 	 * NOTE: dma_alloc_coherent(), used above to alloc DMA regions,
2383 	 * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses
2384 	 * are ever returned, make sure the high part is retrieved here
2385 	 * before storing the adjusted address.
2386 	 */
2387 	rx_ring->fbr[0]->real_physaddr = rx_ring->fbr[0]->ring_physaddr;
2388 
2389 	/* Align Free Buffer Ring 1 on a 4K boundary */
2390 	et131x_align_allocated_memory(adapter,
2391 				      &rx_ring->fbr[0]->real_physaddr,
2392 				      &rx_ring->fbr[0]->offset, 0x0FFF);
2393 
2394 	rx_ring->fbr[0]->ring_virtaddr =
2395 			(void *)((u8 *) rx_ring->fbr[0]->ring_virtaddr +
2396 			rx_ring->fbr[0]->offset);
2397 
2398 #ifdef USE_FBR0
2399 	/* Allocate an area of memory for Free Buffer Ring 0 */
2400 	bufsize = (sizeof(struct fbr_desc) * rx_ring->fbr[1]->num_entries) +
2401 									0xfff;
2402 	rx_ring->fbr[1]->ring_virtaddr = dma_alloc_coherent(&adapter->pdev->dev,
2403 						bufsize,
2404 						&rx_ring->fbr[1]->ring_physaddr,
2405 						GFP_KERNEL);
2406 	if (!rx_ring->fbr[1]->ring_virtaddr) {
2407 		dev_err(&adapter->pdev->dev,
2408 			  "Cannot alloc memory for Free Buffer Ring 0\n");
2409 		return -ENOMEM;
2410 	}
2411 
2412 	/* Save physical address
2413 	 *
2414 	 * NOTE: dma_alloc_coherent(), used above to alloc DMA regions,
2415 	 * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses
2416 	 * are ever returned, make sure the high part is retrieved here before
2417 	 * storing the adjusted address.
2418 	 */
2419 	rx_ring->fbr[1]->real_physaddr = rx_ring->fbr[1]->ring_physaddr;
2420 
2421 	/* Align Free Buffer Ring 0 on a 4K boundary */
2422 	et131x_align_allocated_memory(adapter,
2423 				      &rx_ring->fbr[1]->real_physaddr,
2424 				      &rx_ring->fbr[1]->offset, 0x0FFF);
2425 
2426 	rx_ring->fbr[1]->ring_virtaddr =
2427 			(void *)((u8 *) rx_ring->fbr[1]->ring_virtaddr +
2428 			rx_ring->fbr[1]->offset);
2429 #endif
2430 	for (i = 0; i < (rx_ring->fbr[0]->num_entries / FBR_CHUNKS); i++) {
2431 		u64 fbr1_tmp_physaddr;
2432 		u64 fbr1_offset;
2433 		u32 fbr1_align;
2434 
2435 		/* This code allocates an area of memory big enough for N
2436 		 * free buffers + (buffer_size - 1) so that the buffers can
2437 		 * be aligned on 4k boundaries.  If each buffer were aligned
2438 		 * to a buffer_size boundary, the effect would be to double
2439 		 * the size of FBR0.  By allocating N buffers at once, we
2440 		 * reduce this overhead.
2441 		 */
2442 		if (rx_ring->fbr[0]->buffsize > 4096)
2443 			fbr1_align = 4096;
2444 		else
2445 			fbr1_align = rx_ring->fbr[0]->buffsize;
2446 
2447 		fbr_chunksize =
2448 		    (FBR_CHUNKS * rx_ring->fbr[0]->buffsize) + fbr1_align - 1;
2449 		rx_ring->fbr[0]->mem_virtaddrs[i] =
2450 		    dma_alloc_coherent(&adapter->pdev->dev, fbr_chunksize,
2451 				       &rx_ring->fbr[0]->mem_physaddrs[i],
2452 				       GFP_KERNEL);
2453 
2454 		if (!rx_ring->fbr[0]->mem_virtaddrs[i]) {
2455 			dev_err(&adapter->pdev->dev,
2456 				"Could not alloc memory\n");
2457 			return -ENOMEM;
2458 		}
2459 
2460 		/* See NOTE in "Save Physical Address" comment above */
2461 		fbr1_tmp_physaddr = rx_ring->fbr[0]->mem_physaddrs[i];
2462 
2463 		et131x_align_allocated_memory(adapter,
2464 					      &fbr1_tmp_physaddr,
2465 					      &fbr1_offset, (fbr1_align - 1));
2466 
2467 		for (j = 0; j < FBR_CHUNKS; j++) {
2468 			u32 index = (i * FBR_CHUNKS) + j;
2469 
2470 			/* Save the Virtual address of this index for quick
2471 			 * access later
2472 			 */
2473 			rx_ring->fbr[0]->virt[index] =
2474 			    (u8 *) rx_ring->fbr[0]->mem_virtaddrs[i] +
2475 			    (j * rx_ring->fbr[0]->buffsize) + fbr1_offset;
2476 
2477 			/* now store the physical address in the descriptor
2478 			 * so the device can access it
2479 			 */
2480 			rx_ring->fbr[0]->bus_high[index] =
2481 			    (u32) (fbr1_tmp_physaddr >> 32);
2482 			rx_ring->fbr[0]->bus_low[index] =
2483 			    (u32) fbr1_tmp_physaddr;
2484 
2485 			fbr1_tmp_physaddr += rx_ring->fbr[0]->buffsize;
2486 
2487 			rx_ring->fbr[0]->buffer1[index] =
2488 			    rx_ring->fbr[0]->virt[index];
2489 			rx_ring->fbr[0]->buffer2[index] =
2490 			    rx_ring->fbr[0]->virt[index] - 4;
2491 		}
2492 	}
2493 
2494 #ifdef USE_FBR0
2495 	/* Same for FBR0 (if in use) */
2496 	for (i = 0; i < (rx_ring->fbr[1]->num_entries / FBR_CHUNKS); i++) {
2497 		u64 fbr0_tmp_physaddr;
2498 		u64 fbr0_offset;
2499 
2500 		fbr_chunksize =
2501 		    ((FBR_CHUNKS + 1) * rx_ring->fbr[1]->buffsize) - 1;
2502 		rx_ring->fbr[1]->mem_virtaddrs[i] =
2503 		    dma_alloc_coherent(&adapter->pdev->dev, fbr_chunksize,
2504 				       &rx_ring->fbr[1]->mem_physaddrs[i],
2505 				       GFP_KERNEL);
2506 
2507 		if (!rx_ring->fbr[1]->mem_virtaddrs[i]) {
2508 			dev_err(&adapter->pdev->dev,
2509 				"Could not alloc memory\n");
2510 			return -ENOMEM;
2511 		}
2512 
2513 		/* See NOTE in "Save Physical Address" comment above */
2514 		fbr0_tmp_physaddr = rx_ring->fbr[1]->mem_physaddrs[i];
2515 
2516 		et131x_align_allocated_memory(adapter,
2517 					      &fbr0_tmp_physaddr,
2518 					      &fbr0_offset,
2519 					      rx_ring->fbr[1]->buffsize - 1);
2520 
2521 		for (j = 0; j < FBR_CHUNKS; j++) {
2522 			u32 index = (i * FBR_CHUNKS) + j;
2523 
2524 			rx_ring->fbr[1]->virt[index] =
2525 			    (u8 *) rx_ring->fbr[1]->mem_virtaddrs[i] +
2526 			    (j * rx_ring->fbr[1]->buffsize) + fbr0_offset;
2527 
2528 			rx_ring->fbr[1]->bus_high[index] =
2529 			    (u32) (fbr0_tmp_physaddr >> 32);
2530 			rx_ring->fbr[1]->bus_low[index] =
2531 			    (u32) fbr0_tmp_physaddr;
2532 
2533 			fbr0_tmp_physaddr += rx_ring->fbr[1]->buffsize;
2534 
2535 			rx_ring->fbr[1]->buffer1[index] =
2536 			    rx_ring->fbr[1]->virt[index];
2537 			rx_ring->fbr[1]->buffer2[index] =
2538 			    rx_ring->fbr[1]->virt[index] - 4;
2539 		}
2540 	}
2541 #endif
2542 
2543 	/* Allocate an area of memory for FIFO of Packet Status ring entries */
2544 	pktstat_ringsize =
2545 	    sizeof(struct pkt_stat_desc) * adapter->rx_ring.psr_num_entries;
2546 
2547 	rx_ring->ps_ring_virtaddr = dma_alloc_coherent(&adapter->pdev->dev,
2548 						  pktstat_ringsize,
2549 						  &rx_ring->ps_ring_physaddr,
2550 						  GFP_KERNEL);
2551 
2552 	if (!rx_ring->ps_ring_virtaddr) {
2553 		dev_err(&adapter->pdev->dev,
2554 			  "Cannot alloc memory for Packet Status Ring\n");
2555 		return -ENOMEM;
2556 	}
2557 	printk(KERN_INFO "Packet Status Ring %lx\n",
2558 	    (unsigned long) rx_ring->ps_ring_physaddr);
2559 
2560 	/*
2561 	 * NOTE : dma_alloc_coherent(), used above to alloc DMA regions,
2562 	 * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses
2563 	 * are ever returned, make sure the high part is retrieved here before
2564 	 * storing the adjusted address.
2565 	 */
2566 
2567 	/* Allocate an area of memory for writeback of status information */
2568 	rx_ring->rx_status_block = dma_alloc_coherent(&adapter->pdev->dev,
2569 					    sizeof(struct rx_status_block),
2570 					    &rx_ring->rx_status_bus,
2571 					    GFP_KERNEL);
2572 	if (!rx_ring->rx_status_block) {
2573 		dev_err(&adapter->pdev->dev,
2574 			  "Cannot alloc memory for Status Block\n");
2575 		return -ENOMEM;
2576 	}
2577 	rx_ring->num_rfd = NIC_DEFAULT_NUM_RFD;
2578 	printk(KERN_INFO "PRS %lx\n", (unsigned long)rx_ring->rx_status_bus);
2579 
2580 	/* Recv
2581 	 * kmem_cache_create initializes a lookaside list. After successful
2582 	 * creation, nonpaged fixed-size blocks can be allocated from and
2583 	 * freed to the lookaside list.
2584 	 * RFDs will be allocated from this pool.
2585 	 */
2586 	rx_ring->recv_lookaside = kmem_cache_create(adapter->netdev->name,
2587 						   sizeof(struct rfd),
2588 						   0,
2589 						   SLAB_CACHE_DMA |
2590 						   SLAB_HWCACHE_ALIGN,
2591 						   NULL);
2592 
2593 	adapter->flags |= fMP_ADAPTER_RECV_LOOKASIDE;
2594 
2595 	/* The RFDs are going to be put on lists later on, so initialize the
2596 	 * lists now.
2597 	 */
2598 	INIT_LIST_HEAD(&rx_ring->recv_list);
2599 	return 0;
2600 }
2601 
2602 /**
2603  * et131x_rx_dma_memory_free - Free all memory allocated within this module.
2604  * @adapter: pointer to our private adapter structure
2605  */
2606 static void et131x_rx_dma_memory_free(struct et131x_adapter *adapter)
2607 {
2608 	u32 index;
2609 	u32 bufsize;
2610 	u32 pktstat_ringsize;
2611 	struct rfd *rfd;
2612 	struct rx_ring *rx_ring;
2613 
2614 	/* Setup some convenience pointers */
2615 	rx_ring = &adapter->rx_ring;
2616 
2617 	/* Free RFDs and associated packet descriptors */
2618 	WARN_ON(rx_ring->num_ready_recv != rx_ring->num_rfd);
2619 
2620 	while (!list_empty(&rx_ring->recv_list)) {
2621 		rfd = (struct rfd *) list_entry(rx_ring->recv_list.next,
2622 				struct rfd, list_node);
2623 
2624 		list_del(&rfd->list_node);
2625 		rfd->skb = NULL;
2626 		kmem_cache_free(adapter->rx_ring.recv_lookaside, rfd);
2627 	}
2628 
2629 	/* Free Free Buffer Ring 1 */
2630 	if (rx_ring->fbr[0]->ring_virtaddr) {
2631 		/* First the packet memory */
2632 		for (index = 0; index <
2633 		     (rx_ring->fbr[0]->num_entries / FBR_CHUNKS); index++) {
2634 			if (rx_ring->fbr[0]->mem_virtaddrs[index]) {
2635 				u32 fbr1_align;
2636 
2637 				if (rx_ring->fbr[0]->buffsize > 4096)
2638 					fbr1_align = 4096;
2639 				else
2640 					fbr1_align = rx_ring->fbr[0]->buffsize;
2641 
2642 				bufsize =
2643 				    (rx_ring->fbr[0]->buffsize * FBR_CHUNKS) +
2644 				    fbr1_align - 1;
2645 
2646 				dma_free_coherent(&adapter->pdev->dev,
2647 					bufsize,
2648 					rx_ring->fbr[0]->mem_virtaddrs[index],
2649 					rx_ring->fbr[0]->mem_physaddrs[index]);
2650 
2651 				rx_ring->fbr[0]->mem_virtaddrs[index] = NULL;
2652 			}
2653 		}
2654 
2655 		/* Now the FIFO itself */
2656 		rx_ring->fbr[0]->ring_virtaddr = (void *)((u8 *)
2657 		    rx_ring->fbr[0]->ring_virtaddr - rx_ring->fbr[0]->offset);
2658 
2659 		bufsize =
2660 		    (sizeof(struct fbr_desc) * rx_ring->fbr[0]->num_entries) +
2661 									0xfff;
2662 
2663 		dma_free_coherent(&adapter->pdev->dev, bufsize,
2664 				    rx_ring->fbr[0]->ring_virtaddr,
2665 				    rx_ring->fbr[0]->ring_physaddr);
2666 
2667 		rx_ring->fbr[0]->ring_virtaddr = NULL;
2668 	}
2669 
2670 #ifdef USE_FBR0
2671 	/* Now the same for Free Buffer Ring 0 */
2672 	if (rx_ring->fbr[1]->ring_virtaddr) {
2673 		/* First the packet memory */
2674 		for (index = 0; index <
2675 		     (rx_ring->fbr[1]->num_entries / FBR_CHUNKS); index++) {
2676 			if (rx_ring->fbr[1]->mem_virtaddrs[index]) {
2677 				bufsize =
2678 				    (rx_ring->fbr[1]->buffsize *
2679 				     (FBR_CHUNKS + 1)) - 1;
2680 
2681 				dma_free_coherent(&adapter->pdev->dev,
2682 					bufsize,
2683 					rx_ring->fbr[1]->mem_virtaddrs[index],
2684 					rx_ring->fbr[1]->mem_physaddrs[index]);
2685 
2686 				rx_ring->fbr[1]->mem_virtaddrs[index] = NULL;
2687 			}
2688 		}
2689 
2690 		/* Now the FIFO itself */
2691 		rx_ring->fbr[1]->ring_virtaddr = (void *)((u8 *)
2692 		    rx_ring->fbr[1]->ring_virtaddr - rx_ring->fbr[1]->offset);
2693 
2694 		bufsize =
2695 		    (sizeof(struct fbr_desc) * rx_ring->fbr[1]->num_entries) +
2696 									0xfff;
2697 
2698 		dma_free_coherent(&adapter->pdev->dev,
2699 				  bufsize,
2700 				  rx_ring->fbr[1]->ring_virtaddr,
2701 				  rx_ring->fbr[1]->ring_physaddr);
2702 
2703 		rx_ring->fbr[1]->ring_virtaddr = NULL;
2704 	}
2705 #endif
2706 
2707 	/* Free Packet Status Ring */
2708 	if (rx_ring->ps_ring_virtaddr) {
2709 		pktstat_ringsize =
2710 		    sizeof(struct pkt_stat_desc) *
2711 		    adapter->rx_ring.psr_num_entries;
2712 
2713 		dma_free_coherent(&adapter->pdev->dev, pktstat_ringsize,
2714 				    rx_ring->ps_ring_virtaddr,
2715 				    rx_ring->ps_ring_physaddr);
2716 
2717 		rx_ring->ps_ring_virtaddr = NULL;
2718 	}
2719 
2720 	/* Free area of memory for the writeback of status information */
2721 	if (rx_ring->rx_status_block) {
2722 		dma_free_coherent(&adapter->pdev->dev,
2723 			sizeof(struct rx_status_block),
2724 			rx_ring->rx_status_block, rx_ring->rx_status_bus);
2725 		rx_ring->rx_status_block = NULL;
2726 	}
2727 
2728 	/* Destroy the lookaside (RFD) pool */
2729 	if (adapter->flags & fMP_ADAPTER_RECV_LOOKASIDE) {
2730 		kmem_cache_destroy(rx_ring->recv_lookaside);
2731 		adapter->flags &= ~fMP_ADAPTER_RECV_LOOKASIDE;
2732 	}
2733 
2734 	/* Free the FBR Lookup Table */
2735 #ifdef USE_FBR0
2736 	kfree(rx_ring->fbr[1]);
2737 #endif
2738 
2739 	kfree(rx_ring->fbr[0]);
2740 
2741 	/* Reset Counters */
2742 	rx_ring->num_ready_recv = 0;
2743 }
2744 
2745 /**
2746  * et131x_init_recv - Initialize receive data structures.
2747  * @adapter: pointer to our private adapter structure
2748  *
2749  * Returns 0 on success and errno on failure (as defined in errno.h)
2750  */
2751 static int et131x_init_recv(struct et131x_adapter *adapter)
2752 {
2753 	int status = -ENOMEM;
2754 	struct rfd *rfd = NULL;
2755 	u32 rfdct;
2756 	u32 numrfd = 0;
2757 	struct rx_ring *rx_ring;
2758 
2759 	/* Setup some convenience pointers */
2760 	rx_ring = &adapter->rx_ring;
2761 
2762 	/* Setup each RFD */
2763 	for (rfdct = 0; rfdct < rx_ring->num_rfd; rfdct++) {
2764 		rfd = kmem_cache_alloc(rx_ring->recv_lookaside,
2765 						     GFP_ATOMIC | GFP_DMA);
2766 
2767 		if (!rfd) {
2768 			dev_err(&adapter->pdev->dev,
2769 				  "Couldn't alloc RFD out of kmem_cache\n");
2770 			status = -ENOMEM;
2771 			continue;
2772 		}
2773 
2774 		rfd->skb = NULL;
2775 
2776 		/* Add this RFD to the recv_list */
2777 		list_add_tail(&rfd->list_node, &rx_ring->recv_list);
2778 
2779 		/* Increment both the available RFD's, and the total RFD's. */
2780 		rx_ring->num_ready_recv++;
2781 		numrfd++;
2782 	}
2783 
2784 	if (numrfd > NIC_MIN_NUM_RFD)
2785 		status = 0;
2786 
2787 	rx_ring->num_rfd = numrfd;
2788 
2789 	if (status != 0) {
2790 		kmem_cache_free(rx_ring->recv_lookaside, rfd);
2791 		dev_err(&adapter->pdev->dev,
2792 			  "Allocation problems in et131x_init_recv\n");
2793 	}
2794 	return status;
2795 }
2796 
2797 /**
2798  * et131x_set_rx_dma_timer - Set the heartbeat timer according to line rate.
2799  * @adapter: pointer to our adapter structure
2800  */
2801 static void et131x_set_rx_dma_timer(struct et131x_adapter *adapter)
2802 {
2803 	struct phy_device *phydev = adapter->phydev;
2804 
2805 	if (!phydev)
2806 		return;
2807 
2808 	/* For version B silicon, we do not use the RxDMA timer for 10 and 100
2809 	 * Mbits/s line rates. We do not enable RxDMA interrupt coalescing.
2810 	 */
2811 	if ((phydev->speed == SPEED_100) || (phydev->speed == SPEED_10)) {
2812 		writel(0, &adapter->regs->rxdma.max_pkt_time);
2813 		writel(1, &adapter->regs->rxdma.num_pkt_done);
2814 	}
2815 }
2816 
2817 /**
2818  * nic_return_rfd - Recycle a RFD and put it back onto the receive list
2819  * @adapter: pointer to our adapter
2820  * @rfd: pointer to the RFD
2821  */
2822 static void nic_return_rfd(struct et131x_adapter *adapter, struct rfd *rfd)
2823 {
2824 	struct rx_ring *rx_local = &adapter->rx_ring;
2825 	struct rxdma_regs __iomem *rx_dma = &adapter->regs->rxdma;
2826 	u16 buff_index = rfd->bufferindex;
2827 	u8 ring_index = rfd->ringindex;
2828 	unsigned long flags;
2829 
2830 	/* We don't use any of the OOB data besides status. Otherwise, we
2831 	 * need to clean up OOB data
2832 	 */
2833 	if (
2834 #ifdef USE_FBR0
2835 	    (ring_index == 0 && buff_index < rx_local->fbr[1]->num_entries) ||
2836 #endif
2837 	    (ring_index == 1 && buff_index < rx_local->fbr[0]->num_entries)) {
2838 		spin_lock_irqsave(&adapter->fbr_lock, flags);
2839 
2840 		if (ring_index == 1) {
2841 			struct fbr_desc *next = (struct fbr_desc *)
2842 					(rx_local->fbr[0]->ring_virtaddr) +
2843 					INDEX10(rx_local->fbr[0]->local_full);
2844 
2845 			/* Handle the Free Buffer Ring advancement here. Write
2846 			 * the PA / Buffer Index for the returned buffer into
2847 			 * the oldest (next to be freed) FBR entry
2848 			 */
2849 			next->addr_hi = rx_local->fbr[0]->bus_high[buff_index];
2850 			next->addr_lo = rx_local->fbr[0]->bus_low[buff_index];
2851 			next->word2 = buff_index;
2852 
2853 			writel(bump_free_buff_ring(
2854 					&rx_local->fbr[0]->local_full,
2855 					rx_local->fbr[0]->num_entries - 1),
2856 					&rx_dma->fbr1_full_offset);
2857 		}
2858 #ifdef USE_FBR0
2859 		else {
2860 			struct fbr_desc *next = (struct fbr_desc *)
2861 				rx_local->fbr[1]->ring_virtaddr +
2862 				    INDEX10(rx_local->fbr[1]->local_full);
2863 
2864 			/* Handle the Free Buffer Ring advancement here. Write
2865 			 * the PA / Buffer Index for the returned buffer into
2866 			 * the oldest (next to be freed) FBR entry
2867 			 */
2868 			next->addr_hi = rx_local->fbr[1]->bus_high[buff_index];
2869 			next->addr_lo = rx_local->fbr[1]->bus_low[buff_index];
2870 			next->word2 = buff_index;
2871 
2872 			writel(bump_free_buff_ring(
2873 					&rx_local->fbr[1]->local_full,
2874 					rx_local->fbr[1]->num_entries - 1),
2875 			       &rx_dma->fbr0_full_offset);
2876 		}
2877 #endif
2878 		spin_unlock_irqrestore(&adapter->fbr_lock, flags);
2879 	} else {
2880 		dev_err(&adapter->pdev->dev,
2881 			  "%s illegal Buffer Index returned\n", __func__);
2882 	}
2883 
2884 	/* The processing on this RFD is done, so put it back on the tail of
2885 	 * our list
2886 	 */
2887 	spin_lock_irqsave(&adapter->rcv_lock, flags);
2888 	list_add_tail(&rfd->list_node, &rx_local->recv_list);
2889 	rx_local->num_ready_recv++;
2890 	spin_unlock_irqrestore(&adapter->rcv_lock, flags);
2891 
2892 	WARN_ON(rx_local->num_ready_recv > rx_local->num_rfd);
2893 }
2894 
2895 /**
2896  * nic_rx_pkts - Checks the hardware for available packets
2897  * @adapter: pointer to our adapter
2898  *
2899  * Returns rfd, a pointer to our MPRFD.
2900  *
2901  * Checks the hardware for available packets, using completion ring
2902  * If packets are available, it gets an RFD from the recv_list, attaches
2903  * the packet to it, puts the RFD in the RecvPendList, and also returns
2904  * the pointer to the RFD.
2905  */
2906 static struct rfd *nic_rx_pkts(struct et131x_adapter *adapter)
2907 {
2908 	struct rx_ring *rx_local = &adapter->rx_ring;
2909 	struct rx_status_block *status;
2910 	struct pkt_stat_desc *psr;
2911 	struct rfd *rfd;
2912 	u32 i;
2913 	u8 *buf;
2914 	unsigned long flags;
2915 	struct list_head *element;
2916 	u8 ring_index;
2917 	u16 buff_index;
2918 	u32 len;
2919 	u32 word0;
2920 	u32 word1;
2921 
2922 	/* RX Status block is written by the DMA engine prior to every
2923 	 * interrupt. It contains the next to be used entry in the Packet
2924 	 * Status Ring, and also the two Free Buffer rings.
2925 	 */
2926 	status = rx_local->rx_status_block;
2927 	word1 = status->word1 >> 16;	/* Get the useful bits */
2928 
2929 	/* Check that the hardware's PSR index and wrap bit differ from ours */
2930 	if ((word1 & 0x1FFF) == (rx_local->local_psr_full & 0x1FFF))
2931 		/* Looks like this ring is not updated yet */
2932 		return NULL;
2933 
2934 	/* The packet status ring indicates that data is available. */
2935 	psr = (struct pkt_stat_desc *) (rx_local->ps_ring_virtaddr) +
2936 			(rx_local->local_psr_full & 0xFFF);
2937 
2938 	/* Grab any information that is required once the PSR is
2939 	 * advanced, since we can no longer rely on the memory being
2940 	 * accurate
2941 	 */
2942 	len = psr->word1 & 0xFFFF;
2943 	ring_index = (psr->word1 >> 26) & 0x03;
2944 	buff_index = (psr->word1 >> 16) & 0x3FF;
2945 	word0 = psr->word0;
2946 
2947 	/* Indicate that we have used this PSR entry. */
2948 	/* FIXME wrap 12 */
2949 	add_12bit(&rx_local->local_psr_full, 1);
2950 	if (
2951 	  (rx_local->local_psr_full & 0xFFF) > rx_local->psr_num_entries - 1) {
2952 		/* Clear psr full and toggle the wrap bit */
2953 		rx_local->local_psr_full &=  ~0xFFF;
2954 		rx_local->local_psr_full ^= 0x1000;
2955 	}
2956 
2957 	writel(rx_local->local_psr_full,
2958 	       &adapter->regs->rxdma.psr_full_offset);
2959 
2960 #ifndef USE_FBR0
2961 	if (ring_index != 1)
2962 		return NULL;
2963 #endif
2964 
2965 #ifdef USE_FBR0
2966 	if (ring_index > 1 ||
2967 		(ring_index == 0 &&
2968 		buff_index > rx_local->fbr[1]->num_entries - 1) ||
2969 		(ring_index == 1 &&
2970 		buff_index > rx_local->fbr[0]->num_entries - 1))
2971 #else
2972 	if (ring_index != 1 || buff_index > rx_local->fbr[0]->num_entries - 1)
2973 #endif
2974 	{
2975 		/* Illegal buffer or ring index cannot be used by S/W */
2976 		dev_err(&adapter->pdev->dev,
2977 			  "NICRxPkts PSR Entry %d indicates "
2978 			  "length of %d and/or bad bi(%d)\n",
2979 			  rx_local->local_psr_full & 0xFFF,
2980 			  len, buff_index);
2981 		return NULL;
2982 	}
2983 
2984 	/* Get and fill the RFD. */
2985 	spin_lock_irqsave(&adapter->rcv_lock, flags);
2986 
2987 	rfd = NULL;
2988 	element = rx_local->recv_list.next;
2989 	rfd = (struct rfd *) list_entry(element, struct rfd, list_node);
2990 
2991 	if (rfd == NULL) {
2992 		spin_unlock_irqrestore(&adapter->rcv_lock, flags);
2993 		return NULL;
2994 	}
2995 
2996 	list_del(&rfd->list_node);
2997 	rx_local->num_ready_recv--;
2998 
2999 	spin_unlock_irqrestore(&adapter->rcv_lock, flags);
3000 
3001 	rfd->bufferindex = buff_index;
3002 	rfd->ringindex = ring_index;
3003 
3004 	/* In V1 silicon, there is a bug which screws up filtering of
3005 	 * runt packets.  Therefore runt packet filtering is disabled
3006 	 * in the MAC and the packets are dropped here.  They are
3007 	 * also counted here.
3008 	 */
3009 	if (len < (NIC_MIN_PACKET_SIZE + 4)) {
3010 		adapter->stats.rx_other_errs++;
3011 		len = 0;
3012 	}
3013 
3014 	if (len) {
3015 		/* Determine if this is a multicast packet coming in */
3016 		if ((word0 & ALCATEL_MULTICAST_PKT) &&
3017 		    !(word0 & ALCATEL_BROADCAST_PKT)) {
3018 			/* Promiscuous mode and Multicast mode are
3019 			 * not mutually exclusive as was first
3020 			 * thought.  I guess Promiscuous is just
3021 			 * considered a super-set of the other
3022 			 * filters. Generally filter is 0x2b when in
3023 			 * promiscuous mode.
3024 			 */
3025 			if ((adapter->packet_filter &
3026 					ET131X_PACKET_TYPE_MULTICAST)
3027 			    && !(adapter->packet_filter &
3028 					ET131X_PACKET_TYPE_PROMISCUOUS)
3029 			    && !(adapter->packet_filter &
3030 					ET131X_PACKET_TYPE_ALL_MULTICAST)) {
3031 				/*
3032 				 * Note - ring_index for fbr[] array is reversed
3033 				 * 1 for FBR0 etc
3034 				 */
3035 				buf = rx_local->fbr[(ring_index == 0 ? 1 : 0)]->
3036 						virt[buff_index];
3037 
3038 				/* Loop through our list to see if the
3039 				 * destination address of this packet
3040 				 * matches one in our list.
3041 				 */
3042 				for (i = 0; i < adapter->multicast_addr_count;
3043 				     i++) {
3044 					if (buf[0] ==
3045 						adapter->multicast_list[i][0]
3046 					    && buf[1] ==
3047 						adapter->multicast_list[i][1]
3048 					    && buf[2] ==
3049 						adapter->multicast_list[i][2]
3050 					    && buf[3] ==
3051 						adapter->multicast_list[i][3]
3052 					    && buf[4] ==
3053 						adapter->multicast_list[i][4]
3054 					    && buf[5] ==
3055 						adapter->multicast_list[i][5]) {
3056 						break;
3057 					}
3058 				}
3059 
3060 				/* If our index is equal to the number
3061 				 * of Multicast address we have, then
3062 				 * this means we did not find this
3063 				 * packet's matching address in our
3064 				 * list.  Set the len to zero,
3065 				 * so we free our RFD when we return
3066 				 * from this function.
3067 				 */
3068 				if (i == adapter->multicast_addr_count)
3069 					len = 0;
3070 			}
3071 
3072 			if (len > 0)
3073 				adapter->stats.multicast_pkts_rcvd++;
3074 		} else if (word0 & ALCATEL_BROADCAST_PKT)
3075 			adapter->stats.broadcast_pkts_rcvd++;
3076 		else
3077 			/* Not sure what this counter measures in
3078 			 * promiscuous mode. Perhaps we should check
3079 			 * the MAC address to see if it is directed
3080 			 * to us in promiscuous mode.
3081 			 */
3082 			adapter->stats.unicast_pkts_rcvd++;
3083 	}
3084 
3085 	if (len > 0) {
3086 		struct sk_buff *skb = NULL;
3087 
3088 		/*rfd->len = len - 4; */
3089 		rfd->len = len;
3090 
3091 		skb = dev_alloc_skb(rfd->len + 2);
3092 		if (!skb) {
3093 			dev_err(&adapter->pdev->dev,
3094 				  "Couldn't alloc an SKB for Rx\n");
3095 			return NULL;
3096 		}
3097 
3098 		adapter->net_stats.rx_bytes += rfd->len;
3099 
3100 		/*
3101 		 * Note - ring_index for fbr[] array is reversed,
3102 		 * 1 for FBR0 etc
3103 		 */
3104 		memcpy(skb_put(skb, rfd->len),
3105 		    rx_local->fbr[(ring_index == 0 ? 1 : 0)]->virt[buff_index],
3106 		    rfd->len);
3107 
3108 		skb->dev = adapter->netdev;
3109 		skb->protocol = eth_type_trans(skb, adapter->netdev);
3110 		skb->ip_summed = CHECKSUM_NONE;
3111 
3112 		netif_rx(skb);
3113 	} else {
3114 		rfd->len = 0;
3115 	}
3116 
3117 	nic_return_rfd(adapter, rfd);
3118 	return rfd;
3119 }
3120 
3121 /**
3122  * et131x_handle_recv_interrupt - Interrupt handler for receive processing
3123  * @adapter: pointer to our adapter
3124  *
3125  * Assumes the Rcv spinlock has been acquired.
3126  */
3127 static void et131x_handle_recv_interrupt(struct et131x_adapter *adapter)
3128 {
3129 	struct rfd *rfd = NULL;
3130 	u32 count = 0;
3131 	bool done = true;
3132 
3133 	/* Process up to available RFD's */
3134 	while (count < NUM_PACKETS_HANDLED) {
3135 		if (list_empty(&adapter->rx_ring.recv_list)) {
3136 			WARN_ON(adapter->rx_ring.num_ready_recv != 0);
3137 			done = false;
3138 			break;
3139 		}
3140 
3141 		rfd = nic_rx_pkts(adapter);
3142 
3143 		if (rfd == NULL)
3144 			break;
3145 
3146 		/* Do not receive any packets until a filter has been set.
3147 		 * Do not receive any packets until we have link.
3148 		 * If length is zero, return the RFD in order to advance the
3149 		 * Free buffer ring.
3150 		 */
3151 		if (!adapter->packet_filter ||
3152 		    !netif_carrier_ok(adapter->netdev) ||
3153 		    rfd->len == 0)
3154 			continue;
3155 
3156 		/* Increment the number of packets we received */
3157 		adapter->net_stats.rx_packets++;
3158 
3159 		/* Set the status on the packet, either resources or success */
3160 		if (adapter->rx_ring.num_ready_recv < RFD_LOW_WATER_MARK) {
3161 			dev_warn(&adapter->pdev->dev,
3162 				    "RFD's are running out\n");
3163 		}
3164 		count++;
3165 	}
3166 
3167 	if (count == NUM_PACKETS_HANDLED || !done) {
3168 		adapter->rx_ring.unfinished_receives = true;
3169 		writel(PARM_TX_TIME_INT_DEF * NANO_IN_A_MICRO,
3170 		       &adapter->regs->global.watchdog_timer);
3171 	} else
3172 		/* Watchdog timer will disable itself if appropriate. */
3173 		adapter->rx_ring.unfinished_receives = false;
3174 }
3175 
3176 /**
3177  * et131x_tx_dma_memory_alloc
3178  * @adapter: pointer to our private adapter structure
3179  *
3180  * Returns 0 on success and errno on failure (as defined in errno.h).
3181  *
3182  * Allocates memory that will be visible both to the device and to the CPU.
3183  * The OS will pass us packets, pointers to which we will insert in the Tx
3184  * Descriptor queue. The device will read this queue to find the packets in
3185  * memory. The device will update the "status" in memory each time it xmits a
3186  * packet.
3187  */
3188 static int et131x_tx_dma_memory_alloc(struct et131x_adapter *adapter)
3189 {
3190 	int desc_size = 0;
3191 	struct tx_ring *tx_ring = &adapter->tx_ring;
3192 
3193 	/* Allocate memory for the TCB's (Transmit Control Block) */
3194 	adapter->tx_ring.tcb_ring =
3195 		kcalloc(NUM_TCB, sizeof(struct tcb), GFP_ATOMIC | GFP_DMA);
3196 	if (!adapter->tx_ring.tcb_ring) {
3197 		dev_err(&adapter->pdev->dev, "Cannot alloc memory for TCBs\n");
3198 		return -ENOMEM;
3199 	}
3200 
3201 	/* Allocate enough memory for the Tx descriptor ring, and allocate
3202 	 * some extra so that the ring can be aligned on a 4k boundary.
3203 	 */
3204 	desc_size = (sizeof(struct tx_desc) * NUM_DESC_PER_RING_TX) + 4096 - 1;
3205 	tx_ring->tx_desc_ring =
3206 	    (struct tx_desc *) dma_alloc_coherent(&adapter->pdev->dev,
3207 						  desc_size,
3208 						  &tx_ring->tx_desc_ring_pa,
3209 						  GFP_KERNEL);
3210 	if (!adapter->tx_ring.tx_desc_ring) {
3211 		dev_err(&adapter->pdev->dev,
3212 			"Cannot alloc memory for Tx Ring\n");
3213 		return -ENOMEM;
3214 	}
3215 
3216 	/* Save physical address
3217 	 *
3218 	 * NOTE: dma_alloc_coherent(), used above to alloc DMA regions,
3219 	 * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses
3220 	 * are ever returned, make sure the high part is retrieved here before
3221 	 * storing the adjusted address.
3222 	 */
3223 	/* Allocate memory for the Tx status block */
3224 	tx_ring->tx_status = dma_alloc_coherent(&adapter->pdev->dev,
3225 						    sizeof(u32),
3226 						    &tx_ring->tx_status_pa,
3227 						    GFP_KERNEL);
3228 	if (!adapter->tx_ring.tx_status) {
3229 		dev_err(&adapter->pdev->dev,
3230 				  "Cannot alloc memory for Tx status block\n");
3231 		return -ENOMEM;
3232 	}
3233 	return 0;
3234 }
3235 
3236 /**
3237  * et131x_tx_dma_memory_free - Free all memory allocated within this module
3238  * @adapter: pointer to our private adapter structure
3239  *
3240  * Frees the Tx descriptor ring, the Tx status block and the TCB array.
3241  */
3242 static void et131x_tx_dma_memory_free(struct et131x_adapter *adapter)
3243 {
3244 	int desc_size = 0;
3245 
3246 	if (adapter->tx_ring.tx_desc_ring) {
3247 		/* Free memory relating to Tx rings here */
3248 		desc_size = (sizeof(struct tx_desc) * NUM_DESC_PER_RING_TX)
3249 								+ 4096 - 1;
3250 		dma_free_coherent(&adapter->pdev->dev,
3251 				    desc_size,
3252 				    adapter->tx_ring.tx_desc_ring,
3253 				    adapter->tx_ring.tx_desc_ring_pa);
3254 		adapter->tx_ring.tx_desc_ring = NULL;
3255 	}
3256 
3257 	/* Free memory for the Tx status block */
3258 	if (adapter->tx_ring.tx_status) {
3259 		dma_free_coherent(&adapter->pdev->dev,
3260 				    sizeof(u32),
3261 				    adapter->tx_ring.tx_status,
3262 				    adapter->tx_ring.tx_status_pa);
3263 
3264 		adapter->tx_ring.tx_status = NULL;
3265 	}
3266 	/* Free the memory for the tcb structures */
3267 	kfree(adapter->tx_ring.tcb_ring);
3268 }
3269 
3270 /**
3271  * nic_send_packet - NIC specific send handler for version B silicon.
3272  * @adapter: pointer to our adapter
3273  * @tcb: pointer to struct tcb
3274  *
3275  * Returns 0 or errno.
3276  */
3277 static int nic_send_packet(struct et131x_adapter *adapter, struct tcb *tcb)
3278 {
3279 	u32 i;
3280 	struct tx_desc desc[24];	/* 24 x 16 byte */
3281 	u32 frag = 0;
3282 	u32 thiscopy, remainder;
3283 	struct sk_buff *skb = tcb->skb;
3284 	u32 nr_frags = skb_shinfo(skb)->nr_frags + 1;
3285 	struct skb_frag_struct *frags = &skb_shinfo(skb)->frags[0];
3286 	unsigned long flags;
3287 	struct phy_device *phydev = adapter->phydev;
3288 
3289 	/* Part of the optimizations of this send routine restrict us to
3290 	 * sending 24 fragments at a pass.  In practice we should never see
3291 	 * more than 5 fragments.
3292 	 *
3293 	 * NOTE: The older version of this function (below) can handle any
3294 	 * number of fragments. If needed, we can call this function,
3295 	 * although it is less efficient.
3296 	 */
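	/* The desc[] array above holds 24 entries and the first (linear)
	 * part of the skb may be split across two descriptors below, hence
	 * the limit of 23 fragments per packet.
	 */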
3297 	if (nr_frags > 23)
3298 		return -EIO;
3299 
3300 	memset(desc, 0, sizeof(struct tx_desc) * (nr_frags + 1));
3301 
3302 	for (i = 0; i < nr_frags; i++) {
3303 		/* If there is something in this element, lets get a
3304 		 * descriptor from the ring and get the necessary data
3305 		 */
3306 		if (i == 0) {
3307 			/* If the fragments are smaller than a standard MTU,
3308 			 * then map them to a single descriptor in the Tx
3309 			 * Desc ring. However, if they're larger, as is
3310 			 * possible with support for jumbo packets, then
3311 			 * split them each across 2 descriptors.
3312 			 *
3313 			 * This will work until we determine why the hardware
3314 			 * doesn't seem to like large fragments.
3315 			 */
3316 			if ((skb->len - skb->data_len) <= 1514) {
3317 				desc[frag].addr_hi = 0;
3318 				/* Low 16bits are length, high is vlan and
3319 				   unused currently so zero */
3320 				desc[frag].len_vlan =
3321 					skb->len - skb->data_len;
3322 
3323 				/* NOTE: Here, the dma_addr_t returned from
3324 				 * dma_map_single() is implicitly cast as a
3325 				 * u32. Although dma_addr_t can be
3326 				 * 64-bit, the address returned by
3327 				 * dma_map_single() is always 32-bit
3328 				 * addressable (as defined by the pci/dma
3329 				 * subsystem)
3330 				 */
3331 				desc[frag++].addr_lo =
3332 				    dma_map_single(&adapter->pdev->dev,
3333 						   skb->data,
3334 						   skb->len -
3335 						   skb->data_len,
3336 						   DMA_TO_DEVICE);
3337 			} else {
3338 				desc[frag].addr_hi = 0;
3339 				desc[frag].len_vlan =
3340 				    (skb->len - skb->data_len) / 2;
3341 
3342 				/* NOTE: Here, the dma_addr_t returned from
3343 				 * dma_map_single() is implicitly cast as a
3344 				 * u32. Although dma_addr_t can be
3345 				 * 64-bit, the address returned by
3346 				 * dma_map_single() is always 32-bit
3347 				 * addressable (as defined by the pci/dma
3348 				 * subsystem)
3349 				 */
3350 				desc[frag++].addr_lo =
3351 				    dma_map_single(&adapter->pdev->dev,
3352 						   skb->data,
3353 						   ((skb->len -
3354 						     skb->data_len) / 2),
3355 						   DMA_TO_DEVICE);
3356 				desc[frag].addr_hi = 0;
3357 
3358 				desc[frag].len_vlan =
3359 				    (skb->len - skb->data_len) / 2;
3360 
3361 				/* NOTE: Here, the dma_addr_t returned from
3362 				 * dma_map_single() is implicitly cast as a
3363 				 * u32. Although dma_addr_t can be
3364 				 * 64-bit, the address returned by
3365 				 * dma_map_single() is always 32-bit
3366 				 * addressable (as defined by the pci/dma
3367 				 * subsystem)
3368 				 */
3369 				desc[frag++].addr_lo =
3370 				    dma_map_single(&adapter->pdev->dev,
3371 						   skb->data +
3372 						   ((skb->len -
3373 						     skb->data_len) / 2),
3374 						   ((skb->len -
3375 						     skb->data_len) / 2),
3376 						   DMA_TO_DEVICE);
3377 			}
3378 		} else {
3379 			desc[frag].addr_hi = 0;
3380 			desc[frag].len_vlan =
3381 					frags[i - 1].size;
3382 
3383 			/* NOTE: Here, the dma_addr_t returned from
3384 			 * dma_map_page() is implicitly cast as a u32.
3385 			 * Although dma_addr_t can be 64-bit, the address
3386 			 * returned by dma_map_page() is always 32-bit
3387 			 * addressable (as defined by the pci/dma subsystem)
3388 			 */
3389 			desc[frag++].addr_lo = skb_frag_dma_map(
3390 							&adapter->pdev->dev,
3391 							&frags[i - 1],
3392 							0,
3393 							frags[i - 1].size,
3394 							DMA_TO_DEVICE);
3395 		}
3396 	}
3397 
3398 	if (phydev && phydev->speed == SPEED_1000) {
3399 		if (++adapter->tx_ring.since_irq == PARM_TX_NUM_BUFS_DEF) {
3400 			/* Last element & Interrupt flag */
3401 			desc[frag - 1].flags = 0x5;
3402 			adapter->tx_ring.since_irq = 0;
3403 		} else { /* Last element */
3404 			desc[frag - 1].flags = 0x1;
3405 		}
3406 	} else
3407 		desc[frag - 1].flags = 0x5;
3408 
3409 	desc[0].flags |= 2;	/* First element flag */
3410 
3411 	tcb->index_start = adapter->tx_ring.send_idx;
3412 	tcb->stale = 0;
3413 
3414 	spin_lock_irqsave(&adapter->send_hw_lock, flags);
3415 
3416 	thiscopy = NUM_DESC_PER_RING_TX -
3417 				INDEX10(adapter->tx_ring.send_idx);
3418 
3419 	if (thiscopy >= frag) {
3420 		remainder = 0;
3421 		thiscopy = frag;
3422 	} else {
3423 		remainder = frag - thiscopy;
3424 	}
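	/* Copy the staged descriptors into the ring in up to two chunks:
	 * one up to the end of the ring and, if the ring wraps, the
	 * remainder starting again from the beginning of the ring.
	 */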
3425 
3426 	memcpy(adapter->tx_ring.tx_desc_ring +
3427 	       INDEX10(adapter->tx_ring.send_idx), desc,
3428 	       sizeof(struct tx_desc) * thiscopy);
3429 
3430 	add_10bit(&adapter->tx_ring.send_idx, thiscopy);
3431 
3432 	if (INDEX10(adapter->tx_ring.send_idx) == 0 ||
3433 		  INDEX10(adapter->tx_ring.send_idx) == NUM_DESC_PER_RING_TX) {
3434 		adapter->tx_ring.send_idx &= ~ET_DMA10_MASK;
3435 		adapter->tx_ring.send_idx ^= ET_DMA10_WRAP;
3436 	}
3437 
3438 	if (remainder) {
3439 		memcpy(adapter->tx_ring.tx_desc_ring,
3440 		       desc + thiscopy,
3441 		       sizeof(struct tx_desc) * remainder);
3442 
3443 		add_10bit(&adapter->tx_ring.send_idx, remainder);
3444 	}
3445 
3446 	if (INDEX10(adapter->tx_ring.send_idx) == 0) {
3447 		if (adapter->tx_ring.send_idx)
3448 			tcb->index = NUM_DESC_PER_RING_TX - 1;
3449 		else
3450 			tcb->index = ET_DMA10_WRAP|(NUM_DESC_PER_RING_TX - 1);
3451 	} else
3452 		tcb->index = adapter->tx_ring.send_idx - 1;
3453 
3454 	spin_lock(&adapter->tcb_send_qlock);
3455 
3456 	if (adapter->tx_ring.send_tail)
3457 		adapter->tx_ring.send_tail->next = tcb;
3458 	else
3459 		adapter->tx_ring.send_head = tcb;
3460 
3461 	adapter->tx_ring.send_tail = tcb;
3462 
3463 	WARN_ON(tcb->next != NULL);
3464 
3465 	adapter->tx_ring.used++;
3466 
3467 	spin_unlock(&adapter->tcb_send_qlock);
3468 
3469 	/* Write the new write pointer back to the device. */
3470 	writel(adapter->tx_ring.send_idx,
3471 	       &adapter->regs->txdma.service_request);
3472 
3473 	/* For Gig only, we use Tx Interrupt coalescing.  Enable the software
3474 	 * timer to wake us up if this packet isn't followed by N more.
3475 	 */
3476 	if (phydev && phydev->speed == SPEED_1000) {
3477 		writel(PARM_TX_TIME_INT_DEF * NANO_IN_A_MICRO,
3478 		       &adapter->regs->global.watchdog_timer);
3479 	}
3480 	spin_unlock_irqrestore(&adapter->send_hw_lock, flags);
3481 
3482 	return 0;
3483 }
3484 
3485 /**
3486  * send_packet - Do the work to send a packet
3487  * @skb: the packet(s) to send
3488  * @adapter: a pointer to the device's private adapter structure
3489  *
3490  * Return 0 in almost all cases; non-zero value in extreme hard failure only.
3491  *
3492  * Assumption: Send spinlock has been acquired
3493  */
3494 static int send_packet(struct sk_buff *skb, struct et131x_adapter *adapter)
3495 {
3496 	int status;
3497 	struct tcb *tcb = NULL;
3498 	u16 *shbufva;
3499 	unsigned long flags;
3500 
3501 	/* All packets must have at least a MAC address and a protocol type */
3502 	if (skb->len < ETH_HLEN)
3503 		return -EIO;
3504 
3505 	/* Get a TCB for this packet */
3506 	spin_lock_irqsave(&adapter->tcb_ready_qlock, flags);
3507 
3508 	tcb = adapter->tx_ring.tcb_qhead;
3509 
3510 	if (tcb == NULL) {
3511 		spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags);
3512 		return -ENOMEM;
3513 	}
3514 
3515 	adapter->tx_ring.tcb_qhead = tcb->next;
3516 
3517 	if (adapter->tx_ring.tcb_qhead == NULL)
3518 		adapter->tx_ring.tcb_qtail = NULL;
3519 
3520 	spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags);
3521 
3522 	tcb->skb = skb;
3523 
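	/* Peek at the destination MAC in the linear data so the broadcast/
	 * multicast transmit counters can be updated when the TCB is freed.
	 */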
3524 	if (skb->data != NULL && skb->len - skb->data_len >= 6) {
3525 		shbufva = (u16 *) skb->data;
3526 
3527 		if ((shbufva[0] == 0xffff) &&
3528 		    (shbufva[1] == 0xffff) && (shbufva[2] == 0xffff)) {
3529 			tcb->flags |= fMP_DEST_BROAD;
3530 		} else if ((shbufva[0] & 0x3) == 0x0001) {
3531 			tcb->flags |=  fMP_DEST_MULTI;
3532 		}
3533 	}
3534 
3535 	tcb->next = NULL;
3536 
3537 	/* Call the NIC specific send handler. */
3538 	status = nic_send_packet(adapter, tcb);
3539 
3540 	if (status != 0) {
3541 		spin_lock_irqsave(&adapter->tcb_ready_qlock, flags);
3542 
3543 		if (adapter->tx_ring.tcb_qtail)
3544 			adapter->tx_ring.tcb_qtail->next = tcb;
3545 		else
3546 			/* Apparently ready Q is empty. */
3547 			adapter->tx_ring.tcb_qhead = tcb;
3548 
3549 		adapter->tx_ring.tcb_qtail = tcb;
3550 		spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags);
3551 		return status;
3552 	}
3553 	WARN_ON(adapter->tx_ring.used > NUM_TCB);
3554 	return 0;
3555 }
3556 
3557 /**
3558  * et131x_send_packets - This function is called by the OS to send packets
3559  * @skb: the packet(s) to send
3560  * @netdev: device on which to TX the above packet(s)
3561  *
3562  * Return 0 in almost all cases; non-zero value in extreme hard failure only
3563  */
3564 static int et131x_send_packets(struct sk_buff *skb, struct net_device *netdev)
3565 {
3566 	int status = 0;
3567 	struct et131x_adapter *adapter = netdev_priv(netdev);
3568 
3569 	/* Send these packets
3570 	 *
3571 	 * NOTE: The Linux Tx entry point is only given one packet at a time
3572 	 * to Tx, so the PacketCount and its associated array make no sense here
3573 	 */
3574 
3575 	/* TCB is not available */
3576 	if (adapter->tx_ring.used >= NUM_TCB) {
3577 		/* NOTE: If there's an error on send, no need to queue the
3578 		 * packet under Linux; if we just send an error up to the
3579 		 * netif layer, it will resend the skb to us.
3580 		 */
3581 		status = -ENOMEM;
3582 	} else {
3583 		/* We need to see if the link is up; if it's not, make the
3584 		 * netif layer think we're good and drop the packet
3585 		 */
3586 		if ((adapter->flags & fMP_ADAPTER_FAIL_SEND_MASK) ||
3587 					!netif_carrier_ok(netdev)) {
3588 			dev_kfree_skb_any(skb);
3589 			skb = NULL;
3590 
3591 			adapter->net_stats.tx_dropped++;
3592 		} else {
3593 			status = send_packet(skb, adapter);
3594 			if (status != 0 && status != -ENOMEM) {
3595 				/* On any other error, make netif think we're
3596 				 * OK and drop the packet
3597 				 */
3598 				dev_kfree_skb_any(skb);
3599 				skb = NULL;
3600 				adapter->net_stats.tx_dropped++;
3601 			}
3602 		}
3603 	}
3604 	return status;
3605 }
3606 
3607 /**
3608  * free_send_packet - Recycle a struct tcb
3609  * @adapter: pointer to our adapter
3610  * @tcb: pointer to struct tcb
3611  *
3612  * Complete the packet if necessary
3613  * Assumption - Send spinlock has been acquired
3614  */
3615 static inline void free_send_packet(struct et131x_adapter *adapter,
3616 						struct tcb *tcb)
3617 {
3618 	unsigned long flags;
3619 	struct tx_desc *desc = NULL;
3620 	struct net_device_stats *stats = &adapter->net_stats;
3621 
3622 	if (tcb->flags & fMP_DEST_BROAD)
3623 		atomic_inc(&adapter->stats.broadcast_pkts_xmtd);
3624 	else if (tcb->flags & fMP_DEST_MULTI)
3625 		atomic_inc(&adapter->stats.multicast_pkts_xmtd);
3626 	else
3627 		atomic_inc(&adapter->stats.unicast_pkts_xmtd);
3628 
3629 	if (tcb->skb) {
3630 		stats->tx_bytes += tcb->skb->len;
3631 
3632 		/* Iterate through the TX descriptors on the ring
3633 		 * corresponding to this packet and unmap the fragments
3634 		 * they point to
3635 		 */
3636 		do {
3637 			desc = (struct tx_desc *)
3638 				    (adapter->tx_ring.tx_desc_ring +
3639 						INDEX10(tcb->index_start));
3640 
3641 			dma_unmap_single(&adapter->pdev->dev,
3642 					 desc->addr_lo,
3643 					 desc->len_vlan, DMA_TO_DEVICE);
3644 
3645 			add_10bit(&tcb->index_start, 1);
3646 			if (INDEX10(tcb->index_start) >=
3647 							NUM_DESC_PER_RING_TX) {
3648 				tcb->index_start &= ~ET_DMA10_MASK;
3649 				tcb->index_start ^= ET_DMA10_WRAP;
3650 			}
3651 		} while (desc != (adapter->tx_ring.tx_desc_ring +
3652 				INDEX10(tcb->index)));
3653 
3654 		dev_kfree_skb_any(tcb->skb);
3655 	}
3656 
3657 	memset(tcb, 0, sizeof(struct tcb));
3658 
3659 	/* Add the TCB to the Ready Q */
3660 	spin_lock_irqsave(&adapter->tcb_ready_qlock, flags);
3661 
3662 	adapter->net_stats.tx_packets++;
3663 
3664 	if (adapter->tx_ring.tcb_qtail)
3665 		adapter->tx_ring.tcb_qtail->next = tcb;
3666 	else
3667 		/* Apparently ready Q is empty. */
3668 		adapter->tx_ring.tcb_qhead = tcb;
3669 
3670 	adapter->tx_ring.tcb_qtail = tcb;
3671 
3672 	spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags);
3673 	WARN_ON(adapter->tx_ring.used < 0);
3674 }
3675 
3676 /**
3677  * et131x_free_busy_send_packets - Free and complete the stopped active sends
3678  * @adapter: pointer to our adapter
3679  *
3680  * Assumption - Send spinlock has been acquired
3681  */
3682 static void et131x_free_busy_send_packets(struct et131x_adapter *adapter)
3683 {
3684 	struct tcb *tcb;
3685 	unsigned long flags;
3686 	u32 freed = 0;
3687 
3688 	/* Any packets being sent? Check the first TCB on the send list */
3689 	spin_lock_irqsave(&adapter->tcb_send_qlock, flags);
3690 
3691 	tcb = adapter->tx_ring.send_head;
3692 
3693 	while (tcb != NULL && freed < NUM_TCB) {
3694 		struct tcb *next = tcb->next;
3695 
3696 		adapter->tx_ring.send_head = next;
3697 
3698 		if (next == NULL)
3699 			adapter->tx_ring.send_tail = NULL;
3700 
3701 		adapter->tx_ring.used--;
3702 
3703 		spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
3704 
3705 		freed++;
3706 		free_send_packet(adapter, tcb);
3707 
3708 		spin_lock_irqsave(&adapter->tcb_send_qlock, flags);
3709 
3710 		tcb = adapter->tx_ring.send_head;
3711 	}
3712 
3713 	WARN_ON(freed == NUM_TCB);
3714 
3715 	spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
3716 
3717 	adapter->tx_ring.used = 0;
3718 }
3719 
3720 /**
3721  * et131x_handle_send_interrupt - Interrupt handler for sending processing
3722  * @adapter: pointer to our adapter
3723  *
3724  * Re-claim the send resources, complete sends and get more to send from
3725  * the send wait queue.
3726  *
3727  * Assumption - Send spinlock has been acquired
3728  */
3729 static void et131x_handle_send_interrupt(struct et131x_adapter *adapter)
3730 {
3731 	unsigned long flags;
3732 	u32 serviced;
3733 	struct tcb *tcb;
3734 	u32 index;
3735 
3736 	serviced = readl(&adapter->regs->txdma.new_service_complete);
3737 	index = INDEX10(serviced);
3738 
3739 	/* Has the ring wrapped?  Process any descriptors that do not have
3740 	 * the same "wrap" indicator as the current completion indicator
3741 	 */
3742 	spin_lock_irqsave(&adapter->tcb_send_qlock, flags);
3743 
3744 	tcb = adapter->tx_ring.send_head;
3745 
3746 	while (tcb &&
3747 	       ((serviced ^ tcb->index) & ET_DMA10_WRAP) &&
3748 	       index < INDEX10(tcb->index)) {
3749 		adapter->tx_ring.used--;
3750 		adapter->tx_ring.send_head = tcb->next;
3751 		if (tcb->next == NULL)
3752 			adapter->tx_ring.send_tail = NULL;
3753 
3754 		spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
3755 		free_send_packet(adapter, tcb);
3756 		spin_lock_irqsave(&adapter->tcb_send_qlock, flags);
3757 
3758 		/* Goto the next packet */
3759 		tcb = adapter->tx_ring.send_head;
3760 	}
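	/* Now reclaim TCBs on the same "wrap" pass as the completion index;
	 * everything strictly below the serviced index has been sent.
	 */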
3761 	while (tcb &&
3762 	       !((serviced ^ tcb->index) & ET_DMA10_WRAP)
3763 	       && index > (tcb->index & ET_DMA10_MASK)) {
3764 		adapter->tx_ring.used--;
3765 		adapter->tx_ring.send_head = tcb->next;
3766 		if (tcb->next == NULL)
3767 			adapter->tx_ring.send_tail = NULL;
3768 
3769 		spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
3770 		free_send_packet(adapter, tcb);
3771 		spin_lock_irqsave(&adapter->tcb_send_qlock, flags);
3772 
3773 		/* Goto the next packet */
3774 		tcb = adapter->tx_ring.send_head;
3775 	}
3776 
3777 	/* Wake up the queue when we hit a low-water mark */
3778 	if (adapter->tx_ring.used <= NUM_TCB / 3)
3779 		netif_wake_queue(adapter->netdev);
3780 
3781 	spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
3782 }
3783 
3784 static int et131x_get_settings(struct net_device *netdev,
3785 			       struct ethtool_cmd *cmd)
3786 {
3787 	struct et131x_adapter *adapter = netdev_priv(netdev);
3788 
3789 	return phy_ethtool_gset(adapter->phydev, cmd);
3790 }
3791 
3792 static int et131x_set_settings(struct net_device *netdev,
3793 			       struct ethtool_cmd *cmd)
3794 {
3795 	struct et131x_adapter *adapter = netdev_priv(netdev);
3796 
3797 	return phy_ethtool_sset(adapter->phydev, cmd);
3798 }
3799 
3800 static int et131x_get_regs_len(struct net_device *netdev)
3801 {
3802 #define ET131X_REGS_LEN 256
3803 	return ET131X_REGS_LEN * sizeof(u32);
3804 }
3805 
3806 static void et131x_get_regs(struct net_device *netdev,
3807 			    struct ethtool_regs *regs, void *regs_data)
3808 {
3809 	struct et131x_adapter *adapter = netdev_priv(netdev);
3810 	struct address_map __iomem *aregs = adapter->regs;
3811 	u32 *regs_buff = regs_data;
3812 	u32 num = 0;
3813 
3814 	memset(regs_data, 0, et131x_get_regs_len(netdev));
3815 
3816 	regs->version = (1 << 24) | (adapter->pdev->revision << 16) |
3817 			adapter->pdev->device;
3818 
3819 	/* PHY regs */
3820 	et131x_mii_read(adapter, MII_BMCR, (u16 *)&regs_buff[num++]);
3821 	et131x_mii_read(adapter, MII_BMSR, (u16 *)&regs_buff[num++]);
3822 	et131x_mii_read(adapter, MII_PHYSID1, (u16 *)&regs_buff[num++]);
3823 	et131x_mii_read(adapter, MII_PHYSID2, (u16 *)&regs_buff[num++]);
3824 	et131x_mii_read(adapter, MII_ADVERTISE, (u16 *)&regs_buff[num++]);
3825 	et131x_mii_read(adapter, MII_LPA, (u16 *)&regs_buff[num++]);
3826 	et131x_mii_read(adapter, MII_EXPANSION, (u16 *)&regs_buff[num++]);
3827 	/* Autoneg next page transmit reg */
3828 	et131x_mii_read(adapter, 0x07, (u16 *)&regs_buff[num++]);
3829 	/* Link partner next page reg */
3830 	et131x_mii_read(adapter, 0x08, (u16 *)&regs_buff[num++]);
3831 	et131x_mii_read(adapter, MII_CTRL1000, (u16 *)&regs_buff[num++]);
3832 	et131x_mii_read(adapter, MII_STAT1000, (u16 *)&regs_buff[num++]);
3833 	et131x_mii_read(adapter, MII_ESTATUS, (u16 *)&regs_buff[num++]);
3834 	et131x_mii_read(adapter, PHY_INDEX_REG, (u16 *)&regs_buff[num++]);
3835 	et131x_mii_read(adapter, PHY_DATA_REG, (u16 *)&regs_buff[num++]);
3836 	et131x_mii_read(adapter, PHY_MPHY_CONTROL_REG,
3837 			(u16 *)&regs_buff[num++]);
3838 	et131x_mii_read(adapter, PHY_LOOPBACK_CONTROL,
3839 			(u16 *)&regs_buff[num++]);
3840 	et131x_mii_read(adapter, PHY_LOOPBACK_CONTROL+1,
3841 			(u16 *)&regs_buff[num++]);
3842 	et131x_mii_read(adapter, PHY_REGISTER_MGMT_CONTROL,
3843 			(u16 *)&regs_buff[num++]);
3844 	et131x_mii_read(adapter, PHY_CONFIG, (u16 *)&regs_buff[num++]);
3845 	et131x_mii_read(adapter, PHY_PHY_CONTROL, (u16 *)&regs_buff[num++]);
3846 	et131x_mii_read(adapter, PHY_INTERRUPT_MASK, (u16 *)&regs_buff[num++]);
3847 	et131x_mii_read(adapter, PHY_INTERRUPT_STATUS,
3848 			(u16 *)&regs_buff[num++]);
3849 	et131x_mii_read(adapter, PHY_PHY_STATUS, (u16 *)&regs_buff[num++]);
3850 	et131x_mii_read(adapter, PHY_LED_1, (u16 *)&regs_buff[num++]);
3851 	et131x_mii_read(adapter, PHY_LED_2, (u16 *)&regs_buff[num++]);
3852 
3853 	/* Global regs */
3854 	regs_buff[num++] = readl(&aregs->global.txq_start_addr);
3855 	regs_buff[num++] = readl(&aregs->global.txq_end_addr);
3856 	regs_buff[num++] = readl(&aregs->global.rxq_start_addr);
3857 	regs_buff[num++] = readl(&aregs->global.rxq_end_addr);
3858 	regs_buff[num++] = readl(&aregs->global.pm_csr);
3859 	regs_buff[num++] = adapter->stats.interrupt_status;
3860 	regs_buff[num++] = readl(&aregs->global.int_mask);
3861 	regs_buff[num++] = readl(&aregs->global.int_alias_clr_en);
3862 	regs_buff[num++] = readl(&aregs->global.int_status_alias);
3863 	regs_buff[num++] = readl(&aregs->global.sw_reset);
3864 	regs_buff[num++] = readl(&aregs->global.slv_timer);
3865 	regs_buff[num++] = readl(&aregs->global.msi_config);
3866 	regs_buff[num++] = readl(&aregs->global.loopback);
3867 	regs_buff[num++] = readl(&aregs->global.watchdog_timer);
3868 
3869 	/* TXDMA regs */
3870 	regs_buff[num++] = readl(&aregs->txdma.csr);
3871 	regs_buff[num++] = readl(&aregs->txdma.pr_base_hi);
3872 	regs_buff[num++] = readl(&aregs->txdma.pr_base_lo);
3873 	regs_buff[num++] = readl(&aregs->txdma.pr_num_des);
3874 	regs_buff[num++] = readl(&aregs->txdma.txq_wr_addr);
3875 	regs_buff[num++] = readl(&aregs->txdma.txq_wr_addr_ext);
3876 	regs_buff[num++] = readl(&aregs->txdma.txq_rd_addr);
3877 	regs_buff[num++] = readl(&aregs->txdma.dma_wb_base_hi);
3878 	regs_buff[num++] = readl(&aregs->txdma.dma_wb_base_lo);
3879 	regs_buff[num++] = readl(&aregs->txdma.service_request);
3880 	regs_buff[num++] = readl(&aregs->txdma.service_complete);
3881 	regs_buff[num++] = readl(&aregs->txdma.cache_rd_index);
3882 	regs_buff[num++] = readl(&aregs->txdma.cache_wr_index);
3883 	regs_buff[num++] = readl(&aregs->txdma.tx_dma_error);
3884 	regs_buff[num++] = readl(&aregs->txdma.desc_abort_cnt);
3885 	regs_buff[num++] = readl(&aregs->txdma.payload_abort_cnt);
3886 	regs_buff[num++] = readl(&aregs->txdma.writeback_abort_cnt);
3887 	regs_buff[num++] = readl(&aregs->txdma.desc_timeout_cnt);
3888 	regs_buff[num++] = readl(&aregs->txdma.payload_timeout_cnt);
3889 	regs_buff[num++] = readl(&aregs->txdma.writeback_timeout_cnt);
3890 	regs_buff[num++] = readl(&aregs->txdma.desc_error_cnt);
3891 	regs_buff[num++] = readl(&aregs->txdma.payload_error_cnt);
3892 	regs_buff[num++] = readl(&aregs->txdma.writeback_error_cnt);
3893 	regs_buff[num++] = readl(&aregs->txdma.dropped_tlp_cnt);
3894 	regs_buff[num++] = readl(&aregs->txdma.new_service_complete);
3895 	regs_buff[num++] = readl(&aregs->txdma.ethernet_packet_cnt);
3896 
3897 	/* RXDMA regs */
3898 	regs_buff[num++] = readl(&aregs->rxdma.csr);
3899 	regs_buff[num++] = readl(&aregs->rxdma.dma_wb_base_hi);
3900 	regs_buff[num++] = readl(&aregs->rxdma.dma_wb_base_lo);
3901 	regs_buff[num++] = readl(&aregs->rxdma.num_pkt_done);
3902 	regs_buff[num++] = readl(&aregs->rxdma.max_pkt_time);
3903 	regs_buff[num++] = readl(&aregs->rxdma.rxq_rd_addr);
3904 	regs_buff[num++] = readl(&aregs->rxdma.rxq_rd_addr_ext);
3905 	regs_buff[num++] = readl(&aregs->rxdma.rxq_wr_addr);
3906 	regs_buff[num++] = readl(&aregs->rxdma.psr_base_hi);
3907 	regs_buff[num++] = readl(&aregs->rxdma.psr_base_lo);
3908 	regs_buff[num++] = readl(&aregs->rxdma.psr_num_des);
3909 	regs_buff[num++] = readl(&aregs->rxdma.psr_avail_offset);
3910 	regs_buff[num++] = readl(&aregs->rxdma.psr_full_offset);
3911 	regs_buff[num++] = readl(&aregs->rxdma.psr_access_index);
3912 	regs_buff[num++] = readl(&aregs->rxdma.psr_min_des);
3913 	regs_buff[num++] = readl(&aregs->rxdma.fbr0_base_lo);
3914 	regs_buff[num++] = readl(&aregs->rxdma.fbr0_base_hi);
3915 	regs_buff[num++] = readl(&aregs->rxdma.fbr0_num_des);
3916 	regs_buff[num++] = readl(&aregs->rxdma.fbr0_avail_offset);
3917 	regs_buff[num++] = readl(&aregs->rxdma.fbr0_full_offset);
3918 	regs_buff[num++] = readl(&aregs->rxdma.fbr0_rd_index);
3919 	regs_buff[num++] = readl(&aregs->rxdma.fbr0_min_des);
3920 	regs_buff[num++] = readl(&aregs->rxdma.fbr1_base_lo);
3921 	regs_buff[num++] = readl(&aregs->rxdma.fbr1_base_hi);
3922 	regs_buff[num++] = readl(&aregs->rxdma.fbr1_num_des);
3923 	regs_buff[num++] = readl(&aregs->rxdma.fbr1_avail_offset);
3924 	regs_buff[num++] = readl(&aregs->rxdma.fbr1_full_offset);
3925 	regs_buff[num++] = readl(&aregs->rxdma.fbr1_rd_index);
3926 	regs_buff[num++] = readl(&aregs->rxdma.fbr1_min_des);
3927 }
3928 
3929 #define ET131X_DRVINFO_LEN 32 /* value from ethtool.h */
3930 static void et131x_get_drvinfo(struct net_device *netdev,
3931 			       struct ethtool_drvinfo *info)
3932 {
3933 	struct et131x_adapter *adapter = netdev_priv(netdev);
3934 
3935 	strncpy(info->driver, DRIVER_NAME, ET131X_DRVINFO_LEN);
3936 	strncpy(info->version, DRIVER_VERSION, ET131X_DRVINFO_LEN);
3937 	strncpy(info->bus_info, pci_name(adapter->pdev), ET131X_DRVINFO_LEN);
3938 }
3939 
3940 static struct ethtool_ops et131x_ethtool_ops = {
3941 	.get_settings	= et131x_get_settings,
3942 	.set_settings	= et131x_set_settings,
3943 	.get_drvinfo	= et131x_get_drvinfo,
3944 	.get_regs_len	= et131x_get_regs_len,
3945 	.get_regs	= et131x_get_regs,
3946 	.get_link	= ethtool_op_get_link,
3947 };
3948 
3949 static void et131x_set_ethtool_ops(struct net_device *netdev)
3950 {
3951 	SET_ETHTOOL_OPS(netdev, &et131x_ethtool_ops);
3952 }
3953 
3954 /**
3955  * et131x_hwaddr_init - set up the MAC Address on the ET1310
3956  * @adapter: pointer to our private adapter structure
3957  */
3958 static void et131x_hwaddr_init(struct et131x_adapter *adapter)
3959 {
3960 	/* If have our default mac from init and no mac address from
3961 	 * EEPROM then we need to generate the last octet and set it on the
3962 	 * device
3963 	 */
3964 	if (adapter->rom_addr[0] == 0x00 &&
3965 	    adapter->rom_addr[1] == 0x00 &&
3966 	    adapter->rom_addr[2] == 0x00 &&
3967 	    adapter->rom_addr[3] == 0x00 &&
3968 	    adapter->rom_addr[4] == 0x00 &&
3969 	    adapter->rom_addr[5] == 0x00) {
3970 		/*
3971 		 * We need to randomly generate the last octet so we
3972 		 * decrease our chances of setting the mac address to
3973 		 * same as another one of our cards in the system
3974 		 */
3975 		get_random_bytes(&adapter->addr[5], 1);
3976 		/*
3977 		 * We have the default value in the register we are
3978 		 * working with so we need to copy the current
3979 		 * address into the permanent address
3980 		 */
3981 		memcpy(adapter->rom_addr,
3982 			adapter->addr, ETH_ALEN);
3983 	} else {
3984 		/* We do not have an override address, so set the
3985 		 * current address to the permanent address and add
3986 		 * it to the device
3987 		 */
3988 		memcpy(adapter->addr,
3989 		       adapter->rom_addr, ETH_ALEN);
3990 	}
3991 }
3992 
3993 /**
3994  * et131x_pci_init	 - initial PCI setup
3995  * @adapter: pointer to our private adapter structure
3996  * @pdev: our PCI device
3997  *
3998  * Perform the initial setup of PCI registers and if possible initialise
3999  * the MAC address. At this point the I/O registers have yet to be mapped
4000  */
4001 static int et131x_pci_init(struct et131x_adapter *adapter,
4002 						struct pci_dev *pdev)
4003 {
4004 	int cap = pci_pcie_cap(pdev);
4005 	u16 max_payload;
4006 	u16 ctl;
4007 	int i, rc;
4008 
4009 	rc = et131x_init_eeprom(adapter);
4010 	if (rc < 0)
4011 		goto out;
4012 
4013 	if (!cap) {
4014 		dev_err(&pdev->dev, "Missing PCIe capabilities\n");
4015 		goto err_out;
4016 	}
4017 
4018 	/* Let's set up the PORT LOGIC Register.  First we need to know what
4019 	 * the max_payload_size is
4020 	 */
4021 	if (pci_read_config_word(pdev, cap + PCI_EXP_DEVCAP, &max_payload)) {
4022 		dev_err(&pdev->dev,
4023 		    "Could not read PCI config space for Max Payload Size\n");
4024 		goto err_out;
4025 	}
4026 
4027 	/* Program the Ack/Nak latency and replay timers */
4028 	max_payload &= 0x07;
4029 
4030 	if (max_payload < 2) {
4031 		static const u16 acknak[2] = { 0x76, 0xD0 };
4032 		static const u16 replay[2] = { 0x1E0, 0x2ED };
4033 
4034 		if (pci_write_config_word(pdev, ET1310_PCI_ACK_NACK,
4035 					       acknak[max_payload])) {
4036 			dev_err(&pdev->dev,
4037 			  "Could not write PCI config space for ACK/NAK\n");
4038 			goto err_out;
4039 		}
4040 		if (pci_write_config_word(pdev, ET1310_PCI_REPLAY,
4041 					       replay[max_payload])) {
4042 			dev_err(&pdev->dev,
4043 			  "Could not write PCI config space for Replay Timer\n");
4044 			goto err_out;
4045 		}
4046 	}
4047 
4048 	/* l0s and l1 latency timers.  We are using default values.
4049 	 * Representing 001 for L0s and 010 for L1
4050 	 */
4051 	if (pci_write_config_byte(pdev, ET1310_PCI_L0L1LATENCY, 0x11)) {
4052 		dev_err(&pdev->dev,
4053 		  "Could not write PCI config space for Latency Timers\n");
4054 		goto err_out;
4055 	}
4056 
4057 	/* Change the max read size to 2k */
4058 	if (pci_read_config_word(pdev, cap + PCI_EXP_DEVCTL, &ctl)) {
4059 		dev_err(&pdev->dev,
4060 			"Could not read PCI config space for Max read size\n");
4061 		goto err_out;
4062 	}
4063 
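	/* A READRQ field value of 4 (bits 14:12 of the Device Control
	 * register) selects a 2048 byte maximum read request size.
	 */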
4064 	ctl = (ctl & ~PCI_EXP_DEVCTL_READRQ) | (0x04 << 12);
4065 
4066 	if (pci_write_config_word(pdev, cap + PCI_EXP_DEVCTL, ctl)) {
4067 		dev_err(&pdev->dev,
4068 		      "Could not write PCI config space for Max read size\n");
4069 		goto err_out;
4070 	}
4071 
4072 	/* Get MAC address from config space if an eeprom exists, otherwise
4073 	 * the MAC address there will not be valid
4074 	 */
4075 	if (!adapter->has_eeprom) {
4076 		et131x_hwaddr_init(adapter);
4077 		return 0;
4078 	}
4079 
4080 	for (i = 0; i < ETH_ALEN; i++) {
4081 		if (pci_read_config_byte(pdev, ET1310_PCI_MAC_ADDRESS + i,
4082 					adapter->rom_addr + i)) {
4083 			dev_err(&pdev->dev, "Could not read PCI config space for MAC address\n");
4084 			goto err_out;
4085 		}
4086 	}
4087 	memcpy(adapter->addr, adapter->rom_addr, ETH_ALEN);
4088 out:
4089 	return rc;
4090 err_out:
4091 	rc = -EIO;
4092 	goto out;
4093 }
4094 
4095 /**
4096  * et131x_error_timer_handler
4097  * @data: timer-specific variable; here a pointer to our adapter structure
4098  *
4099  * The routine called when the error timer expires, to track the number of
4100  * recurring errors.
4101  */
4102 static void et131x_error_timer_handler(unsigned long data)
4103 {
4104 	struct et131x_adapter *adapter = (struct et131x_adapter *) data;
4105 	struct phy_device *phydev = adapter->phydev;
4106 
4107 	if (et1310_in_phy_coma(adapter)) {
4108 		/* Bring the device immediately out of coma, to
4109 		 * prevent it from sleeping indefinitely, this
4110 		 * mechanism could be improved! */
4111 		et1310_disable_phy_coma(adapter);
4112 		adapter->boot_coma = 20;
4113 	} else {
4114 		et1310_update_macstat_host_counters(adapter);
4115 	}
4116 
4117 	if (!phydev->link && adapter->boot_coma < 11)
4118 		adapter->boot_coma++;
4119 
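	/* After roughly ten error-timer periods with no link, drop the PHY
	 * into coma (power-down) to save power until the link comes back.
	 */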
4120 	if (adapter->boot_coma == 10) {
4121 		if (!phydev->link) {
4122 			if (!et1310_in_phy_coma(adapter)) {
4123 				/* NOTE - This was originally a 'sync with
4124 				 *  interrupt'. How to do that under Linux?
4125 				 */
4126 				et131x_enable_interrupts(adapter);
4127 				et1310_enable_phy_coma(adapter);
4128 			}
4129 		}
4130 	}
4131 
4132 	/* This is a periodic timer, so reschedule */
4133 	mod_timer(&adapter->error_timer, jiffies +
4134 					  TX_ERROR_PERIOD * HZ / 1000);
4135 }
4136 
4137 /**
4138  * et131x_adapter_memory_alloc
4139  * @adapter: pointer to our private adapter structure
4140  *
4141  * Returns 0 on success, errno on failure (as defined in errno.h).
4142  *
4143  * Allocate all the memory blocks for send, receive and others.
4144  */
4145 static int et131x_adapter_memory_alloc(struct et131x_adapter *adapter)
4146 {
4147 	int status;
4148 
4149 	/* Allocate memory for the Tx Ring */
4150 	status = et131x_tx_dma_memory_alloc(adapter);
4151 	if (status != 0) {
4152 		dev_err(&adapter->pdev->dev,
4153 			  "et131x_tx_dma_memory_alloc FAILED\n");
4154 		return status;
4155 	}
4156 	/* Receive buffer memory allocation */
4157 	status = et131x_rx_dma_memory_alloc(adapter);
4158 	if (status != 0) {
4159 		dev_err(&adapter->pdev->dev,
4160 			  "et131x_rx_dma_memory_alloc FAILED\n");
4161 		et131x_tx_dma_memory_free(adapter);
4162 		return status;
4163 	}
4164 
4165 	/* Init receive data structures */
4166 	status = et131x_init_recv(adapter);
4167 	if (status != 0) {
4168 		dev_err(&adapter->pdev->dev,
4169 			"et131x_init_recv FAILED\n");
4170 		et131x_tx_dma_memory_free(adapter);
4171 		et131x_rx_dma_memory_free(adapter);
4172 	}
4173 	return status;
4174 }
4175 
4176 /**
4177  * et131x_adapter_memory_free - Free all memory allocated for use by Tx & Rx
4178  * @adapter: pointer to our private adapter structure
4179  */
4180 static void et131x_adapter_memory_free(struct et131x_adapter *adapter)
4181 {
4182 	/* Free DMA memory */
4183 	et131x_tx_dma_memory_free(adapter);
4184 	et131x_rx_dma_memory_free(adapter);
4185 }
4186 
4187 static void et131x_adjust_link(struct net_device *netdev)
4188 {
4189 	struct et131x_adapter *adapter = netdev_priv(netdev);
4190 	struct  phy_device *phydev = adapter->phydev;
4191 
4192 	if (netif_carrier_ok(netdev)) {
4193 		adapter->boot_coma = 20;
4194 
4195 		if (phydev && phydev->speed == SPEED_10) {
4196 			/*
4197 			 * NOTE - Is there a way to query this without
4198 			 * TruePHY?
4199 			 * && TRU_QueryCoreType(adapter->hTruePhy, 0)==
4200 			 * EMI_TRUEPHY_A13O) {
4201 			 */
4202 			u16 register18;
4203 
4204 			et131x_mii_read(adapter, PHY_MPHY_CONTROL_REG,
4205 					 &register18);
4206 			et131x_mii_write(adapter, PHY_MPHY_CONTROL_REG,
4207 					 register18 | 0x4);
4208 			et131x_mii_write(adapter, PHY_INDEX_REG,
4209 					 register18 | 0x8402);
4210 			et131x_mii_write(adapter, PHY_DATA_REG,
4211 					 register18 | 511);
4212 			et131x_mii_write(adapter, PHY_MPHY_CONTROL_REG,
4213 					 register18);
4214 		}
4215 
4216 		et1310_config_flow_control(adapter);
4217 
4218 		if (phydev && phydev->speed == SPEED_1000 &&
4219 				adapter->registry_jumbo_packet > 2048) {
4220 			u16 reg;
4221 
4222 			et131x_mii_read(adapter, PHY_CONFIG, &reg);
4223 			reg &= ~ET_PHY_CONFIG_TX_FIFO_DEPTH;
4224 			reg |= ET_PHY_CONFIG_FIFO_DEPTH_32;
4225 			et131x_mii_write(adapter, PHY_CONFIG, reg);
4226 		}
4227 
4228 		et131x_set_rx_dma_timer(adapter);
4229 		et1310_config_mac_regs2(adapter);
4230 	}
4231 
4232 	if (phydev && phydev->link != adapter->link) {
4233 		/*
4234 		 * Check to see if we are in coma mode and if
4235 		 * so, disable it because we will not be able
4236 		 * to read PHY values until we are out.
4237 		 */
4238 		if (et1310_in_phy_coma(adapter))
4239 			et1310_disable_phy_coma(adapter);
4240 
4241 		if (phydev->link) {
4242 			adapter->boot_coma = 20;
4243 		} else {
4244 			dev_warn(&adapter->pdev->dev,
4245 			    "Link down - cable problem ?\n");
4246 			adapter->boot_coma = 0;
4247 
4248 			if (phydev->speed == SPEED_10) {
4249 				/* NOTE - Is there a way to query this without
4250 				 * TruePHY?
4251 				 * && TRU_QueryCoreType(adapter->hTruePhy, 0) ==
4252 				 * EMI_TRUEPHY_A13O)
4253 				 */
4254 				u16 register18;
4255 
4256 				et131x_mii_read(adapter, PHY_MPHY_CONTROL_REG,
4257 						 &register18);
4258 				et131x_mii_write(adapter, PHY_MPHY_CONTROL_REG,
4259 						 register18 | 0x4);
4260 				et131x_mii_write(adapter, PHY_INDEX_REG,
4261 						 register18 | 0x8402);
4262 				et131x_mii_write(adapter, PHY_DATA_REG,
4263 						 register18 | 511);
4264 				et131x_mii_write(adapter, PHY_MPHY_CONTROL_REG,
4265 						 register18);
4266 			}
4267 
4268 			/* Free the packets being actively sent & stopped */
4269 			et131x_free_busy_send_packets(adapter);
4270 
4271 			/* Re-initialize the send structures */
4272 			et131x_init_send(adapter);
4273 
4274 			/*
4275 			 * Bring the device back to the state it was during
4276 			 * init prior to autonegotiation being complete. This
4277 			 * way, when we get the auto-neg complete interrupt,
4278 			 * we can complete init by calling config_mac_regs2.
4279 			 */
4280 			et131x_soft_reset(adapter);
4281 
4282 			/* Setup ET1310 as per the documentation */
4283 			et131x_adapter_setup(adapter);
4284 
4285 			/* perform reset of tx/rx */
4286 			et131x_disable_txrx(netdev);
4287 			et131x_enable_txrx(netdev);
4288 		}
4289 
4290 		adapter->link = phydev->link;
4291 
4292 		phy_print_status(phydev);
4293 	}
4294 }
4295 
4296 static int et131x_mii_probe(struct net_device *netdev)
4297 {
4298 	struct et131x_adapter *adapter = netdev_priv(netdev);
4299 	struct  phy_device *phydev = NULL;
4300 
4301 	phydev = phy_find_first(adapter->mii_bus);
4302 	if (!phydev) {
4303 		dev_err(&adapter->pdev->dev, "no PHY found\n");
4304 		return -ENODEV;
4305 	}
4306 
4307 	phydev = phy_connect(netdev, dev_name(&phydev->dev),
4308 			&et131x_adjust_link, 0, PHY_INTERFACE_MODE_MII);
4309 
4310 	if (IS_ERR(phydev)) {
4311 		dev_err(&adapter->pdev->dev, "Could not attach to PHY\n");
4312 		return PTR_ERR(phydev);
4313 	}
4314 
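	/* Restrict the PHY to 10/100 modes here; gigabit support is added
	 * back below unless this is the 10/100-only device variant.
	 */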
4315 	phydev->supported &= (SUPPORTED_10baseT_Half
4316 				| SUPPORTED_10baseT_Full
4317 				| SUPPORTED_100baseT_Half
4318 				| SUPPORTED_100baseT_Full
4319 				| SUPPORTED_Autoneg
4320 				| SUPPORTED_MII
4321 				| SUPPORTED_TP);
4322 
4323 	if (adapter->pdev->device != ET131X_PCI_DEVICE_ID_FAST)
4324 		phydev->supported |= SUPPORTED_1000baseT_Full;
4325 
4326 	phydev->advertising = phydev->supported;
4327 	adapter->phydev = phydev;
4328 
4329 	dev_info(&adapter->pdev->dev, "attached PHY driver [%s] "
4330 		 "(mii_bus:phy_addr=%s)\n",
4331 		 phydev->drv->name, dev_name(&phydev->dev));
4332 
4333 	return 0;
4334 }
4335 
4336 /**
4337  * et131x_adapter_init
4338  * @netdev: pointer to our net_device struct
4339  * @pdev: pointer to the PCI device
4340  *
4341  * Initialize the data structures for the et131x_adapter object and link
4342  * them together with the platform provided device structures.
4343  */
4344 static struct et131x_adapter *et131x_adapter_init(struct net_device *netdev,
4345 		struct pci_dev *pdev)
4346 {
4347 	static const u8 default_mac[] = { 0x00, 0x05, 0x3d, 0x00, 0x02, 0x00 };
4348 
4349 	struct et131x_adapter *adapter;
4350 
4351 	/* Allocate private adapter struct and copy in relevant information */
4352 	adapter = netdev_priv(netdev);
4353 	adapter->pdev = pci_dev_get(pdev);
4354 	adapter->netdev = netdev;
4355 
4356 	/* Initialize spinlocks here */
4357 	spin_lock_init(&adapter->lock);
4358 	spin_lock_init(&adapter->tcb_send_qlock);
4359 	spin_lock_init(&adapter->tcb_ready_qlock);
4360 	spin_lock_init(&adapter->send_hw_lock);
4361 	spin_lock_init(&adapter->rcv_lock);
4362 	spin_lock_init(&adapter->rcv_pend_lock);
4363 	spin_lock_init(&adapter->fbr_lock);
4364 	spin_lock_init(&adapter->phy_lock);
4365 
4366 	adapter->registry_jumbo_packet = 1514;	/* 1514-9216 */
4367 
4368 	/* Set the MAC address to a default */
4369 	memcpy(adapter->addr, default_mac, ETH_ALEN);
4370 
4371 	return adapter;
4372 }
4373 
4374 /**
4375  * et131x_pci_remove
4376  * @pdev: a pointer to the device's pci_dev structure
4377  *
4378  * Registered in the pci_driver structure, this function is called when the
4379  * PCI subsystem detects that a PCI device which matches the information
4380  * contained in the pci_device_id table has been removed.
4381  */
4382 static void __devexit et131x_pci_remove(struct pci_dev *pdev)
4383 {
4384 	struct net_device *netdev = pci_get_drvdata(pdev);
4385 	struct et131x_adapter *adapter = netdev_priv(netdev);
4386 
4387 	unregister_netdev(netdev);
4388 	phy_disconnect(adapter->phydev);
4389 	mdiobus_unregister(adapter->mii_bus);
4390 	kfree(adapter->mii_bus->irq);
4391 	mdiobus_free(adapter->mii_bus);
4392 
4393 	et131x_adapter_memory_free(adapter);
4394 	iounmap(adapter->regs);
4395 	pci_dev_put(pdev);
4396 
4397 	free_netdev(netdev);
4398 	pci_release_regions(pdev);
4399 	pci_disable_device(pdev);
4400 }
4401 
4402 /**
4403  * et131x_up - Bring up a device for use.
4404  * @netdev: device to be opened
4405  */
4406 static void et131x_up(struct net_device *netdev)
4407 {
4408 	struct et131x_adapter *adapter = netdev_priv(netdev);
4409 
4410 	et131x_enable_txrx(netdev);
4411 	phy_start(adapter->phydev);
4412 }
4413 
4414 /**
4415  * et131x_down - Bring down the device
4416  * @netdev: device to be brought down
4417  */
4418 static void et131x_down(struct net_device *netdev)
4419 {
4420 	struct et131x_adapter *adapter = netdev_priv(netdev);
4421 
4422 	/* Save the timestamp for the TX watchdog, prevent a timeout */
4423 	netdev->trans_start = jiffies;
4424 
4425 	phy_stop(adapter->phydev);
4426 	et131x_disable_txrx(netdev);
4427 }
4428 
4429 #ifdef CONFIG_PM_SLEEP
4430 static int et131x_suspend(struct device *dev)
4431 {
4432 	struct pci_dev *pdev = to_pci_dev(dev);
4433 	struct net_device *netdev = pci_get_drvdata(pdev);
4434 
4435 	if (netif_running(netdev)) {
4436 		netif_device_detach(netdev);
4437 		et131x_down(netdev);
4438 		pci_save_state(pdev);
4439 	}
4440 
4441 	return 0;
4442 }
4443 
4444 static int et131x_resume(struct device *dev)
4445 {
4446 	struct pci_dev *pdev = to_pci_dev(dev);
4447 	struct net_device *netdev = pci_get_drvdata(pdev);
4448 
4449 	if (netif_running(netdev)) {
4450 		pci_restore_state(pdev);
4451 		et131x_up(netdev);
4452 		netif_device_attach(netdev);
4453 	}
4454 
4455 	return 0;
4456 }
4457 
4458 static SIMPLE_DEV_PM_OPS(et131x_pm_ops, et131x_suspend, et131x_resume);
4459 #define ET131X_PM_OPS (&et131x_pm_ops)
4460 #else
4461 #define ET131X_PM_OPS NULL
4462 #endif
4463 
4464 /**
4465  * et131x_isr - The Interrupt Service Routine for the driver.
4466  * @irq: the IRQ on which the interrupt was received.
4467  * @dev_id: device-specific info (here a pointer to a net_device struct)
4468  *
4469  * Returns a value indicating if the interrupt was handled.
4470  */
4471 irqreturn_t et131x_isr(int irq, void *dev_id)
4472 {
4473 	bool handled = true;
4474 	struct net_device *netdev = (struct net_device *)dev_id;
4475 	struct et131x_adapter *adapter = NULL;
4476 	u32 status;
4477 
4478 	if (!netif_device_present(netdev)) {
4479 		handled = false;
4480 		goto out;
4481 	}
4482 
4483 	adapter = netdev_priv(netdev);
4484 
4485 	/* If the adapter is in low power state, then it should not
4486 	 * recognize any interrupt
4487 	 */
4488 
4489 	/* Disable Device Interrupts */
4490 	et131x_disable_interrupts(adapter);
4491 
4492 	/* Get a copy of the value in the interrupt status register
4493 	 * so we can process the interrupting section
4494 	 */
4495 	status = readl(&adapter->regs->global.int_status);
4496 
4497 	if (adapter->flowcontrol == FLOW_TXONLY ||
4498 	    adapter->flowcontrol == FLOW_BOTH) {
4499 		status &= ~INT_MASK_ENABLE;
4500 	} else {
4501 		status &= ~INT_MASK_ENABLE_NO_FLOW;
4502 	}
4503 
4504 	/* Make sure this is our interrupt */
4505 	if (!status) {
4506 		handled = false;
4507 		et131x_enable_interrupts(adapter);
4508 		goto out;
4509 	}
4510 
4511 	/* This is our interrupt, so process accordingly */
4512 
4513 	if (status & ET_INTR_WATCHDOG) {
4514 		struct tcb *tcb = adapter->tx_ring.send_head;
4515 
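		/* A TCB that has sat through more than one watchdog period
		 * is treated as a missed Tx completion, so fake a TXDMA
		 * interrupt to get it reclaimed.
		 */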
4516 		if (tcb)
4517 			if (++tcb->stale > 1)
4518 				status |= ET_INTR_TXDMA_ISR;
4519 
4520 		if (adapter->rx_ring.unfinished_receives)
4521 			status |= ET_INTR_RXDMA_XFR_DONE;
4522 		else if (tcb == NULL)
4523 			writel(0, &adapter->regs->global.watchdog_timer);
4524 
4525 		status &= ~ET_INTR_WATCHDOG;
4526 	}
4527 
4528 	if (status == 0) {
4529 		/* This interrupt has in some way been "handled" by
4530 		 * the ISR. Either it was a spurious Rx interrupt, or
4531 		 * it was a Tx interrupt that has been filtered by
4532 		 * the ISR.
4533 		 */
4534 		et131x_enable_interrupts(adapter);
4535 		goto out;
4536 	}
4537 
4538 	/* We need to save the interrupt status value for use in our
4539 	 * DPC. We will clear the software copy of that in that
4540 	 * routine.
4541 	 */
4542 	adapter->stats.interrupt_status = status;
4543 
4544 	/* Schedule the ISR handler as a bottom-half task in the
4545 	 * kernel's tq_immediate queue, and mark the queue for
4546 	 * execution
4547 	 */
4548 	schedule_work(&adapter->task);
4549 out:
4550 	return IRQ_RETVAL(handled);
4551 }
4552 
4553 /**
4554  * et131x_isr_handler - The ISR handler
4555  * @work: pointer to the work_struct embedded in our private adapter structure
4556  *
4557  * Scheduled to run in a deferred context by the ISR. This is where the ISR's
4558  * work actually gets done.
4559  */
4560 static void et131x_isr_handler(struct work_struct *work)
4561 {
4562 	struct et131x_adapter *adapter =
4563 		container_of(work, struct et131x_adapter, task);
4564 	u32 status = adapter->stats.interrupt_status;
4565 	struct address_map __iomem *iomem = adapter->regs;
4566 
4567 	/*
4568 	 * These first two are by far the most common.  Once handled, we clear
4569 	 * their two bits in the status word.  If the word is now zero, we
4570 	 * exit.
4571 	 */
4572 	/* Handle all the completed Transmit interrupts */
4573 	if (status & ET_INTR_TXDMA_ISR)
4574 		et131x_handle_send_interrupt(adapter);
4575 
4576 	/* Handle all the completed Receives interrupts */
4577 	if (status & ET_INTR_RXDMA_XFR_DONE)
4578 		et131x_handle_recv_interrupt(adapter);
4579 
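	/* 0xffffffd7 clears the two bits handled above, presumably
	 * ~(ET_INTR_TXDMA_ISR | ET_INTR_RXDMA_XFR_DONE).
	 */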
4580 	status &= 0xffffffd7;
4581 
4582 	if (status) {
4583 		/* Handle the TXDMA Error interrupt */
4584 		if (status & ET_INTR_TXDMA_ERR) {
4585 			u32 txdma_err;
4586 
4587 			/* Following read also clears the register (COR) */
4588 			txdma_err = readl(&iomem->txdma.tx_dma_error);
4589 
4590 			dev_warn(&adapter->pdev->dev,
4591 				    "TXDMA_ERR interrupt, error = %d\n",
4592 				    txdma_err);
4593 		}
4594 
4595 		/* Handle Free Buffer Ring 0 and 1 Low interrupt */
4596 		if (status &
4597 		    (ET_INTR_RXDMA_FB_R0_LOW | ET_INTR_RXDMA_FB_R1_LOW)) {
4598 			/*
4599 			 * This indicates the number of unused buffers in
4600 			 * RXDMA free buffer ring 0 is <= the limit you
4601 			 * programmed. Free buffer resources need to be
4602 			 * returned.  Free buffers are consumed as packets
4603 			 * are passed from the network to the host. The host
4604 			 * becomes aware of the packets from the contents of
4605 			 * the packet status ring. This ring is queried when
4606 			 * the packet done interrupt occurs. Packets are then
4607 			 * passed to the OS. When the OS is done with the
4608 			 * packets the resources can be returned to the
4609 			 * ET1310 for re-use. This interrupt is one method of
4610 			 * returning resources.
4611 			 */
4612 
4613 			/* If the user has flow control on, then we will
4614 			 * send a pause packet, otherwise just exit
4615 			 */
4616 			if (adapter->flowcontrol == FLOW_TXONLY ||
4617 			    adapter->flowcontrol == FLOW_BOTH) {
4618 				u32 pm_csr;
4619 
4620 				/* Tell the device to send a pause packet via
4621 				 * the back pressure register (bp req  and
4622 				 * bp xon/xoff)
4623 				 */
4624 				pm_csr = readl(&iomem->global.pm_csr);
4625 				if (!et1310_in_phy_coma(adapter))
4626 					writel(3, &iomem->txmac.bp_ctrl);
4627 			}
4628 		}
4629 
4630 		/* Handle Packet Status Ring Low Interrupt */
4631 		if (status & ET_INTR_RXDMA_STAT_LOW) {
4632 
4633 			/*
4634 			 * Same idea as with the two Free Buffer Rings.
4635 			 * Packets going from the network to the host each
4636 			 * consume a free buffer resource and a packet status
4637 			 * resource.  These resources are passed to the OS.
4638 			 * When the OS is done with the resources, they need
4639 			 * to be returned to the ET1310. This is one method
4640 			 * of returning the resources.
4641 			 */
4642 		}
4643 
4644 		/* Handle RXDMA Error Interrupt */
4645 		if (status & ET_INTR_RXDMA_ERR) {
4646 			/*
4647 			 * The rxdma_error interrupt is sent when a time-out
4648 			 * on a request issued by the JAGCore has occurred or
4649 			 * a completion is returned with an un-successful
4650 			 * status.  In both cases the request is considered
4651 			 * complete. The JAGCore will automatically re-try the
4652 			 * request in question. Normally information on events
4653 			 * like these are sent to the host using the "Advanced
4654 			 * Error Reporting" capability. This interrupt is
4655 			 * another way of getting similar information. The
4656 			 * only thing required is to clear the interrupt by
4657 			 * reading the ISR in the global resources. The
4658 			 * JAGCore will do a re-try on the request.  Normally
4659 			 * you should never see this interrupt. If you start
4660 			 * to see this interrupt occurring frequently then
4661 			 * something bad has occurred. A reset might be the
4662 			 * thing to do.
4663 			 */
4664 			/* TRAP();*/
4665 
4666 			dev_warn(&adapter->pdev->dev,
4667 				    "RxDMA_ERR interrupt, error %x\n",
4668 				    readl(&iomem->txmac.tx_test));
4669 		}
4670 
4671 		/* Handle the Wake on LAN Event */
4672 		if (status & ET_INTR_WOL) {
4673 			/*
4674 			 * This is a secondary interrupt for wake on LAN.
4675 			 * The driver should never see this, if it does,
4676 			 * something serious is wrong. We will TRAP the
4677 			 * message when we are in DBG mode, otherwise we
4678 			 * will ignore it.
4679 			 */
4680 			dev_err(&adapter->pdev->dev, "WAKE_ON_LAN interrupt\n");
4681 		}
4682 
4683 		/* Let's move on to the TxMac */
4684 		if (status & ET_INTR_TXMAC) {
4685 			u32 err = readl(&iomem->txmac.err);
4686 
4687 			/*
4688 			 * When any of the errors occur and TXMAC generates
4689 			 * an interrupt to report these errors, it usually
4690 			 * means that TXMAC has detected an error in the data
4691 			 * stream retrieved from the on-chip Tx Q. All of
4692 			 * these errors are catastrophic and TXMAC won't be
4693 			 * able to recover data when these errors occur.  In
4694 			 * a nutshell, the whole Tx path will have to be reset
4695 			 * and re-configured afterwards.
4696 			 */
4697 			dev_warn(&adapter->pdev->dev,
4698 				    "TXMAC interrupt, error 0x%08x\n",
4699 				    err);
4700 
4701 			/* If we are debugging, we want to see this error,
4702 			 * otherwise we just want the device to be reset and
4703 			 * continue
4704 			 */
4705 		}
4706 
4707 		/* Handle RXMAC Interrupt */
4708 		if (status & ET_INTR_RXMAC) {
4709 			/*
4710 			 * These interrupts are catastrophic to the device,
4711 			 * what we need to do is disable the interrupts and
4712 			 * set the flag to cause us to reset so we can solve
4713 			 * this issue.
4714 			 */
4715 			/* MP_SET_FLAG( adapter,
4716 						fMP_ADAPTER_HARDWARE_ERROR); */
4717 
4718 			dev_warn(&adapter->pdev->dev,
4719 			  "RXMAC interrupt, error 0x%08x.  Requesting reset\n",
4720 				    readl(&iomem->rxmac.err_reg));
4721 
4722 			dev_warn(&adapter->pdev->dev,
4723 				    "Enable 0x%08x, Diag 0x%08x\n",
4724 				    readl(&iomem->rxmac.ctrl),
4725 				    readl(&iomem->rxmac.rxq_diag));
4726 
4727 			/*
4728 			 * If we are debugging, we want to see this error,
4729 			 * otherwise we just want the device to be reset and
4730 			 * continue
4731 			 */
4732 		}
4733 
4734 		/* Handle MAC_STAT Interrupt */
4735 		if (status & ET_INTR_MAC_STAT) {
4736 			/*
4737 			 * This means at least one of the un-masked counters
4738 			 * in the MAC_STAT block has rolled over.  Use this
4739 			 * to maintain the top, software managed bits of the
4740 			 * counter(s).
4741 			 */
4742 			et1310_handle_macstat_interrupt(adapter);
4743 		}
4744 
4745 		/* Handle SLV Timeout Interrupt */
4746 		if (status & ET_INTR_SLV_TIMEOUT) {
4747 			/*
4748 			 * This means a timeout has occurred on a read or
4749 			 * write request to one of the JAGCore registers. The
4750 			 * Global Resources block has terminated the request
4751 			 * and on a read request, returned a "fake" value.
4752 			 * The most likely reasons are: Bad Address or the
4753 			 * addressed module is in a power-down state and
4754 			 * can't respond.
4755 			 */
4756 		}
4757 	}
4758 	et131x_enable_interrupts(adapter);
4759 }
4760 
4761 /**
4762  * et131x_stats - Return the current device statistics.
4763  * @netdev: device whose stats are being queried
4764  *
4765  * Returns a pointer to the device's updated net_device_stats structure.
4766  */
4767 static struct net_device_stats *et131x_stats(struct net_device *netdev)
4768 {
4769 	struct et131x_adapter *adapter = netdev_priv(netdev);
4770 	struct net_device_stats *stats = &adapter->net_stats;
4771 	struct ce_stats *devstat = &adapter->stats;
4772 
4773 	stats->rx_errors = devstat->rx_length_errs +
4774 			   devstat->rx_align_errs +
4775 			   devstat->rx_crc_errs +
4776 			   devstat->rx_code_violations +
4777 			   devstat->rx_other_errs;
4778 	stats->tx_errors = devstat->tx_max_pkt_errs;
4779 	stats->multicast = devstat->multicast_pkts_rcvd;
4780 	stats->collisions = devstat->tx_collisions;
4781 
4782 	stats->rx_length_errors = devstat->rx_length_errs;
4783 	stats->rx_over_errors = devstat->rx_overflows;
4784 	stats->rx_crc_errors = devstat->rx_crc_errs;
4785 
4786 	/* NOTE: These stats don't have corresponding values in CE_STATS,
4787 	 * so we're going to have to update these directly from within the
4788 	 * TX/RX code
4789 	 */
4790 	/* stats->rx_bytes            = 20; devstat->; */
4791 	/* stats->tx_bytes            = 20;  devstat->; */
4792 	/* stats->rx_dropped          = devstat->; */
4793 	/* stats->tx_dropped          = devstat->; */
4794 
4795 	/*  NOTE: Not used, can't find analogous statistics */
4796 	/* stats->rx_frame_errors     = devstat->; */
4797 	/* stats->rx_fifo_errors      = devstat->; */
4798 	/* stats->rx_missed_errors    = devstat->; */
4799 
4800 	/* stats->tx_aborted_errors   = devstat->; */
4801 	/* stats->tx_carrier_errors   = devstat->; */
4802 	/* stats->tx_fifo_errors      = devstat->; */
4803 	/* stats->tx_heartbeat_errors = devstat->; */
4804 	/* stats->tx_window_errors    = devstat->; */
4805 	return stats;
4806 }
4807 
4808 /**
4809  * et131x_open - Open the device for use.
4810  * @netdev: device to be opened
4811  *
4812  * Returns 0 on success, errno on failure (as defined in errno.h)
4813  */
4814 static int et131x_open(struct net_device *netdev)
4815 {
4816 	struct et131x_adapter *adapter = netdev_priv(netdev);
4817 	struct pci_dev *pdev = adapter->pdev;
4818 	unsigned int irq = pdev->irq;
4819 	int result;
4820 
4821 	/* Start the timer to track NIC errors */
4822 	init_timer(&adapter->error_timer);
4823 	adapter->error_timer.expires = jiffies + TX_ERROR_PERIOD * HZ / 1000;
4824 	adapter->error_timer.function = et131x_error_timer_handler;
4825 	adapter->error_timer.data = (unsigned long)adapter;
4826 	add_timer(&adapter->error_timer);
4827 
4828 	result = request_irq(irq, et131x_isr, IRQF_SHARED, netdev->name, netdev);
4829 	if (result) {
4830 		dev_err(&pdev->dev, "could not register IRQ %d\n", irq);
4831 		return result;
4832 	}
4833 
4834 	adapter->flags |= fMP_ADAPTER_INTERRUPT_IN_USE;
4835 
4836 	et131x_up(netdev);
4837 
4838 	return result;
4839 }
4840 
4841 /**
4842  * et131x_close - Close the device
4843  * @netdev: device to be closed
4844  *
4845  * Returns 0 on success, errno on failure (as defined in errno.h)
4846  */
4847 static int et131x_close(struct net_device *netdev)
4848 {
4849 	struct et131x_adapter *adapter = netdev_priv(netdev);
4850 
4851 	et131x_down(netdev);
4852 
4853 	adapter->flags &= ~fMP_ADAPTER_INTERRUPT_IN_USE;
4854 	free_irq(adapter->pdev->irq, netdev);
4855 
4856 	/* Stop the error timer */
4857 	return del_timer_sync(&adapter->error_timer);
4858 }
4859 
4860 /**
4861  * et131x_ioctl - The I/O Control handler for the driver
4862  * @netdev: device on which the control request is being made
4863  * @reqbuf: a pointer to the IOCTL request buffer
4864  * @cmd: the IOCTL command code
4865  *
4866  * Returns 0 on success, errno on failure (as defined in errno.h)
4867  */
4868 static int et131x_ioctl(struct net_device *netdev, struct ifreq *reqbuf,
4869 			int cmd)
4870 {
4871 	struct et131x_adapter *adapter = netdev_priv(netdev);
4872 
4873 	if (!adapter->phydev)
4874 		return -EINVAL;
4875 
4876 	return phy_mii_ioctl(adapter->phydev, reqbuf, cmd);
4877 }
4878 
4879 /**
4880  * et131x_set_packet_filter - Configures the Rx Packet filtering on the device
4881  * @adapter: pointer to our private adapter structure
4882  *
4883  * FIXME: lot of dups with MAC code
4884  *
4885  * Returns 0 on success, errno on failure
4886  */
4887 static int et131x_set_packet_filter(struct et131x_adapter *adapter)
4888 {
4889 	int filter = adapter->packet_filter;
4890 	int status = 0;
4891 	u32 ctrl;
4892 	u32 pf_ctrl;
4893 
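	/* As the code below implies, the pf_ctrl filter bits appear to be:
	 * bit 0 - broadcast filter, bit 1 - multicast filter, bit 2 -
	 * unicast (directed) filter; bit 2 (0x04) of the rxmac ctrl register
	 * disables packet filtering altogether when set, and is cleared
	 * whenever one of the filters is actually used.
	 */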
4894 	ctrl = readl(&adapter->regs->rxmac.ctrl);
4895 	pf_ctrl = readl(&adapter->regs->rxmac.pf_ctrl);
4896 
4897 	/* Default to disabled packet filtering.  Enable it in the individual
4898 	 * case statements that require the device to filter something
4899 	 */
4900 	ctrl |= 0x04;
4901 
4902 	/* Set us to be in promiscuous mode so we receive everything; this
4903 	 * is also the case when the packet filter is 0
4904 	 */
4905 	if ((filter & ET131X_PACKET_TYPE_PROMISCUOUS) || filter == 0)
4906 		pf_ctrl &= ~7;	/* Clear filter bits */
4907 	else {
4908 		/*
4909 		 * Set us up with Multicast packet filtering.  Three cases are
4910 		 * possible - (1) we have a multi-cast list, (2) we receive ALL
4911 		 * multicast entries or (3) we receive none.
4912 		 */
4913 		if (filter & ET131X_PACKET_TYPE_ALL_MULTICAST)
4914 			pf_ctrl &= ~2;	/* Multicast filter bit */
4915 		else {
4916 			et1310_setup_device_for_multicast(adapter);
4917 			pf_ctrl |= 2;
4918 			ctrl &= ~0x04;
4919 		}
4920 
4921 		/* Set us up with Unicast packet filtering */
4922 		if (filter & ET131X_PACKET_TYPE_DIRECTED) {
4923 			et1310_setup_device_for_unicast(adapter);
4924 			pf_ctrl |= 4;
4925 			ctrl &= ~0x04;
4926 		}
4927 
4928 		/* Set us up with Broadcast packet filtering */
4929 		if (filter & ET131X_PACKET_TYPE_BROADCAST) {
4930 			pf_ctrl |= 1;	/* Broadcast filter bit */
4931 			ctrl &= ~0x04;
4932 		} else
4933 			pf_ctrl &= ~1;
4934 
4935 		/* Setup the receive mac configuration registers - Packet
4936 		 * Filter control + the enable / disable for packet filter
4937 		 * in the control reg.
4938 		 */
4939 		writel(pf_ctrl, &adapter->regs->rxmac.pf_ctrl);
4940 		writel(ctrl, &adapter->regs->rxmac.ctrl);
4941 	}
4942 	return status;
4943 }
4944 
4945 /**
4946  * et131x_multicast - The handler to configure multicasting on the interface
4947  * @netdev: a pointer to a net_device struct representing the device
4948  */
4949 static void et131x_multicast(struct net_device *netdev)
4950 {
4951 	struct et131x_adapter *adapter = netdev_priv(netdev);
4952 	int packet_filter;
4953 	unsigned long flags;
4954 	struct netdev_hw_addr *ha;
4955 	int i;
4956 
4957 	spin_lock_irqsave(&adapter->lock, flags);
4958 
4959 	/* Before we modify the platform-independent filter flags, store them
4960 	 * locally. This allows us to determine if anything's changed and if
4961 	 * we even need to bother the hardware
4962 	 */
4963 	packet_filter = adapter->packet_filter;
4964 
4965 	/* Clear the 'multicast' flag locally; because we only have a single
4966 	 * flag to check multicast, and multiple multicast addresses can be
4967 	 * set, this is the easiest way to determine if more than one
4968 	 * multicast address is being set.
4969 	 */
4970 	packet_filter &= ~ET131X_PACKET_TYPE_MULTICAST;
4971 
4972 	/* Check the net_device flags and set the device independent flags
4973 	 * accordingly
4974 	 */
4975 
4976 	if (netdev->flags & IFF_PROMISC)
4977 		adapter->packet_filter |= ET131X_PACKET_TYPE_PROMISCUOUS;
4978 	else
4979 		adapter->packet_filter &= ~ET131X_PACKET_TYPE_PROMISCUOUS;
4980 
4981 	if (netdev->flags & IFF_ALLMULTI)
4982 		adapter->packet_filter |= ET131X_PACKET_TYPE_ALL_MULTICAST;
4983 
4984 	if (netdev_mc_count(netdev) > NIC_MAX_MCAST_LIST)
4985 		adapter->packet_filter |= ET131X_PACKET_TYPE_ALL_MULTICAST;
4986 
4987 	if (netdev_mc_count(netdev) < 1) {
4988 		adapter->packet_filter &= ~ET131X_PACKET_TYPE_ALL_MULTICAST;
4989 		adapter->packet_filter &= ~ET131X_PACKET_TYPE_MULTICAST;
4990 	} else
4991 		adapter->packet_filter |= ET131X_PACKET_TYPE_MULTICAST;
4992 
4993 	/* Set values in the private adapter struct */
4994 	i = 0;
4995 	netdev_for_each_mc_addr(ha, netdev) {
4996 		if (i == NIC_MAX_MCAST_LIST)
4997 			break;
4998 		memcpy(adapter->multicast_list[i++], ha->addr, ETH_ALEN);
4999 	}
5000 	adapter->multicast_addr_count = i;
5001 
5002 	/* Are the new flags different from the previous ones? If not, then no
5003 	 * action is required.
5004 	 *
5005 	 * NOTE - Because the MULTICAST flag was cleared in the local copy, this
5006 	 *        test forces a hardware update whenever any multicast address
5007 	 *        is set, even if the addresses themselves haven't changed. */
5008 	if (packet_filter != adapter->packet_filter) {
5009 		/* Call the device's filter function */
5010 		et131x_set_packet_filter(adapter);
5011 	}
5012 	spin_unlock_irqrestore(&adapter->lock, flags);
5013 }
5014 
5015 /**
5016  * et131x_tx - The handler to tx a packet on the device
5017  * @skb: data to be Tx'd
5018  * @netdev: device on which data is to be Tx'd
5019  *
5020  * Returns NETDEV_TX_OK on success, NETDEV_TX_BUSY if the packet must be retried
5021  */
5022 static int et131x_tx(struct sk_buff *skb, struct net_device *netdev)
5023 {
5024 	int status = 0;
5025 	struct et131x_adapter *adapter = netdev_priv(netdev);
5026 
5027 	/* stop the queue if it's getting full */
5028 	if (adapter->tx_ring.used >= NUM_TCB - 1 &&
5029 	    !netif_queue_stopped(netdev))
5030 		netif_stop_queue(netdev);
5031 
5032 	/* Save the timestamp for the TX timeout watchdog */
5033 	netdev->trans_start = jiffies;
5034 
5035 	/* Call the device-specific data Tx routine */
5036 	status = et131x_send_packets(skb, netdev);
5037 
5038 	/* Check status and manage the netif queue if necessary */
5039 	if (status != 0) {
5040 		if (status == -ENOMEM)
5041 			status = NETDEV_TX_BUSY;
5042 		else
5043 			status = NETDEV_TX_OK;
5044 	}
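
	/* NETDEV_TX_BUSY asks the stack to requeue the skb and retry later;
	 * the queue is stopped above when the TCB ring is nearly full, so
	 * the retry happens once descriptors are reclaimed.
	 */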
5045 	return status;
5046 }
5047 
5048 /**
5049  * et131x_tx_timeout - Timeout handler
5050  * @netdev: a pointer to a net_device struct representing the device
5051  *
5052  * The handler called when a Tx request times out. The timeout period is
5053  * specified by the 'watchdog_timeo' element in the net_device structure (see
5054  * et131x_pci_setup() to see how this value is set).
5055  */
5056 static void et131x_tx_timeout(struct net_device *netdev)
5057 {
5058 	struct et131x_adapter *adapter = netdev_priv(netdev);
5059 	struct tcb *tcb;
5060 	unsigned long flags;
5061 
5062 	/* If the device is closed, ignore the timeout */
5063 	if (!(adapter->flags & fMP_ADAPTER_INTERRUPT_IN_USE))
5064 		return;
5065 
5066 	/* Any nonrecoverable hardware error?
5067 	 * Checks adapter->flags for any failure in phy reading
5068 	 */
5069 	if (adapter->flags & fMP_ADAPTER_NON_RECOVER_ERROR)
5070 		return;
5071 
5072 	/* Hardware failure? */
5073 	if (adapter->flags & fMP_ADAPTER_HARDWARE_ERROR) {
5074 		dev_err(&adapter->pdev->dev, "hardware error - reset\n");
5075 		return;
5076 	}
5077 
5078 	/* Is send stuck? */
5079 	spin_lock_irqsave(&adapter->tcb_send_qlock, flags);
5080 
5081 	tcb = adapter->tx_ring.send_head;
5082 
5083 	if (tcb != NULL) {
5084 		tcb->count++;
5085 
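		/* Each watchdog expiry bumps the count on the TCB at the head
		 * of the send queue; once the same TCB has been seen stuck
		 * for more than NIC_SEND_HANG_THRESHOLD expiries, the tx/rx
		 * paths are reset below.
		 */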
5086 		if (tcb->count > NIC_SEND_HANG_THRESHOLD) {
5087 			spin_unlock_irqrestore(&adapter->tcb_send_qlock,
5088 					       flags);
5089 
5090 			dev_warn(&adapter->pdev->dev,
5091 				"Send stuck - reset.  tcb->WrIndex %x, flags 0x%08x\n",
5092 				tcb->index,
5093 				tcb->flags);
5094 
5095 			adapter->net_stats.tx_errors++;
5096 
5097 			/* perform reset of tx/rx */
5098 			et131x_disable_txrx(netdev);
5099 			et131x_enable_txrx(netdev);
5100 			return;
5101 		}
5102 	}
5103 
5104 	spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
5105 }
5106 
5107 /**
5108  * et131x_change_mtu - The handler called to change the MTU for the device
5109  * @netdev: device whose MTU is to be changed
5110  * @new_mtu: the desired MTU
5111  *
5112  * Returns 0 on success, errno on failure (as defined in errno.h)
5113  */
5114 static int et131x_change_mtu(struct net_device *netdev, int new_mtu)
5115 {
5116 	int result = 0;
5117 	struct et131x_adapter *adapter = netdev_priv(netdev);
5118 
5119 	/* Make sure the requested MTU is valid */
5120 	if (new_mtu < 64 || new_mtu > 9216)
5121 		return -EINVAL;
5122 
5123 	et131x_disable_txrx(netdev);
5124 	et131x_handle_send_interrupt(adapter);
5125 	et131x_handle_recv_interrupt(adapter);
5126 
5127 	/* Set the new MTU */
5128 	netdev->mtu = new_mtu;
5129 
5130 	/* Free Rx DMA memory */
5131 	et131x_adapter_memory_free(adapter);
5132 
5133 	/* Set the config parameter for Jumbo Packet support */
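	/* The extra 14 bytes account for the Ethernet header (6-byte
	 * destination MAC + 6-byte source MAC + 2-byte EtherType), which is
	 * not included in the MTU.
	 */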
5134 	adapter->registry_jumbo_packet = new_mtu + 14;
5135 	et131x_soft_reset(adapter);
5136 
5137 	/* Alloc and init Rx DMA memory */
5138 	result = et131x_adapter_memory_alloc(adapter);
5139 	if (result != 0) {
5140 		dev_warn(&adapter->pdev->dev,
5141 			"Change MTU failed; couldn't re-alloc DMA memory\n");
5142 		return result;
5143 	}
5144 
5145 	et131x_init_send(adapter);
5146 
5147 	et131x_hwaddr_init(adapter);
5148 	memcpy(netdev->dev_addr, adapter->addr, ETH_ALEN);
5149 
5150 	/* Init the device with the new settings */
5151 	et131x_adapter_setup(adapter);
5152 
5153 	et131x_enable_txrx(netdev);
5154 
5155 	return result;
5156 }
5157 
5158 /**
5159  * et131x_set_mac_addr - handler to change the MAC address for the device
5160  * @netdev: device whose MAC is to be changed
5161  * @new_mac: the desired MAC address
5162  *
5163  * Returns 0 on success, errno on failure (as defined in errno.h)
5164  *
5165  * IMPLEMENTED BY : blux http://berndlux.de 22.01.2007 21:14
5166  */
5167 static int et131x_set_mac_addr(struct net_device *netdev, void *new_mac)
5168 {
5169 	int result = 0;
5170 	struct et131x_adapter *adapter = netdev_priv(netdev);
5171 	struct sockaddr *address = new_mac;
5172 
5173 	/* begin blux */
5174 
5175 	if (adapter == NULL)
5176 		return -ENODEV;
5177 
5178 	/* Make sure the requested MAC is valid */
5179 	if (!is_valid_ether_addr(address->sa_data))
5180 		return -EINVAL;
5181 
5182 	et131x_disable_txrx(netdev);
5183 	et131x_handle_send_interrupt(adapter);
5184 	et131x_handle_recv_interrupt(adapter);
5185 
5186 	/* Set the new MAC */
5187 	/* netdev->set_mac_address  = &new_mac; */
5188 
5189 	memcpy(netdev->dev_addr, address->sa_data, netdev->addr_len);
5190 
5191 	printk(KERN_INFO "%s: Setting MAC address to %pM\n",
5192 			netdev->name, netdev->dev_addr);
5193 
5194 	/* Free Rx DMA memory */
5195 	et131x_adapter_memory_free(adapter);
5196 
5197 	et131x_soft_reset(adapter);
5198 
5199 	/* Alloc and init Rx DMA memory */
5200 	result = et131x_adapter_memory_alloc(adapter);
5201 	if (result != 0) {
5202 		dev_err(&adapter->pdev->dev,
5203 			"Change MAC failed; couldn't re-alloc DMA memory\n");
5204 		return result;
5205 	}
5206 
5207 	et131x_init_send(adapter);
5208 
5209 	et131x_hwaddr_init(adapter);
5210 
5211 	/* Init the device with the new settings */
5212 	et131x_adapter_setup(adapter);
5213 
5214 	et131x_enable_txrx(netdev);
5215 
5216 	return result;
5217 }
5218 
5219 static const struct net_device_ops et131x_netdev_ops = {
5220 	.ndo_open		= et131x_open,
5221 	.ndo_stop		= et131x_close,
5222 	.ndo_start_xmit		= et131x_tx,
5223 	.ndo_set_rx_mode	= et131x_multicast,
5224 	.ndo_tx_timeout		= et131x_tx_timeout,
5225 	.ndo_change_mtu		= et131x_change_mtu,
5226 	.ndo_set_mac_address	= et131x_set_mac_addr,
5227 	.ndo_validate_addr	= eth_validate_addr,
5228 	.ndo_get_stats		= et131x_stats,
5229 	.ndo_do_ioctl		= et131x_ioctl,
5230 };
5231 
5232 /**
5233  * et131x_pci_setup - Perform device initialization
5234  * @pdev: a pointer to the device's pci_dev structure
5235  * @ent: this device's entry in the pci_device_id table
5236  *
5237  * Returns 0 on success, errno on failure (as defined in errno.h)
5238  *
5239  * Registered in the pci_driver structure, this function is called when the
5240  * PCI subsystem finds a new PCI device which matches the information
5241  * contained in the pci_device_id table. This routine is the equivalent to
5242  * a device insertion routine.
5243  */
5244 static int __devinit et131x_pci_setup(struct pci_dev *pdev,
5245 			       const struct pci_device_id *ent)
5246 {
5247 	struct net_device *netdev;
5248 	struct et131x_adapter *adapter;
5249 	int rc;
5250 	int ii;
5251 
5252 	rc = pci_enable_device(pdev);
5253 	if (rc < 0) {
5254 		dev_err(&pdev->dev, "pci_enable_device() failed\n");
5255 		goto out;
5256 	}
5257 
5258 	/* Perform some basic PCI checks */
5259 	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
5260 		dev_err(&pdev->dev, "Can't find PCI device's base address\n");
5261 		rc = -ENODEV;
5262 		goto err_disable;
5263 	}
5264 
5265 	rc = pci_request_regions(pdev, DRIVER_NAME);
5266 	if (rc < 0) {
5267 		dev_err(&pdev->dev, "Can't get PCI resources\n");
5268 		goto err_disable;
5269 	}
5270 
5271 	pci_set_master(pdev);
5272 
5273 	/* Check the DMA addressing support of this device */
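	/* A 64-bit streaming DMA mask is tried first, falling back to 32-bit,
	 * with the coherent (descriptor/ring) mask kept in step with
	 * whichever one succeeds.
	 */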
5274 	if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
5275 		rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
5276 		if (rc < 0) {
5277 			dev_err(&pdev->dev,
5278 			  "Unable to obtain 64 bit DMA for consistent allocations\n");
5279 			goto err_release_res;
5280 		}
5281 	} else if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
5282 		rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
5283 		if (rc < 0) {
5284 			dev_err(&pdev->dev,
5285 			  "Unable to obtain 32 bit DMA for consistent allocations\n");
5286 			goto err_release_res;
5287 		}
5288 	} else {
5289 		dev_err(&pdev->dev, "No usable DMA addressing method\n");
5290 		rc = -EIO;
5291 		goto err_release_res;
5292 	}
5293 
5294 	/* Allocate netdev and private adapter structs */
5295 	netdev = alloc_etherdev(sizeof(struct et131x_adapter));
5296 	if (!netdev) {
5297 		dev_err(&pdev->dev, "Couldn't alloc netdev struct\n");
5298 		rc = -ENOMEM;
5299 		goto err_release_res;
5300 	}
5301 
5302 	netdev->watchdog_timeo = ET131X_TX_TIMEOUT;
5303 	netdev->netdev_ops     = &et131x_netdev_ops;
5304 
5305 	SET_NETDEV_DEV(netdev, &pdev->dev);
5306 	et131x_set_ethtool_ops(netdev);
5307 
5308 	adapter = et131x_adapter_init(netdev, pdev);
5309 
5310 	rc = et131x_pci_init(adapter, pdev);
5311 	if (rc < 0)
5312 		goto err_free_dev;
5313 
5314 	/* Map the bus-relative registers to system virtual memory */
5315 	adapter->regs = pci_ioremap_bar(pdev, 0);
5316 	if (!adapter->regs) {
5317 		dev_err(&pdev->dev, "Cannot map device registers\n");
5318 		rc = -ENOMEM;
5319 		goto err_free_dev;
5320 	}
5321 
5322 	/* If Phy COMA mode was enabled when we went down, disable it here. */
5323 	writel(ET_PMCSR_INIT,  &adapter->regs->global.pm_csr);
5324 
5325 	/* Issue a global reset to the et1310 */
5326 	et131x_soft_reset(adapter);
5327 
5328 	/* Disable all interrupts (paranoid) */
5329 	et131x_disable_interrupts(adapter);
5330 
5331 	/* Allocate DMA memory */
5332 	rc = et131x_adapter_memory_alloc(adapter);
5333 	if (rc < 0) {
5334 		dev_err(&pdev->dev, "Could not alloc adapater memory (DMA)\n");
5335 		goto err_iounmap;
5336 	}
5337 
5338 	/* Init send data structures */
5339 	et131x_init_send(adapter);
5340 
5341 	/* Set up the task structure for the ISR's deferred handler */
5342 	INIT_WORK(&adapter->task, et131x_isr_handler);
5343 
5344 	/* Copy address into the net_device struct */
5345 	memcpy(netdev->dev_addr, adapter->addr, ETH_ALEN);
5346 
5347 	/* Init variable for counting how long we do not have link status */
5348 	adapter->boot_coma = 0;
5349 	et1310_disable_phy_coma(adapter);
5350 
5351 	rc = -ENOMEM;
5352 
5353 	/* Setup the mii_bus struct */
5354 	adapter->mii_bus = mdiobus_alloc();
5355 	if (!adapter->mii_bus) {
5356 		dev_err(&pdev->dev, "Alloc of mii_bus struct failed\n");
5357 		goto err_mem_free;
5358 	}
5359 
5360 	adapter->mii_bus->name = "et131x_eth_mii";
5361 	snprintf(adapter->mii_bus->id, MII_BUS_ID_SIZE, "%x",
5362 		(adapter->pdev->bus->number << 8) | adapter->pdev->devfn);
5363 	adapter->mii_bus->priv = netdev;
5364 	adapter->mii_bus->read = et131x_mdio_read;
5365 	adapter->mii_bus->write = et131x_mdio_write;
5366 	adapter->mii_bus->reset = et131x_mdio_reset;
5367 	adapter->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL);
5368 	if (!adapter->mii_bus->irq) {
5369 		dev_err(&pdev->dev, "mii_bus irq allocation failed\n");
5370 		goto err_mdio_free;
5371 	}
5372 
5373 	for (ii = 0; ii < PHY_MAX_ADDR; ii++)
5374 		adapter->mii_bus->irq[ii] = PHY_POLL;
5375 
5376 	rc = mdiobus_register(adapter->mii_bus);
5377 	if (rc < 0) {
5378 		dev_err(&pdev->dev, "failed to register MII bus\n");
5379 		goto err_mdio_free_irq;
5380 	}
5381 
5382 	rc = et131x_mii_probe(netdev);
5383 	if (rc < 0) {
5384 		dev_err(&pdev->dev, "failed to probe MII bus\n");
5385 		goto err_mdio_unregister;
5386 	}
5387 
5388 	/* Setup et1310 as per the documentation */
5389 	et131x_adapter_setup(adapter);
5390 
5391 	/* We can enable interrupts now
5392 	 *
5393 	 *  NOTE - Because registration of interrupt handler is done in the
5394 	 *         device's open(), defer enabling device interrupts to that
5395 	 *         point
5396 	 */
5397 
5398 	/* Register the net_device struct with the Linux network layer */
5399 	rc = register_netdev(netdev);
5400 	if (rc < 0) {
5401 		dev_err(&pdev->dev, "register_netdev() failed\n");
5402 		goto err_phy_disconnect;
5403 	}
5404 
5405 	/* Stash the net_device in the PCI device's driver data so that it
5406 	 * can be retrieved later, for example on removal or during power
5407 	 * management transitions.
5408 	 */
5409 	pci_set_drvdata(pdev, netdev);
5410 out:
5411 	return rc;
5412 
5413 err_phy_disconnect:
5414 	phy_disconnect(adapter->phydev);
5415 err_mdio_unregister:
5416 	mdiobus_unregister(adapter->mii_bus);
5417 err_mdio_free_irq:
5418 	kfree(adapter->mii_bus->irq);
5419 err_mdio_free:
5420 	mdiobus_free(adapter->mii_bus);
5421 err_mem_free:
5422 	et131x_adapter_memory_free(adapter);
5423 err_iounmap:
5424 	iounmap(adapter->regs);
5425 err_free_dev:
5426 	pci_dev_put(pdev);
5427 	free_netdev(netdev);
5428 err_release_res:
5429 	pci_release_regions(pdev);
5430 err_disable:
5431 	pci_disable_device(pdev);
5432 	goto out;
5433 }
5434 
5435 static DEFINE_PCI_DEVICE_TABLE(et131x_pci_table) = {
5436 	{ PCI_VDEVICE(ATT, ET131X_PCI_DEVICE_ID_GIG), 0UL},
5437 	{ PCI_VDEVICE(ATT, ET131X_PCI_DEVICE_ID_FAST), 0UL},
5438 	{0,}
5439 };
5440 MODULE_DEVICE_TABLE(pci, et131x_pci_table);
5441 
5442 static struct pci_driver et131x_driver = {
5443 	.name		= DRIVER_NAME,
5444 	.id_table	= et131x_pci_table,
5445 	.probe		= et131x_pci_setup,
5446 	.remove		= __devexit_p(et131x_pci_remove),
5447 	.driver.pm	= ET131X_PM_OPS,
5448 };
5449 
5450 /**
5451  * et131x_init_module - The "main" entry point called on driver initialization
5452  *
5453  * Returns 0 on success, errno on failure (as defined in errno.h)
5454  */
5455 static int __init et131x_init_module(void)
5456 {
5457 	return pci_register_driver(&et131x_driver);
5458 }
5459 
5460 /**
5461  * et131x_cleanup_module - The entry point called on driver cleanup
5462  */
5463 static void __exit et131x_cleanup_module(void)
5464 {
5465 	pci_unregister_driver(&et131x_driver);
5466 }
5467 
5468 module_init(et131x_init_module);
5469 module_exit(et131x_cleanup_module);
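
/* A minimal sketch (not part of the original driver): on kernels that
 * provide the module_pci_driver() helper, the init/exit boilerplate above
 * could be collapsed to a single line:
 *
 *	module_pci_driver(et131x_driver);
 *
 * which expands to equivalent pci_register_driver()/pci_unregister_driver()
 * calls.
 */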
5470 
5471