/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2011 Broadcom Corporation.
 *
 * Firmware is:
 *      Derived from proprietary unpublished source code,
 *      Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *      Permission is hereby granted for the distribution of this firmware
 *      data in hexadecimal or equivalent format, provided this copyright
 *      notice is accompanying it.
 */


#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>

#include <net/checksum.h>
#include <net/ip.h>

#include <asm/system.h>
#include <linux/io.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

#define BAR_0 0
#define BAR_2 2

#include "tg3.h"

/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
        return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
        set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
        clear_bit(flag, bits);
}

#define tg3_flag(tp, flag) \
        _tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag) \
        _tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag) \
        _tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)

#define DRV_MODULE_NAME "tg3"
#define TG3_MAJ_NUM 3
#define TG3_MIN_NUM 122
#define DRV_MODULE_VERSION \
        __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
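/* With TG3_MAJ_NUM == 3 and TG3_MIN_NUM == 122, __stringify() pastes
 * DRV_MODULE_VERSION together as the string "3.122".
 */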
#define DRV_MODULE_RELDATE "December 7, 2011"

#define RESET_KIND_SHUTDOWN 0
#define RESET_KIND_INIT 1
#define RESET_KIND_SUSPEND 2

#define TG3_DEF_RX_MODE 0
#define TG3_DEF_TX_MODE 0
#define TG3_DEF_MSG_ENABLE \
        (NETIF_MSG_DRV | \
         NETIF_MSG_PROBE | \
         NETIF_MSG_LINK | \
         NETIF_MSG_TIMER | \
         NETIF_MSG_IFDOWN | \
         NETIF_MSG_IFUP | \
         NETIF_MSG_RX_ERR | \
         NETIF_MSG_TX_ERR)

#define TG3_GRC_LCLCTL_PWRSW_DELAY 100

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT (5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU 60
#define TG3_MAX_MTU(tp) \
        (tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
        (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
         TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING 200
#define TG3_RX_JMB_RING_SIZE(tp) \
        (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
         TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING 100

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al. operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions. Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE 512
#define TG3_DEF_TX_RING_PENDING (TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
        (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
        (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
        (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES (sizeof(struct tg3_tx_buffer_desc) * \
                           TG3_TX_RING_SIZE)
#define NEXT_TX(N) (((N) + 1) & (TG3_TX_RING_SIZE - 1))
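/* Because TG3_TX_RING_SIZE is a power of two, the mask in NEXT_TX() is
 * equivalent to "% TG3_TX_RING_SIZE"; e.g. NEXT_TX(5) == 6 and
 * NEXT_TX(511) == 0, wrapping back to the start of the ring.
 */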

#define TG3_DMA_BYTE_ENAB 64

#define TG3_RX_STD_DMA_SZ 1536
#define TG3_RX_JMB_DMA_SZ 9046

#define TG3_RX_DMA_TO_MAP_SZ(x) ((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
        (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
        (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))

/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode. The driver
 * works around this bug by double copying the packet. This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient. For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path. Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD 256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
#define TG3_RX_COPY_THRESH(tp) TG3_RX_COPY_THRESHOLD
#else
#define TG3_RX_COPY_THRESH(tp) ((tp)->rx_copy_thresh)
#endif

#if (NET_IP_ALIGN != 0)
#define TG3_RX_OFFSET(tp) ((tp)->rx_offset)
#else
#define TG3_RX_OFFSET(tp) (NET_SKB_PAD)
#endif

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi) ((tnapi)->tx_pending / 4)
#define TG3_TX_BD_DMA_MAX_2K 2048
#define TG3_TX_BD_DMA_MAX_4K 4096

#define TG3_RAW_IP_ALIGN 2
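/* A 2-byte pad in front of the Ethernet header leaves the IP header
 * 4-byte aligned, since the Ethernet header itself is 14 bytes long.
 */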

#define TG3_FW_UPDATE_TIMEOUT_SEC 5

#define FIRMWARE_TG3 "tigon/tg3.bin"
#define FIRMWARE_TG3TSO "tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5 "tigon/tg3_tso5.bin"

static char version[] __devinitdata =
        DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1; /* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");

static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
        {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
        {PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
        {}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);

static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
        { "rx_octets" },
        { "rx_fragments" },
        { "rx_ucast_packets" },
        { "rx_mcast_packets" },
        { "rx_bcast_packets" },
        { "rx_fcs_errors" },
        { "rx_align_errors" },
        { "rx_xon_pause_rcvd" },
        { "rx_xoff_pause_rcvd" },
        { "rx_mac_ctrl_rcvd" },
        { "rx_xoff_entered" },
        { "rx_frame_too_long_errors" },
        { "rx_jabbers" },
        { "rx_undersize_packets" },
        { "rx_in_length_errors" },
        { "rx_out_length_errors" },
        { "rx_64_or_less_octet_packets" },
        { "rx_65_to_127_octet_packets" },
        { "rx_128_to_255_octet_packets" },
        { "rx_256_to_511_octet_packets" },
        { "rx_512_to_1023_octet_packets" },
        { "rx_1024_to_1522_octet_packets" },
        { "rx_1523_to_2047_octet_packets" },
        { "rx_2048_to_4095_octet_packets" },
        { "rx_4096_to_8191_octet_packets" },
        { "rx_8192_to_9022_octet_packets" },

        { "tx_octets" },
        { "tx_collisions" },

        { "tx_xon_sent" },
        { "tx_xoff_sent" },
        { "tx_flow_control" },
        { "tx_mac_errors" },
        { "tx_single_collisions" },
        { "tx_mult_collisions" },
        { "tx_deferred" },
        { "tx_excessive_collisions" },
        { "tx_late_collisions" },
        { "tx_collide_2times" },
        { "tx_collide_3times" },
        { "tx_collide_4times" },
        { "tx_collide_5times" },
        { "tx_collide_6times" },
        { "tx_collide_7times" },
        { "tx_collide_8times" },
        { "tx_collide_9times" },
        { "tx_collide_10times" },
        { "tx_collide_11times" },
        { "tx_collide_12times" },
        { "tx_collide_13times" },
        { "tx_collide_14times" },
        { "tx_collide_15times" },
        { "tx_ucast_packets" },
        { "tx_mcast_packets" },
        { "tx_bcast_packets" },
        { "tx_carrier_sense_errors" },
        { "tx_discards" },
        { "tx_errors" },

        { "dma_writeq_full" },
        { "dma_write_prioq_full" },
        { "rxbds_empty" },
        { "rx_discards" },
        { "rx_errors" },
        { "rx_threshold_hit" },

        { "dma_readq_full" },
        { "dma_read_prioq_full" },
        { "tx_comp_queue_full" },

        { "ring_set_send_prod_index" },
        { "ring_status_update" },
        { "nic_irqs" },
        { "nic_avoided_irqs" },
        { "nic_tx_threshold_hit" },

        { "mbuf_lwm_thresh_hit" },
};

#define TG3_NUM_STATS ARRAY_SIZE(ethtool_stats_keys)


static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
        { "nvram test (online) " },
        { "link test (online) " },
        { "register test (offline)" },
        { "memory test (offline)" },
        { "mac loopback test (offline)" },
        { "phy loopback test (offline)" },
        { "ext loopback test (offline)" },
        { "interrupt test (offline)" },
};

#define TG3_NUM_TEST ARRAY_SIZE(ethtool_test_keys)


static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
        return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
        return readl(tp->aperegs + off);
}

static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
        readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}

static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
                pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }
        if (off == TG3_RX_STD_PROD_IDX_REG) {
                pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);

        /* In indirect mode when disabling interrupts, we also need
         * to clear the interrupt bit in the GRC local ctrl register.
         */
        if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
            (val == 0x1)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
                                       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
        }
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}

/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
        if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
                /* Non-posted methods */
                tp->write32(tp, off, val);
        else {
                /* Posted method */
                tg3_write32(tp, off, val);
                if (usec_wait)
                        udelay(usec_wait);
                tp->read32(tp, off);
        }
        /* Wait again after the read for the posted method to guarantee that
         * the wait time is met.
         */
        if (usec_wait)
                udelay(usec_wait);
}

static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
        tp->write32_mbox(tp, off, val);
        if (!tg3_flag(tp, MBOX_WRITE_REORDER) && !tg3_flag(tp, ICH_WORKAROUND))
                tp->read32_mbox(tp, off);
}

static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
        void __iomem *mbox = tp->regs + off;
        writel(val, mbox);
        if (tg3_flag(tp, TXD_MBOX_HWBUG))
                writel(val, mbox);
        if (tg3_flag(tp, MBOX_WRITE_REORDER))
                readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
        return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off + GRCMBOX_BASE);
}

#define tw32_mailbox(reg, val)          tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)          tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)          tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)               tp->read32_mbox(tp, reg)

#define tw32(reg, val)                  tp->write32(tp, reg, val)
#define tw32_f(reg, val)                _tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)       _tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)                       tp->read32(tp, reg)
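/* Note: tp->write32, tp->read32 and the mailbox variants referenced by
 * the macros above are function pointers selected at probe time, so
 * tw32()/tr32() transparently use whichever access method (direct MMIO,
 * posted write with read-back flush, or indirect config-space access)
 * the chip's bus quirks require.
 */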

static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
                return;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tg3_flag(tp, SRAM_USE_CONFIG)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                tw32_f(TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
        unsigned long flags;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
                *val = 0;
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tg3_flag(tp, SRAM_USE_CONFIG)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                *val = tr32(TG3PCI_MEM_WIN_DATA);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_ape_lock_init(struct tg3 *tp)
{
        int i;
        u32 regbase, bit;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
                regbase = TG3_APE_LOCK_GRANT;
        else
                regbase = TG3_APE_PER_LOCK_GRANT;

        /* Make sure the driver doesn't hold any stale locks. */
        for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
                switch (i) {
                case TG3_APE_LOCK_PHY0:
                case TG3_APE_LOCK_PHY1:
                case TG3_APE_LOCK_PHY2:
                case TG3_APE_LOCK_PHY3:
                        bit = APE_LOCK_GRANT_DRIVER;
                        break;
                default:
                        if (!tp->pci_fn)
                                bit = APE_LOCK_GRANT_DRIVER;
                        else
                                bit = 1 << tp->pci_fn;
                }
                tg3_ape_write32(tp, regbase + 4 * i, bit);
        }

}

static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
        int i, off;
        int ret = 0;
        u32 status, req, gnt, bit;

        if (!tg3_flag(tp, ENABLE_APE))
                return 0;

        switch (locknum) {
        case TG3_APE_LOCK_GPIO:
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
                        return 0;
                /* Fall through: on non-5761 chips the GPIO lock uses the
                 * same per-function request bit as GRC/MEM.
                 */
        case TG3_APE_LOCK_GRC:
        case TG3_APE_LOCK_MEM:
                if (!tp->pci_fn)
                        bit = APE_LOCK_REQ_DRIVER;
                else
                        bit = 1 << tp->pci_fn;
                break;
        default:
                return -EINVAL;
        }

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
                req = TG3_APE_LOCK_REQ;
                gnt = TG3_APE_LOCK_GRANT;
        } else {
                req = TG3_APE_PER_LOCK_REQ;
                gnt = TG3_APE_PER_LOCK_GRANT;
        }

        off = 4 * locknum;

        tg3_ape_write32(tp, req + off, bit);

        /* Wait for up to 1 millisecond to acquire lock. */
        for (i = 0; i < 100; i++) {
                status = tg3_ape_read32(tp, gnt + off);
                if (status == bit)
                        break;
                udelay(10);
        }

        if (status != bit) {
                /* Revoke the lock request. */
                tg3_ape_write32(tp, gnt + off, bit);
                ret = -EBUSY;
        }

        return ret;
}

static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
        u32 gnt, bit;

        if (!tg3_flag(tp, ENABLE_APE))
                return;

        switch (locknum) {
        case TG3_APE_LOCK_GPIO:
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
                        return;
                /* Fall through: the 5761 has no GPIO lock to release;
                 * everything else shares the per-function grant bit.
                 */
        case TG3_APE_LOCK_GRC:
        case TG3_APE_LOCK_MEM:
                if (!tp->pci_fn)
                        bit = APE_LOCK_GRANT_DRIVER;
                else
                        bit = 1 << tp->pci_fn;
                break;
        default:
                return;
        }

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
                gnt = TG3_APE_LOCK_GRANT;
        else
                gnt = TG3_APE_PER_LOCK_GRANT;

        tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}

static void tg3_ape_send_event(struct tg3 *tp, u32 event)
{
        int i;
        u32 apedata;

        /* NCSI does not support APE events */
        if (tg3_flag(tp, APE_HAS_NCSI))
                return;

        apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
        if (apedata != APE_SEG_SIG_MAGIC)
                return;

        apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
        if (!(apedata & APE_FW_STATUS_READY))
                return;

        /* Wait for up to 1 millisecond for APE to service previous event. */
        for (i = 0; i < 10; i++) {
                if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
                        return;

                apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

                if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
                        tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
                                        event | APE_EVENT_STATUS_EVENT_PENDING);

                tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

                if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
                        break;

                udelay(100);
        }

        if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
                tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
}

static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
        u32 event;
        u32 apedata;

        if (!tg3_flag(tp, ENABLE_APE))
                return;

        switch (kind) {
        case RESET_KIND_INIT:
                tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
                                APE_HOST_SEG_SIG_MAGIC);
                tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
                                APE_HOST_SEG_LEN_MAGIC);
                apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
                tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
                tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
                                APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
                tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
                                APE_HOST_BEHAV_NO_PHYLOCK);
                tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
                                TG3_APE_HOST_DRVR_STATE_START);

                event = APE_EVENT_STATUS_STATE_START;
                break;
        case RESET_KIND_SHUTDOWN:
                /* With the interface we are currently using,
                 * APE does not track driver state. Wiping
                 * out the HOST SEGMENT SIGNATURE forces
                 * the APE to assume OS absent status.
                 */
                tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);

                if (device_may_wakeup(&tp->pdev->dev) &&
                    tg3_flag(tp, WOL_ENABLE)) {
                        tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
                                        TG3_APE_HOST_WOL_SPEED_AUTO);
                        apedata = TG3_APE_HOST_DRVR_STATE_WOL;
                } else
                        apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

                tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

                event = APE_EVENT_STATUS_STATE_UNLOAD;
                break;
        case RESET_KIND_SUSPEND:
                event = APE_EVENT_STATUS_STATE_SUSPEND;
                break;
        default:
                return;
        }

        event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

        tg3_ape_send_event(tp, event);
}

static void tg3_disable_ints(struct tg3 *tp)
{
        int i;

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
        for (i = 0; i < tp->irq_max; i++)
                tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}

static void tg3_enable_ints(struct tg3 *tp)
{
        int i;

        tp->irq_sync = 0;
        wmb();

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

        tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
        for (i = 0; i < tp->irq_cnt; i++) {
                struct tg3_napi *tnapi = &tp->napi[i];

                tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
                if (tg3_flag(tp, 1SHOT_MSI))
                        tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

                tp->coal_now |= tnapi->coal_now;
        }

        /* Force an initial interrupt */
        if (!tg3_flag(tp, TAGGED_STATUS) &&
            (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
                tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
        else
                tw32(HOSTCC_MODE, tp->coal_now);

        tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}

static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
        struct tg3 *tp = tnapi->tp;
        struct tg3_hw_status *sblk = tnapi->hw_status;
        unsigned int work_exists = 0;

        /* check for phy events */
        if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
                if (sblk->status & SD_STATUS_LINK_CHG)
                        work_exists = 1;
        }
        /* check for RX/TX work to do */
        if (sblk->idx[0].tx_consumer != tnapi->tx_cons ||
            *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
                work_exists = 1;

        return work_exists;
}

/* tg3_int_reenable
 * similar to tg3_enable_ints, but it accurately determines whether there
 * is new work pending and can return without flushing the PIO write
 * which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
        struct tg3 *tp = tnapi->tp;

        tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
        mmiowb();

        /* When doing tagged status, this work check is unnecessary.
         * The last_tag we write above tells the chip which piece of
         * work we've completed.
         */
        if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
                tw32(HOSTCC_MODE, tp->coalesce_mode |
                     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}

static void tg3_switch_clocks(struct tg3 *tp)
{
        u32 clock_ctrl;
        u32 orig_clock_ctrl;

        if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
                return;

        clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

        orig_clock_ctrl = clock_ctrl;
        clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
                       CLOCK_CTRL_CLKRUN_OENABLE |
                       0x1f);
        tp->pci_clock_ctrl = clock_ctrl;

        if (tg3_flag(tp, 5705_PLUS)) {
                if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
                        tw32_wait_f(TG3PCI_CLOCK_CTRL,
                                    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
                }
        } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl |
                            (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
                            40);
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl | (CLOCK_CTRL_ALTCLK),
                            40);
        }
        tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}

#define PHY_BUSY_LOOPS 5000
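/* Worst case, tg3_readphy()/tg3_writephy() below poll MI_COM up to
 * PHY_BUSY_LOOPS times with a 10 usec delay per iteration, giving the
 * PHY roughly 50 ms to complete an MII transaction before -EBUSY.
 */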

static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        *val = 0x0;

        frame_val = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
                     MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (MI_COM_CMD_READ | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);

                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0) {
                *val = frame_val & MI_COM_DATA_MASK;
                ret = 0;
        }

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
            (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
                return 0;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        frame_val = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
                     MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (val & MI_COM_DATA_MASK);
        frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);
                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0)
                ret = 0;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        return ret;
}

static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
                           MII_TG3_MMD_CTRL_DATA_NOINC | devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);

done:
        return err;
}

static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
                           MII_TG3_MMD_CTRL_DATA_NOINC | devad);
        if (err)
                goto done;

        err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);

done:
        return err;
}

static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
        if (!err)
                err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);

        return err;
}

static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
        if (!err)
                err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);

        return err;
}

static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
                           (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
                           MII_TG3_AUXCTL_SHDWSEL_MISC);
        if (!err)
                err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);

        return err;
}

static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
{
        if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
                set |= MII_TG3_AUXCTL_MISC_WREN;

        return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
}

#define TG3_PHY_AUXCTL_SMDSP_ENABLE(tp) \
        tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
                             MII_TG3_AUXCTL_ACTL_SMDSP_ENA | \
                             MII_TG3_AUXCTL_ACTL_TX_6DB)

#define TG3_PHY_AUXCTL_SMDSP_DISABLE(tp) \
        tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
                             MII_TG3_AUXCTL_ACTL_TX_6DB);
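/* Note: "disabling" the SM DSP amounts to rewriting the auxctl shadow
 * register with only MII_TG3_AUXCTL_ACTL_TX_6DB set, i.e. without the
 * MII_TG3_AUXCTL_ACTL_SMDSP_ENA bit.
 */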

static int tg3_bmcr_reset(struct tg3 *tp)
{
        u32 phy_control;
        int limit, err;

        /* OK, reset it, and poll the BMCR_RESET bit until it
         * clears or we time out.
         */
        phy_control = BMCR_RESET;
        err = tg3_writephy(tp, MII_BMCR, phy_control);
        if (err != 0)
                return -EBUSY;

        limit = 5000;
        while (limit--) {
                err = tg3_readphy(tp, MII_BMCR, &phy_control);
                if (err != 0)
                        return -EBUSY;

                if ((phy_control & BMCR_RESET) == 0) {
                        udelay(40);
                        break;
                }
                udelay(10);
        }
        if (limit < 0)
                return -EBUSY;

        return 0;
}

static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
        struct tg3 *tp = bp->priv;
        u32 val;

        spin_lock_bh(&tp->lock);

        if (tg3_readphy(tp, reg, &val))
                val = -EIO;

        spin_unlock_bh(&tp->lock);

        return val;
}

static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
        struct tg3 *tp = bp->priv;
        u32 ret = 0;

        spin_lock_bh(&tp->lock);

        if (tg3_writephy(tp, reg, val))
                ret = -EIO;

        spin_unlock_bh(&tp->lock);

        return ret;
}

static int tg3_mdio_reset(struct mii_bus *bp)
{
        return 0;
}

static void tg3_mdio_config_5785(struct tg3 *tp)
{
        u32 val;
        struct phy_device *phydev;

        phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
        switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
        case PHY_ID_BCM50610:
        case PHY_ID_BCM50610M:
                val = MAC_PHYCFG2_50610_LED_MODES;
                break;
        case PHY_ID_BCMAC131:
                val = MAC_PHYCFG2_AC131_LED_MODES;
                break;
        case PHY_ID_RTL8211C:
                val = MAC_PHYCFG2_RTL8211C_LED_MODES;
                break;
        case PHY_ID_RTL8201E:
                val = MAC_PHYCFG2_RTL8201E_LED_MODES;
                break;
        default:
                return;
        }

        if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
                tw32(MAC_PHYCFG2, val);

                val = tr32(MAC_PHYCFG1);
                val &= ~(MAC_PHYCFG1_RGMII_INT |
                         MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
                val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
                tw32(MAC_PHYCFG1, val);

                return;
        }

        if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
                val |= MAC_PHYCFG2_EMODE_MASK_MASK |
                       MAC_PHYCFG2_FMODE_MASK_MASK |
                       MAC_PHYCFG2_GMODE_MASK_MASK |
                       MAC_PHYCFG2_ACT_MASK_MASK |
                       MAC_PHYCFG2_QUAL_MASK_MASK |
                       MAC_PHYCFG2_INBAND_ENABLE;

        tw32(MAC_PHYCFG2, val);

        val = tr32(MAC_PHYCFG1);
        val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
                 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
        if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
                if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
                        val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
                if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
                        val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
        }
        val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
               MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
        tw32(MAC_PHYCFG1, val);

        val = tr32(MAC_EXT_RGMII_MODE);
        val &= ~(MAC_RGMII_MODE_RX_INT_B |
                 MAC_RGMII_MODE_RX_QUALITY |
                 MAC_RGMII_MODE_RX_ACTIVITY |
                 MAC_RGMII_MODE_RX_ENG_DET |
                 MAC_RGMII_MODE_TX_ENABLE |
                 MAC_RGMII_MODE_TX_LOWPWR |
                 MAC_RGMII_MODE_TX_RESET);
        if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
                if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
                        val |= MAC_RGMII_MODE_RX_INT_B |
                               MAC_RGMII_MODE_RX_QUALITY |
                               MAC_RGMII_MODE_RX_ACTIVITY |
                               MAC_RGMII_MODE_RX_ENG_DET;
                if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
                        val |= MAC_RGMII_MODE_TX_ENABLE |
                               MAC_RGMII_MODE_TX_LOWPWR |
                               MAC_RGMII_MODE_TX_RESET;
        }
        tw32(MAC_EXT_RGMII_MODE, val);
}

static void tg3_mdio_start(struct tg3 *tp)
{
        tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
        tw32_f(MAC_MI_MODE, tp->mi_mode);
        udelay(80);

        if (tg3_flag(tp, MDIOBUS_INITED) &&
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
                tg3_mdio_config_5785(tp);
}

static int tg3_mdio_init(struct tg3 *tp)
{
        int i;
        u32 reg;
        struct phy_device *phydev;

        if (tg3_flag(tp, 5717_PLUS)) {
                u32 is_serdes;

                tp->phy_addr = tp->pci_fn + 1;

                if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
                        is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
                else
                        is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
                                    TG3_CPMU_PHY_STRAP_IS_SERDES;
                if (is_serdes)
                        tp->phy_addr += 7;
        } else
                tp->phy_addr = TG3_PHY_MII_ADDR;

        tg3_mdio_start(tp);

        if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
                return 0;

        tp->mdio_bus = mdiobus_alloc();
        if (tp->mdio_bus == NULL)
                return -ENOMEM;

        tp->mdio_bus->name = "tg3 mdio bus";
        snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
                 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
        tp->mdio_bus->priv = tp;
        tp->mdio_bus->parent = &tp->pdev->dev;
        tp->mdio_bus->read = &tg3_mdio_read;
        tp->mdio_bus->write = &tg3_mdio_write;
        tp->mdio_bus->reset = &tg3_mdio_reset;
        tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
        tp->mdio_bus->irq = &tp->mdio_irq[0];

        for (i = 0; i < PHY_MAX_ADDR; i++)
                tp->mdio_bus->irq[i] = PHY_POLL;

        /* The bus registration will look for all the PHYs on the mdio bus.
         * Unfortunately, it does not ensure the PHY is powered up before
         * accessing the PHY ID registers. A chip reset is the
         * quickest way to bring the device back to an operational state.
         */
        if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
                tg3_bmcr_reset(tp);

        i = mdiobus_register(tp->mdio_bus);
        if (i) {
                dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
                mdiobus_free(tp->mdio_bus);
                return i;
        }

        phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

        if (!phydev || !phydev->drv) {
                dev_warn(&tp->pdev->dev, "No PHY devices\n");
                mdiobus_unregister(tp->mdio_bus);
                mdiobus_free(tp->mdio_bus);
                return -ENODEV;
        }

        switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
        case PHY_ID_BCM57780:
                phydev->interface = PHY_INTERFACE_MODE_GMII;
                phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
                break;
        case PHY_ID_BCM50610:
        case PHY_ID_BCM50610M:
                phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
                                     PHY_BRCM_RX_REFCLK_UNUSED |
                                     PHY_BRCM_DIS_TXCRXC_NOENRGY |
                                     PHY_BRCM_AUTO_PWRDWN_ENABLE;
                if (tg3_flag(tp, RGMII_INBAND_DISABLE))
                        phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
                if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
                        phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
                if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
                        phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
                /* fallthru */
        case PHY_ID_RTL8211C:
                phydev->interface = PHY_INTERFACE_MODE_RGMII;
                break;
        case PHY_ID_RTL8201E:
        case PHY_ID_BCMAC131:
                phydev->interface = PHY_INTERFACE_MODE_MII;
                phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
                tp->phy_flags |= TG3_PHYFLG_IS_FET;
                break;
        }

        tg3_flag_set(tp, MDIOBUS_INITED);

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
                tg3_mdio_config_5785(tp);

        return 0;
}

static void tg3_mdio_fini(struct tg3 *tp)
{
        if (tg3_flag(tp, MDIOBUS_INITED)) {
                tg3_flag_clear(tp, MDIOBUS_INITED);
                mdiobus_unregister(tp->mdio_bus);
                mdiobus_free(tp->mdio_bus);
        }
}

/* tp->lock is held. */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
        u32 val;

        val = tr32(GRC_RX_CPU_EVENT);
        val |= GRC_RX_CPU_DRIVER_EVENT;
        tw32_f(GRC_RX_CPU_EVENT, val);

        tp->last_event_jiffies = jiffies;
}

#define TG3_FW_EVENT_TIMEOUT_USEC 2500

/* tp->lock is held. */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
        int i;
        unsigned int delay_cnt;
        long time_remain;

        /* If enough time has passed, no wait is necessary. */
        time_remain = (long)(tp->last_event_jiffies + 1 +
                             usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
                      (long)jiffies;
        if (time_remain < 0)
                return;

        /* Check if we can shorten the wait time. */
        delay_cnt = jiffies_to_usecs(time_remain);
        if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
                delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
        delay_cnt = (delay_cnt >> 3) + 1;

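        /* Poll in 8 usec steps; delay_cnt was sized above so this loop
         * covers roughly the remaining portion of the event timeout.
         */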
        for (i = 0; i < delay_cnt; i++) {
                if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
                        break;
                udelay(8);
        }
}

/* tp->lock is held. */
static void tg3_ump_link_report(struct tg3 *tp)
{
        u32 reg;
        u32 val;

        if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
                return;

        tg3_wait_for_event_ack(tp);

        tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);

        tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);

        val = 0;
        if (!tg3_readphy(tp, MII_BMCR, &reg))
                val = reg << 16;
        if (!tg3_readphy(tp, MII_BMSR, &reg))
                val |= (reg & 0xffff);
        tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);

        val = 0;
        if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
                val = reg << 16;
        if (!tg3_readphy(tp, MII_LPA, &reg))
                val |= (reg & 0xffff);
        tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);

        val = 0;
        if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
                if (!tg3_readphy(tp, MII_CTRL1000, &reg))
                        val = reg << 16;
                if (!tg3_readphy(tp, MII_STAT1000, &reg))
                        val |= (reg & 0xffff);
        }
        tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);

        if (!tg3_readphy(tp, MII_PHYADDR, &reg))
                val = reg << 16;
        else
                val = 0;
        tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);

        tg3_generate_fw_event(tp);
}

/* tp->lock is held. */
static void tg3_stop_fw(struct tg3 *tp)
{
        if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
                /* Wait for RX cpu to ACK the previous event. */
                tg3_wait_for_event_ack(tp);

                tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);

                tg3_generate_fw_event(tp);

                /* Wait for RX cpu to ACK this event. */
                tg3_wait_for_event_ack(tp);
        }
}

/* tp->lock is held. */
static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
{
        tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
                      NIC_SRAM_FIRMWARE_MBOX_MAGIC1);

        if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
                switch (kind) {
                case RESET_KIND_INIT:
                        tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
                                      DRV_STATE_START);
                        break;

                case RESET_KIND_SHUTDOWN:
                        tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
                                      DRV_STATE_UNLOAD);
                        break;

                case RESET_KIND_SUSPEND:
                        tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
                                      DRV_STATE_SUSPEND);
                        break;

                default:
                        break;
                }
        }

        if (kind == RESET_KIND_INIT ||
            kind == RESET_KIND_SUSPEND)
                tg3_ape_driver_state_change(tp, kind);
}

/* tp->lock is held. */
static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
{
        if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
                switch (kind) {
                case RESET_KIND_INIT:
                        tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
                                      DRV_STATE_START_DONE);
                        break;

                case RESET_KIND_SHUTDOWN:
                        tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
                                      DRV_STATE_UNLOAD_DONE);
                        break;

                default:
                        break;
                }
        }

        if (kind == RESET_KIND_SHUTDOWN)
                tg3_ape_driver_state_change(tp, kind);
}

/* tp->lock is held. */
static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
{
        if (tg3_flag(tp, ENABLE_ASF)) {
                switch (kind) {
                case RESET_KIND_INIT:
                        tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
                                      DRV_STATE_START);
                        break;

                case RESET_KIND_SHUTDOWN:
                        tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
                                      DRV_STATE_UNLOAD);
                        break;

                case RESET_KIND_SUSPEND:
                        tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
                                      DRV_STATE_SUSPEND);
                        break;

                default:
                        break;
                }
        }
}

static int tg3_poll_fw(struct tg3 *tp)
{
        int i;
        u32 val;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
                /* Wait up to 20ms for init done. */
                for (i = 0; i < 200; i++) {
                        if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
                                return 0;
                        udelay(100);
                }
                return -ENODEV;
        }

        /* Wait for firmware initialization to complete. */
        for (i = 0; i < 100000; i++) {
                tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
                if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
                        break;
                udelay(10);
        }

        /* Chip might not be fitted with firmware. Some Sun onboard
         * parts are configured like that. So don't signal the timeout
         * of the above loop as an error, but do report the lack of
         * running firmware once.
         */
        if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
                tg3_flag_set(tp, NO_FWARE_REPORTED);

                netdev_info(tp->dev, "No firmware running\n");
        }

        if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
                /* The 57765 A0 needs a little more
                 * time to do some important work.
                 */
                mdelay(10);
        }

        return 0;
}

static void tg3_link_report(struct tg3 *tp)
{
        if (!netif_carrier_ok(tp->dev)) {
                netif_info(tp, link, tp->dev, "Link is down\n");
                tg3_ump_link_report(tp);
        } else if (netif_msg_link(tp)) {
                netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
                            (tp->link_config.active_speed == SPEED_1000 ?
                             1000 :
                             (tp->link_config.active_speed == SPEED_100 ?
                              100 : 10)),
                            (tp->link_config.active_duplex == DUPLEX_FULL ?
                             "full" : "half"));

                netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
                            (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
                            "on" : "off",
                            (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
                            "on" : "off");

                if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
                        netdev_info(tp->dev, "EEE is %s\n",
                                    tp->setlpicnt ? "enabled" : "disabled");

                tg3_ump_link_report(tp);
        }
}

static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
{
        u16 miireg;

        if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
                miireg = ADVERTISE_1000XPAUSE;
        else if (flow_ctrl & FLOW_CTRL_TX)
                miireg = ADVERTISE_1000XPSE_ASYM;
        else if (flow_ctrl & FLOW_CTRL_RX)
                miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
        else
                miireg = 0;

        return miireg;
}

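/* Resolve the negotiated pause configuration per the 802.3x rules:
 * symmetric pause advertised by both ends enables flow control in both
 * directions; when the partners only share the asymmetric-pause bit,
 * flow control is enabled in one direction, determined by which side
 * also advertised symmetric pause.
 */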
tg3_resolve_flowctrl_1000X(u16 lcladv,u16 rmtadv)1689 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1690 {
1691 u8 cap = 0;
1692
1693 if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
1694 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1695 } else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
1696 if (lcladv & ADVERTISE_1000XPAUSE)
1697 cap = FLOW_CTRL_RX;
1698 if (rmtadv & ADVERTISE_1000XPAUSE)
1699 cap = FLOW_CTRL_TX;
1700 }
1701
1702 return cap;
1703 }
1704
tg3_setup_flow_control(struct tg3 * tp,u32 lcladv,u32 rmtadv)1705 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1706 {
1707 u8 autoneg;
1708 u8 flowctrl = 0;
1709 u32 old_rx_mode = tp->rx_mode;
1710 u32 old_tx_mode = tp->tx_mode;
1711
1712 if (tg3_flag(tp, USE_PHYLIB))
1713 autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
1714 else
1715 autoneg = tp->link_config.autoneg;
1716
1717 if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
1718 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1719 flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1720 else
1721 flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1722 } else
1723 flowctrl = tp->link_config.flowctrl;
1724
1725 tp->link_config.active_flowctrl = flowctrl;
1726
1727 if (flowctrl & FLOW_CTRL_RX)
1728 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1729 else
1730 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1731
1732 if (old_rx_mode != tp->rx_mode)
1733 tw32_f(MAC_RX_MODE, tp->rx_mode);
1734
1735 if (flowctrl & FLOW_CTRL_TX)
1736 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1737 else
1738 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1739
1740 if (old_tx_mode != tp->tx_mode)
1741 tw32_f(MAC_TX_MODE, tp->tx_mode);
1742 }
1743
tg3_adjust_link(struct net_device * dev)1744 static void tg3_adjust_link(struct net_device *dev)
1745 {
1746 u8 oldflowctrl, linkmesg = 0;
1747 u32 mac_mode, lcl_adv, rmt_adv;
1748 struct tg3 *tp = netdev_priv(dev);
1749 struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1750
1751 spin_lock_bh(&tp->lock);
1752
1753 mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
1754 MAC_MODE_HALF_DUPLEX);
1755
1756 oldflowctrl = tp->link_config.active_flowctrl;
1757
1758 if (phydev->link) {
1759 lcl_adv = 0;
1760 rmt_adv = 0;
1761
1762 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
1763 mac_mode |= MAC_MODE_PORT_MODE_MII;
1764 else if (phydev->speed == SPEED_1000 ||
1765 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
1766 mac_mode |= MAC_MODE_PORT_MODE_GMII;
1767 else
1768 mac_mode |= MAC_MODE_PORT_MODE_MII;
1769
1770 if (phydev->duplex == DUPLEX_HALF)
1771 mac_mode |= MAC_MODE_HALF_DUPLEX;
1772 else {
1773 lcl_adv = mii_advertise_flowctrl(
1774 tp->link_config.flowctrl);
1775
1776 if (phydev->pause)
1777 rmt_adv = LPA_PAUSE_CAP;
1778 if (phydev->asym_pause)
1779 rmt_adv |= LPA_PAUSE_ASYM;
1780 }
1781
1782 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
1783 } else
1784 mac_mode |= MAC_MODE_PORT_MODE_GMII;
1785
1786 if (mac_mode != tp->mac_mode) {
1787 tp->mac_mode = mac_mode;
1788 tw32_f(MAC_MODE, tp->mac_mode);
1789 udelay(40);
1790 }
1791
1792 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
1793 if (phydev->speed == SPEED_10)
1794 tw32(MAC_MI_STAT,
1795 MAC_MI_STAT_10MBPS_MODE |
1796 MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1797 else
1798 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1799 }
1800
1801 if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
1802 tw32(MAC_TX_LENGTHS,
1803 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1804 (6 << TX_LENGTHS_IPG_SHIFT) |
1805 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
1806 else
1807 tw32(MAC_TX_LENGTHS,
1808 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1809 (6 << TX_LENGTHS_IPG_SHIFT) |
1810 (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
1811
1812 if ((phydev->link && tp->link_config.active_speed == SPEED_INVALID) ||
1813 (!phydev->link && tp->link_config.active_speed != SPEED_INVALID) ||
1814 phydev->speed != tp->link_config.active_speed ||
1815 phydev->duplex != tp->link_config.active_duplex ||
1816 oldflowctrl != tp->link_config.active_flowctrl)
1817 linkmesg = 1;
1818
1819 tp->link_config.active_speed = phydev->speed;
1820 tp->link_config.active_duplex = phydev->duplex;
1821
1822 spin_unlock_bh(&tp->lock);
1823
1824 if (linkmesg)
1825 tg3_link_report(tp);
1826 }
1827
1828 static int tg3_phy_init(struct tg3 *tp)
1829 {
1830 struct phy_device *phydev;
1831
1832 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
1833 return 0;
1834
1835 /* Bring the PHY back to a known state. */
1836 tg3_bmcr_reset(tp);
1837
1838 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1839
1840 /* Attach the MAC to the PHY. */
1841 phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
1842 phydev->dev_flags, phydev->interface);
1843 if (IS_ERR(phydev)) {
1844 dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
1845 return PTR_ERR(phydev);
1846 }
1847
1848 /* Mask with MAC supported features. */
1849 switch (phydev->interface) {
1850 case PHY_INTERFACE_MODE_GMII:
1851 case PHY_INTERFACE_MODE_RGMII:
1852 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
1853 phydev->supported &= (PHY_GBIT_FEATURES |
1854 SUPPORTED_Pause |
1855 SUPPORTED_Asym_Pause);
1856 break;
1857 }
1858 /* fall through */
1859 case PHY_INTERFACE_MODE_MII:
1860 phydev->supported &= (PHY_BASIC_FEATURES |
1861 SUPPORTED_Pause |
1862 SUPPORTED_Asym_Pause);
1863 break;
1864 default:
1865 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1866 return -EINVAL;
1867 }
1868
1869 tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
1870
1871 phydev->advertising = phydev->supported;
1872
1873 return 0;
1874 }
1875
1876 static void tg3_phy_start(struct tg3 *tp)
1877 {
1878 struct phy_device *phydev;
1879
1880 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
1881 return;
1882
1883 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1884
1885 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
1886 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
1887 phydev->speed = tp->link_config.orig_speed;
1888 phydev->duplex = tp->link_config.orig_duplex;
1889 phydev->autoneg = tp->link_config.orig_autoneg;
1890 phydev->advertising = tp->link_config.orig_advertising;
1891 }
1892
1893 phy_start(phydev);
1894
1895 phy_start_aneg(phydev);
1896 }
1897
1898 static void tg3_phy_stop(struct tg3 *tp)
1899 {
1900 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
1901 return;
1902
1903 phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1904 }
1905
1906 static void tg3_phy_fini(struct tg3 *tp)
1907 {
1908 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
1909 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1910 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
1911 }
1912 }
1913
1914 static int tg3_phy_set_extloopbk(struct tg3 *tp)
1915 {
1916 int err;
1917 u32 val;
1918
1919 if (tp->phy_flags & TG3_PHYFLG_IS_FET)
1920 return 0;
1921
1922 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
1923 /* Cannot do read-modify-write on 5401 */
1924 err = tg3_phy_auxctl_write(tp,
1925 MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
1926 MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
1927 0x4c20);
1928 goto done;
1929 }
1930
1931 err = tg3_phy_auxctl_read(tp,
1932 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
1933 if (err)
1934 return err;
1935
1936 val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
1937 err = tg3_phy_auxctl_write(tp,
1938 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
1939
1940 done:
1941 return err;
1942 }
1943
1944 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
1945 {
1946 u32 phytest;
1947
1948 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
1949 u32 phy;
1950
1951 tg3_writephy(tp, MII_TG3_FET_TEST,
1952 phytest | MII_TG3_FET_SHADOW_EN);
1953 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
1954 if (enable)
1955 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
1956 else
1957 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
1958 tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
1959 }
1960 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
1961 }
1962 }
1963
1964 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
1965 {
1966 u32 reg;
1967
1968 if (!tg3_flag(tp, 5705_PLUS) ||
1969 (tg3_flag(tp, 5717_PLUS) &&
1970 (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
1971 return;
1972
1973 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
1974 tg3_phy_fet_toggle_apd(tp, enable);
1975 return;
1976 }
1977
1978 reg = MII_TG3_MISC_SHDW_WREN |
1979 MII_TG3_MISC_SHDW_SCR5_SEL |
1980 MII_TG3_MISC_SHDW_SCR5_LPED |
1981 MII_TG3_MISC_SHDW_SCR5_DLPTLM |
1982 MII_TG3_MISC_SHDW_SCR5_SDTL |
1983 MII_TG3_MISC_SHDW_SCR5_C125OE;
1984 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
1985 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
1986
1987 tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
1988
1989
1990 reg = MII_TG3_MISC_SHDW_WREN |
1991 MII_TG3_MISC_SHDW_APD_SEL |
1992 MII_TG3_MISC_SHDW_APD_WKTM_84MS;
1993 if (enable)
1994 reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
1995
1996 tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
1997 }
1998
1999 static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
2000 {
2001 u32 phy;
2002
2003 if (!tg3_flag(tp, 5705_PLUS) ||
2004 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
2005 return;
2006
2007 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2008 u32 ephy;
2009
2010 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
2011 u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
2012
2013 tg3_writephy(tp, MII_TG3_FET_TEST,
2014 ephy | MII_TG3_FET_SHADOW_EN);
2015 if (!tg3_readphy(tp, reg, &phy)) {
2016 if (enable)
2017 phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2018 else
2019 phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2020 tg3_writephy(tp, reg, phy);
2021 }
2022 tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
2023 }
2024 } else {
2025 int ret;
2026
2027 ret = tg3_phy_auxctl_read(tp,
2028 MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
2029 if (!ret) {
2030 if (enable)
2031 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2032 else
2033 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2034 tg3_phy_auxctl_write(tp,
2035 MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
2036 }
2037 }
2038 }
2039
2040 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2041 {
2042 int ret;
2043 u32 val;
2044
2045 if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2046 return;
2047
2048 ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2049 if (!ret)
2050 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2051 val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2052 }
2053
2054 static void tg3_phy_apply_otp(struct tg3 *tp)
2055 {
2056 u32 otp, phy;
2057
2058 if (!tp->phy_otp)
2059 return;
2060
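/* Fan the factory-programmed OTP word out to the individual DSP tap,
 * filter and amplitude registers it encodes, with the SMDSP clock
 * enabled for the duration of the writes.
 */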
2061 otp = tp->phy_otp;
2062
2063 if (TG3_PHY_AUXCTL_SMDSP_ENABLE(tp))
2064 return;
2065
2066 phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
2067 phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
2068 tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
2069
2070 phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
2071 ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
2072 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
2073
2074 phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
2075 phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
2076 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
2077
2078 phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
2079 tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
2080
2081 phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
2082 tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
2083
2084 phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
2085 ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
2086 tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
2087
2088 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2089 }
2090
2091 static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
2092 {
2093 u32 val;
2094
2095 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2096 return;
2097
2098 tp->setlpicnt = 0;
2099
2100 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2101 current_link_up == 1 &&
2102 tp->link_config.active_duplex == DUPLEX_FULL &&
2103 (tp->link_config.active_speed == SPEED_100 ||
2104 tp->link_config.active_speed == SPEED_1000)) {
2105 u32 eeectl;
2106
2107 if (tp->link_config.active_speed == SPEED_1000)
2108 eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
2109 else
2110 eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
2111
2112 tw32(TG3_CPMU_EEE_CTRL, eeectl);
2113
2114 tg3_phy_cl45_read(tp, MDIO_MMD_AN,
2115 TG3_CL45_D7_EEERES_STAT, &val);
2116
2117 if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
2118 val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
2119 tp->setlpicnt = 2;
2120 }
2121
2122 if (!tp->setlpicnt) {
2123 if (current_link_up == 1 &&
2124 !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2125 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
2126 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2127 }
2128
2129 val = tr32(TG3_CPMU_EEE_MODE);
2130 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2131 }
2132 }
2133
2134 static void tg3_phy_eee_enable(struct tg3 *tp)
2135 {
2136 u32 val;
2137
2138 if (tp->link_config.active_speed == SPEED_1000 &&
2139 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2140 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2141 tg3_flag(tp, 57765_CLASS)) &&
2142 !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2143 val = MII_TG3_DSP_TAP26_ALNOKO |
2144 MII_TG3_DSP_TAP26_RMRXSTO;
2145 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2146 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2147 }
2148
2149 val = tr32(TG3_CPMU_EEE_MODE);
2150 tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2151 }
2152
2153 static int tg3_wait_macro_done(struct tg3 *tp)
2154 {
2155 int limit = 100;
2156
2157 while (limit--) {
2158 u32 tmp32;
2159
2160 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2161 if ((tmp32 & 0x1000) == 0)
2162 break;
2163 }
2164 }
2165 if (limit < 0)
2166 return -EBUSY;
2167
2168 return 0;
2169 }
2170
2171 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
2172 {
2173 static const u32 test_pat[4][6] = {
2174 { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
2175 { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
2176 { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
2177 { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
2178 };
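/* Write each pattern row above into one of the four DSP channels,
 * read it back through the macro sequencer, and flag the PHY for
 * another reset pass on any mismatch.
 */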
2179 int chan;
2180
2181 for (chan = 0; chan < 4; chan++) {
2182 int i;
2183
2184 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2185 (chan * 0x2000) | 0x0200);
2186 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2187
2188 for (i = 0; i < 6; i++)
2189 tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
2190 test_pat[chan][i]);
2191
2192 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2193 if (tg3_wait_macro_done(tp)) {
2194 *resetp = 1;
2195 return -EBUSY;
2196 }
2197
2198 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2199 (chan * 0x2000) | 0x0200);
2200 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
2201 if (tg3_wait_macro_done(tp)) {
2202 *resetp = 1;
2203 return -EBUSY;
2204 }
2205
2206 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
2207 if (tg3_wait_macro_done(tp)) {
2208 *resetp = 1;
2209 return -EBUSY;
2210 }
2211
2212 for (i = 0; i < 6; i += 2) {
2213 u32 low, high;
2214
2215 if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
2216 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
2217 tg3_wait_macro_done(tp)) {
2218 *resetp = 1;
2219 return -EBUSY;
2220 }
2221 low &= 0x7fff;
2222 high &= 0x000f;
2223 if (low != test_pat[chan][i] ||
2224 high != test_pat[chan][i+1]) {
2225 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
2226 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
2227 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
2228
2229 return -EBUSY;
2230 }
2231 }
2232 }
2233
2234 return 0;
2235 }
2236
2237 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2238 {
2239 int chan;
2240
2241 for (chan = 0; chan < 4; chan++) {
2242 int i;
2243
2244 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2245 (chan * 0x2000) | 0x0200);
2246 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2247 for (i = 0; i < 6; i++)
2248 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
2249 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2250 if (tg3_wait_macro_done(tp))
2251 return -EBUSY;
2252 }
2253
2254 return 0;
2255 }
2256
2257 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2258 {
2259 u32 reg32, phy9_orig;
2260 int retries, do_phy_reset, err;
2261
2262 retries = 10;
2263 do_phy_reset = 1;
2264 do {
2265 if (do_phy_reset) {
2266 err = tg3_bmcr_reset(tp);
2267 if (err)
2268 return err;
2269 do_phy_reset = 0;
2270 }
2271
2272 /* Disable transmitter and interrupt. */
2273 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2274 continue;
2275
2276 reg32 |= 0x3000;
2277 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2278
2279 /* Set full-duplex, 1000 mbps. */
2280 tg3_writephy(tp, MII_BMCR,
2281 BMCR_FULLDPLX | BMCR_SPEED1000);
2282
2283 /* Set to master mode. */
2284 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2285 continue;
2286
2287 tg3_writephy(tp, MII_CTRL1000,
2288 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2289
2290 err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
2291 if (err)
2292 return err;
2293
2294 /* Block the PHY control access. */
2295 tg3_phydsp_write(tp, 0x8005, 0x0800);
2296
2297 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2298 if (!err)
2299 break;
2300 } while (--retries);
2301
2302 err = tg3_phy_reset_chanpat(tp);
2303 if (err)
2304 return err;
2305
2306 tg3_phydsp_write(tp, 0x8005, 0x0000);
2307
2308 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2309 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2310
2311 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2312
2313 tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2314
2315 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
2316 reg32 &= ~0x3000;
2317 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2318 } else if (!err)
2319 err = -EBUSY;
2320
2321 return err;
2322 }
2323
2324 /* This will reset the tigon3 PHY and then apply any
2325 * chip-specific workarounds that must follow a PHY reset.
2326 */
2327 static int tg3_phy_reset(struct tg3 *tp)
2328 {
2329 u32 val, cpmuctrl;
2330 int err;
2331
2332 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2333 val = tr32(GRC_MISC_CFG);
2334 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2335 udelay(40);
2336 }
2337 err = tg3_readphy(tp, MII_BMSR, &val);
2338 err |= tg3_readphy(tp, MII_BMSR, &val);
2339 if (err != 0)
2340 return -EBUSY;
2341
2342 if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
2343 netif_carrier_off(tp->dev);
2344 tg3_link_report(tp);
2345 }
2346
2347 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2348 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2349 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
2350 err = tg3_phy_reset_5703_4_5(tp);
2351 if (err)
2352 return err;
2353 goto out;
2354 }
2355
2356 cpmuctrl = 0;
2357 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
2358 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
2359 cpmuctrl = tr32(TG3_CPMU_CTRL);
2360 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2361 tw32(TG3_CPMU_CTRL,
2362 cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2363 }
2364
2365 err = tg3_bmcr_reset(tp);
2366 if (err)
2367 return err;
2368
2369 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2370 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2371 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2372
2373 tw32(TG3_CPMU_CTRL, cpmuctrl);
2374 }
2375
2376 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2377 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2378 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2379 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2380 CPMU_LSPD_1000MB_MACCLK_12_5) {
2381 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2382 udelay(40);
2383 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2384 }
2385 }
2386
2387 if (tg3_flag(tp, 5717_PLUS) &&
2388 (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2389 return 0;
2390
2391 tg3_phy_apply_otp(tp);
2392
2393 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2394 tg3_phy_toggle_apd(tp, true);
2395 else
2396 tg3_phy_toggle_apd(tp, false);
2397
2398 out:
2399 if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2400 !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2401 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2402 tg3_phydsp_write(tp, 0x000a, 0x0323);
2403 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2404 }
2405
2406 if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2407 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2408 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2409 }
2410
2411 if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2412 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2413 tg3_phydsp_write(tp, 0x000a, 0x310b);
2414 tg3_phydsp_write(tp, 0x201f, 0x9506);
2415 tg3_phydsp_write(tp, 0x401f, 0x14e2);
2416 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2417 }
2418 } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2419 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2420 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2421 if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2422 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2423 tg3_writephy(tp, MII_TG3_TEST1,
2424 MII_TG3_TEST1_TRIM_EN | 0x4);
2425 } else
2426 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2427
2428 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2429 }
2430 }
2431
2432 /* Set Extended packet length bit (bit 14) on all chips
2433 * that support jumbo frames. */
2434 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2435 /* Cannot do read-modify-write on 5401 */
2436 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2437 } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2438 /* Set bit 14 with read-modify-write to preserve other bits */
2439 err = tg3_phy_auxctl_read(tp,
2440 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2441 if (!err)
2442 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2443 val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2444 }
2445
2446 /* Set phy register 0x10 bit 0 to high fifo elasticity to support
2447 * jumbo frames transmission.
2448 */
2449 if (tg3_flag(tp, JUMBO_CAPABLE)) {
2450 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2451 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2452 val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2453 }
2454
2455 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2456 /* adjust output voltage */
2457 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2458 }
2459
2460 tg3_phy_toggle_automdix(tp, 1);
2461 tg3_phy_set_wirespeed(tp);
2462 return 0;
2463 }
2464
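/* Each PCI function owns a 4-bit slice of the shared GPIO message word;
 * the two flags below are or-ed into that slice, and the *_ALL_* masks
 * cover the slices of all four functions at once.
 */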
2465 #define TG3_GPIO_MSG_DRVR_PRES 0x00000001
2466 #define TG3_GPIO_MSG_NEED_VAUX 0x00000002
2467 #define TG3_GPIO_MSG_MASK (TG3_GPIO_MSG_DRVR_PRES | \
2468 TG3_GPIO_MSG_NEED_VAUX)
2469 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2470 ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2471 (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2472 (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2473 (TG3_GPIO_MSG_DRVR_PRES << 12))
2474
2475 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2476 ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2477 (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2478 (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2479 (TG3_GPIO_MSG_NEED_VAUX << 12))
2480
2481 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2482 {
2483 u32 status, shift;
2484
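/* On 5717/5719 the shared status word lives in the APE GPIO message
 * register; other chips keep it in the CPMU driver status register.
 * The combined status of all functions is returned to the caller.
 */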
2485 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2486 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2487 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2488 else
2489 status = tr32(TG3_CPMU_DRV_STATUS);
2490
2491 shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2492 status &= ~(TG3_GPIO_MSG_MASK << shift);
2493 status |= (newstat << shift);
2494
2495 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2496 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2497 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2498 else
2499 tw32(TG3_CPMU_DRV_STATUS, status);
2500
2501 return status >> TG3_APE_GPIO_MSG_SHIFT;
2502 }
2503
2504 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2505 {
2506 if (!tg3_flag(tp, IS_NIC))
2507 return 0;
2508
2509 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2510 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2511 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2512 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2513 return -EIO;
2514
2515 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2516
2517 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2518 TG3_GRC_LCLCTL_PWRSW_DELAY);
2519
2520 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2521 } else {
2522 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2523 TG3_GRC_LCLCTL_PWRSW_DELAY);
2524 }
2525
2526 return 0;
2527 }
2528
2529 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2530 {
2531 u32 grc_local_ctrl;
2532
2533 if (!tg3_flag(tp, IS_NIC) ||
2534 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2535 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)
2536 return;
2537
2538 grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2539
2540 tw32_wait_f(GRC_LOCAL_CTRL,
2541 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2542 TG3_GRC_LCLCTL_PWRSW_DELAY);
2543
2544 tw32_wait_f(GRC_LOCAL_CTRL,
2545 grc_local_ctrl,
2546 TG3_GRC_LCLCTL_PWRSW_DELAY);
2547
2548 tw32_wait_f(GRC_LOCAL_CTRL,
2549 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2550 TG3_GRC_LCLCTL_PWRSW_DELAY);
2551 }
2552
2553 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2554 {
2555 if (!tg3_flag(tp, IS_NIC))
2556 return;
2557
2558 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2559 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2560 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2561 (GRC_LCLCTRL_GPIO_OE0 |
2562 GRC_LCLCTRL_GPIO_OE1 |
2563 GRC_LCLCTRL_GPIO_OE2 |
2564 GRC_LCLCTRL_GPIO_OUTPUT0 |
2565 GRC_LCLCTRL_GPIO_OUTPUT1),
2566 TG3_GRC_LCLCTL_PWRSW_DELAY);
2567 } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2568 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2569 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2570 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2571 GRC_LCLCTRL_GPIO_OE1 |
2572 GRC_LCLCTRL_GPIO_OE2 |
2573 GRC_LCLCTRL_GPIO_OUTPUT0 |
2574 GRC_LCLCTRL_GPIO_OUTPUT1 |
2575 tp->grc_local_ctrl;
2576 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2577 TG3_GRC_LCLCTL_PWRSW_DELAY);
2578
2579 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2580 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2581 TG3_GRC_LCLCTL_PWRSW_DELAY);
2582
2583 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2584 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2585 TG3_GRC_LCLCTL_PWRSW_DELAY);
2586 } else {
2587 u32 no_gpio2;
2588 u32 grc_local_ctrl = 0;
2589
2590 /* Workaround to prevent overdrawing Amps. */
2591 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
2592 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2593 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2594 grc_local_ctrl,
2595 TG3_GRC_LCLCTL_PWRSW_DELAY);
2596 }
2597
2598 /* On 5753 and variants, GPIO2 cannot be used. */
2599 no_gpio2 = tp->nic_sram_data_cfg &
2600 NIC_SRAM_DATA_CFG_NO_GPIO2;
2601
2602 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2603 GRC_LCLCTRL_GPIO_OE1 |
2604 GRC_LCLCTRL_GPIO_OE2 |
2605 GRC_LCLCTRL_GPIO_OUTPUT1 |
2606 GRC_LCLCTRL_GPIO_OUTPUT2;
2607 if (no_gpio2) {
2608 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2609 GRC_LCLCTRL_GPIO_OUTPUT2);
2610 }
2611 tw32_wait_f(GRC_LOCAL_CTRL,
2612 tp->grc_local_ctrl | grc_local_ctrl,
2613 TG3_GRC_LCLCTL_PWRSW_DELAY);
2614
2615 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2616
2617 tw32_wait_f(GRC_LOCAL_CTRL,
2618 tp->grc_local_ctrl | grc_local_ctrl,
2619 TG3_GRC_LCLCTL_PWRSW_DELAY);
2620
2621 if (!no_gpio2) {
2622 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2623 tw32_wait_f(GRC_LOCAL_CTRL,
2624 tp->grc_local_ctrl | grc_local_ctrl,
2625 TG3_GRC_LCLCTL_PWRSW_DELAY);
2626 }
2627 }
2628 }
2629
2630 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2631 {
2632 u32 msg = 0;
2633
2634 /* Serialize power state transitions */
2635 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2636 return;
2637
2638 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2639 msg = TG3_GPIO_MSG_NEED_VAUX;
2640
2641 msg = tg3_set_function_status(tp, msg);
2642
2643 if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2644 goto done;
2645
2646 if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2647 tg3_pwrsrc_switch_to_vaux(tp);
2648 else
2649 tg3_pwrsrc_die_with_vmain(tp);
2650
2651 done:
2652 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2653 }
2654
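/* Decide whether this device (or its peer function) still needs the
 * auxiliary power source, e.g. for WoL or ASF, and switch between
 * Vaux and Vmain accordingly.
 */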
2655 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2656 {
2657 bool need_vaux = false;
2658
2659 /* The GPIOs do something completely different on 57765. */
2660 if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
2661 return;
2662
2663 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2664 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2665 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2666 tg3_frob_aux_power_5717(tp, include_wol ?
2667 tg3_flag(tp, WOL_ENABLE) != 0 : 0);
2668 return;
2669 }
2670
2671 if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2672 struct net_device *dev_peer;
2673
2674 dev_peer = pci_get_drvdata(tp->pdev_peer);
2675
2676 /* remove_one() may have been run on the peer. */
2677 if (dev_peer) {
2678 struct tg3 *tp_peer = netdev_priv(dev_peer);
2679
2680 if (tg3_flag(tp_peer, INIT_COMPLETE))
2681 return;
2682
2683 if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
2684 tg3_flag(tp_peer, ENABLE_ASF))
2685 need_vaux = true;
2686 }
2687 }
2688
2689 if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
2690 tg3_flag(tp, ENABLE_ASF))
2691 need_vaux = true;
2692
2693 if (need_vaux)
2694 tg3_pwrsrc_switch_to_vaux(tp);
2695 else
2696 tg3_pwrsrc_die_with_vmain(tp);
2697 }
2698
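/* Report whether MAC_MODE_LINK_POLARITY should be set for this speed:
 * LED mode PHY_2 forces it on, the BCM5411 wants it at every speed
 * except 10 Mbps, and every other PHY wants it only at 10 Mbps.
 */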
2699 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2700 {
2701 if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2702 return 1;
2703 else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
2704 if (speed != SPEED_10)
2705 return 1;
2706 } else if (speed == SPEED_10)
2707 return 1;
2708
2709 return 0;
2710 }
2711
2712 static int tg3_setup_phy(struct tg3 *, int);
2713 static int tg3_halt_cpu(struct tg3 *, u32);
2714
2715 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
2716 {
2717 u32 val;
2718
2719 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
2720 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2721 u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2722 u32 serdes_cfg = tr32(MAC_SERDES_CFG);
2723
2724 sg_dig_ctrl |=
2725 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
2726 tw32(SG_DIG_CTRL, sg_dig_ctrl);
2727 tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
2728 }
2729 return;
2730 }
2731
2732 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2733 tg3_bmcr_reset(tp);
2734 val = tr32(GRC_MISC_CFG);
2735 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
2736 udelay(40);
2737 return;
2738 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2739 u32 phytest;
2740 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2741 u32 phy;
2742
2743 tg3_writephy(tp, MII_ADVERTISE, 0);
2744 tg3_writephy(tp, MII_BMCR,
2745 BMCR_ANENABLE | BMCR_ANRESTART);
2746
2747 tg3_writephy(tp, MII_TG3_FET_TEST,
2748 phytest | MII_TG3_FET_SHADOW_EN);
2749 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
2750 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
2751 tg3_writephy(tp,
2752 MII_TG3_FET_SHDW_AUXMODE4,
2753 phy);
2754 }
2755 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2756 }
2757 return;
2758 } else if (do_low_power) {
2759 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2760 MII_TG3_EXT_CTRL_FORCE_LED_OFF);
2761
2762 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2763 MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
2764 MII_TG3_AUXCTL_PCTL_VREG_11V;
2765 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
2766 }
2767
2768 /* The PHY should not be powered down on some chips because
2769 * of bugs.
2770 */
2771 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2772 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2773 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
2774 (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2775 return;
2776
2777 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2778 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2779 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2780 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2781 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
2782 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2783 }
2784
2785 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
2786 }
2787
2788 /* tp->lock is held. */
2789 static int tg3_nvram_lock(struct tg3 *tp)
2790 {
2791 if (tg3_flag(tp, NVRAM)) {
2792 int i;
2793
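/* On the outermost acquisition, request software arbitration slot 1
 * and spin (up to 160 ms) for the grant; bootcode uses slot 0, so
 * driver and firmware serialize NVRAM access without a shared
 * in-memory lock.
 */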
2794 if (tp->nvram_lock_cnt == 0) {
2795 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
2796 for (i = 0; i < 8000; i++) {
2797 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
2798 break;
2799 udelay(20);
2800 }
2801 if (i == 8000) {
2802 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
2803 return -ENODEV;
2804 }
2805 }
2806 tp->nvram_lock_cnt++;
2807 }
2808 return 0;
2809 }
2810
2811 /* tp->lock is held. */
2812 static void tg3_nvram_unlock(struct tg3 *tp)
2813 {
2814 if (tg3_flag(tp, NVRAM)) {
2815 if (tp->nvram_lock_cnt > 0)
2816 tp->nvram_lock_cnt--;
2817 if (tp->nvram_lock_cnt == 0)
2818 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
2819 }
2820 }
2821
2822 /* tp->lock is held. */
2823 static void tg3_enable_nvram_access(struct tg3 *tp)
2824 {
2825 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2826 u32 nvaccess = tr32(NVRAM_ACCESS);
2827
2828 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
2829 }
2830 }
2831
2832 /* tp->lock is held. */
2833 static void tg3_disable_nvram_access(struct tg3 *tp)
2834 {
2835 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2836 u32 nvaccess = tr32(NVRAM_ACCESS);
2837
2838 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
2839 }
2840 }
2841
2842 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
2843 u32 offset, u32 *val)
2844 {
2845 u32 tmp;
2846 int i;
2847
2848 if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
2849 return -EINVAL;
2850
2851 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
2852 EEPROM_ADDR_DEVID_MASK |
2853 EEPROM_ADDR_READ);
2854 tw32(GRC_EEPROM_ADDR,
2855 tmp |
2856 (0 << EEPROM_ADDR_DEVID_SHIFT) |
2857 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
2858 EEPROM_ADDR_ADDR_MASK) |
2859 EEPROM_ADDR_READ | EEPROM_ADDR_START);
2860
2861 for (i = 0; i < 1000; i++) {
2862 tmp = tr32(GRC_EEPROM_ADDR);
2863
2864 if (tmp & EEPROM_ADDR_COMPLETE)
2865 break;
2866 msleep(1);
2867 }
2868 if (!(tmp & EEPROM_ADDR_COMPLETE))
2869 return -EBUSY;
2870
2871 tmp = tr32(GRC_EEPROM_DATA);
2872
2873 /*
2874 * The data will always be opposite the native endian
2875 * format. Perform a blind byteswap to compensate.
2876 */
2877 *val = swab32(tmp);
2878
2879 return 0;
2880 }
2881
2882 #define NVRAM_CMD_TIMEOUT 10000
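/* Each poll below waits 10 usec, so 10000 iterations bounds a single
 * NVRAM command at roughly 100 ms before it is reported as -EBUSY.
 */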
2883
2884 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
2885 {
2886 int i;
2887
2888 tw32(NVRAM_CMD, nvram_cmd);
2889 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
2890 udelay(10);
2891 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
2892 udelay(10);
2893 break;
2894 }
2895 }
2896
2897 if (i == NVRAM_CMD_TIMEOUT)
2898 return -EBUSY;
2899
2900 return 0;
2901 }
2902
2903 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
2904 {
2905 if (tg3_flag(tp, NVRAM) &&
2906 tg3_flag(tp, NVRAM_BUFFERED) &&
2907 tg3_flag(tp, FLASH) &&
2908 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2909 (tp->nvram_jedecnum == JEDEC_ATMEL))
2910
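/* Buffered Atmel parts use odd-sized pages (e.g. 264 bytes) addressed
 * on power-of-two boundaries, so the linear offset is split into
 * (page, byte-in-page): byte 530 of a 264-byte-page device becomes
 * byte 2 of page 2.
 */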
2911 addr = ((addr / tp->nvram_pagesize) <<
2912 ATMEL_AT45DB0X1B_PAGE_POS) +
2913 (addr % tp->nvram_pagesize);
2914
2915 return addr;
2916 }
2917
2918 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
2919 {
2920 if (tg3_flag(tp, NVRAM) &&
2921 tg3_flag(tp, NVRAM_BUFFERED) &&
2922 tg3_flag(tp, FLASH) &&
2923 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2924 (tp->nvram_jedecnum == JEDEC_ATMEL))
2925
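/* Inverse of tg3_nvram_phys_addr(): recombine the page number and
 * byte-in-page fields back into a linear offset.
 */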
2926 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
2927 tp->nvram_pagesize) +
2928 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
2929
2930 return addr;
2931 }
2932
2933 /* NOTE: Data read in from NVRAM is byteswapped according to
2934 * the byteswapping settings for all other register accesses.
2935 * tg3 devices are BE devices, so on a BE machine, the data
2936 * returned will be exactly as it is seen in NVRAM. On a LE
2937 * machine, the 32-bit value will be byteswapped.
2938 */
2939 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
2940 {
2941 int ret;
2942
2943 if (!tg3_flag(tp, NVRAM))
2944 return tg3_nvram_read_using_eeprom(tp, offset, val);
2945
2946 offset = tg3_nvram_phys_addr(tp, offset);
2947
2948 if (offset > NVRAM_ADDR_MSK)
2949 return -EINVAL;
2950
2951 ret = tg3_nvram_lock(tp);
2952 if (ret)
2953 return ret;
2954
2955 tg3_enable_nvram_access(tp);
2956
2957 tw32(NVRAM_ADDR, offset);
2958 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
2959 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
2960
2961 if (ret == 0)
2962 *val = tr32(NVRAM_RDDATA);
2963
2964 tg3_disable_nvram_access(tp);
2965
2966 tg3_nvram_unlock(tp);
2967
2968 return ret;
2969 }
2970
2971 /* Ensures NVRAM data is in bytestream format. */
2972 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
2973 {
2974 u32 v;
2975 int res = tg3_nvram_read(tp, offset, &v);
2976 if (!res)
2977 *val = cpu_to_be32(v);
2978 return res;
2979 }
2980
2981 #define RX_CPU_SCRATCH_BASE 0x30000
2982 #define RX_CPU_SCRATCH_SIZE 0x04000
2983 #define TX_CPU_SCRATCH_BASE 0x34000
2984 #define TX_CPU_SCRATCH_SIZE 0x04000
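/* Each on-chip MIPS CPU has a 16 KB scratch area; firmware images are
 * copied there while the CPU is halted and run once the PC is set.
 */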
2985
2986 /* tp->lock is held. */
2987 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
2988 {
2989 int i;
2990
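/* The 5906 has no MIPS rx/tx CPUs; its virtual CPU is halted through
 * GRC_VCPU_EXT_CTRL rather than the per-CPU mode register polled below.
 */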
2991 BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
2992
2993 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2994 u32 val = tr32(GRC_VCPU_EXT_CTRL);
2995
2996 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
2997 return 0;
2998 }
2999 if (offset == RX_CPU_BASE) {
3000 for (i = 0; i < 10000; i++) {
3001 tw32(offset + CPU_STATE, 0xffffffff);
3002 tw32(offset + CPU_MODE, CPU_MODE_HALT);
3003 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
3004 break;
3005 }
3006
3007 tw32(offset + CPU_STATE, 0xffffffff);
3008 tw32_f(offset + CPU_MODE, CPU_MODE_HALT);
3009 udelay(10);
3010 } else {
3011 for (i = 0; i < 10000; i++) {
3012 tw32(offset + CPU_STATE, 0xffffffff);
3013 tw32(offset + CPU_MODE, CPU_MODE_HALT);
3014 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
3015 break;
3016 }
3017 }
3018
3019 if (i >= 10000) {
3020 netdev_err(tp->dev, "%s timed out, %s CPU\n",
3021 __func__, offset == RX_CPU_BASE ? "RX" : "TX");
3022 return -ENODEV;
3023 }
3024
3025 /* Clear firmware's nvram arbitration. */
3026 if (tg3_flag(tp, NVRAM))
3027 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
3028 return 0;
3029 }
3030
3031 struct fw_info {
3032 unsigned int fw_base;
3033 unsigned int fw_len;
3034 const __be32 *fw_data;
3035 };
3036
3037 /* tp->lock is held. */
3038 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3039 u32 cpu_scratch_base, int cpu_scratch_size,
3040 struct fw_info *info)
3041 {
3042 int err, lock_err, i;
3043 void (*write_op)(struct tg3 *, u32, u32);
3044
3045 if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
3046 netdev_err(tp->dev,
3047 "%s: Trying to load TX cpu firmware which is 5705\n",
3048 __func__);
3049 return -EINVAL;
3050 }
3051
3052 if (tg3_flag(tp, 5705_PLUS))
3053 write_op = tg3_write_mem;
3054 else
3055 write_op = tg3_write_indirect_reg32;
3056
3057 /* It is possible that bootcode is still loading at this point.
3058 * Get the nvram lock first before halting the cpu.
3059 */
3060 lock_err = tg3_nvram_lock(tp);
3061 err = tg3_halt_cpu(tp, cpu_base);
3062 if (!lock_err)
3063 tg3_nvram_unlock(tp);
3064 if (err)
3065 goto out;
3066
3067 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3068 write_op(tp, cpu_scratch_base + i, 0);
3069 tw32(cpu_base + CPU_STATE, 0xffffffff);
3070 tw32(cpu_base + CPU_MODE, tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
3071 for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
3072 write_op(tp, (cpu_scratch_base +
3073 (info->fw_base & 0xffff) +
3074 (i * sizeof(u32))),
3075 be32_to_cpu(info->fw_data[i]));
3076
3077 err = 0;
3078
3079 out:
3080 return err;
3081 }
3082
3083 /* tp->lock is held. */
3084 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3085 {
3086 struct fw_info info;
3087 const __be32 *fw_data;
3088 int err, i;
3089
3090 fw_data = (void *)tp->fw->data;
3091
3092 /* Firmware blob starts with version numbers, followed by
3093 * start address and length. We are setting complete length.
3094 * length = end_address_of_bss - start_address_of_text.
3095 * Remainder is the blob to be loaded contiguously
3096 * from start address. */
3097
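/* Header layout: fw_data[0] is the version, fw_data[1] the load
 * address, fw_data[2] the length; the image itself starts at
 * fw_data[3], hence the 12-byte adjustment below.
 */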
3098 info.fw_base = be32_to_cpu(fw_data[1]);
3099 info.fw_len = tp->fw->size - 12;
3100 info.fw_data = &fw_data[3];
3101
3102 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3103 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3104 &info);
3105 if (err)
3106 return err;
3107
3108 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3109 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3110 &info);
3111 if (err)
3112 return err;
3113
3114 /* Now startup only the RX cpu. */
3115 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3116 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
3117
3118 for (i = 0; i < 5; i++) {
3119 if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
3120 break;
3121 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3122 tw32(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
3123 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
3124 udelay(1000);
3125 }
3126 if (i >= 5) {
3127 netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
3128 "should be %08x\n", __func__,
3129 tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
3130 return -ENODEV;
3131 }
3132 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3133 tw32_f(RX_CPU_BASE + CPU_MODE, 0x00000000);
3134
3135 return 0;
3136 }
3137
3138 /* tp->lock is held. */
3139 static int tg3_load_tso_firmware(struct tg3 *tp)
3140 {
3141 struct fw_info info;
3142 const __be32 *fw_data;
3143 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
3144 int err, i;
3145
3146 if (tg3_flag(tp, HW_TSO_1) ||
3147 tg3_flag(tp, HW_TSO_2) ||
3148 tg3_flag(tp, HW_TSO_3))
3149 return 0;
3150
3151 fw_data = (void *)tp->fw->data;
3152
3153 /* Firmware blob starts with version numbers, followed by
3154 * start address and length. We are setting complete length.
3155 * length = end_address_of_bss - start_address_of_text.
3156 * Remainder is the blob to be loaded contiguously
3157 * from start address. */
3158
3159 info.fw_base = be32_to_cpu(fw_data[1]);
3160 cpu_scratch_size = tp->fw_len;
3161 info.fw_len = tp->fw->size - 12;
3162 info.fw_data = &fw_data[3];
3163
3164 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
3165 cpu_base = RX_CPU_BASE;
3166 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
3167 } else {
3168 cpu_base = TX_CPU_BASE;
3169 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
3170 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
3171 }
3172
3173 err = tg3_load_firmware_cpu(tp, cpu_base,
3174 cpu_scratch_base, cpu_scratch_size,
3175 &info);
3176 if (err)
3177 return err;
3178
3179 /* Now startup the cpu. */
3180 tw32(cpu_base + CPU_STATE, 0xffffffff);
3181 tw32_f(cpu_base + CPU_PC, info.fw_base);
3182
3183 for (i = 0; i < 5; i++) {
3184 if (tr32(cpu_base + CPU_PC) == info.fw_base)
3185 break;
3186 tw32(cpu_base + CPU_STATE, 0xffffffff);
3187 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
3188 tw32_f(cpu_base + CPU_PC, info.fw_base);
3189 udelay(1000);
3190 }
3191 if (i >= 5) {
3192 netdev_err(tp->dev,
3193 "%s fails to set CPU PC, is %08x should be %08x\n",
3194 __func__, tr32(cpu_base + CPU_PC), info.fw_base);
3195 return -ENODEV;
3196 }
3197 tw32(cpu_base + CPU_STATE, 0xffffffff);
3198 tw32_f(cpu_base + CPU_MODE, 0x00000000);
3199 return 0;
3200 }
3201
3202
3203 /* tp->lock is held. */
3204 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
3205 {
3206 u32 addr_high, addr_low;
3207 int i;
3208
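/* Program the station address into all four perfect-match slots;
 * slot 1 can be skipped so an address installed there, e.g. by
 * management firmware, survives.
 */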
3209 addr_high = ((tp->dev->dev_addr[0] << 8) |
3210 tp->dev->dev_addr[1]);
3211 addr_low = ((tp->dev->dev_addr[2] << 24) |
3212 (tp->dev->dev_addr[3] << 16) |
3213 (tp->dev->dev_addr[4] << 8) |
3214 (tp->dev->dev_addr[5] << 0));
3215 for (i = 0; i < 4; i++) {
3216 if (i == 1 && skip_mac_1)
3217 continue;
3218 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
3219 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
3220 }
3221
3222 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
3223 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
3224 for (i = 0; i < 12; i++) {
3225 tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
3226 tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
3227 }
3228 }
3229
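/* Seed the transmit backoff generator from the byte sum of the MAC
 * address so different stations choose different backoff slots after
 * a collision.
 */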
3230 addr_high = (tp->dev->dev_addr[0] +
3231 tp->dev->dev_addr[1] +
3232 tp->dev->dev_addr[2] +
3233 tp->dev->dev_addr[3] +
3234 tp->dev->dev_addr[4] +
3235 tp->dev->dev_addr[5]) &
3236 TX_BACKOFF_SEED_MASK;
3237 tw32(MAC_TX_BACKOFF_SEED, addr_high);
3238 }
3239
3240 static void tg3_enable_register_access(struct tg3 *tp)
3241 {
3242 /*
3243 * Make sure register accesses (indirect or otherwise) will function
3244 * correctly.
3245 */
3246 pci_write_config_dword(tp->pdev,
3247 TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
3248 }
3249
3250 static int tg3_power_up(struct tg3 *tp)
3251 {
3252 int err;
3253
3254 tg3_enable_register_access(tp);
3255
3256 err = pci_set_power_state(tp->pdev, PCI_D0);
3257 if (!err) {
3258 /* Switch out of Vaux if it is a NIC */
3259 tg3_pwrsrc_switch_to_vmain(tp);
3260 } else {
3261 netdev_err(tp->dev, "Transition to D0 failed\n");
3262 }
3263
3264 return err;
3265 }
3266
3267 static int tg3_power_down_prepare(struct tg3 *tp)
3268 {
3269 u32 misc_host_ctrl;
3270 bool device_should_wake, do_low_power;
3271
3272 tg3_enable_register_access(tp);
3273
3274 /* Restore the CLKREQ setting. */
3275 if (tg3_flag(tp, CLKREQ_BUG)) {
3276 u16 lnkctl;
3277
3278 pci_read_config_word(tp->pdev,
3279 pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
3280 &lnkctl);
3281 lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
3282 pci_write_config_word(tp->pdev,
3283 pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
3284 lnkctl);
3285 }
3286
3287 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
3288 tw32(TG3PCI_MISC_HOST_CTRL,
3289 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
3290
3291 device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
3292 tg3_flag(tp, WOL_ENABLE);
3293
3294 if (tg3_flag(tp, USE_PHYLIB)) {
3295 do_low_power = false;
3296 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
3297 !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3298 struct phy_device *phydev;
3299 u32 phyid, advertising;
3300
3301 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
3302
3303 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
3304
3305 tp->link_config.orig_speed = phydev->speed;
3306 tp->link_config.orig_duplex = phydev->duplex;
3307 tp->link_config.orig_autoneg = phydev->autoneg;
3308 tp->link_config.orig_advertising = phydev->advertising;
3309
3310 advertising = ADVERTISED_TP |
3311 ADVERTISED_Pause |
3312 ADVERTISED_Autoneg |
3313 ADVERTISED_10baseT_Half;
3314
3315 if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
3316 if (tg3_flag(tp, WOL_SPEED_100MB))
3317 advertising |=
3318 ADVERTISED_100baseT_Half |
3319 ADVERTISED_100baseT_Full |
3320 ADVERTISED_10baseT_Full;
3321 else
3322 advertising |= ADVERTISED_10baseT_Full;
3323 }
3324
3325 phydev->advertising = advertising;
3326
3327 phy_start_aneg(phydev);
3328
3329 phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
3330 if (phyid != PHY_ID_BCMAC131) {
3331 phyid &= PHY_BCM_OUI_MASK;
3332 if (phyid == PHY_BCM_OUI_1 ||
3333 phyid == PHY_BCM_OUI_2 ||
3334 phyid == PHY_BCM_OUI_3)
3335 do_low_power = true;
3336 }
3337 }
3338 } else {
3339 do_low_power = true;
3340
3341 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3342 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
3343 tp->link_config.orig_speed = tp->link_config.speed;
3344 tp->link_config.orig_duplex = tp->link_config.duplex;
3345 tp->link_config.orig_autoneg = tp->link_config.autoneg;
3346 }
3347
3348 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
3349 tp->link_config.speed = SPEED_10;
3350 tp->link_config.duplex = DUPLEX_HALF;
3351 tp->link_config.autoneg = AUTONEG_ENABLE;
3352 tg3_setup_phy(tp, 0);
3353 }
3354 }
3355
3356 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3357 u32 val;
3358
3359 val = tr32(GRC_VCPU_EXT_CTRL);
3360 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
3361 } else if (!tg3_flag(tp, ENABLE_ASF)) {
3362 int i;
3363 u32 val;
3364
3365 for (i = 0; i < 200; i++) {
3366 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
3367 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
3368 break;
3369 msleep(1);
3370 }
3371 }
3372 if (tg3_flag(tp, WOL_CAP))
3373 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
3374 WOL_DRV_STATE_SHUTDOWN |
3375 WOL_DRV_WOL |
3376 WOL_SET_MAGIC_PKT);
3377
3378 if (device_should_wake) {
3379 u32 mac_mode;
3380
3381 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
3382 if (do_low_power &&
3383 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
3384 tg3_phy_auxctl_write(tp,
3385 MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
3386 MII_TG3_AUXCTL_PCTL_WOL_EN |
3387 MII_TG3_AUXCTL_PCTL_100TX_LPWR |
3388 MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
3389 udelay(40);
3390 }
3391
3392 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
3393 mac_mode = MAC_MODE_PORT_MODE_GMII;
3394 else
3395 mac_mode = MAC_MODE_PORT_MODE_MII;
3396
3397 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
3398 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
3399 ASIC_REV_5700) {
3400 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
3401 SPEED_100 : SPEED_10;
3402 if (tg3_5700_link_polarity(tp, speed))
3403 mac_mode |= MAC_MODE_LINK_POLARITY;
3404 else
3405 mac_mode &= ~MAC_MODE_LINK_POLARITY;
3406 }
3407 } else {
3408 mac_mode = MAC_MODE_PORT_MODE_TBI;
3409 }
3410
3411 if (!tg3_flag(tp, 5750_PLUS))
3412 tw32(MAC_LED_CTRL, tp->led_ctrl);
3413
3414 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
3415 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
3416 (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
3417 mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
3418
3419 if (tg3_flag(tp, ENABLE_APE))
3420 mac_mode |= MAC_MODE_APE_TX_EN |
3421 MAC_MODE_APE_RX_EN |
3422 MAC_MODE_TDE_ENABLE;
3423
3424 tw32_f(MAC_MODE, mac_mode);
3425 udelay(100);
3426
3427 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
3428 udelay(10);
3429 }
3430
3431 if (!tg3_flag(tp, WOL_SPEED_100MB) &&
3432 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3433 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
3434 u32 base_val;
3435
3436 base_val = tp->pci_clock_ctrl;
3437 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
3438 CLOCK_CTRL_TXCLK_DISABLE);
3439
3440 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
3441 CLOCK_CTRL_PWRDOWN_PLL133, 40);
3442 } else if (tg3_flag(tp, 5780_CLASS) ||
3443 tg3_flag(tp, CPMU_PRESENT) ||
3444 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3445 /* do nothing */
3446 } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
3447 u32 newbits1, newbits2;
3448
3449 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3450 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3451 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
3452 CLOCK_CTRL_TXCLK_DISABLE |
3453 CLOCK_CTRL_ALTCLK);
3454 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
3455 } else if (tg3_flag(tp, 5705_PLUS)) {
3456 newbits1 = CLOCK_CTRL_625_CORE;
3457 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
3458 } else {
3459 newbits1 = CLOCK_CTRL_ALTCLK;
3460 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
3461 }
3462
3463 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
3464 40);
3465
3466 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
3467 40);
3468
3469 if (!tg3_flag(tp, 5705_PLUS)) {
3470 u32 newbits3;
3471
3472 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3473 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3474 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
3475 CLOCK_CTRL_TXCLK_DISABLE |
3476 CLOCK_CTRL_44MHZ_CORE);
3477 } else {
3478 newbits3 = CLOCK_CTRL_44MHZ_CORE;
3479 }
3480
3481 tw32_wait_f(TG3PCI_CLOCK_CTRL,
3482 tp->pci_clock_ctrl | newbits3, 40);
3483 }
3484 }
3485
3486 if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
3487 tg3_power_down_phy(tp, do_low_power);
3488
3489 tg3_frob_aux_power(tp, true);
3490
3491 /* Workaround for unstable PLL clock */
3492 if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
3493 (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
3494 u32 val = tr32(0x7d00);
3495
3496 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
3497 tw32(0x7d00, val);
3498 if (!tg3_flag(tp, ENABLE_ASF)) {
3499 int err;
3500
3501 err = tg3_nvram_lock(tp);
3502 tg3_halt_cpu(tp, RX_CPU_BASE);
3503 if (!err)
3504 tg3_nvram_unlock(tp);
3505 }
3506 }
3507
3508 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
3509
3510 return 0;
3511 }
3512
3513 static void tg3_power_down(struct tg3 *tp)
3514 {
3515 tg3_power_down_prepare(tp);
3516
3517 pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
3518 pci_set_power_state(tp->pdev, PCI_D3hot);
3519 }
3520
3521 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
3522 {
3523 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
3524 case MII_TG3_AUX_STAT_10HALF:
3525 *speed = SPEED_10;
3526 *duplex = DUPLEX_HALF;
3527 break;
3528
3529 case MII_TG3_AUX_STAT_10FULL:
3530 *speed = SPEED_10;
3531 *duplex = DUPLEX_FULL;
3532 break;
3533
3534 case MII_TG3_AUX_STAT_100HALF:
3535 *speed = SPEED_100;
3536 *duplex = DUPLEX_HALF;
3537 break;
3538
3539 case MII_TG3_AUX_STAT_100FULL:
3540 *speed = SPEED_100;
3541 *duplex = DUPLEX_FULL;
3542 break;
3543
3544 case MII_TG3_AUX_STAT_1000HALF:
3545 *speed = SPEED_1000;
3546 *duplex = DUPLEX_HALF;
3547 break;
3548
3549 case MII_TG3_AUX_STAT_1000FULL:
3550 *speed = SPEED_1000;
3551 *duplex = DUPLEX_FULL;
3552 break;
3553
3554 default:
3555 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3556 *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
3557 SPEED_10;
3558 *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
3559 DUPLEX_HALF;
3560 break;
3561 }
3562 *speed = SPEED_INVALID;
3563 *duplex = DUPLEX_INVALID;
3564 break;
3565 }
3566 }
3567
3568 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
3569 {
3570 int err = 0;
3571 u32 val, new_adv;
3572
3573 new_adv = ADVERTISE_CSMA;
3574 new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
3575 new_adv |= mii_advertise_flowctrl(flowctrl);
3576
3577 err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
3578 if (err)
3579 goto done;
3580
3581 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
3582 new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
3583
3584 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
3585 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
3586 new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
3587
3588 err = tg3_writephy(tp, MII_CTRL1000, new_adv);
3589 if (err)
3590 goto done;
3591 }
3592
3593 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
3594 goto done;
3595
3596 tw32(TG3_CPMU_EEE_MODE,
3597 tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
3598
3599 err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
3600 if (!err) {
3601 u32 err2;
3602
3603 val = 0;
3604 /* Advertise 100-BaseTX EEE ability */
3605 if (advertise & ADVERTISED_100baseT_Full)
3606 val |= MDIO_AN_EEE_ADV_100TX;
3607 /* Advertise 1000-BaseT EEE ability */
3608 if (advertise & ADVERTISED_1000baseT_Full)
3609 val |= MDIO_AN_EEE_ADV_1000T;
3610 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
3611 if (err)
3612 val = 0;
3613
3614 switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
3615 case ASIC_REV_5717:
3616 case ASIC_REV_57765:
3617 case ASIC_REV_57766:
3618 case ASIC_REV_5719:
3619 /* If we advertised any eee advertisements above... */
3620 if (val)
3621 val = MII_TG3_DSP_TAP26_ALNOKO |
3622 MII_TG3_DSP_TAP26_RMRXSTO |
3623 MII_TG3_DSP_TAP26_OPCSINPT;
3624 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
3625 /* Fall through */
3626 case ASIC_REV_5720:
3627 if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
3628 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
3629 MII_TG3_DSP_CH34TP2_HIBW01);
3630 }
3631
3632 err2 = TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
3633 if (!err)
3634 err = err2;
3635 }
3636
3637 done:
3638 return err;
3639 }
3640
3641 static void tg3_phy_copper_begin(struct tg3 *tp)
3642 {
3643 u32 new_adv;
3644 int i;
3645
3646 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
3647 new_adv = ADVERTISED_10baseT_Half |
3648 ADVERTISED_10baseT_Full;
3649 if (tg3_flag(tp, WOL_SPEED_100MB))
3650 new_adv |= ADVERTISED_100baseT_Half |
3651 ADVERTISED_100baseT_Full;
3652
3653 tg3_phy_autoneg_cfg(tp, new_adv,
3654 FLOW_CTRL_TX | FLOW_CTRL_RX);
3655 } else if (tp->link_config.speed == SPEED_INVALID) {
3656 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
3657 tp->link_config.advertising &=
3658 ~(ADVERTISED_1000baseT_Half |
3659 ADVERTISED_1000baseT_Full);
3660
3661 tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
3662 tp->link_config.flowctrl);
3663 } else {
3664 /* Asking for a specific link mode. */
3665 if (tp->link_config.speed == SPEED_1000) {
3666 if (tp->link_config.duplex == DUPLEX_FULL)
3667 new_adv = ADVERTISED_1000baseT_Full;
3668 else
3669 new_adv = ADVERTISED_1000baseT_Half;
3670 } else if (tp->link_config.speed == SPEED_100) {
3671 if (tp->link_config.duplex == DUPLEX_FULL)
3672 new_adv = ADVERTISED_100baseT_Full;
3673 else
3674 new_adv = ADVERTISED_100baseT_Half;
3675 } else {
3676 if (tp->link_config.duplex == DUPLEX_FULL)
3677 new_adv = ADVERTISED_10baseT_Full;
3678 else
3679 new_adv = ADVERTISED_10baseT_Half;
3680 }
3681
3682 tg3_phy_autoneg_cfg(tp, new_adv,
3683 tp->link_config.flowctrl);
3684 }
3685
3686 if (tp->link_config.autoneg == AUTONEG_DISABLE &&
3687 tp->link_config.speed != SPEED_INVALID) {
3688 u32 bmcr, orig_bmcr;
3689
3690 tp->link_config.active_speed = tp->link_config.speed;
3691 tp->link_config.active_duplex = tp->link_config.duplex;
3692
3693 bmcr = 0;
3694 switch (tp->link_config.speed) {
3695 default:
3696 case SPEED_10:
3697 break;
3698
3699 case SPEED_100:
3700 bmcr |= BMCR_SPEED100;
3701 break;
3702
3703 case SPEED_1000:
3704 bmcr |= BMCR_SPEED1000;
3705 break;
3706 }
3707
3708 if (tp->link_config.duplex == DUPLEX_FULL)
3709 bmcr |= BMCR_FULLDPLX;
3710
3711 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
3712 (bmcr != orig_bmcr)) {
3713 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
3714 for (i = 0; i < 1500; i++) {
3715 u32 tmp;
3716
3717 udelay(10);
3718 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
3719 tg3_readphy(tp, MII_BMSR, &tmp))
3720 continue;
3721 if (!(tmp & BMSR_LSTATUS)) {
3722 udelay(40);
3723 break;
3724 }
3725 }
3726 tg3_writephy(tp, MII_BMCR, bmcr);
3727 udelay(40);
3728 }
3729 } else {
3730 tg3_writephy(tp, MII_BMCR,
3731 BMCR_ANENABLE | BMCR_ANRESTART);
3732 }
3733 }
3734
3735 static int tg3_init_5401phy_dsp(struct tg3 *tp)
3736 {
3737 int err;
3738
3739 /* Turn off tap power management. */
3740 /* Set Extended packet length bit */
3741 err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
3742
3743 err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
3744 err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
3745 err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
3746 err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
3747 err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
3748
3749 udelay(40);
3750
3751 return err;
3752 }
3753
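/* Return true if the advertisement registers (MII_ADVERTISE and, on
 * gigabit-capable PHYs, MII_CTRL1000) already contain exactly what we
 * want to advertise, so autonegotiation need not be restarted.
 */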
3754 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
3755 {
3756 u32 advmsk, tgtadv, advertising;
3757
3758 advertising = tp->link_config.advertising;
3759 tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
3760
3761 advmsk = ADVERTISE_ALL;
3762 if (tp->link_config.active_duplex == DUPLEX_FULL) {
3763 tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
3764 advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
3765 }
3766
3767 if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
3768 return false;
3769
3770 if ((*lcladv & advmsk) != tgtadv)
3771 return false;
3772
3773 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
3774 u32 tg3_ctrl;
3775
3776 tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
3777
3778 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
3779 return false;
3780
3781 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
3782 if (tg3_ctrl != tgtadv)
3783 return false;
3784 }
3785
3786 return true;
3787 }
3788
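/* Fetch the link partner's abilities from MII_STAT1000 (gigabit PHYs
 * only) and MII_LPA, caching them in tp->link_config.rmt_adv in
 * ethtool format.  Returns false if either PHY read fails.
 */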
3789 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
3790 {
3791 u32 lpeth = 0;
3792
3793 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
3794 u32 val;
3795
3796 if (tg3_readphy(tp, MII_STAT1000, &val))
3797 return false;
3798
3799 lpeth = mii_stat1000_to_ethtool_lpa_t(val);
3800 }
3801
3802 if (tg3_readphy(tp, MII_LPA, rmtadv))
3803 return false;
3804
3805 lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
3806 tp->link_config.rmt_adv = lpeth;
3807
3808 return true;
3809 }
3810
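/* Bring up a copper link: quiesce MAC events, optionally reset the
 * PHY, wait for link, check the negotiated (or forced) parameters,
 * then program MAC_MODE, flow control and the various chip-specific
 * workarounds to match the resulting link state.
 */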
3811 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
3812 {
3813 int current_link_up;
3814 u32 bmsr, val;
3815 u32 lcl_adv, rmt_adv;
3816 u16 current_speed;
3817 u8 current_duplex;
3818 int i, err;
3819
3820 tw32(MAC_EVENT, 0);
3821
3822 tw32_f(MAC_STATUS,
3823 (MAC_STATUS_SYNC_CHANGED |
3824 MAC_STATUS_CFG_CHANGED |
3825 MAC_STATUS_MI_COMPLETION |
3826 MAC_STATUS_LNKSTATE_CHANGED));
3827 udelay(40);
3828
3829 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
3830 tw32_f(MAC_MI_MODE,
3831 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
3832 udelay(80);
3833 }
3834
3835 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
3836
3837 /* Some third-party PHYs need to be reset on link going
3838 * down.
3839 */
3840 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
3841 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
3842 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
3843 netif_carrier_ok(tp->dev)) {
3844 tg3_readphy(tp, MII_BMSR, &bmsr);
3845 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3846 !(bmsr & BMSR_LSTATUS))
3847 force_reset = 1;
3848 }
3849 if (force_reset)
3850 tg3_phy_reset(tp);
3851
3852 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
3853 tg3_readphy(tp, MII_BMSR, &bmsr);
3854 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
3855 !tg3_flag(tp, INIT_COMPLETE))
3856 bmsr = 0;
3857
3858 if (!(bmsr & BMSR_LSTATUS)) {
3859 err = tg3_init_5401phy_dsp(tp);
3860 if (err)
3861 return err;
3862
3863 tg3_readphy(tp, MII_BMSR, &bmsr);
3864 for (i = 0; i < 1000; i++) {
3865 udelay(10);
3866 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3867 (bmsr & BMSR_LSTATUS)) {
3868 udelay(40);
3869 break;
3870 }
3871 }
3872
3873 if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
3874 TG3_PHY_REV_BCM5401_B0 &&
3875 !(bmsr & BMSR_LSTATUS) &&
3876 tp->link_config.active_speed == SPEED_1000) {
3877 err = tg3_phy_reset(tp);
3878 if (!err)
3879 err = tg3_init_5401phy_dsp(tp);
3880 if (err)
3881 return err;
3882 }
3883 }
3884 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
3885 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
3886 /* 5701 {A0,B0} CRC bug workaround */
3887 tg3_writephy(tp, 0x15, 0x0a75);
3888 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
3889 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
3890 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
3891 }
3892
3893 /* Clear pending interrupts... */
3894 tg3_readphy(tp, MII_TG3_ISTAT, &val);
3895 tg3_readphy(tp, MII_TG3_ISTAT, &val);
3896
3897 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
3898 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
3899 else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
3900 tg3_writephy(tp, MII_TG3_IMASK, ~0);
3901
3902 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3903 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3904 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
3905 tg3_writephy(tp, MII_TG3_EXT_CTRL,
3906 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
3907 else
3908 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
3909 }
3910
3911 current_link_up = 0;
3912 current_speed = SPEED_INVALID;
3913 current_duplex = DUPLEX_INVALID;
3914 tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
3915 tp->link_config.rmt_adv = 0;
3916
3917 if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
3918 err = tg3_phy_auxctl_read(tp,
3919 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
3920 &val);
3921 if (!err && !(val & (1 << 10))) {
3922 tg3_phy_auxctl_write(tp,
3923 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
3924 val | (1 << 10));
3925 goto relink;
3926 }
3927 }
3928
3929 bmsr = 0;
3930 for (i = 0; i < 100; i++) {
3931 tg3_readphy(tp, MII_BMSR, &bmsr);
3932 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3933 (bmsr & BMSR_LSTATUS))
3934 break;
3935 udelay(40);
3936 }
3937
3938 if (bmsr & BMSR_LSTATUS) {
3939 u32 aux_stat, bmcr;
3940
3941 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
3942 for (i = 0; i < 2000; i++) {
3943 udelay(10);
3944 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
3945 aux_stat)
3946 break;
3947 }
3948
3949 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
3950 					    &current_speed,
3951 					    &current_duplex);
3952
3953 bmcr = 0;
3954 for (i = 0; i < 200; i++) {
3955 tg3_readphy(tp, MII_BMCR, &bmcr);
3956 if (tg3_readphy(tp, MII_BMCR, &bmcr))
3957 continue;
3958 if (bmcr && bmcr != 0x7fff)
3959 break;
3960 udelay(10);
3961 }
3962
3963 lcl_adv = 0;
3964 rmt_adv = 0;
3965
3966 tp->link_config.active_speed = current_speed;
3967 tp->link_config.active_duplex = current_duplex;
3968
3969 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
3970 if ((bmcr & BMCR_ANENABLE) &&
3971 tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
3972 tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
3973 current_link_up = 1;
3974 } else {
3975 if (!(bmcr & BMCR_ANENABLE) &&
3976 tp->link_config.speed == current_speed &&
3977 tp->link_config.duplex == current_duplex &&
3978 tp->link_config.flowctrl ==
3979 tp->link_config.active_flowctrl) {
3980 current_link_up = 1;
3981 }
3982 }
3983
3984 if (current_link_up == 1 &&
3985 tp->link_config.active_duplex == DUPLEX_FULL) {
3986 u32 reg, bit;
3987
3988 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3989 reg = MII_TG3_FET_GEN_STAT;
3990 bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
3991 } else {
3992 reg = MII_TG3_EXT_STAT;
3993 bit = MII_TG3_EXT_STAT_MDIX;
3994 }
3995
3996 if (!tg3_readphy(tp, reg, &val) && (val & bit))
3997 tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
3998
3999 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4000 }
4001 }
4002
4003 relink:
4004 if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4005 tg3_phy_copper_begin(tp);
4006
4007 tg3_readphy(tp, MII_BMSR, &bmsr);
4008 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4009 (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4010 current_link_up = 1;
4011 }
4012
4013 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4014 if (current_link_up == 1) {
4015 if (tp->link_config.active_speed == SPEED_100 ||
4016 tp->link_config.active_speed == SPEED_10)
4017 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4018 else
4019 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4020 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
4021 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4022 else
4023 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4024
4025 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4026 if (tp->link_config.active_duplex == DUPLEX_HALF)
4027 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4028
4029 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
4030 if (current_link_up == 1 &&
4031 tg3_5700_link_polarity(tp, tp->link_config.active_speed))
4032 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
4033 else
4034 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
4035 }
4036
4037 /* ??? Without this setting Netgear GA302T PHY does not
4038 * ??? send/receive packets...
4039 */
4040 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
4041 tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
4042 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
4043 tw32_f(MAC_MI_MODE, tp->mi_mode);
4044 udelay(80);
4045 }
4046
4047 tw32_f(MAC_MODE, tp->mac_mode);
4048 udelay(40);
4049
4050 tg3_phy_eee_adjust(tp, current_link_up);
4051
4052 if (tg3_flag(tp, USE_LINKCHG_REG)) {
4053 /* Polled via timer. */
4054 tw32_f(MAC_EVENT, 0);
4055 } else {
4056 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4057 }
4058 udelay(40);
4059
4060 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
4061 current_link_up == 1 &&
4062 tp->link_config.active_speed == SPEED_1000 &&
4063 (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
4064 udelay(120);
4065 tw32_f(MAC_STATUS,
4066 (MAC_STATUS_SYNC_CHANGED |
4067 MAC_STATUS_CFG_CHANGED));
4068 udelay(40);
4069 tg3_write_mem(tp,
4070 NIC_SRAM_FIRMWARE_MBOX,
4071 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
4072 }
4073
4074 /* Prevent send BD corruption. */
4075 if (tg3_flag(tp, CLKREQ_BUG)) {
4076 u16 oldlnkctl, newlnkctl;
4077
4078 pci_read_config_word(tp->pdev,
4079 pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
4080 &oldlnkctl);
4081 if (tp->link_config.active_speed == SPEED_100 ||
4082 tp->link_config.active_speed == SPEED_10)
4083 newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN;
4084 else
4085 newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN;
4086 if (newlnkctl != oldlnkctl)
4087 pci_write_config_word(tp->pdev,
4088 pci_pcie_cap(tp->pdev) +
4089 PCI_EXP_LNKCTL, newlnkctl);
4090 }
4091
4092 if (current_link_up != netif_carrier_ok(tp->dev)) {
4093 if (current_link_up)
4094 netif_carrier_on(tp->dev);
4095 else
4096 netif_carrier_off(tp->dev);
4097 tg3_link_report(tp);
4098 }
4099
4100 return 0;
4101 }
4102
4103 struct tg3_fiber_aneginfo {
4104 int state;
4105 #define ANEG_STATE_UNKNOWN 0
4106 #define ANEG_STATE_AN_ENABLE 1
4107 #define ANEG_STATE_RESTART_INIT 2
4108 #define ANEG_STATE_RESTART 3
4109 #define ANEG_STATE_DISABLE_LINK_OK 4
4110 #define ANEG_STATE_ABILITY_DETECT_INIT 5
4111 #define ANEG_STATE_ABILITY_DETECT 6
4112 #define ANEG_STATE_ACK_DETECT_INIT 7
4113 #define ANEG_STATE_ACK_DETECT 8
4114 #define ANEG_STATE_COMPLETE_ACK_INIT 9
4115 #define ANEG_STATE_COMPLETE_ACK 10
4116 #define ANEG_STATE_IDLE_DETECT_INIT 11
4117 #define ANEG_STATE_IDLE_DETECT 12
4118 #define ANEG_STATE_LINK_OK 13
4119 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
4120 #define ANEG_STATE_NEXT_PAGE_WAIT 15
4121
4122 u32 flags;
4123 #define MR_AN_ENABLE 0x00000001
4124 #define MR_RESTART_AN 0x00000002
4125 #define MR_AN_COMPLETE 0x00000004
4126 #define MR_PAGE_RX 0x00000008
4127 #define MR_NP_LOADED 0x00000010
4128 #define MR_TOGGLE_TX 0x00000020
4129 #define MR_LP_ADV_FULL_DUPLEX 0x00000040
4130 #define MR_LP_ADV_HALF_DUPLEX 0x00000080
4131 #define MR_LP_ADV_SYM_PAUSE 0x00000100
4132 #define MR_LP_ADV_ASYM_PAUSE 0x00000200
4133 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
4134 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
4135 #define MR_LP_ADV_NEXT_PAGE 0x00001000
4136 #define MR_TOGGLE_RX 0x00002000
4137 #define MR_NP_RX 0x00004000
4138
4139 #define MR_LINK_OK 0x80000000
4140
4141 unsigned long link_time, cur_time;
4142
4143 u32 ability_match_cfg;
4144 int ability_match_count;
4145
4146 char ability_match, idle_match, ack_match;
4147
4148 u32 txconfig, rxconfig;
4149 #define ANEG_CFG_NP 0x00000080
4150 #define ANEG_CFG_ACK 0x00000040
4151 #define ANEG_CFG_RF2 0x00000020
4152 #define ANEG_CFG_RF1 0x00000010
4153 #define ANEG_CFG_PS2 0x00000001
4154 #define ANEG_CFG_PS1 0x00008000
4155 #define ANEG_CFG_HD 0x00004000
4156 #define ANEG_CFG_FD 0x00002000
4157 #define ANEG_CFG_INVAL 0x00001f06
4158
4159 };
4160 #define ANEG_OK 0
4161 #define ANEG_DONE 1
4162 #define ANEG_TIMER_ENAB 2
4163 #define ANEG_FAILED -1
4164
4165 #define ANEG_STATE_SETTLE_TIME 10000
4166
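/* Advance the software 1000BASE-X autonegotiation state machine
 * (modeled on the IEEE 802.3 clause 37 arbitration states) by one
 * tick.  Returns ANEG_OK, ANEG_DONE, ANEG_TIMER_ENAB or ANEG_FAILED.
 *
 * Usage sketch, as fiber_autoneg() below drives it:
 *
 *	while (++tick < 195000) {
 *		status = tg3_fiber_aneg_smachine(tp, &aninfo);
 *		if (status == ANEG_DONE || status == ANEG_FAILED)
 *			break;
 *		udelay(1);
 *	}
 */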
4167 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
4168 struct tg3_fiber_aneginfo *ap)
4169 {
4170 u16 flowctrl;
4171 unsigned long delta;
4172 u32 rx_cfg_reg;
4173 int ret;
4174
4175 if (ap->state == ANEG_STATE_UNKNOWN) {
4176 ap->rxconfig = 0;
4177 ap->link_time = 0;
4178 ap->cur_time = 0;
4179 ap->ability_match_cfg = 0;
4180 ap->ability_match_count = 0;
4181 ap->ability_match = 0;
4182 ap->idle_match = 0;
4183 ap->ack_match = 0;
4184 }
4185 ap->cur_time++;
4186
4187 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
4188 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
4189
4190 if (rx_cfg_reg != ap->ability_match_cfg) {
4191 ap->ability_match_cfg = rx_cfg_reg;
4192 ap->ability_match = 0;
4193 ap->ability_match_count = 0;
4194 } else {
4195 if (++ap->ability_match_count > 1) {
4196 ap->ability_match = 1;
4197 ap->ability_match_cfg = rx_cfg_reg;
4198 }
4199 }
4200 if (rx_cfg_reg & ANEG_CFG_ACK)
4201 ap->ack_match = 1;
4202 else
4203 ap->ack_match = 0;
4204
4205 ap->idle_match = 0;
4206 } else {
4207 ap->idle_match = 1;
4208 ap->ability_match_cfg = 0;
4209 ap->ability_match_count = 0;
4210 ap->ability_match = 0;
4211 ap->ack_match = 0;
4212
4213 rx_cfg_reg = 0;
4214 }
4215
4216 ap->rxconfig = rx_cfg_reg;
4217 ret = ANEG_OK;
4218
4219 switch (ap->state) {
4220 case ANEG_STATE_UNKNOWN:
4221 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
4222 ap->state = ANEG_STATE_AN_ENABLE;
4223
4224 /* fallthru */
4225 case ANEG_STATE_AN_ENABLE:
4226 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
4227 if (ap->flags & MR_AN_ENABLE) {
4228 ap->link_time = 0;
4229 ap->cur_time = 0;
4230 ap->ability_match_cfg = 0;
4231 ap->ability_match_count = 0;
4232 ap->ability_match = 0;
4233 ap->idle_match = 0;
4234 ap->ack_match = 0;
4235
4236 ap->state = ANEG_STATE_RESTART_INIT;
4237 } else {
4238 ap->state = ANEG_STATE_DISABLE_LINK_OK;
4239 }
4240 break;
4241
4242 case ANEG_STATE_RESTART_INIT:
4243 ap->link_time = ap->cur_time;
4244 ap->flags &= ~(MR_NP_LOADED);
4245 ap->txconfig = 0;
4246 tw32(MAC_TX_AUTO_NEG, 0);
4247 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4248 tw32_f(MAC_MODE, tp->mac_mode);
4249 udelay(40);
4250
4251 ret = ANEG_TIMER_ENAB;
4252 ap->state = ANEG_STATE_RESTART;
4253
4254 /* fallthru */
4255 case ANEG_STATE_RESTART:
4256 delta = ap->cur_time - ap->link_time;
4257 if (delta > ANEG_STATE_SETTLE_TIME)
4258 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
4259 else
4260 ret = ANEG_TIMER_ENAB;
4261 break;
4262
4263 case ANEG_STATE_DISABLE_LINK_OK:
4264 ret = ANEG_DONE;
4265 break;
4266
4267 case ANEG_STATE_ABILITY_DETECT_INIT:
4268 ap->flags &= ~(MR_TOGGLE_TX);
4269 ap->txconfig = ANEG_CFG_FD;
4270 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4271 if (flowctrl & ADVERTISE_1000XPAUSE)
4272 ap->txconfig |= ANEG_CFG_PS1;
4273 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
4274 ap->txconfig |= ANEG_CFG_PS2;
4275 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
4276 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4277 tw32_f(MAC_MODE, tp->mac_mode);
4278 udelay(40);
4279
4280 ap->state = ANEG_STATE_ABILITY_DETECT;
4281 break;
4282
4283 case ANEG_STATE_ABILITY_DETECT:
4284 if (ap->ability_match != 0 && ap->rxconfig != 0)
4285 ap->state = ANEG_STATE_ACK_DETECT_INIT;
4286 break;
4287
4288 case ANEG_STATE_ACK_DETECT_INIT:
4289 ap->txconfig |= ANEG_CFG_ACK;
4290 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
4291 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4292 tw32_f(MAC_MODE, tp->mac_mode);
4293 udelay(40);
4294
4295 ap->state = ANEG_STATE_ACK_DETECT;
4296
4297 /* fallthru */
4298 case ANEG_STATE_ACK_DETECT:
4299 if (ap->ack_match != 0) {
4300 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
4301 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
4302 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
4303 } else {
4304 ap->state = ANEG_STATE_AN_ENABLE;
4305 }
4306 } else if (ap->ability_match != 0 &&
4307 ap->rxconfig == 0) {
4308 ap->state = ANEG_STATE_AN_ENABLE;
4309 }
4310 break;
4311
4312 case ANEG_STATE_COMPLETE_ACK_INIT:
4313 if (ap->rxconfig & ANEG_CFG_INVAL) {
4314 ret = ANEG_FAILED;
4315 break;
4316 }
4317 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
4318 MR_LP_ADV_HALF_DUPLEX |
4319 MR_LP_ADV_SYM_PAUSE |
4320 MR_LP_ADV_ASYM_PAUSE |
4321 MR_LP_ADV_REMOTE_FAULT1 |
4322 MR_LP_ADV_REMOTE_FAULT2 |
4323 MR_LP_ADV_NEXT_PAGE |
4324 MR_TOGGLE_RX |
4325 MR_NP_RX);
4326 if (ap->rxconfig & ANEG_CFG_FD)
4327 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
4328 if (ap->rxconfig & ANEG_CFG_HD)
4329 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
4330 if (ap->rxconfig & ANEG_CFG_PS1)
4331 ap->flags |= MR_LP_ADV_SYM_PAUSE;
4332 if (ap->rxconfig & ANEG_CFG_PS2)
4333 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
4334 if (ap->rxconfig & ANEG_CFG_RF1)
4335 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
4336 if (ap->rxconfig & ANEG_CFG_RF2)
4337 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
4338 if (ap->rxconfig & ANEG_CFG_NP)
4339 ap->flags |= MR_LP_ADV_NEXT_PAGE;
4340
4341 ap->link_time = ap->cur_time;
4342
4343 ap->flags ^= (MR_TOGGLE_TX);
4344 if (ap->rxconfig & 0x0008)
4345 ap->flags |= MR_TOGGLE_RX;
4346 if (ap->rxconfig & ANEG_CFG_NP)
4347 ap->flags |= MR_NP_RX;
4348 ap->flags |= MR_PAGE_RX;
4349
4350 ap->state = ANEG_STATE_COMPLETE_ACK;
4351 ret = ANEG_TIMER_ENAB;
4352 break;
4353
4354 case ANEG_STATE_COMPLETE_ACK:
4355 if (ap->ability_match != 0 &&
4356 ap->rxconfig == 0) {
4357 ap->state = ANEG_STATE_AN_ENABLE;
4358 break;
4359 }
4360 delta = ap->cur_time - ap->link_time;
4361 if (delta > ANEG_STATE_SETTLE_TIME) {
4362 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
4363 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
4364 } else {
4365 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
4366 !(ap->flags & MR_NP_RX)) {
4367 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
4368 } else {
4369 ret = ANEG_FAILED;
4370 }
4371 }
4372 }
4373 break;
4374
4375 case ANEG_STATE_IDLE_DETECT_INIT:
4376 ap->link_time = ap->cur_time;
4377 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
4378 tw32_f(MAC_MODE, tp->mac_mode);
4379 udelay(40);
4380
4381 ap->state = ANEG_STATE_IDLE_DETECT;
4382 ret = ANEG_TIMER_ENAB;
4383 break;
4384
4385 case ANEG_STATE_IDLE_DETECT:
4386 if (ap->ability_match != 0 &&
4387 ap->rxconfig == 0) {
4388 ap->state = ANEG_STATE_AN_ENABLE;
4389 break;
4390 }
4391 delta = ap->cur_time - ap->link_time;
4392 if (delta > ANEG_STATE_SETTLE_TIME) {
4393 /* XXX another gem from the Broadcom driver :( */
4394 ap->state = ANEG_STATE_LINK_OK;
4395 }
4396 break;
4397
4398 case ANEG_STATE_LINK_OK:
4399 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
4400 ret = ANEG_DONE;
4401 break;
4402
4403 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
4404 /* ??? unimplemented */
4405 break;
4406
4407 case ANEG_STATE_NEXT_PAGE_WAIT:
4408 /* ??? unimplemented */
4409 break;
4410
4411 default:
4412 ret = ANEG_FAILED;
4413 break;
4414 }
4415
4416 return ret;
4417 }
4418
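/* Run the software fiber autonegotiation to completion: put the MAC
 * into GMII mode sending config code words, then step the state
 * machine for up to ~195 ms at roughly 1 us per tick (each settle
 * phase is ANEG_STATE_SETTLE_TIME ticks, i.e. about 10 ms).  Returns
 * nonzero on successful completion.
 */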
4419 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
4420 {
4421 int res = 0;
4422 struct tg3_fiber_aneginfo aninfo;
4423 int status = ANEG_FAILED;
4424 unsigned int tick;
4425 u32 tmp;
4426
4427 tw32_f(MAC_TX_AUTO_NEG, 0);
4428
4429 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
4430 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
4431 udelay(40);
4432
4433 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
4434 udelay(40);
4435
4436 memset(&aninfo, 0, sizeof(aninfo));
4437 aninfo.flags |= MR_AN_ENABLE;
4438 aninfo.state = ANEG_STATE_UNKNOWN;
4439 aninfo.cur_time = 0;
4440 tick = 0;
4441 while (++tick < 195000) {
4442 status = tg3_fiber_aneg_smachine(tp, &aninfo);
4443 if (status == ANEG_DONE || status == ANEG_FAILED)
4444 break;
4445
4446 udelay(1);
4447 }
4448
4449 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
4450 tw32_f(MAC_MODE, tp->mac_mode);
4451 udelay(40);
4452
4453 *txflags = aninfo.txconfig;
4454 *rxflags = aninfo.flags;
4455
4456 if (status == ANEG_DONE &&
4457 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
4458 MR_LP_ADV_FULL_DUPLEX)))
4459 res = 1;
4460
4461 return res;
4462 }
4463
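/* One-time init sequence for the BCM8002 SerDes PHY: widen the PLL
 * lock range, soft-reset, enable auto-lock/comdet, pulse POR, and
 * finally deselect the channel register so the PHY ID can still be
 * read later.
 */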
4464 static void tg3_init_bcm8002(struct tg3 *tp)
4465 {
4466 u32 mac_status = tr32(MAC_STATUS);
4467 int i;
4468
4469 	/* Reset when initializing for the first time or when we have a link. */
4470 if (tg3_flag(tp, INIT_COMPLETE) &&
4471 !(mac_status & MAC_STATUS_PCS_SYNCED))
4472 return;
4473
4474 /* Set PLL lock range. */
4475 tg3_writephy(tp, 0x16, 0x8007);
4476
4477 /* SW reset */
4478 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
4479
4480 /* Wait for reset to complete. */
4481 /* XXX schedule_timeout() ... */
4482 for (i = 0; i < 500; i++)
4483 udelay(10);
4484
4485 /* Config mode; select PMA/Ch 1 regs. */
4486 tg3_writephy(tp, 0x10, 0x8411);
4487
4488 /* Enable auto-lock and comdet, select txclk for tx. */
4489 tg3_writephy(tp, 0x11, 0x0a10);
4490
4491 tg3_writephy(tp, 0x18, 0x00a0);
4492 tg3_writephy(tp, 0x16, 0x41ff);
4493
4494 /* Assert and deassert POR. */
4495 tg3_writephy(tp, 0x13, 0x0400);
4496 udelay(40);
4497 tg3_writephy(tp, 0x13, 0x0000);
4498
4499 tg3_writephy(tp, 0x11, 0x0a50);
4500 udelay(40);
4501 tg3_writephy(tp, 0x11, 0x0a10);
4502
4503 /* Wait for signal to stabilize */
4504 /* XXX schedule_timeout() ... */
4505 for (i = 0; i < 15000; i++)
4506 udelay(10);
4507
4508 /* Deselect the channel register so we can read the PHYID
4509 * later.
4510 */
4511 tg3_writephy(tp, 0x10, 0x8011);
4512 }
4513
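/* Fiber link setup using the on-chip SG_DIG hardware autonegotiation
 * block: handles the forced-mode path, restarts autoneg whenever the
 * wanted SG_DIG_CTRL value changes, and falls back to parallel
 * detection when the partner never completes autoneg.  Returns
 * nonzero if the link came up.
 */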
4514 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
4515 {
4516 u16 flowctrl;
4517 u32 sg_dig_ctrl, sg_dig_status;
4518 u32 serdes_cfg, expected_sg_dig_ctrl;
4519 int workaround, port_a;
4520 int current_link_up;
4521
4522 serdes_cfg = 0;
4523 expected_sg_dig_ctrl = 0;
4524 workaround = 0;
4525 port_a = 1;
4526 current_link_up = 0;
4527
4528 if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
4529 tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
4530 workaround = 1;
4531 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
4532 port_a = 0;
4533
4534 /* preserve bits 0-11,13,14 for signal pre-emphasis */
4535 /* preserve bits 20-23 for voltage regulator */
4536 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
4537 }
4538
4539 sg_dig_ctrl = tr32(SG_DIG_CTRL);
4540
4541 if (tp->link_config.autoneg != AUTONEG_ENABLE) {
4542 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
4543 if (workaround) {
4544 u32 val = serdes_cfg;
4545
4546 if (port_a)
4547 val |= 0xc010000;
4548 else
4549 val |= 0x4010000;
4550 tw32_f(MAC_SERDES_CFG, val);
4551 }
4552
4553 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
4554 }
4555 if (mac_status & MAC_STATUS_PCS_SYNCED) {
4556 tg3_setup_flow_control(tp, 0, 0);
4557 current_link_up = 1;
4558 }
4559 goto out;
4560 }
4561
4562 /* Want auto-negotiation. */
4563 expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
4564
4565 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4566 if (flowctrl & ADVERTISE_1000XPAUSE)
4567 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
4568 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
4569 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
4570
4571 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
4572 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
4573 tp->serdes_counter &&
4574 ((mac_status & (MAC_STATUS_PCS_SYNCED |
4575 MAC_STATUS_RCVD_CFG)) ==
4576 MAC_STATUS_PCS_SYNCED)) {
4577 tp->serdes_counter--;
4578 current_link_up = 1;
4579 goto out;
4580 }
4581 restart_autoneg:
4582 if (workaround)
4583 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
4584 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
4585 udelay(5);
4586 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
4587
4588 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
4589 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4590 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
4591 MAC_STATUS_SIGNAL_DET)) {
4592 sg_dig_status = tr32(SG_DIG_STATUS);
4593 mac_status = tr32(MAC_STATUS);
4594
4595 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
4596 (mac_status & MAC_STATUS_PCS_SYNCED)) {
4597 u32 local_adv = 0, remote_adv = 0;
4598
4599 if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
4600 local_adv |= ADVERTISE_1000XPAUSE;
4601 if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
4602 local_adv |= ADVERTISE_1000XPSE_ASYM;
4603
4604 if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
4605 remote_adv |= LPA_1000XPAUSE;
4606 if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
4607 remote_adv |= LPA_1000XPAUSE_ASYM;
4608
4609 tp->link_config.rmt_adv =
4610 mii_adv_to_ethtool_adv_x(remote_adv);
4611
4612 tg3_setup_flow_control(tp, local_adv, remote_adv);
4613 current_link_up = 1;
4614 tp->serdes_counter = 0;
4615 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4616 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
4617 if (tp->serdes_counter)
4618 tp->serdes_counter--;
4619 else {
4620 if (workaround) {
4621 u32 val = serdes_cfg;
4622
4623 if (port_a)
4624 val |= 0xc010000;
4625 else
4626 val |= 0x4010000;
4627
4628 tw32_f(MAC_SERDES_CFG, val);
4629 }
4630
4631 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
4632 udelay(40);
4633
4634 					/* Link parallel detection - link is up
4635 					 * only if we have PCS_SYNC and not
4636 					 * receiving config code words */
4637 mac_status = tr32(MAC_STATUS);
4638 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
4639 !(mac_status & MAC_STATUS_RCVD_CFG)) {
4640 tg3_setup_flow_control(tp, 0, 0);
4641 current_link_up = 1;
4642 tp->phy_flags |=
4643 TG3_PHYFLG_PARALLEL_DETECT;
4644 tp->serdes_counter =
4645 SERDES_PARALLEL_DET_TIMEOUT;
4646 } else
4647 goto restart_autoneg;
4648 }
4649 }
4650 } else {
4651 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
4652 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4653 }
4654
4655 out:
4656 return current_link_up;
4657 }
4658
4659 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
4660 {
4661 int current_link_up = 0;
4662
4663 if (!(mac_status & MAC_STATUS_PCS_SYNCED))
4664 goto out;
4665
4666 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4667 u32 txflags, rxflags;
4668 int i;
4669
4670 if (fiber_autoneg(tp, &txflags, &rxflags)) {
4671 u32 local_adv = 0, remote_adv = 0;
4672
4673 if (txflags & ANEG_CFG_PS1)
4674 local_adv |= ADVERTISE_1000XPAUSE;
4675 if (txflags & ANEG_CFG_PS2)
4676 local_adv |= ADVERTISE_1000XPSE_ASYM;
4677
4678 if (rxflags & MR_LP_ADV_SYM_PAUSE)
4679 remote_adv |= LPA_1000XPAUSE;
4680 if (rxflags & MR_LP_ADV_ASYM_PAUSE)
4681 remote_adv |= LPA_1000XPAUSE_ASYM;
4682
4683 tp->link_config.rmt_adv =
4684 mii_adv_to_ethtool_adv_x(remote_adv);
4685
4686 tg3_setup_flow_control(tp, local_adv, remote_adv);
4687
4688 current_link_up = 1;
4689 }
4690 for (i = 0; i < 30; i++) {
4691 udelay(20);
4692 tw32_f(MAC_STATUS,
4693 (MAC_STATUS_SYNC_CHANGED |
4694 MAC_STATUS_CFG_CHANGED));
4695 udelay(40);
4696 if ((tr32(MAC_STATUS) &
4697 (MAC_STATUS_SYNC_CHANGED |
4698 MAC_STATUS_CFG_CHANGED)) == 0)
4699 break;
4700 }
4701
4702 mac_status = tr32(MAC_STATUS);
4703 if (current_link_up == 0 &&
4704 (mac_status & MAC_STATUS_PCS_SYNCED) &&
4705 !(mac_status & MAC_STATUS_RCVD_CFG))
4706 current_link_up = 1;
4707 } else {
4708 tg3_setup_flow_control(tp, 0, 0);
4709
4710 /* Forcing 1000FD link up. */
4711 current_link_up = 1;
4712
4713 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
4714 udelay(40);
4715
4716 tw32_f(MAC_MODE, tp->mac_mode);
4717 udelay(40);
4718 }
4719
4720 out:
4721 return current_link_up;
4722 }
4723
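/* Top-level fiber (TBI) link setup.  Returns early when the link is
 * already synced and nothing changed; otherwise runs either the
 * hardware (SG_DIG) or software autonegotiation path and programs the
 * link LEDs and carrier state to match the outcome.
 */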
4724 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
4725 {
4726 u32 orig_pause_cfg;
4727 u16 orig_active_speed;
4728 u8 orig_active_duplex;
4729 u32 mac_status;
4730 int current_link_up;
4731 int i;
4732
4733 orig_pause_cfg = tp->link_config.active_flowctrl;
4734 orig_active_speed = tp->link_config.active_speed;
4735 orig_active_duplex = tp->link_config.active_duplex;
4736
4737 if (!tg3_flag(tp, HW_AUTONEG) &&
4738 netif_carrier_ok(tp->dev) &&
4739 tg3_flag(tp, INIT_COMPLETE)) {
4740 mac_status = tr32(MAC_STATUS);
4741 mac_status &= (MAC_STATUS_PCS_SYNCED |
4742 MAC_STATUS_SIGNAL_DET |
4743 MAC_STATUS_CFG_CHANGED |
4744 MAC_STATUS_RCVD_CFG);
4745 if (mac_status == (MAC_STATUS_PCS_SYNCED |
4746 MAC_STATUS_SIGNAL_DET)) {
4747 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
4748 MAC_STATUS_CFG_CHANGED));
4749 return 0;
4750 }
4751 }
4752
4753 tw32_f(MAC_TX_AUTO_NEG, 0);
4754
4755 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
4756 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
4757 tw32_f(MAC_MODE, tp->mac_mode);
4758 udelay(40);
4759
4760 if (tp->phy_id == TG3_PHY_ID_BCM8002)
4761 tg3_init_bcm8002(tp);
4762
4763 /* Enable link change event even when serdes polling. */
4764 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4765 udelay(40);
4766
4767 current_link_up = 0;
4768 tp->link_config.rmt_adv = 0;
4769 mac_status = tr32(MAC_STATUS);
4770
4771 if (tg3_flag(tp, HW_AUTONEG))
4772 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
4773 else
4774 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
4775
4776 tp->napi[0].hw_status->status =
4777 (SD_STATUS_UPDATED |
4778 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
4779
4780 for (i = 0; i < 100; i++) {
4781 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
4782 MAC_STATUS_CFG_CHANGED));
4783 udelay(5);
4784 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
4785 MAC_STATUS_CFG_CHANGED |
4786 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
4787 break;
4788 }
4789
4790 mac_status = tr32(MAC_STATUS);
4791 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
4792 current_link_up = 0;
4793 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
4794 tp->serdes_counter == 0) {
4795 tw32_f(MAC_MODE, (tp->mac_mode |
4796 MAC_MODE_SEND_CONFIGS));
4797 udelay(1);
4798 tw32_f(MAC_MODE, tp->mac_mode);
4799 }
4800 }
4801
4802 if (current_link_up == 1) {
4803 tp->link_config.active_speed = SPEED_1000;
4804 tp->link_config.active_duplex = DUPLEX_FULL;
4805 tw32(MAC_LED_CTRL, (tp->led_ctrl |
4806 LED_CTRL_LNKLED_OVERRIDE |
4807 LED_CTRL_1000MBPS_ON));
4808 } else {
4809 tp->link_config.active_speed = SPEED_INVALID;
4810 tp->link_config.active_duplex = DUPLEX_INVALID;
4811 tw32(MAC_LED_CTRL, (tp->led_ctrl |
4812 LED_CTRL_LNKLED_OVERRIDE |
4813 LED_CTRL_TRAFFIC_OVERRIDE));
4814 }
4815
4816 if (current_link_up != netif_carrier_ok(tp->dev)) {
4817 if (current_link_up)
4818 netif_carrier_on(tp->dev);
4819 else
4820 netif_carrier_off(tp->dev);
4821 tg3_link_report(tp);
4822 } else {
4823 u32 now_pause_cfg = tp->link_config.active_flowctrl;
4824 if (orig_pause_cfg != now_pause_cfg ||
4825 orig_active_speed != tp->link_config.active_speed ||
4826 orig_active_duplex != tp->link_config.active_duplex)
4827 tg3_link_report(tp);
4828 }
4829
4830 return 0;
4831 }
4832
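/* Fiber link setup for devices whose SerDes is reached through an
 * MII-style register interface (on 5714-class parts MAC_TX_STATUS
 * supplies the real link bit).  Handles restarting autonegotiation as
 * well as forcing 1000 half/full duplex.
 */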
4833 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
4834 {
4835 int current_link_up, err = 0;
4836 u32 bmsr, bmcr;
4837 u16 current_speed;
4838 u8 current_duplex;
4839 u32 local_adv, remote_adv;
4840
4841 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4842 tw32_f(MAC_MODE, tp->mac_mode);
4843 udelay(40);
4844
4845 tw32(MAC_EVENT, 0);
4846
4847 tw32_f(MAC_STATUS,
4848 (MAC_STATUS_SYNC_CHANGED |
4849 MAC_STATUS_CFG_CHANGED |
4850 MAC_STATUS_MI_COMPLETION |
4851 MAC_STATUS_LNKSTATE_CHANGED));
4852 udelay(40);
4853
4854 if (force_reset)
4855 tg3_phy_reset(tp);
4856
4857 current_link_up = 0;
4858 current_speed = SPEED_INVALID;
4859 current_duplex = DUPLEX_INVALID;
4860 tp->link_config.rmt_adv = 0;
4861
4862 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4863 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4864 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
4865 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
4866 bmsr |= BMSR_LSTATUS;
4867 else
4868 bmsr &= ~BMSR_LSTATUS;
4869 }
4870
4871 err |= tg3_readphy(tp, MII_BMCR, &bmcr);
4872
4873 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
4874 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
4875 /* do nothing, just check for link up at the end */
4876 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4877 u32 adv, newadv;
4878
4879 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
4880 newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
4881 ADVERTISE_1000XPAUSE |
4882 ADVERTISE_1000XPSE_ASYM |
4883 ADVERTISE_SLCT);
4884
4885 newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4886 newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
4887
4888 if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
4889 tg3_writephy(tp, MII_ADVERTISE, newadv);
4890 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
4891 tg3_writephy(tp, MII_BMCR, bmcr);
4892
4893 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4894 tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
4895 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4896
4897 return err;
4898 }
4899 } else {
4900 u32 new_bmcr;
4901
4902 bmcr &= ~BMCR_SPEED1000;
4903 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
4904
4905 if (tp->link_config.duplex == DUPLEX_FULL)
4906 new_bmcr |= BMCR_FULLDPLX;
4907
4908 if (new_bmcr != bmcr) {
4909 /* BMCR_SPEED1000 is a reserved bit that needs
4910 * to be set on write.
4911 */
4912 new_bmcr |= BMCR_SPEED1000;
4913
4914 /* Force a linkdown */
4915 if (netif_carrier_ok(tp->dev)) {
4916 u32 adv;
4917
4918 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
4919 adv &= ~(ADVERTISE_1000XFULL |
4920 ADVERTISE_1000XHALF |
4921 ADVERTISE_SLCT);
4922 tg3_writephy(tp, MII_ADVERTISE, adv);
4923 tg3_writephy(tp, MII_BMCR, bmcr |
4924 BMCR_ANRESTART |
4925 BMCR_ANENABLE);
4926 udelay(10);
4927 netif_carrier_off(tp->dev);
4928 }
4929 tg3_writephy(tp, MII_BMCR, new_bmcr);
4930 bmcr = new_bmcr;
4931 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4932 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4933 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
4934 ASIC_REV_5714) {
4935 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
4936 bmsr |= BMSR_LSTATUS;
4937 else
4938 bmsr &= ~BMSR_LSTATUS;
4939 }
4940 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4941 }
4942 }
4943
4944 if (bmsr & BMSR_LSTATUS) {
4945 current_speed = SPEED_1000;
4946 current_link_up = 1;
4947 if (bmcr & BMCR_FULLDPLX)
4948 current_duplex = DUPLEX_FULL;
4949 else
4950 current_duplex = DUPLEX_HALF;
4951
4952 local_adv = 0;
4953 remote_adv = 0;
4954
4955 if (bmcr & BMCR_ANENABLE) {
4956 u32 common;
4957
4958 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
4959 err |= tg3_readphy(tp, MII_LPA, &remote_adv);
4960 common = local_adv & remote_adv;
4961 if (common & (ADVERTISE_1000XHALF |
4962 ADVERTISE_1000XFULL)) {
4963 if (common & ADVERTISE_1000XFULL)
4964 current_duplex = DUPLEX_FULL;
4965 else
4966 current_duplex = DUPLEX_HALF;
4967
4968 tp->link_config.rmt_adv =
4969 mii_adv_to_ethtool_adv_x(remote_adv);
4970 } else if (!tg3_flag(tp, 5780_CLASS)) {
4971 /* Link is up via parallel detect */
4972 } else {
4973 current_link_up = 0;
4974 }
4975 }
4976 }
4977
4978 if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
4979 tg3_setup_flow_control(tp, local_adv, remote_adv);
4980
4981 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4982 if (tp->link_config.active_duplex == DUPLEX_HALF)
4983 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4984
4985 tw32_f(MAC_MODE, tp->mac_mode);
4986 udelay(40);
4987
4988 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4989
4990 tp->link_config.active_speed = current_speed;
4991 tp->link_config.active_duplex = current_duplex;
4992
4993 if (current_link_up != netif_carrier_ok(tp->dev)) {
4994 if (current_link_up)
4995 netif_carrier_on(tp->dev);
4996 else {
4997 netif_carrier_off(tp->dev);
4998 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4999 }
5000 tg3_link_report(tp);
5001 }
5002 return err;
5003 }
5004
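/* Periodic SerDes watchdog: once the autoneg timeout has expired,
 * force the link up via parallel detection when we see signal detect
 * but no config code words, and re-enable autonegotiation as soon as
 * config code words appear again.
 */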
5005 static void tg3_serdes_parallel_detect(struct tg3 *tp)
5006 {
5007 if (tp->serdes_counter) {
5008 /* Give autoneg time to complete. */
5009 tp->serdes_counter--;
5010 return;
5011 }
5012
5013 if (!netif_carrier_ok(tp->dev) &&
5014 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
5015 u32 bmcr;
5016
5017 tg3_readphy(tp, MII_BMCR, &bmcr);
5018 if (bmcr & BMCR_ANENABLE) {
5019 u32 phy1, phy2;
5020
5021 /* Select shadow register 0x1f */
5022 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
5023 tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
5024
5025 /* Select expansion interrupt status register */
5026 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5027 MII_TG3_DSP_EXP1_INT_STAT);
5028 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5029 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5030
5031 if ((phy1 & 0x10) && !(phy2 & 0x20)) {
5032 /* We have signal detect and not receiving
5033 * config code words, link is up by parallel
5034 * detection.
5035 */
5036
5037 bmcr &= ~BMCR_ANENABLE;
5038 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
5039 tg3_writephy(tp, MII_BMCR, bmcr);
5040 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
5041 }
5042 }
5043 } else if (netif_carrier_ok(tp->dev) &&
5044 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
5045 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5046 u32 phy2;
5047
5048 /* Select expansion interrupt status register */
5049 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5050 MII_TG3_DSP_EXP1_INT_STAT);
5051 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5052 if (phy2 & 0x20) {
5053 u32 bmcr;
5054
5055 /* Config code words received, turn on autoneg. */
5056 tg3_readphy(tp, MII_BMCR, &bmcr);
5057 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
5058
5059 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5060
5061 }
5062 }
5063 }
5064
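/* Dispatch to the PHY-type specific setup routine above, then apply
 * common post-link programming: the 5784 AX clock prescaler, the TX
 * lengths/slot time (0xff slot time for half-duplex gigabit),
 * statistics coalescing, and the ASPM power-management threshold.
 */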
5065 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
5066 {
5067 u32 val;
5068 int err;
5069
5070 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
5071 err = tg3_setup_fiber_phy(tp, force_reset);
5072 else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
5073 err = tg3_setup_fiber_mii_phy(tp, force_reset);
5074 else
5075 err = tg3_setup_copper_phy(tp, force_reset);
5076
5077 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
5078 u32 scale;
5079
5080 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
5081 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
5082 scale = 65;
5083 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
5084 scale = 6;
5085 else
5086 scale = 12;
5087
5088 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
5089 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
5090 tw32(GRC_MISC_CFG, val);
5091 }
5092
5093 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
5094 (6 << TX_LENGTHS_IPG_SHIFT);
5095 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
5096 val |= tr32(MAC_TX_LENGTHS) &
5097 (TX_LENGTHS_JMB_FRM_LEN_MSK |
5098 TX_LENGTHS_CNT_DWN_VAL_MSK);
5099
5100 if (tp->link_config.active_speed == SPEED_1000 &&
5101 tp->link_config.active_duplex == DUPLEX_HALF)
5102 tw32(MAC_TX_LENGTHS, val |
5103 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
5104 else
5105 tw32(MAC_TX_LENGTHS, val |
5106 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
5107
5108 if (!tg3_flag(tp, 5705_PLUS)) {
5109 if (netif_carrier_ok(tp->dev)) {
5110 tw32(HOSTCC_STAT_COAL_TICKS,
5111 tp->coal.stats_block_coalesce_usecs);
5112 } else {
5113 tw32(HOSTCC_STAT_COAL_TICKS, 0);
5114 }
5115 }
5116
5117 if (tg3_flag(tp, ASPM_WORKAROUND)) {
5118 val = tr32(PCIE_PWR_MGMT_THRESH);
5119 if (!netif_carrier_ok(tp->dev))
5120 val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
5121 tp->pwrmgmt_thresh;
5122 else
5123 val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
5124 tw32(PCIE_PWR_MGMT_THRESH, val);
5125 }
5126
5127 return err;
5128 }
5129
5130 static inline int tg3_irq_sync(struct tg3 *tp)
5131 {
5132 return tp->irq_sync;
5133 }
5134
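/* Copy 'len' bytes of registers starting at register offset 'off'
 * into the dump buffer.  'dst' is advanced by 'off' first, so each
 * register block lands at its natural offset within the buffer.
 */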
5135 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
5136 {
5137 int i;
5138
5139 dst = (u32 *)((u8 *)dst + off);
5140 for (i = 0; i < len; i += sizeof(u32))
5141 *dst++ = tr32(off + i);
5142 }
5143
5144 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
5145 {
5146 tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
5147 tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
5148 tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
5149 tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
5150 tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
5151 tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
5152 tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
5153 tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
5154 tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
5155 tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
5156 tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
5157 tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
5158 tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
5159 tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
5160 tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
5161 tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
5162 tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
5163 tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
5164 tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
5165
5166 if (tg3_flag(tp, SUPPORT_MSIX))
5167 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
5168
5169 tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
5170 tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
5171 tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
5172 tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
5173 tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
5174 tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
5175 tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
5176 tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
5177
5178 if (!tg3_flag(tp, 5705_PLUS)) {
5179 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
5180 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
5181 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
5182 }
5183
5184 tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
5185 tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
5186 tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
5187 tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
5188 tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
5189
5190 if (tg3_flag(tp, NVRAM))
5191 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
5192 }
5193
5194 static void tg3_dump_state(struct tg3 *tp)
5195 {
5196 int i;
5197 u32 *regs;
5198
5199 regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
5200 if (!regs) {
5201 netdev_err(tp->dev, "Failed allocating register dump buffer\n");
5202 return;
5203 }
5204
5205 if (tg3_flag(tp, PCI_EXPRESS)) {
5206 /* Read up to but not including private PCI registers */
5207 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
5208 regs[i / sizeof(u32)] = tr32(i);
5209 } else
5210 tg3_dump_legacy_regs(tp, regs);
5211
5212 for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
5213 if (!regs[i + 0] && !regs[i + 1] &&
5214 !regs[i + 2] && !regs[i + 3])
5215 continue;
5216
5217 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
5218 i * 4,
5219 regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
5220 }
5221
5222 kfree(regs);
5223
5224 for (i = 0; i < tp->irq_cnt; i++) {
5225 struct tg3_napi *tnapi = &tp->napi[i];
5226
5227 /* SW status block */
5228 netdev_err(tp->dev,
5229 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
5230 i,
5231 tnapi->hw_status->status,
5232 tnapi->hw_status->status_tag,
5233 tnapi->hw_status->rx_jumbo_consumer,
5234 tnapi->hw_status->rx_consumer,
5235 tnapi->hw_status->rx_mini_consumer,
5236 tnapi->hw_status->idx[0].rx_producer,
5237 tnapi->hw_status->idx[0].tx_consumer);
5238
5239 netdev_err(tp->dev,
5240 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
5241 i,
5242 tnapi->last_tag, tnapi->last_irq_tag,
5243 tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
5244 tnapi->rx_rcb_ptr,
5245 tnapi->prodring.rx_std_prod_idx,
5246 tnapi->prodring.rx_std_cons_idx,
5247 tnapi->prodring.rx_jmb_prod_idx,
5248 tnapi->prodring.rx_jmb_cons_idx);
5249 }
5250 }
5251
5252 /* This is called whenever we suspect that the system chipset is re-
5253 * ordering the sequence of MMIO to the tx send mailbox. The symptom
5254 * is bogus tx completions. We try to recover by setting the
5255 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
5256 * in the workqueue.
5257 */
5258 static void tg3_tx_recover(struct tg3 *tp)
5259 {
5260 BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
5261 tp->write32_tx_mbox == tg3_write_indirect_mbox);
5262
5263 netdev_warn(tp->dev,
5264 "The system may be re-ordering memory-mapped I/O "
5265 "cycles to the network device, attempting to recover. "
5266 "Please report the problem to the driver maintainer "
5267 "and include system chipset information.\n");
5268
5269 spin_lock(&tp->lock);
5270 tg3_flag_set(tp, TX_RECOVERY_PENDING);
5271 spin_unlock(&tp->lock);
5272 }
5273
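/* Number of free TX descriptors: tx_pending minus the descriptors
 * currently in flight, using wrap-safe ring arithmetic.
 *
 * Worked example (ring size chosen purely for illustration): with a
 * 512-entry ring, tx_pending == 511, tx_prod == 10 and tx_cons == 5,
 * five descriptors are outstanding, so this returns
 * 511 - ((10 - 5) & 511) = 506.
 */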
5274 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
5275 {
5276 /* Tell compiler to fetch tx indices from memory. */
5277 barrier();
5278 return tnapi->tx_pending -
5279 ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
5280 }
5281
5282 /* Tigon3 never reports partial packet sends. So we do not
5283 * need special logic to handle SKBs that have not had all
5284 * of their frags sent yet, like SunGEM does.
5285 */
5286 static void tg3_tx(struct tg3_napi *tnapi)
5287 {
5288 struct tg3 *tp = tnapi->tp;
5289 u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
5290 u32 sw_idx = tnapi->tx_cons;
5291 struct netdev_queue *txq;
5292 int index = tnapi - tp->napi;
5293 unsigned int pkts_compl = 0, bytes_compl = 0;
5294
5295 if (tg3_flag(tp, ENABLE_TSS))
5296 index--;
5297
5298 txq = netdev_get_tx_queue(tp->dev, index);
5299
5300 while (sw_idx != hw_idx) {
5301 struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
5302 struct sk_buff *skb = ri->skb;
5303 int i, tx_bug = 0;
5304
5305 if (unlikely(skb == NULL)) {
5306 tg3_tx_recover(tp);
5307 return;
5308 }
5309
5310 pci_unmap_single(tp->pdev,
5311 dma_unmap_addr(ri, mapping),
5312 skb_headlen(skb),
5313 PCI_DMA_TODEVICE);
5314
5315 ri->skb = NULL;
5316
5317 while (ri->fragmented) {
5318 ri->fragmented = false;
5319 sw_idx = NEXT_TX(sw_idx);
5320 ri = &tnapi->tx_buffers[sw_idx];
5321 }
5322
5323 sw_idx = NEXT_TX(sw_idx);
5324
5325 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
5326 ri = &tnapi->tx_buffers[sw_idx];
5327 if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
5328 tx_bug = 1;
5329
5330 pci_unmap_page(tp->pdev,
5331 dma_unmap_addr(ri, mapping),
5332 skb_frag_size(&skb_shinfo(skb)->frags[i]),
5333 PCI_DMA_TODEVICE);
5334
5335 while (ri->fragmented) {
5336 ri->fragmented = false;
5337 sw_idx = NEXT_TX(sw_idx);
5338 ri = &tnapi->tx_buffers[sw_idx];
5339 }
5340
5341 sw_idx = NEXT_TX(sw_idx);
5342 }
5343
5344 pkts_compl++;
5345 bytes_compl += skb->len;
5346
5347 dev_kfree_skb(skb);
5348
5349 if (unlikely(tx_bug)) {
5350 tg3_tx_recover(tp);
5351 return;
5352 }
5353 }
5354
5355 netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
5356
5357 tnapi->tx_cons = sw_idx;
5358
5359 /* Need to make the tx_cons update visible to tg3_start_xmit()
5360 * before checking for netif_queue_stopped(). Without the
5361 * memory barrier, there is a small possibility that tg3_start_xmit()
5362 * will miss it and cause the queue to be stopped forever.
5363 */
5364 smp_mb();
5365
5366 if (unlikely(netif_tx_queue_stopped(txq) &&
5367 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
5368 __netif_tx_lock(txq, smp_processor_id());
5369 if (netif_tx_queue_stopped(txq) &&
5370 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
5371 netif_tx_wake_queue(txq);
5372 __netif_tx_unlock(txq);
5373 }
5374 }
5375
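/* Undo tg3_alloc_rx_data(): unmap the DMA buffer and free the backing
 * data of one ring_info slot.
 */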
5376 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
5377 {
5378 if (!ri->data)
5379 return;
5380
5381 pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
5382 map_sz, PCI_DMA_FROMDEVICE);
5383 kfree(ri->data);
5384 ri->data = NULL;
5385 }
5386
5387 /* Returns size of skb allocated or < 0 on error.
5388 *
5389 * We only need to fill in the address because the other members
5390 * of the RX descriptor are invariant, see tg3_init_rings.
5391 *
5392  * Note the purposeful asymmetry of cpu vs. chip accesses.  For
5393 * posting buffers we only dirty the first cache line of the RX
5394 * descriptor (containing the address). Whereas for the RX status
5395 * buffers the cpu only reads the last cacheline of the RX descriptor
5396 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
5397 */
5398 static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
5399 u32 opaque_key, u32 dest_idx_unmasked)
5400 {
5401 struct tg3_rx_buffer_desc *desc;
5402 struct ring_info *map;
5403 u8 *data;
5404 dma_addr_t mapping;
5405 int skb_size, data_size, dest_idx;
5406
5407 switch (opaque_key) {
5408 case RXD_OPAQUE_RING_STD:
5409 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
5410 desc = &tpr->rx_std[dest_idx];
5411 map = &tpr->rx_std_buffers[dest_idx];
5412 data_size = tp->rx_pkt_map_sz;
5413 break;
5414
5415 case RXD_OPAQUE_RING_JUMBO:
5416 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
5417 desc = &tpr->rx_jmb[dest_idx].std;
5418 map = &tpr->rx_jmb_buffers[dest_idx];
5419 data_size = TG3_RX_JMB_MAP_SZ;
5420 break;
5421
5422 default:
5423 return -EINVAL;
5424 }
5425
5426 /* Do not overwrite any of the map or rp information
5427 * until we are sure we can commit to a new buffer.
5428 *
5429 * Callers depend upon this behavior and assume that
5430 * we leave everything unchanged if we fail.
5431 */
5432 skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
5433 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
5434 data = kmalloc(skb_size, GFP_ATOMIC);
5435 if (!data)
5436 return -ENOMEM;
5437
5438 mapping = pci_map_single(tp->pdev,
5439 data + TG3_RX_OFFSET(tp),
5440 data_size,
5441 PCI_DMA_FROMDEVICE);
5442 if (pci_dma_mapping_error(tp->pdev, mapping)) {
5443 kfree(data);
5444 return -EIO;
5445 }
5446
5447 map->data = data;
5448 dma_unmap_addr_set(map, mapping, mapping);
5449
5450 desc->addr_hi = ((u64)mapping >> 32);
5451 desc->addr_lo = ((u64)mapping & 0xffffffff);
5452
5453 return data_size;
5454 }
5455
5456 /* We only need to copy over the address because the other
5457 * members of the RX descriptor are invariant. See notes above
5458 * tg3_alloc_rx_data for full details.
5459 */
5460 static void tg3_recycle_rx(struct tg3_napi *tnapi,
5461 struct tg3_rx_prodring_set *dpr,
5462 u32 opaque_key, int src_idx,
5463 u32 dest_idx_unmasked)
5464 {
5465 struct tg3 *tp = tnapi->tp;
5466 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
5467 struct ring_info *src_map, *dest_map;
5468 struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
5469 int dest_idx;
5470
5471 switch (opaque_key) {
5472 case RXD_OPAQUE_RING_STD:
5473 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
5474 dest_desc = &dpr->rx_std[dest_idx];
5475 dest_map = &dpr->rx_std_buffers[dest_idx];
5476 src_desc = &spr->rx_std[src_idx];
5477 src_map = &spr->rx_std_buffers[src_idx];
5478 break;
5479
5480 case RXD_OPAQUE_RING_JUMBO:
5481 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
5482 dest_desc = &dpr->rx_jmb[dest_idx].std;
5483 dest_map = &dpr->rx_jmb_buffers[dest_idx];
5484 src_desc = &spr->rx_jmb[src_idx].std;
5485 src_map = &spr->rx_jmb_buffers[src_idx];
5486 break;
5487
5488 default:
5489 return;
5490 }
5491
5492 dest_map->data = src_map->data;
5493 dma_unmap_addr_set(dest_map, mapping,
5494 dma_unmap_addr(src_map, mapping));
5495 dest_desc->addr_hi = src_desc->addr_hi;
5496 dest_desc->addr_lo = src_desc->addr_lo;
5497
5498 /* Ensure that the update to the skb happens after the physical
5499 * addresses have been transferred to the new BD location.
5500 */
5501 smp_wmb();
5502
5503 src_map->data = NULL;
5504 }
5505
5506 /* The RX ring scheme is composed of multiple rings which post fresh
5507 * buffers to the chip, and one special ring the chip uses to report
5508 * status back to the host.
5509 *
5510 * The special ring reports the status of received packets to the
5511 * host. The chip does not write into the original descriptor the
5512 * RX buffer was obtained from. The chip simply takes the original
5513 * descriptor as provided by the host, updates the status and length
5514 * field, then writes this into the next status ring entry.
5515 *
5516 * Each ring the host uses to post buffers to the chip is described
5517  * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet arrives,
5518 * it is first placed into the on-chip ram. When the packet's length
5519 * is known, it walks down the TG3_BDINFO entries to select the ring.
5520 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
5521 * which is within the range of the new packet's length is chosen.
5522 *
5523 * The "separate ring for rx status" scheme may sound queer, but it makes
5524 * sense from a cache coherency perspective. If only the host writes
5525 * to the buffer post rings, and only the chip writes to the rx status
5526 * rings, then cache lines never move beyond shared-modified state.
5527 * If both the host and chip were to write into the same ring, cache line
5528 * eviction could occur since both entities want it in an exclusive state.
5529 */
5530 static int tg3_rx(struct tg3_napi *tnapi, int budget)
5531 {
5532 struct tg3 *tp = tnapi->tp;
5533 u32 work_mask, rx_std_posted = 0;
5534 u32 std_prod_idx, jmb_prod_idx;
5535 u32 sw_idx = tnapi->rx_rcb_ptr;
5536 u16 hw_idx;
5537 int received;
5538 struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
5539
5540 hw_idx = *(tnapi->rx_rcb_prod_idx);
5541 /*
5542 * We need to order the read of hw_idx and the read of
5543 * the opaque cookie.
5544 */
5545 rmb();
5546 work_mask = 0;
5547 received = 0;
5548 std_prod_idx = tpr->rx_std_prod_idx;
5549 jmb_prod_idx = tpr->rx_jmb_prod_idx;
5550 while (sw_idx != hw_idx && budget > 0) {
5551 struct ring_info *ri;
5552 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
5553 unsigned int len;
5554 struct sk_buff *skb;
5555 dma_addr_t dma_addr;
5556 u32 opaque_key, desc_idx, *post_ptr;
5557 u8 *data;
5558
5559 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
5560 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
5561 if (opaque_key == RXD_OPAQUE_RING_STD) {
5562 ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
5563 dma_addr = dma_unmap_addr(ri, mapping);
5564 data = ri->data;
5565 post_ptr = &std_prod_idx;
5566 rx_std_posted++;
5567 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
5568 ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
5569 dma_addr = dma_unmap_addr(ri, mapping);
5570 data = ri->data;
5571 post_ptr = &jmb_prod_idx;
5572 } else
5573 goto next_pkt_nopost;
5574
5575 work_mask |= opaque_key;
5576
5577 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
5578 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
5579 drop_it:
5580 tg3_recycle_rx(tnapi, tpr, opaque_key,
5581 desc_idx, *post_ptr);
5582 drop_it_no_recycle:
5583 /* Other statistics kept track of by card. */
5584 tp->rx_dropped++;
5585 goto next_pkt;
5586 }
5587
5588 prefetch(data + TG3_RX_OFFSET(tp));
5589 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
5590 ETH_FCS_LEN;
5591
5592 if (len > TG3_RX_COPY_THRESH(tp)) {
5593 int skb_size;
5594
5595 skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
5596 *post_ptr);
5597 if (skb_size < 0)
5598 goto drop_it;
5599
5600 pci_unmap_single(tp->pdev, dma_addr, skb_size,
5601 PCI_DMA_FROMDEVICE);
5602
5603 skb = build_skb(data);
5604 if (!skb) {
5605 kfree(data);
5606 goto drop_it_no_recycle;
5607 }
5608 skb_reserve(skb, TG3_RX_OFFSET(tp));
5609 /* Ensure that the update to the data happens
5610 * after the usage of the old DMA mapping.
5611 */
5612 smp_wmb();
5613
5614 ri->data = NULL;
5615
5616 } else {
5617 tg3_recycle_rx(tnapi, tpr, opaque_key,
5618 desc_idx, *post_ptr);
5619
5620 skb = netdev_alloc_skb(tp->dev,
5621 len + TG3_RAW_IP_ALIGN);
5622 if (skb == NULL)
5623 goto drop_it_no_recycle;
5624
5625 skb_reserve(skb, TG3_RAW_IP_ALIGN);
5626 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
5627 memcpy(skb->data,
5628 data + TG3_RX_OFFSET(tp),
5629 len);
5630 pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
5631 }
5632
5633 skb_put(skb, len);
5634 if ((tp->dev->features & NETIF_F_RXCSUM) &&
5635 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
5636 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
5637 >> RXD_TCPCSUM_SHIFT) == 0xffff))
5638 skb->ip_summed = CHECKSUM_UNNECESSARY;
5639 else
5640 skb_checksum_none_assert(skb);
5641
5642 skb->protocol = eth_type_trans(skb, tp->dev);
5643
5644 if (len > (tp->dev->mtu + ETH_HLEN) &&
5645 skb->protocol != htons(ETH_P_8021Q)) {
5646 dev_kfree_skb(skb);
5647 goto drop_it_no_recycle;
5648 }
5649
5650 if (desc->type_flags & RXD_FLAG_VLAN &&
5651 !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
5652 __vlan_hwaccel_put_tag(skb,
5653 desc->err_vlan & RXD_VLAN_MASK);
5654
5655 napi_gro_receive(&tnapi->napi, skb);
5656
5657 received++;
5658 budget--;
5659
5660 next_pkt:
5661 (*post_ptr)++;
5662
5663 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
5664 tpr->rx_std_prod_idx = std_prod_idx &
5665 tp->rx_std_ring_mask;
5666 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5667 tpr->rx_std_prod_idx);
5668 work_mask &= ~RXD_OPAQUE_RING_STD;
5669 rx_std_posted = 0;
5670 }
5671 next_pkt_nopost:
5672 sw_idx++;
5673 sw_idx &= tp->rx_ret_ring_mask;
5674
5675 /* Refresh hw_idx to see if there is new work */
5676 if (sw_idx == hw_idx) {
5677 hw_idx = *(tnapi->rx_rcb_prod_idx);
5678 rmb();
5679 }
5680 }
5681
5682 /* ACK the status ring. */
5683 tnapi->rx_rcb_ptr = sw_idx;
5684 tw32_rx_mbox(tnapi->consmbox, sw_idx);
5685
5686 /* Refill RX ring(s). */
5687 if (!tg3_flag(tp, ENABLE_RSS)) {
5688 if (work_mask & RXD_OPAQUE_RING_STD) {
5689 tpr->rx_std_prod_idx = std_prod_idx &
5690 tp->rx_std_ring_mask;
5691 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5692 tpr->rx_std_prod_idx);
5693 }
5694 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
5695 tpr->rx_jmb_prod_idx = jmb_prod_idx &
5696 tp->rx_jmb_ring_mask;
5697 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
5698 tpr->rx_jmb_prod_idx);
5699 }
5700 mmiowb();
5701 } else if (work_mask) {
5702 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
5703 * updated before the producer indices can be updated.
5704 */
5705 smp_wmb();
5706
5707 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
5708 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
5709
5710 if (tnapi != &tp->napi[1])
5711 napi_schedule(&tp->napi[1].napi);
5712 }
5713
5714 return received;
5715 }
5716
5717 static void tg3_poll_link(struct tg3 *tp)
5718 {
5719 /* handle link change and other phy events */
5720 if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
5721 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
5722
5723 if (sblk->status & SD_STATUS_LINK_CHG) {
5724 sblk->status = SD_STATUS_UPDATED |
5725 (sblk->status & ~SD_STATUS_LINK_CHG);
5726 spin_lock(&tp->lock);
5727 if (tg3_flag(tp, USE_PHYLIB)) {
5728 tw32_f(MAC_STATUS,
5729 (MAC_STATUS_SYNC_CHANGED |
5730 MAC_STATUS_CFG_CHANGED |
5731 MAC_STATUS_MI_COMPLETION |
5732 MAC_STATUS_LNKSTATE_CHANGED));
5733 udelay(40);
5734 } else
5735 tg3_setup_phy(tp, 0);
5736 spin_unlock(&tp->lock);
5737 }
5738 }
5739 }
5740
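/* With RSS, each vector refills buffers into its own shadow producer
 * ring set.  This routine migrates refilled standard and jumbo buffers
 * from a source ring set (spr) into the hardware-visible ring set of
 * vector 0 (dpr), copying the ring_info entries and descriptor
 * addresses in bulk.
 */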
5741 static int tg3_rx_prodring_xfer(struct tg3 *tp,
5742 struct tg3_rx_prodring_set *dpr,
5743 struct tg3_rx_prodring_set *spr)
5744 {
5745 u32 si, di, cpycnt, src_prod_idx;
5746 int i, err = 0;
5747
5748 while (1) {
5749 src_prod_idx = spr->rx_std_prod_idx;
5750
5751 /* Make sure updates to the rx_std_buffers[] entries and the
5752 * standard producer index are seen in the correct order.
5753 */
5754 smp_rmb();
5755
5756 if (spr->rx_std_cons_idx == src_prod_idx)
5757 break;
5758
5759 if (spr->rx_std_cons_idx < src_prod_idx)
5760 cpycnt = src_prod_idx - spr->rx_std_cons_idx;
5761 else
5762 cpycnt = tp->rx_std_ring_mask + 1 -
5763 spr->rx_std_cons_idx;
5764
5765 cpycnt = min(cpycnt,
5766 tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
5767
5768 si = spr->rx_std_cons_idx;
5769 di = dpr->rx_std_prod_idx;
5770
5771 for (i = di; i < di + cpycnt; i++) {
5772 if (dpr->rx_std_buffers[i].data) {
5773 cpycnt = i - di;
5774 err = -ENOSPC;
5775 break;
5776 }
5777 }
5778
5779 if (!cpycnt)
5780 break;
5781
5782 /* Ensure that updates to the rx_std_buffers ring and the
5783 * shadowed hardware producer ring from tg3_recycle_skb() are
5784 * ordered correctly WRT the skb check above.
5785 */
5786 smp_rmb();
5787
5788 memcpy(&dpr->rx_std_buffers[di],
5789 &spr->rx_std_buffers[si],
5790 cpycnt * sizeof(struct ring_info));
5791
5792 for (i = 0; i < cpycnt; i++, di++, si++) {
5793 struct tg3_rx_buffer_desc *sbd, *dbd;
5794 sbd = &spr->rx_std[si];
5795 dbd = &dpr->rx_std[di];
5796 dbd->addr_hi = sbd->addr_hi;
5797 dbd->addr_lo = sbd->addr_lo;
5798 }
5799
5800 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
5801 tp->rx_std_ring_mask;
5802 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
5803 tp->rx_std_ring_mask;
5804 }
5805
5806 while (1) {
5807 src_prod_idx = spr->rx_jmb_prod_idx;
5808
5809 /* Make sure updates to the rx_jmb_buffers[] entries and
5810 * the jumbo producer index are seen in the correct order.
5811 */
5812 smp_rmb();
5813
5814 if (spr->rx_jmb_cons_idx == src_prod_idx)
5815 break;
5816
5817 if (spr->rx_jmb_cons_idx < src_prod_idx)
5818 cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
5819 else
5820 cpycnt = tp->rx_jmb_ring_mask + 1 -
5821 spr->rx_jmb_cons_idx;
5822
5823 cpycnt = min(cpycnt,
5824 tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
5825
5826 si = spr->rx_jmb_cons_idx;
5827 di = dpr->rx_jmb_prod_idx;
5828
5829 for (i = di; i < di + cpycnt; i++) {
5830 if (dpr->rx_jmb_buffers[i].data) {
5831 cpycnt = i - di;
5832 err = -ENOSPC;
5833 break;
5834 }
5835 }
5836
5837 if (!cpycnt)
5838 break;
5839
5840 /* Ensure that updates to the rx_jmb_buffers ring and the
5841 * shadowed hardware producer ring from tg3_recycle_skb() are
5842 * ordered correctly WRT the skb check above.
5843 */
5844 smp_rmb();
5845
5846 memcpy(&dpr->rx_jmb_buffers[di],
5847 &spr->rx_jmb_buffers[si],
5848 cpycnt * sizeof(struct ring_info));
5849
5850 for (i = 0; i < cpycnt; i++, di++, si++) {
5851 struct tg3_rx_buffer_desc *sbd, *dbd;
5852 sbd = &spr->rx_jmb[si].std;
5853 dbd = &dpr->rx_jmb[di].std;
5854 dbd->addr_hi = sbd->addr_hi;
5855 dbd->addr_lo = sbd->addr_lo;
5856 }
5857
5858 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
5859 tp->rx_jmb_ring_mask;
5860 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
5861 tp->rx_jmb_ring_mask;
5862 }
5863
5864 return err;
5865 }
5866
5867 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
5868 {
5869 struct tg3 *tp = tnapi->tp;
5870
5871 /* run TX completion thread */
5872 if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
5873 tg3_tx(tnapi);
5874 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
5875 return work_done;
5876 }
5877
5878 /* run RX thread, within the bounds set by NAPI.
5879 * All RX "locking" is done by ensuring outside
5880 * code synchronizes with tg3->napi.poll()
5881 */
5882 if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
5883 work_done += tg3_rx(tnapi, budget - work_done);
5884
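/* When RSS is enabled, vector 1 additionally gathers the buffers
 * that all vectors have refilled and posts them to the hardware
 * rings owned by vector 0.
 */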
5885 if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
5886 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
5887 int i, err = 0;
5888 u32 std_prod_idx = dpr->rx_std_prod_idx;
5889 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
5890
5891 for (i = 1; i < tp->irq_cnt; i++)
5892 err |= tg3_rx_prodring_xfer(tp, dpr,
5893 &tp->napi[i].prodring);
5894
5895 wmb();
5896
5897 if (std_prod_idx != dpr->rx_std_prod_idx)
5898 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5899 dpr->rx_std_prod_idx);
5900
5901 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
5902 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
5903 dpr->rx_jmb_prod_idx);
5904
5905 mmiowb();
5906
5907 if (err)
5908 tw32_f(HOSTCC_MODE, tp->coal_now);
5909 }
5910
5911 return work_done;
5912 }
5913
5914 static inline void tg3_reset_task_schedule(struct tg3 *tp)
5915 {
5916 if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
5917 schedule_work(&tp->reset_task);
5918 }
5919
5920 static inline void tg3_reset_task_cancel(struct tg3 *tp)
5921 {
5922 cancel_work_sync(&tp->reset_task);
5923 tg3_flag_clear(tp, RESET_TASK_PENDING);
5924 }
5925
5926 static int tg3_poll_msix(struct napi_struct *napi, int budget)
5927 {
5928 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
5929 struct tg3 *tp = tnapi->tp;
5930 int work_done = 0;
5931 struct tg3_hw_status *sblk = tnapi->hw_status;
5932
5933 while (1) {
5934 work_done = tg3_poll_work(tnapi, work_done, budget);
5935
5936 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
5937 goto tx_recovery;
5938
5939 if (unlikely(work_done >= budget))
5940 break;
5941
5942 /* tnapi->last_tag is used in the mailbox write below
5943 * to tell the hw how much work has been processed,
5944 * so we must read it before checking for more work.
5945 */
5946 tnapi->last_tag = sblk->status_tag;
5947 tnapi->last_irq_tag = tnapi->last_tag;
5948 rmb();
5949
5950 /* check for RX/TX work to do */
5951 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
5952 *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
5953 napi_complete(napi);
5954 /* Reenable interrupts. */
5955 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
5956 mmiowb();
5957 break;
5958 }
5959 }
5960
5961 return work_done;
5962
5963 tx_recovery:
5964 /* work_done is guaranteed to be less than budget. */
5965 napi_complete(napi);
5966 tg3_reset_task_schedule(tp);
5967 return work_done;
5968 }
5969
5970 static void tg3_process_error(struct tg3 *tp)
5971 {
5972 u32 val;
5973 bool real_error = false;
5974
5975 if (tg3_flag(tp, ERROR_PROCESSED))
5976 return;
5977
5978 /* Check Flow Attention register */
5979 val = tr32(HOSTCC_FLOW_ATTN);
5980 if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
5981 netdev_err(tp->dev, "FLOW Attention error. Resetting chip.\n");
5982 real_error = true;
5983 }
5984
5985 if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
5986 netdev_err(tp->dev, "MSI Status error. Resetting chip.\n");
5987 real_error = true;
5988 }
5989
5990 if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
5991 netdev_err(tp->dev, "DMA Status error. Resetting chip.\n");
5992 real_error = true;
5993 }
5994
5995 if (!real_error)
5996 return;
5997
5998 tg3_dump_state(tp);
5999
6000 tg3_flag_set(tp, ERROR_PROCESSED);
6001 tg3_reset_task_schedule(tp);
6002 }
6003
6004 static int tg3_poll(struct napi_struct *napi, int budget)
6005 {
6006 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
6007 struct tg3 *tp = tnapi->tp;
6008 int work_done = 0;
6009 struct tg3_hw_status *sblk = tnapi->hw_status;
6010
6011 while (1) {
6012 if (sblk->status & SD_STATUS_ERROR)
6013 tg3_process_error(tp);
6014
6015 tg3_poll_link(tp);
6016
6017 work_done = tg3_poll_work(tnapi, work_done, budget);
6018
6019 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6020 goto tx_recovery;
6021
6022 if (unlikely(work_done >= budget))
6023 break;
6024
6025 if (tg3_flag(tp, TAGGED_STATUS)) {
6026 /* tnapi->last_tag is used in tg3_int_reenable() below
6027 * to tell the hw how much work has been processed,
6028 * so we must read it before checking for more work.
6029 */
6030 tnapi->last_tag = sblk->status_tag;
6031 tnapi->last_irq_tag = tnapi->last_tag;
6032 rmb();
6033 } else
6034 sblk->status &= ~SD_STATUS_UPDATED;
6035
6036 if (likely(!tg3_has_work(tnapi))) {
6037 napi_complete(napi);
6038 tg3_int_reenable(tnapi);
6039 break;
6040 }
6041 }
6042
6043 return work_done;
6044
6045 tx_recovery:
6046 /* work_done is guaranteed to be less than budget. */
6047 napi_complete(napi);
6048 tg3_reset_task_schedule(tp);
6049 return work_done;
6050 }
6051
6052 static void tg3_napi_disable(struct tg3 *tp)
6053 {
6054 int i;
6055
6056 for (i = tp->irq_cnt - 1; i >= 0; i--)
6057 napi_disable(&tp->napi[i].napi);
6058 }
6059
6060 static void tg3_napi_enable(struct tg3 *tp)
6061 {
6062 int i;
6063
6064 for (i = 0; i < tp->irq_cnt; i++)
6065 napi_enable(&tp->napi[i].napi);
6066 }
6067
6068 static void tg3_napi_init(struct tg3 *tp)
6069 {
6070 int i;
6071
6072 netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
6073 for (i = 1; i < tp->irq_cnt; i++)
6074 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
6075 }
6076
6077 static void tg3_napi_fini(struct tg3 *tp)
6078 {
6079 int i;
6080
6081 for (i = 0; i < tp->irq_cnt; i++)
6082 netif_napi_del(&tp->napi[i].napi);
6083 }
6084
6085 static inline void tg3_netif_stop(struct tg3 *tp)
6086 {
6087 tp->dev->trans_start = jiffies; /* prevent tx timeout */
6088 tg3_napi_disable(tp);
6089 netif_tx_disable(tp->dev);
6090 }
6091
6092 static inline void tg3_netif_start(struct tg3 *tp)
6093 {
6094 /* NOTE: unconditional netif_tx_wake_all_queues is only
6095 * appropriate so long as all callers are assured to
6096 * have free tx slots (such as after tg3_init_hw)
6097 */
6098 netif_tx_wake_all_queues(tp->dev);
6099
6100 tg3_napi_enable(tp);
6101 tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
6102 tg3_enable_ints(tp);
6103 }
6104
6105 static void tg3_irq_quiesce(struct tg3 *tp)
6106 {
6107 int i;
6108
6109 BUG_ON(tp->irq_sync);
6110
6111 tp->irq_sync = 1;
6112 smp_mb();
6113
6114 for (i = 0; i < tp->irq_cnt; i++)
6115 synchronize_irq(tp->napi[i].irq_vec);
6116 }
6117
6118 /* Fully shut down all tg3 driver activity elsewhere in the system.
6119 * If irq_sync is non-zero, the IRQ handlers must be synchronized
6120 * with as well. Most of the time, this is not necessary except when
6121 * shutting down the device.
6122 */
6123 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
6124 {
6125 spin_lock_bh(&tp->lock);
6126 if (irq_sync)
6127 tg3_irq_quiesce(tp);
6128 }
6129
6130 static inline void tg3_full_unlock(struct tg3 *tp)
6131 {
6132 spin_unlock_bh(&tp->lock);
6133 }
6134
6135 /* One-shot MSI handler - Chip automatically disables interrupt
6136 * after sending MSI so driver doesn't have to do it.
6137 */
6138 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
6139 {
6140 struct tg3_napi *tnapi = dev_id;
6141 struct tg3 *tp = tnapi->tp;
6142
6143 prefetch(tnapi->hw_status);
6144 if (tnapi->rx_rcb)
6145 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6146
6147 if (likely(!tg3_irq_sync(tp)))
6148 napi_schedule(&tnapi->napi);
6149
6150 return IRQ_HANDLED;
6151 }
6152
6153 /* MSI ISR - No need to check for interrupt sharing and no need to
6154 * flush status block and interrupt mailbox. PCI ordering rules
6155 * guarantee that MSI will arrive after the status block.
6156 */
6157 static irqreturn_t tg3_msi(int irq, void *dev_id)
6158 {
6159 struct tg3_napi *tnapi = dev_id;
6160 struct tg3 *tp = tnapi->tp;
6161
6162 prefetch(tnapi->hw_status);
6163 if (tnapi->rx_rcb)
6164 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6165 /*
6166 * Writing any value to intr-mbox-0 clears PCI INTA# and
6167 * chip-internal interrupt pending events.
6168 * Writing non-zero to intr-mbox-0 additionally tells the
6169 * NIC to stop sending us irqs, engaging "in-intr-handler"
6170 * event coalescing.
6171 */
6172 tw32_mailbox(tnapi->int_mbox, 0x00000001);
6173 if (likely(!tg3_irq_sync(tp)))
6174 napi_schedule(&tnapi->napi);
6175
6176 return IRQ_RETVAL(1);
6177 }
6178
6179 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
6180 {
6181 struct tg3_napi *tnapi = dev_id;
6182 struct tg3 *tp = tnapi->tp;
6183 struct tg3_hw_status *sblk = tnapi->hw_status;
6184 unsigned int handled = 1;
6185
6186 /* In INTx mode, it is possible for the interrupt to arrive at
6187 * the CPU before the status block write posted prior to the
6188 * interrupt has landed. Reading the PCI State register will
6189 * confirm whether the interrupt is ours and will flush the status block.
6190 */
6191 if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
6192 if (tg3_flag(tp, CHIP_RESETTING) ||
6193 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6194 handled = 0;
6195 goto out;
6196 }
6197 }
6198
6199 /*
6200 * Writing any value to intr-mbox-0 clears PCI INTA# and
6201 * chip-internal interrupt pending events.
6202 * Writing non-zero to intr-mbox-0 additionally tells the
6203 * NIC to stop sending us irqs, engaging "in-intr-handler"
6204 * event coalescing.
6205 *
6206 * Flush the mailbox to de-assert the IRQ immediately to prevent
6207 * spurious interrupts. The flush impacts performance but
6208 * excessive spurious interrupts can be worse in some cases.
6209 */
6210 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
6211 if (tg3_irq_sync(tp))
6212 goto out;
6213 sblk->status &= ~SD_STATUS_UPDATED;
6214 if (likely(tg3_has_work(tnapi))) {
6215 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6216 napi_schedule(&tnapi->napi);
6217 } else {
6218 /* No work, shared interrupt perhaps? re-enable
6219 * interrupts, and flush that PCI write
6220 */
6221 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
6222 0x00000000);
6223 }
6224 out:
6225 return IRQ_RETVAL(handled);
6226 }
6227
6228 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
6229 {
6230 struct tg3_napi *tnapi = dev_id;
6231 struct tg3 *tp = tnapi->tp;
6232 struct tg3_hw_status *sblk = tnapi->hw_status;
6233 unsigned int handled = 1;
6234
6235 /* In INTx mode, it is possible for the interrupt to arrive at
6236 * the CPU before the status block write posted prior to the
6237 * interrupt has landed. Reading the PCI State register will
6238 * confirm whether the interrupt is ours and will flush the status block.
6239 */
6240 if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
6241 if (tg3_flag(tp, CHIP_RESETTING) ||
6242 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6243 handled = 0;
6244 goto out;
6245 }
6246 }
6247
6248 /*
6249 * Writing any value to intr-mbox-0 clears PCI INTA# and
6250 * chip-internal interrupt pending events.
6251 * Writing non-zero to intr-mbox-0 additionally tells the
6252 * NIC to stop sending us irqs, engaging "in-intr-handler"
6253 * event coalescing.
6254 *
6255 * Flush the mailbox to de-assert the IRQ immediately to prevent
6256 * spurious interrupts. The flush impacts performance but
6257 * excessive spurious interrupts can be worse in some cases.
6258 */
6259 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
6260
6261 /*
6262 * In a shared interrupt configuration, sometimes other devices'
6263 * interrupts will scream. We record the current status tag here
6264 * so that the above check can report that the screaming interrupts
6265 * are unhandled. Eventually they will be silenced.
6266 */
6267 tnapi->last_irq_tag = sblk->status_tag;
6268
6269 if (tg3_irq_sync(tp))
6270 goto out;
6271
6272 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6273
6274 napi_schedule(&tnapi->napi);
6275
6276 out:
6277 return IRQ_RETVAL(handled);
6278 }
6279
6280 /* ISR for interrupt test */
6281 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
6282 {
6283 struct tg3_napi *tnapi = dev_id;
6284 struct tg3 *tp = tnapi->tp;
6285 struct tg3_hw_status *sblk = tnapi->hw_status;
6286
6287 if ((sblk->status & SD_STATUS_UPDATED) ||
6288 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6289 tg3_disable_ints(tp);
6290 return IRQ_RETVAL(1);
6291 }
6292 return IRQ_RETVAL(0);
6293 }
6294
6295 static int tg3_init_hw(struct tg3 *, int);
6296 static int tg3_halt(struct tg3 *, int, int);
6297
6298 /* Restart hardware after configuration changes, self-test, etc.
6299 * Invoked with tp->lock held.
6300 */
6301 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
6302 __releases(tp->lock)
6303 __acquires(tp->lock)
6304 {
6305 int err;
6306
6307 err = tg3_init_hw(tp, reset_phy);
6308 if (err) {
6309 netdev_err(tp->dev,
6310 "Failed to re-initialize device, aborting\n");
6311 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6312 tg3_full_unlock(tp);
6313 del_timer_sync(&tp->timer);
6314 tp->irq_sync = 0;
6315 tg3_napi_enable(tp);
6316 dev_close(tp->dev);
6317 tg3_full_lock(tp, 0);
6318 }
6319 return err;
6320 }
6321
6322 #ifdef CONFIG_NET_POLL_CONTROLLER
6323 static void tg3_poll_controller(struct net_device *dev)
6324 {
6325 int i;
6326 struct tg3 *tp = netdev_priv(dev);
6327
6328 for (i = 0; i < tp->irq_cnt; i++)
6329 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
6330 }
6331 #endif
6332
6333 static void tg3_reset_task(struct work_struct *work)
6334 {
6335 struct tg3 *tp = container_of(work, struct tg3, reset_task);
6336 int err;
6337
6338 tg3_full_lock(tp, 0);
6339
6340 if (!netif_running(tp->dev)) {
6341 tg3_flag_clear(tp, RESET_TASK_PENDING);
6342 tg3_full_unlock(tp);
6343 return;
6344 }
6345
6346 tg3_full_unlock(tp);
6347
6348 tg3_phy_stop(tp);
6349
6350 tg3_netif_stop(tp);
6351
6352 tg3_full_lock(tp, 1);
6353
6354 if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
6355 tp->write32_tx_mbox = tg3_write32_tx_mbox;
6356 tp->write32_rx_mbox = tg3_write_flush_reg32;
6357 tg3_flag_set(tp, MBOX_WRITE_REORDER);
6358 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
6359 }
6360
6361 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
6362 err = tg3_init_hw(tp, 1);
6363 if (err)
6364 goto out;
6365
6366 tg3_netif_start(tp);
6367
6368 out:
6369 tg3_full_unlock(tp);
6370
6371 if (!err)
6372 tg3_phy_start(tp);
6373
6374 tg3_flag_clear(tp, RESET_TASK_PENDING);
6375 }
6376
6377 static void tg3_tx_timeout(struct net_device *dev)
6378 {
6379 struct tg3 *tp = netdev_priv(dev);
6380
6381 if (netif_msg_tx_err(tp)) {
6382 netdev_err(dev, "transmit timed out, resetting\n");
6383 tg3_dump_state(tp);
6384 }
6385
6386 tg3_reset_task_schedule(tp);
6387 }
6388
6389 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
6390 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
6391 {
6392 u32 base = (u32) mapping & 0xffffffff;
6393
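/* Quick filter: 0x100000000 - 0xffffdcc0 = 9024, so only buffers
 * starting in the last 9024 bytes below a 4GB multiple take the
 * exact wrap test (base + len + 8 overflowing 32 bits).
 */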
6394 return (base > 0xffffdcc0) && (base + len + 8 < base);
6395 }
6396
6397 /* Test for DMA addresses > 40-bit */
6398 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
6399 int len)
6400 {
6401 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
6402 if (tg3_flag(tp, 40BIT_DMA_BUG))
6403 return ((u64) mapping + len) > DMA_BIT_MASK(40);
6404 return 0;
6405 #else
6406 return 0;
6407 #endif
6408 }
6409
6410 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
6411 dma_addr_t mapping, u32 len, u32 flags,
6412 u32 mss, u32 vlan)
6413 {
6414 txbd->addr_hi = ((u64) mapping >> 32);
6415 txbd->addr_lo = ((u64) mapping & 0xffffffff);
6416 txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
6417 txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
6418 }
6419
6420 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
6421 dma_addr_t map, u32 len, u32 flags,
6422 u32 mss, u32 vlan)
6423 {
6424 struct tg3 *tp = tnapi->tp;
6425 bool hwbug = false;
6426
6427 if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
6428 hwbug = true;
6429
6430 if (tg3_4g_overflow_test(map, len))
6431 hwbug = true;
6432
6433 if (tg3_40bit_overflow_test(tp, map, len))
6434 hwbug = true;
6435
6436 if (tp->dma_limit) {
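/* The hardware cannot DMA more than dma_limit bytes per
 * descriptor, so carve the mapping into chunks, marking every
 * chunk but the last as a fragmented continuation entry.
 */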
6437 u32 prvidx = *entry;
6438 u32 tmp_flag = flags & ~TXD_FLAG_END;
6439 while (len > tp->dma_limit && *budget) {
6440 u32 frag_len = tp->dma_limit;
6441 len -= tp->dma_limit;
6442
6443 /* Avoid the 8-byte DMA problem */
6444 if (len <= 8) {
6445 len += tp->dma_limit / 2;
6446 frag_len = tp->dma_limit / 2;
6447 }
6448
6449 tnapi->tx_buffers[*entry].fragmented = true;
6450
6451 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6452 frag_len, tmp_flag, mss, vlan);
6453 *budget -= 1;
6454 prvidx = *entry;
6455 *entry = NEXT_TX(*entry);
6456
6457 map += frag_len;
6458 }
6459
6460 if (len) {
6461 if (*budget) {
6462 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6463 len, flags, mss, vlan);
6464 *budget -= 1;
6465 *entry = NEXT_TX(*entry);
6466 } else {
6467 hwbug = true;
6468 tnapi->tx_buffers[prvidx].fragmented = false;
6469 }
6470 }
6471 } else {
6472 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6473 len, flags, mss, vlan);
6474 *entry = NEXT_TX(*entry);
6475 }
6476
6477 return hwbug;
6478 }
6479
6480 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
6481 {
6482 int i;
6483 struct sk_buff *skb;
6484 struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
6485
6486 skb = txb->skb;
6487 txb->skb = NULL;
6488
6489 pci_unmap_single(tnapi->tp->pdev,
6490 dma_unmap_addr(txb, mapping),
6491 skb_headlen(skb),
6492 PCI_DMA_TODEVICE);
6493
6494 while (txb->fragmented) {
6495 txb->fragmented = false;
6496 entry = NEXT_TX(entry);
6497 txb = &tnapi->tx_buffers[entry];
6498 }
6499
6500 for (i = 0; i <= last; i++) {
6501 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6502
6503 entry = NEXT_TX(entry);
6504 txb = &tnapi->tx_buffers[entry];
6505
6506 pci_unmap_page(tnapi->tp->pdev,
6507 dma_unmap_addr(txb, mapping),
6508 skb_frag_size(frag), PCI_DMA_TODEVICE);
6509
6510 while (txb->fragmented) {
6511 txb->fragmented = false;
6512 entry = NEXT_TX(entry);
6513 txb = &tnapi->tx_buffers[entry];
6514 }
6515 }
6516 }
6517
6518 /* Work around the 4GB and 40-bit hardware DMA bugs. */
6519 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
6520 struct sk_buff **pskb,
6521 u32 *entry, u32 *budget,
6522 u32 base_flags, u32 mss, u32 vlan)
6523 {
6524 struct tg3 *tp = tnapi->tp;
6525 struct sk_buff *new_skb, *skb = *pskb;
6526 dma_addr_t new_addr = 0;
6527 int ret = 0;
6528
6529 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
6530 new_skb = skb_copy(skb, GFP_ATOMIC);
6531 else {
6532 int more_headroom = 4 - ((unsigned long)skb->data & 3);
6533
6534 new_skb = skb_copy_expand(skb,
6535 skb_headroom(skb) + more_headroom,
6536 skb_tailroom(skb), GFP_ATOMIC);
6537 }
6538
6539 if (!new_skb) {
6540 ret = -1;
6541 } else {
6542 /* New SKB is guaranteed to be linear. */
6543 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
6544 PCI_DMA_TODEVICE);
6545 /* Make sure the mapping succeeded */
6546 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
6547 dev_kfree_skb(new_skb);
6548 ret = -1;
6549 } else {
6550 u32 save_entry = *entry;
6551
6552 base_flags |= TXD_FLAG_END;
6553
6554 tnapi->tx_buffers[*entry].skb = new_skb;
6555 dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
6556 mapping, new_addr);
6557
6558 if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
6559 new_skb->len, base_flags,
6560 mss, vlan)) {
6561 tg3_tx_skb_unmap(tnapi, save_entry, -1);
6562 dev_kfree_skb(new_skb);
6563 ret = -1;
6564 }
6565 }
6566 }
6567
6568 dev_kfree_skb(skb);
6569 *pskb = new_skb;
6570 return ret;
6571 }
6572
6573 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
6574
6575 /* Use GSO to work around a rare TSO bug that may be triggered when the
6576 * TSO header is greater than 80 bytes.
6577 */
6578 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
6579 {
6580 struct sk_buff *segs, *nskb;
6581 u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
6582
6583 /* Estimate the number of fragments in the worst case */
6584 if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
6585 netif_stop_queue(tp->dev);
6586
6587 /* netif_tx_stop_queue() must be done before checking
6588 * tx index in tg3_tx_avail() below, because in
6589 * tg3_tx(), we update tx index before checking for
6590 * netif_tx_queue_stopped().
6591 */
6592 smp_mb();
6593 if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
6594 return NETDEV_TX_BUSY;
6595
6596 netif_wake_queue(tp->dev);
6597 }
6598
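/* Segment the packet in software and transmit the resulting
 * MTU-sized frames one at a time.
 */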
6599 segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
6600 if (IS_ERR(segs))
6601 goto tg3_tso_bug_end;
6602
6603 do {
6604 nskb = segs;
6605 segs = segs->next;
6606 nskb->next = NULL;
6607 tg3_start_xmit(nskb, tp->dev);
6608 } while (segs);
6609
6610 tg3_tso_bug_end:
6611 dev_kfree_skb(skb);
6612
6613 return NETDEV_TX_OK;
6614 }
6615
6616 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
6617 * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
6618 */
6619 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
6620 {
6621 struct tg3 *tp = netdev_priv(dev);
6622 u32 len, entry, base_flags, mss, vlan = 0;
6623 u32 budget;
6624 int i = -1, would_hit_hwbug;
6625 dma_addr_t mapping;
6626 struct tg3_napi *tnapi;
6627 struct netdev_queue *txq;
6628 unsigned int last;
6629
6630 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
6631 tnapi = &tp->napi[skb_get_queue_mapping(skb)];
6632 if (tg3_flag(tp, ENABLE_TSS))
6633 tnapi++;
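/* With TSS, vector 0 carries no tx ring, so tx queue N is
 * serviced by vector N + 1.
 */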
6634
6635 budget = tg3_tx_avail(tnapi);
6636
6637 /* We are running in BH disabled context with netif_tx_lock
6638 * and TX reclaim runs via tp->napi.poll inside of a software
6639 * interrupt. Furthermore, IRQ processing runs lockless so we have
6640 * no IRQ context deadlocks to worry about either. Rejoice!
6641 */
6642 if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
6643 if (!netif_tx_queue_stopped(txq)) {
6644 netif_tx_stop_queue(txq);
6645
6646 /* This is a hard error, log it. */
6647 netdev_err(dev,
6648 "BUG! Tx Ring full when queue awake!\n");
6649 }
6650 return NETDEV_TX_BUSY;
6651 }
6652
6653 entry = tnapi->tx_prod;
6654 base_flags = 0;
6655 if (skb->ip_summed == CHECKSUM_PARTIAL)
6656 base_flags |= TXD_FLAG_TCPUDP_CSUM;
6657
6658 mss = skb_shinfo(skb)->gso_size;
6659 if (mss) {
6660 struct iphdr *iph;
6661 u32 tcp_opt_len, hdr_len;
6662
6663 if (skb_header_cloned(skb) &&
6664 pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
6665 goto drop;
6666
6667 iph = ip_hdr(skb);
6668 tcp_opt_len = tcp_optlen(skb);
6669
6670 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
6671
6672 if (!skb_is_gso_v6(skb)) {
6673 iph->check = 0;
6674 iph->tot_len = htons(mss + hdr_len);
6675 }
6676
6677 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
6678 tg3_flag(tp, TSO_BUG))
6679 return tg3_tso_bug(tp, skb);
6680
6681 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
6682 TXD_FLAG_CPU_POST_DMA);
6683
6684 if (tg3_flag(tp, HW_TSO_1) ||
6685 tg3_flag(tp, HW_TSO_2) ||
6686 tg3_flag(tp, HW_TSO_3)) {
6687 tcp_hdr(skb)->check = 0;
6688 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
6689 } else
6690 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
6691 iph->daddr, 0,
6692 IPPROTO_TCP,
6693 0);
6694
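/* Spare bits in the mss field and base_flags carry the TSO
 * header length to the hardware; the exact encoding differs
 * between TSO engine generations.
 */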
6695 if (tg3_flag(tp, HW_TSO_3)) {
6696 mss |= (hdr_len & 0xc) << 12;
6697 if (hdr_len & 0x10)
6698 base_flags |= 0x00000010;
6699 base_flags |= (hdr_len & 0x3e0) << 5;
6700 } else if (tg3_flag(tp, HW_TSO_2))
6701 mss |= hdr_len << 9;
6702 else if (tg3_flag(tp, HW_TSO_1) ||
6703 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
6704 if (tcp_opt_len || iph->ihl > 5) {
6705 int tsflags;
6706
6707 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
6708 mss |= (tsflags << 11);
6709 }
6710 } else {
6711 if (tcp_opt_len || iph->ihl > 5) {
6712 int tsflags;
6713
6714 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
6715 base_flags |= tsflags << 12;
6716 }
6717 }
6718 }
6719
6720 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
6721 !mss && skb->len > VLAN_ETH_FRAME_LEN)
6722 base_flags |= TXD_FLAG_JMB_PKT;
6723
6724 if (vlan_tx_tag_present(skb)) {
6725 base_flags |= TXD_FLAG_VLAN;
6726 vlan = vlan_tx_tag_get(skb);
6727 }
6728
6729 len = skb_headlen(skb);
6730
6731 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
6732 if (pci_dma_mapping_error(tp->pdev, mapping))
6733 goto drop;
6734
6735
6736 tnapi->tx_buffers[entry].skb = skb;
6737 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
6738
6739 would_hit_hwbug = 0;
6740
6741 if (tg3_flag(tp, 5701_DMA_BUG))
6742 would_hit_hwbug = 1;
6743
6744 if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
6745 ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
6746 mss, vlan)) {
6747 would_hit_hwbug = 1;
6748 /* Now loop through additional data fragments, and queue them. */
6749 } else if (skb_shinfo(skb)->nr_frags > 0) {
6750 u32 tmp_mss = mss;
6751
6752 if (!tg3_flag(tp, HW_TSO_1) &&
6753 !tg3_flag(tp, HW_TSO_2) &&
6754 !tg3_flag(tp, HW_TSO_3))
6755 tmp_mss = 0;
6756
6757 last = skb_shinfo(skb)->nr_frags - 1;
6758 for (i = 0; i <= last; i++) {
6759 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6760
6761 len = skb_frag_size(frag);
6762 mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
6763 len, DMA_TO_DEVICE);
6764
6765 tnapi->tx_buffers[entry].skb = NULL;
6766 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
6767 mapping);
6768 if (dma_mapping_error(&tp->pdev->dev, mapping))
6769 goto dma_error;
6770
6771 if (!budget ||
6772 tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
6773 len, base_flags |
6774 ((i == last) ? TXD_FLAG_END : 0),
6775 tmp_mss, vlan)) {
6776 would_hit_hwbug = 1;
6777 break;
6778 }
6779 }
6780 }
6781
6782 if (would_hit_hwbug) {
6783 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
6784
6785 /* If the workaround fails due to memory/mapping
6786 * failure, silently drop this packet.
6787 */
6788 entry = tnapi->tx_prod;
6789 budget = tg3_tx_avail(tnapi);
6790 if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
6791 base_flags, mss, vlan))
6792 goto drop_nofree;
6793 }
6794
6795 skb_tx_timestamp(skb);
6796 netdev_tx_sent_queue(txq, skb->len);
6797
6798 /* Packets are ready, update Tx producer idx local and on card. */
6799 tw32_tx_mbox(tnapi->prodmbox, entry);
6800
6801 tnapi->tx_prod = entry;
6802 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
6803 netif_tx_stop_queue(txq);
6804
6805 /* netif_tx_stop_queue() must be done before checking
6806 * tx index in tg3_tx_avail() below, because in
6807 * tg3_tx(), we update tx index before checking for
6808 * netif_tx_queue_stopped().
6809 */
6810 smp_mb();
6811 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
6812 netif_tx_wake_queue(txq);
6813 }
6814
6815 mmiowb();
6816 return NETDEV_TX_OK;
6817
6818 dma_error:
6819 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
6820 tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
6821 drop:
6822 dev_kfree_skb(skb);
6823 drop_nofree:
6824 tp->tx_dropped++;
6825 return NETDEV_TX_OK;
6826 }
6827
6828 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
6829 {
6830 if (enable) {
6831 tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
6832 MAC_MODE_PORT_MODE_MASK);
6833
6834 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
6835
6836 if (!tg3_flag(tp, 5705_PLUS))
6837 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
6838
6839 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
6840 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
6841 else
6842 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
6843 } else {
6844 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
6845
6846 if (tg3_flag(tp, 5705_PLUS) ||
6847 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
6848 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
6849 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
6850 }
6851
6852 tw32(MAC_MODE, tp->mac_mode);
6853 udelay(40);
6854 }
6855
6856 static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
6857 {
6858 u32 val, bmcr, mac_mode, ptest = 0;
6859
6860 tg3_phy_toggle_apd(tp, false);
6861 tg3_phy_toggle_automdix(tp, 0);
6862
6863 if (extlpbk && tg3_phy_set_extloopbk(tp))
6864 return -EIO;
6865
6866 bmcr = BMCR_FULLDPLX;
6867 switch (speed) {
6868 case SPEED_10:
6869 break;
6870 case SPEED_100:
6871 bmcr |= BMCR_SPEED100;
6872 break;
6873 case SPEED_1000:
6874 default:
6875 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
6876 speed = SPEED_100;
6877 bmcr |= BMCR_SPEED100;
6878 } else {
6879 speed = SPEED_1000;
6880 bmcr |= BMCR_SPEED1000;
6881 }
6882 }
6883
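/* For external loopback, force the PHY into master mode (or the
 * FET trim test mode); internal loopback just sets BMCR_LOOPBACK.
 */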
6884 if (extlpbk) {
6885 if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
6886 tg3_readphy(tp, MII_CTRL1000, &val);
6887 val |= CTL1000_AS_MASTER |
6888 CTL1000_ENABLE_MASTER;
6889 tg3_writephy(tp, MII_CTRL1000, val);
6890 } else {
6891 ptest = MII_TG3_FET_PTEST_TRIM_SEL |
6892 MII_TG3_FET_PTEST_TRIM_2;
6893 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
6894 }
6895 } else
6896 bmcr |= BMCR_LOOPBACK;
6897
6898 tg3_writephy(tp, MII_BMCR, bmcr);
6899
6900 /* The write needs to be flushed for the FETs */
6901 if (tp->phy_flags & TG3_PHYFLG_IS_FET)
6902 tg3_readphy(tp, MII_BMCR, &bmcr);
6903
6904 udelay(40);
6905
6906 if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
6907 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
6908 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
6909 MII_TG3_FET_PTEST_FRC_TX_LINK |
6910 MII_TG3_FET_PTEST_FRC_TX_LOCK);
6911
6912 /* The write needs to be flushed for the AC131 */
6913 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
6914 }
6915
6916 /* Reset to prevent intermittently losing the first rx packet */
6917 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
6918 tg3_flag(tp, 5780_CLASS)) {
6919 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
6920 udelay(10);
6921 tw32_f(MAC_RX_MODE, tp->rx_mode);
6922 }
6923
6924 mac_mode = tp->mac_mode &
6925 ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
6926 if (speed == SPEED_1000)
6927 mac_mode |= MAC_MODE_PORT_MODE_GMII;
6928 else
6929 mac_mode |= MAC_MODE_PORT_MODE_MII;
6930
6931 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
6932 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
6933
6934 if (masked_phy_id == TG3_PHY_ID_BCM5401)
6935 mac_mode &= ~MAC_MODE_LINK_POLARITY;
6936 else if (masked_phy_id == TG3_PHY_ID_BCM5411)
6937 mac_mode |= MAC_MODE_LINK_POLARITY;
6938
6939 tg3_writephy(tp, MII_TG3_EXT_CTRL,
6940 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
6941 }
6942
6943 tw32(MAC_MODE, mac_mode);
6944 udelay(40);
6945
6946 return 0;
6947 }
6948
6949 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
6950 {
6951 struct tg3 *tp = netdev_priv(dev);
6952
6953 if (features & NETIF_F_LOOPBACK) {
6954 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
6955 return;
6956
6957 spin_lock_bh(&tp->lock);
6958 tg3_mac_loopback(tp, true);
6959 netif_carrier_on(tp->dev);
6960 spin_unlock_bh(&tp->lock);
6961 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
6962 } else {
6963 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
6964 return;
6965
6966 spin_lock_bh(&tp->lock);
6967 tg3_mac_loopback(tp, false);
6968 /* Force link status check */
6969 tg3_setup_phy(tp, 1);
6970 spin_unlock_bh(&tp->lock);
6971 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
6972 }
6973 }
6974
6975 static netdev_features_t tg3_fix_features(struct net_device *dev,
6976 netdev_features_t features)
6977 {
6978 struct tg3 *tp = netdev_priv(dev);
6979
6980 if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
6981 features &= ~NETIF_F_ALL_TSO;
6982
6983 return features;
6984 }
6985
6986 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
6987 {
6988 netdev_features_t changed = dev->features ^ features;
6989
6990 if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
6991 tg3_set_loopback(dev, features);
6992
6993 return 0;
6994 }
6995
6996 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
6997 int new_mtu)
6998 {
6999 dev->mtu = new_mtu;
7000
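/* 5780-class chips cannot do TSO on jumbo frames (see
 * tg3_fix_features), so TSO capability tracks the MTU; other
 * chips simply toggle the jumbo ring.
 */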
7001 if (new_mtu > ETH_DATA_LEN) {
7002 if (tg3_flag(tp, 5780_CLASS)) {
7003 netdev_update_features(dev);
7004 tg3_flag_clear(tp, TSO_CAPABLE);
7005 } else {
7006 tg3_flag_set(tp, JUMBO_RING_ENABLE);
7007 }
7008 } else {
7009 if (tg3_flag(tp, 5780_CLASS)) {
7010 tg3_flag_set(tp, TSO_CAPABLE);
7011 netdev_update_features(dev);
7012 }
7013 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
7014 }
7015 }
7016
7017 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
7018 {
7019 struct tg3 *tp = netdev_priv(dev);
7020 int err;
7021
7022 if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
7023 return -EINVAL;
7024
7025 if (!netif_running(dev)) {
7026 /* We'll just catch it later when the
7027 * device is brought up.
7028 */
7029 tg3_set_mtu(dev, tp, new_mtu);
7030 return 0;
7031 }
7032
7033 tg3_phy_stop(tp);
7034
7035 tg3_netif_stop(tp);
7036
7037 tg3_full_lock(tp, 1);
7038
7039 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7040
7041 tg3_set_mtu(dev, tp, new_mtu);
7042
7043 err = tg3_restart_hw(tp, 0);
7044
7045 if (!err)
7046 tg3_netif_start(tp);
7047
7048 tg3_full_unlock(tp);
7049
7050 if (!err)
7051 tg3_phy_start(tp);
7052
7053 return err;
7054 }
7055
7056 static void tg3_rx_prodring_free(struct tg3 *tp,
7057 struct tg3_rx_prodring_set *tpr)
7058 {
7059 int i;
7060
7061 if (tpr != &tp->napi[0].prodring) {
7062 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
7063 i = (i + 1) & tp->rx_std_ring_mask)
7064 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
7065 tp->rx_pkt_map_sz);
7066
7067 if (tg3_flag(tp, JUMBO_CAPABLE)) {
7068 for (i = tpr->rx_jmb_cons_idx;
7069 i != tpr->rx_jmb_prod_idx;
7070 i = (i + 1) & tp->rx_jmb_ring_mask) {
7071 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
7072 TG3_RX_JMB_MAP_SZ);
7073 }
7074 }
7075
7076 return;
7077 }
7078
7079 for (i = 0; i <= tp->rx_std_ring_mask; i++)
7080 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
7081 tp->rx_pkt_map_sz);
7082
7083 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
7084 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
7085 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
7086 TG3_RX_JMB_MAP_SZ);
7087 }
7088 }
7089
7090 /* Initialize rx rings for packet processing.
7091 *
7092 * The chip has been shut down and the driver detached from
7093 * the network stack, so no interrupts or new tx packets will
7094 * end up in the driver. tp->{tx,}lock are held and thus
7095 * we may not sleep.
7096 */
7097 static int tg3_rx_prodring_alloc(struct tg3 *tp,
7098 struct tg3_rx_prodring_set *tpr)
7099 {
7100 u32 i, rx_pkt_dma_sz;
7101
7102 tpr->rx_std_cons_idx = 0;
7103 tpr->rx_std_prod_idx = 0;
7104 tpr->rx_jmb_cons_idx = 0;
7105 tpr->rx_jmb_prod_idx = 0;
7106
7107 if (tpr != &tp->napi[0].prodring) {
7108 memset(&tpr->rx_std_buffers[0], 0,
7109 TG3_RX_STD_BUFF_RING_SIZE(tp));
7110 if (tpr->rx_jmb_buffers)
7111 memset(&tpr->rx_jmb_buffers[0], 0,
7112 TG3_RX_JMB_BUFF_RING_SIZE(tp));
7113 goto done;
7114 }
7115
7116 /* Zero out all descriptors. */
7117 memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
7118
7119 rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
7120 if (tg3_flag(tp, 5780_CLASS) &&
7121 tp->dev->mtu > ETH_DATA_LEN)
7122 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
7123 tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
7124
7125 /* Initialize invariants of the rings; we only set this
7126 * stuff once. This works because the card does not
7127 * write into the rx buffer posting rings.
7128 */
7129 for (i = 0; i <= tp->rx_std_ring_mask; i++) {
7130 struct tg3_rx_buffer_desc *rxd;
7131
7132 rxd = &tpr->rx_std[i];
7133 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
7134 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
7135 rxd->opaque = (RXD_OPAQUE_RING_STD |
7136 (i << RXD_OPAQUE_INDEX_SHIFT));
7137 }
7138
7139 /* Now allocate fresh rx data buffers for the standard ring. */
7140 for (i = 0; i < tp->rx_pending; i++) {
7141 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) {
7142 netdev_warn(tp->dev,
7143 "Using a smaller RX standard ring. Only "
7144 "%d out of %d buffers were allocated "
7145 "successfully\n", i, tp->rx_pending);
7146 if (i == 0)
7147 goto initfail;
7148 tp->rx_pending = i;
7149 break;
7150 }
7151 }
7152
7153 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
7154 goto done;
7155
7156 memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
7157
7158 if (!tg3_flag(tp, JUMBO_RING_ENABLE))
7159 goto done;
7160
7161 for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
7162 struct tg3_rx_buffer_desc *rxd;
7163
7164 rxd = &tpr->rx_jmb[i].std;
7165 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
7166 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
7167 RXD_FLAG_JUMBO;
7168 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
7169 (i << RXD_OPAQUE_INDEX_SHIFT));
7170 }
7171
7172 for (i = 0; i < tp->rx_jumbo_pending; i++) {
7173 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i) < 0) {
7174 netdev_warn(tp->dev,
7175 "Using a smaller RX jumbo ring. Only %d "
7176 "out of %d buffers were allocated "
7177 "successfully\n", i, tp->rx_jumbo_pending);
7178 if (i == 0)
7179 goto initfail;
7180 tp->rx_jumbo_pending = i;
7181 break;
7182 }
7183 }
7184
7185 done:
7186 return 0;
7187
7188 initfail:
7189 tg3_rx_prodring_free(tp, tpr);
7190 return -ENOMEM;
7191 }
7192
7193 static void tg3_rx_prodring_fini(struct tg3 *tp,
7194 struct tg3_rx_prodring_set *tpr)
7195 {
7196 kfree(tpr->rx_std_buffers);
7197 tpr->rx_std_buffers = NULL;
7198 kfree(tpr->rx_jmb_buffers);
7199 tpr->rx_jmb_buffers = NULL;
7200 if (tpr->rx_std) {
7201 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
7202 tpr->rx_std, tpr->rx_std_mapping);
7203 tpr->rx_std = NULL;
7204 }
7205 if (tpr->rx_jmb) {
7206 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
7207 tpr->rx_jmb, tpr->rx_jmb_mapping);
7208 tpr->rx_jmb = NULL;
7209 }
7210 }
7211
7212 static int tg3_rx_prodring_init(struct tg3 *tp,
7213 struct tg3_rx_prodring_set *tpr)
7214 {
7215 tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
7216 GFP_KERNEL);
7217 if (!tpr->rx_std_buffers)
7218 return -ENOMEM;
7219
7220 tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
7221 TG3_RX_STD_RING_BYTES(tp),
7222 &tpr->rx_std_mapping,
7223 GFP_KERNEL);
7224 if (!tpr->rx_std)
7225 goto err_out;
7226
7227 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
7228 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
7229 GFP_KERNEL);
7230 if (!tpr->rx_jmb_buffers)
7231 goto err_out;
7232
7233 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
7234 TG3_RX_JMB_RING_BYTES(tp),
7235 &tpr->rx_jmb_mapping,
7236 GFP_KERNEL);
7237 if (!tpr->rx_jmb)
7238 goto err_out;
7239 }
7240
7241 return 0;
7242
7243 err_out:
7244 tg3_rx_prodring_fini(tp, tpr);
7245 return -ENOMEM;
7246 }
7247
7248 /* Free up pending packets in all rx/tx rings.
7249 *
7250 * The chip has been shut down and the driver detached from
7251 * the network stack, so no interrupts or new tx packets will
7252 * end up in the driver. tp->{tx,}lock is not held and we are not
7253 * in an interrupt context and thus may sleep.
7254 */
7255 static void tg3_free_rings(struct tg3 *tp)
7256 {
7257 int i, j;
7258
7259 for (j = 0; j < tp->irq_cnt; j++) {
7260 struct tg3_napi *tnapi = &tp->napi[j];
7261
7262 tg3_rx_prodring_free(tp, &tnapi->prodring);
7263
7264 if (!tnapi->tx_buffers)
7265 continue;
7266
7267 for (i = 0; i < TG3_TX_RING_SIZE; i++) {
7268 struct sk_buff *skb = tnapi->tx_buffers[i].skb;
7269
7270 if (!skb)
7271 continue;
7272
7273 tg3_tx_skb_unmap(tnapi, i,
7274 skb_shinfo(skb)->nr_frags - 1);
7275
7276 dev_kfree_skb_any(skb);
7277 }
7278 netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
7279 }
7280 }
7281
7282 /* Initialize tx/rx rings for packet processing.
7283 *
7284 * The chip has been shut down and the driver detached from
7285 * the network stack, so no interrupts or new tx packets will
7286 * end up in the driver. tp->{tx,}lock are held and thus
7287 * we may not sleep.
7288 */
7289 static int tg3_init_rings(struct tg3 *tp)
7290 {
7291 int i;
7292
7293 /* Free up all the SKBs. */
7294 tg3_free_rings(tp);
7295
7296 for (i = 0; i < tp->irq_cnt; i++) {
7297 struct tg3_napi *tnapi = &tp->napi[i];
7298
7299 tnapi->last_tag = 0;
7300 tnapi->last_irq_tag = 0;
7301 tnapi->hw_status->status = 0;
7302 tnapi->hw_status->status_tag = 0;
7303 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7304
7305 tnapi->tx_prod = 0;
7306 tnapi->tx_cons = 0;
7307 if (tnapi->tx_ring)
7308 memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
7309
7310 tnapi->rx_rcb_ptr = 0;
7311 if (tnapi->rx_rcb)
7312 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
7313
7314 if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
7315 tg3_free_rings(tp);
7316 return -ENOMEM;
7317 }
7318 }
7319
7320 return 0;
7321 }
7322
7323 /*
7324 * Must only be invoked once interrupt sources are disabled and
7325 * the hardware has been shut down.
7326 */
7327 static void tg3_free_consistent(struct tg3 *tp)
7328 {
7329 int i;
7330
7331 for (i = 0; i < tp->irq_cnt; i++) {
7332 struct tg3_napi *tnapi = &tp->napi[i];
7333
7334 if (tnapi->tx_ring) {
7335 dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
7336 tnapi->tx_ring, tnapi->tx_desc_mapping);
7337 tnapi->tx_ring = NULL;
7338 }
7339
7340 kfree(tnapi->tx_buffers);
7341 tnapi->tx_buffers = NULL;
7342
7343 if (tnapi->rx_rcb) {
7344 dma_free_coherent(&tp->pdev->dev,
7345 TG3_RX_RCB_RING_BYTES(tp),
7346 tnapi->rx_rcb,
7347 tnapi->rx_rcb_mapping);
7348 tnapi->rx_rcb = NULL;
7349 }
7350
7351 tg3_rx_prodring_fini(tp, &tnapi->prodring);
7352
7353 if (tnapi->hw_status) {
7354 dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
7355 tnapi->hw_status,
7356 tnapi->status_mapping);
7357 tnapi->hw_status = NULL;
7358 }
7359 }
7360
7361 if (tp->hw_stats) {
7362 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
7363 tp->hw_stats, tp->stats_mapping);
7364 tp->hw_stats = NULL;
7365 }
7366 }
7367
7368 /*
7369 * Must only be invoked once interrupt sources are disabled and
7370 * the hardware has been shut down. Can sleep.
7371 */
7372 static int tg3_alloc_consistent(struct tg3 *tp)
7373 {
7374 int i;
7375
7376 tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
7377 sizeof(struct tg3_hw_stats),
7378 &tp->stats_mapping,
7379 GFP_KERNEL);
7380 if (!tp->hw_stats)
7381 goto err_out;
7382
7383 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
7384
7385 for (i = 0; i < tp->irq_cnt; i++) {
7386 struct tg3_napi *tnapi = &tp->napi[i];
7387 struct tg3_hw_status *sblk;
7388
7389 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
7390 TG3_HW_STATUS_SIZE,
7391 &tnapi->status_mapping,
7392 GFP_KERNEL);
7393 if (!tnapi->hw_status)
7394 goto err_out;
7395
7396 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7397 sblk = tnapi->hw_status;
7398
7399 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
7400 goto err_out;
7401
7402 /* If multivector TSS is enabled, vector 0 does not handle
7403 * tx interrupts. Don't allocate any resources for it.
7404 */
7405 if ((!i && !tg3_flag(tp, ENABLE_TSS)) ||
7406 (i && tg3_flag(tp, ENABLE_TSS))) {
7407 tnapi->tx_buffers = kzalloc(
7408 sizeof(struct tg3_tx_ring_info) *
7409 TG3_TX_RING_SIZE, GFP_KERNEL);
7410 if (!tnapi->tx_buffers)
7411 goto err_out;
7412
7413 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
7414 TG3_TX_RING_BYTES,
7415 &tnapi->tx_desc_mapping,
7416 GFP_KERNEL);
7417 if (!tnapi->tx_ring)
7418 goto err_out;
7419 }
7420
7421 /*
7422 * When RSS is enabled, the status block format changes
7423 * slightly. The "rx_jumbo_consumer", "reserved",
7424 * and "rx_mini_consumer" members get mapped to the
7425 * other three rx return ring producer indexes.
7426 */
7427 switch (i) {
7428 default:
7429 tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
7430 break;
7431 case 2:
7432 tnapi->rx_rcb_prod_idx = &sblk->rx_jumbo_consumer;
7433 break;
7434 case 3:
7435 tnapi->rx_rcb_prod_idx = &sblk->reserved;
7436 break;
7437 case 4:
7438 tnapi->rx_rcb_prod_idx = &sblk->rx_mini_consumer;
7439 break;
7440 }
7441
7442 /*
7443 * If multivector RSS is enabled, vector 0 does not handle
7444 * rx or tx interrupts. Don't allocate any resources for it.
7445 */
7446 if (!i && tg3_flag(tp, ENABLE_RSS))
7447 continue;
7448
7449 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
7450 TG3_RX_RCB_RING_BYTES(tp),
7451 &tnapi->rx_rcb_mapping,
7452 GFP_KERNEL);
7453 if (!tnapi->rx_rcb)
7454 goto err_out;
7455
7456 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
7457 }
7458
7459 return 0;
7460
7461 err_out:
7462 tg3_free_consistent(tp);
7463 return -ENOMEM;
7464 }
7465
7466 #define MAX_WAIT_CNT 1000
7467
7468 /* To stop a block, clear the enable bit and poll till it
7469 * clears. tp->lock is held.
7470 */
7471 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
7472 {
7473 unsigned int i;
7474 u32 val;
7475
7476 if (tg3_flag(tp, 5705_PLUS)) {
7477 switch (ofs) {
7478 case RCVLSC_MODE:
7479 case DMAC_MODE:
7480 case MBFREE_MODE:
7481 case BUFMGR_MODE:
7482 case MEMARB_MODE:
7483 /* We can't enable/disable these bits on the
7484 * 5705/5750, so just report success.
7485 */
7486 return 0;
7487
7488 default:
7489 break;
7490 }
7491 }
7492
7493 val = tr32(ofs);
7494 val &= ~enable_bit;
7495 tw32_f(ofs, val);
7496
7497 for (i = 0; i < MAX_WAIT_CNT; i++) {
7498 udelay(100);
7499 val = tr32(ofs);
7500 if ((val & enable_bit) == 0)
7501 break;
7502 }
7503
7504 if (i == MAX_WAIT_CNT && !silent) {
7505 dev_err(&tp->pdev->dev,
7506 "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
7507 ofs, enable_bit);
7508 return -ENODEV;
7509 }
7510
7511 return 0;
7512 }
7513
7514 /* tp->lock is held. */
7515 static int tg3_abort_hw(struct tg3 *tp, int silent)
7516 {
7517 int i, err;
7518
7519 tg3_disable_ints(tp);
7520
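/* Quiesce the receiver first, then the send and DMA engines, and
 * finally the host-facing blocks, polling each enable bit until
 * the block reports idle.
 */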
7521 tp->rx_mode &= ~RX_MODE_ENABLE;
7522 tw32_f(MAC_RX_MODE, tp->rx_mode);
7523 udelay(10);
7524
7525 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
7526 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
7527 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
7528 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
7529 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
7530 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
7531
7532 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
7533 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
7534 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
7535 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
7536 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
7537 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
7538 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
7539
7540 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
7541 tw32_f(MAC_MODE, tp->mac_mode);
7542 udelay(40);
7543
7544 tp->tx_mode &= ~TX_MODE_ENABLE;
7545 tw32_f(MAC_TX_MODE, tp->tx_mode);
7546
7547 for (i = 0; i < MAX_WAIT_CNT; i++) {
7548 udelay(100);
7549 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
7550 break;
7551 }
7552 if (i >= MAX_WAIT_CNT) {
7553 dev_err(&tp->pdev->dev,
7554 "%s timed out, TX_MODE_ENABLE will not clear "
7555 "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
7556 err |= -ENODEV;
7557 }
7558
7559 err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
7560 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
7561 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
7562
7563 tw32(FTQ_RESET, 0xffffffff);
7564 tw32(FTQ_RESET, 0x00000000);
7565
7566 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
7567 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
7568
7569 for (i = 0; i < tp->irq_cnt; i++) {
7570 struct tg3_napi *tnapi = &tp->napi[i];
7571 if (tnapi->hw_status)
7572 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7573 }
7574
7575 return err;
7576 }
7577
7578 /* Save PCI command register before chip reset */
7579 static void tg3_save_pci_state(struct tg3 *tp)
7580 {
7581 pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
7582 }
7583
7584 /* Restore PCI state after chip reset */
7585 static void tg3_restore_pci_state(struct tg3 *tp)
7586 {
7587 u32 val;
7588
7589 /* Re-enable indirect register accesses. */
7590 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
7591 tp->misc_host_ctrl);
7592
7593 /* Set MAX PCI retry to zero. */
7594 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
7595 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
7596 tg3_flag(tp, PCIX_MODE))
7597 val |= PCISTATE_RETRY_SAME_DMA;
7598 /* Allow reads and writes to the APE register and memory space. */
7599 if (tg3_flag(tp, ENABLE_APE))
7600 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
7601 PCISTATE_ALLOW_APE_SHMEM_WR |
7602 PCISTATE_ALLOW_APE_PSPACE_WR;
7603 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
7604
7605 pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
7606
7607 if (!tg3_flag(tp, PCI_EXPRESS)) {
7608 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
7609 tp->pci_cacheline_sz);
7610 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
7611 tp->pci_lat_timer);
7612 }
7613
7614 /* Make sure PCI-X relaxed ordering bit is clear. */
7615 if (tg3_flag(tp, PCIX_MODE)) {
7616 u16 pcix_cmd;
7617
7618 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7619 &pcix_cmd);
7620 pcix_cmd &= ~PCI_X_CMD_ERO;
7621 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7622 pcix_cmd);
7623 }
7624
7625 if (tg3_flag(tp, 5780_CLASS)) {
7626
7627 		/* Chip reset on 5780 will reset the MSI enable bit,
7628 		 * so we need to restore it.
7629 		 */
7630 if (tg3_flag(tp, USING_MSI)) {
7631 u16 ctrl;
7632
7633 pci_read_config_word(tp->pdev,
7634 tp->msi_cap + PCI_MSI_FLAGS,
7635 &ctrl);
7636 pci_write_config_word(tp->pdev,
7637 tp->msi_cap + PCI_MSI_FLAGS,
7638 ctrl | PCI_MSI_FLAGS_ENABLE);
7639 val = tr32(MSGINT_MODE);
7640 tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
7641 }
7642 }
7643 }
7644
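/* Overview of the reset sequence below (a summary of the code, not
 * Broadcom documentation):
 *  1) take the NVRAM and GRC APE locks and save the PCI state that
 *     the core-clock reset is known to clobber;
 *  2) quiesce the irq handlers, then issue GRC_MISC_CFG_CORECLK_RESET;
 *  3) wait out the posted write, restore PCI state, re-enable the
 *     memory arbiter, and poll until the bootcode firmware is done.
 * Chip-specific quirks are handled inline at each step.
 */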
7645 /* tp->lock is held. */
7646 static int tg3_chip_reset(struct tg3 *tp)
7647 {
7648 u32 val;
7649 void (*write_op)(struct tg3 *, u32, u32);
7650 int i, err;
7651
7652 tg3_nvram_lock(tp);
7653
7654 tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
7655
7656 /* No matching tg3_nvram_unlock() after this because
7657 * chip reset below will undo the nvram lock.
7658 */
7659 tp->nvram_lock_cnt = 0;
7660
7661 /* GRC_MISC_CFG core clock reset will clear the memory
7662 * enable bit in PCI register 4 and the MSI enable bit
7663 * on some chips, so we save relevant registers here.
7664 */
7665 tg3_save_pci_state(tp);
7666
7667 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
7668 tg3_flag(tp, 5755_PLUS))
7669 tw32(GRC_FASTBOOT_PC, 0);
7670
7671 /*
7672 * We must avoid the readl() that normally takes place.
7673 * It locks machines, causes machine checks, and other
7674 * fun things. So, temporarily disable the 5701
7675 * hardware workaround, while we do the reset.
7676 */
7677 write_op = tp->write32;
7678 if (write_op == tg3_write_flush_reg32)
7679 tp->write32 = tg3_write32;
7680
7681 /* Prevent the irq handler from reading or writing PCI registers
7682 * during chip reset when the memory enable bit in the PCI command
7683 	 * register may be cleared. The chip does not generate interrupts
7684 	 * at this time, but the irq handler may still be called due to irq
7685 * sharing or irqpoll.
7686 */
7687 tg3_flag_set(tp, CHIP_RESETTING);
7688 for (i = 0; i < tp->irq_cnt; i++) {
7689 struct tg3_napi *tnapi = &tp->napi[i];
7690 if (tnapi->hw_status) {
7691 tnapi->hw_status->status = 0;
7692 tnapi->hw_status->status_tag = 0;
7693 }
7694 tnapi->last_tag = 0;
7695 tnapi->last_irq_tag = 0;
7696 }
7697 smp_mb();
7698
7699 for (i = 0; i < tp->irq_cnt; i++)
7700 synchronize_irq(tp->napi[i].irq_vec);
7701
7702 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
7703 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
7704 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
7705 }
7706
7707 /* do the reset */
7708 val = GRC_MISC_CFG_CORECLK_RESET;
7709
7710 if (tg3_flag(tp, PCI_EXPRESS)) {
7711 /* Force PCIe 1.0a mode */
7712 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
7713 !tg3_flag(tp, 57765_PLUS) &&
7714 tr32(TG3_PCIE_PHY_TSTCTL) ==
7715 (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
7716 tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
7717
7718 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
7719 tw32(GRC_MISC_CFG, (1 << 29));
7720 val |= (1 << 29);
7721 }
7722 }
7723
7724 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7725 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
7726 tw32(GRC_VCPU_EXT_CTRL,
7727 tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
7728 }
7729
7730 /* Manage gphy power for all CPMU absent PCIe devices. */
7731 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
7732 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
7733
7734 tw32(GRC_MISC_CFG, val);
7735
7736 /* restore 5701 hardware bug workaround write method */
7737 tp->write32 = write_op;
7738
7739 	/* Unfortunately, we have to delay before the PCI read back.
7740 	 * Some 575X chips will not even respond to a PCI cfg access
7741 	 * when the reset command is given to the chip.
7742 *
7743 * How do these hardware designers expect things to work
7744 * properly if the PCI write is posted for a long period
7745 * of time? It is always necessary to have some method by
7746 * which a register read back can occur to push the write
7747 * out which does the reset.
7748 *
7749 * For most tg3 variants the trick below was working.
7750 * Ho hum...
7751 */
7752 udelay(120);
7753
7754 	/* Flush PCI posted writes. The normal MMIO registers
7755 	 * are inaccessible at this time so this is the only
7756 	 * way to do this reliably (actually, this is no longer
7757 	 * the case, see above). I tried to use indirect
7758 	 * register read/write but this upset some 5701 variants.
7759 	 */
7760 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
7761
7762 udelay(120);
7763
7764 if (tg3_flag(tp, PCI_EXPRESS) && pci_pcie_cap(tp->pdev)) {
7765 u16 val16;
7766
7767 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
7768 int i;
7769 u32 cfg_val;
7770
7771 /* Wait for link training to complete. */
7772 for (i = 0; i < 5000; i++)
7773 udelay(100);
7774
7775 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
7776 pci_write_config_dword(tp->pdev, 0xc4,
7777 cfg_val | (1 << 15));
7778 }
7779
7780 /* Clear the "no snoop" and "relaxed ordering" bits. */
7781 pci_read_config_word(tp->pdev,
7782 pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
7783 &val16);
7784 val16 &= ~(PCI_EXP_DEVCTL_RELAX_EN |
7785 PCI_EXP_DEVCTL_NOSNOOP_EN);
7786 /*
7787 * Older PCIe devices only support the 128 byte
7788 * MPS setting. Enforce the restriction.
7789 */
7790 if (!tg3_flag(tp, CPMU_PRESENT))
7791 val16 &= ~PCI_EXP_DEVCTL_PAYLOAD;
7792 pci_write_config_word(tp->pdev,
7793 pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
7794 val16);
7795
7796 /* Clear error status */
7797 pci_write_config_word(tp->pdev,
7798 pci_pcie_cap(tp->pdev) + PCI_EXP_DEVSTA,
7799 PCI_EXP_DEVSTA_CED |
7800 PCI_EXP_DEVSTA_NFED |
7801 PCI_EXP_DEVSTA_FED |
7802 PCI_EXP_DEVSTA_URD);
7803 }
7804
7805 tg3_restore_pci_state(tp);
7806
7807 tg3_flag_clear(tp, CHIP_RESETTING);
7808 tg3_flag_clear(tp, ERROR_PROCESSED);
7809
7810 val = 0;
7811 if (tg3_flag(tp, 5780_CLASS))
7812 val = tr32(MEMARB_MODE);
7813 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
7814
7815 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
7816 tg3_stop_fw(tp);
7817 tw32(0x5000, 0x400);
7818 }
7819
7820 tw32(GRC_MODE, tp->grc_mode);
7821
7822 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
7823 val = tr32(0xc4);
7824
7825 tw32(0xc4, val | (1 << 15));
7826 }
7827
7828 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
7829 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7830 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
7831 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
7832 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
7833 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
7834 }
7835
7836 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
7837 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
7838 val = tp->mac_mode;
7839 } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
7840 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
7841 val = tp->mac_mode;
7842 } else
7843 val = 0;
7844
7845 tw32_f(MAC_MODE, val);
7846 udelay(40);
7847
7848 tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
7849
7850 err = tg3_poll_fw(tp);
7851 if (err)
7852 return err;
7853
7854 tg3_mdio_start(tp);
7855
7856 if (tg3_flag(tp, PCI_EXPRESS) &&
7857 tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
7858 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
7859 !tg3_flag(tp, 57765_PLUS)) {
7860 val = tr32(0x7c00);
7861
7862 tw32(0x7c00, val | (1 << 25));
7863 }
7864
7865 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
7866 val = tr32(TG3_CPMU_CLCK_ORIDE);
7867 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
7868 }
7869
7870 /* Reprobe ASF enable state. */
7871 tg3_flag_clear(tp, ENABLE_ASF);
7872 tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
7873 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
7874 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
7875 u32 nic_cfg;
7876
7877 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
7878 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
7879 tg3_flag_set(tp, ENABLE_ASF);
7880 tp->last_event_jiffies = jiffies;
7881 if (tg3_flag(tp, 5750_PLUS))
7882 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
7883 }
7884 }
7885
7886 return 0;
7887 }
7888
7889 static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
7890 static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
7891
7892 /* tp->lock is held. */
7893 static int tg3_halt(struct tg3 *tp, int kind, int silent)
7894 {
7895 int err;
7896
7897 tg3_stop_fw(tp);
7898
7899 tg3_write_sig_pre_reset(tp, kind);
7900
7901 tg3_abort_hw(tp, silent);
7902 err = tg3_chip_reset(tp);
7903
7904 __tg3_set_mac_addr(tp, 0);
7905
7906 tg3_write_sig_legacy(tp, kind);
7907 tg3_write_sig_post_reset(tp, kind);
7908
7909 if (tp->hw_stats) {
7910 /* Save the stats across chip resets... */
7911 		tg3_get_nstats(tp, &tp->net_stats_prev);
7912 tg3_get_estats(tp, &tp->estats_prev);
7913
7914 /* And make sure the next sample is new data */
7915 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
7916 }
7917
7918 if (err)
7919 return err;
7920
7921 return 0;
7922 }
7923
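/* .ndo_set_mac_address handler. If ASF management firmware has
 * claimed MAC address register 1 with its own, different, non-zero
 * address, only register 0 is rewritten (skip_mac_1) so management
 * traffic survives the address change.
 */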
7924 static int tg3_set_mac_addr(struct net_device *dev, void *p)
7925 {
7926 struct tg3 *tp = netdev_priv(dev);
7927 struct sockaddr *addr = p;
7928 int err = 0, skip_mac_1 = 0;
7929
7930 if (!is_valid_ether_addr(addr->sa_data))
7931 return -EINVAL;
7932
7933 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7934
7935 if (!netif_running(dev))
7936 return 0;
7937
7938 if (tg3_flag(tp, ENABLE_ASF)) {
7939 u32 addr0_high, addr0_low, addr1_high, addr1_low;
7940
7941 addr0_high = tr32(MAC_ADDR_0_HIGH);
7942 addr0_low = tr32(MAC_ADDR_0_LOW);
7943 addr1_high = tr32(MAC_ADDR_1_HIGH);
7944 addr1_low = tr32(MAC_ADDR_1_LOW);
7945
7946 /* Skip MAC addr 1 if ASF is using it. */
7947 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
7948 !(addr1_high == 0 && addr1_low == 0))
7949 skip_mac_1 = 1;
7950 }
7951 spin_lock_bh(&tp->lock);
7952 __tg3_set_mac_addr(tp, skip_mac_1);
7953 spin_unlock_bh(&tp->lock);
7954
7955 return err;
7956 }
7957
7958 /* tp->lock is held. */
7959 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
7960 dma_addr_t mapping, u32 maxlen_flags,
7961 u32 nic_addr)
7962 {
7963 tg3_write_mem(tp,
7964 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
7965 ((u64) mapping >> 32));
7966 tg3_write_mem(tp,
7967 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
7968 ((u64) mapping & 0xffffffff));
7969 tg3_write_mem(tp,
7970 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
7971 maxlen_flags);
7972
7973 if (!tg3_flag(tp, 5705_PLUS))
7974 tg3_write_mem(tp,
7975 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
7976 nic_addr);
7977 }
7978
7979 static void __tg3_set_rx_mode(struct net_device *);
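/* Program the host coalescing engine from an ethtool_coalesce
 * request. Vector 0 uses the legacy HOSTCC_* registers; each extra
 * MSI-X vector has its own block of registers spaced 0x18 apart,
 * and unused vector slots are zeroed. When TSS/RSS owns a
 * direction, the legacy registers for that direction are cleared.
 */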
7980 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
7981 {
7982 int i;
7983
7984 if (!tg3_flag(tp, ENABLE_TSS)) {
7985 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
7986 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
7987 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
7988 } else {
7989 tw32(HOSTCC_TXCOL_TICKS, 0);
7990 tw32(HOSTCC_TXMAX_FRAMES, 0);
7991 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
7992 }
7993
7994 if (!tg3_flag(tp, ENABLE_RSS)) {
7995 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
7996 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
7997 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
7998 } else {
7999 tw32(HOSTCC_RXCOL_TICKS, 0);
8000 tw32(HOSTCC_RXMAX_FRAMES, 0);
8001 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
8002 }
8003
8004 if (!tg3_flag(tp, 5705_PLUS)) {
8005 u32 val = ec->stats_block_coalesce_usecs;
8006
8007 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
8008 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
8009
8010 if (!netif_carrier_ok(tp->dev))
8011 val = 0;
8012
8013 tw32(HOSTCC_STAT_COAL_TICKS, val);
8014 }
8015
8016 for (i = 0; i < tp->irq_cnt - 1; i++) {
8017 u32 reg;
8018
8019 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
8020 tw32(reg, ec->rx_coalesce_usecs);
8021 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
8022 tw32(reg, ec->rx_max_coalesced_frames);
8023 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
8024 tw32(reg, ec->rx_max_coalesced_frames_irq);
8025
8026 if (tg3_flag(tp, ENABLE_TSS)) {
8027 reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
8028 tw32(reg, ec->tx_coalesce_usecs);
8029 reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
8030 tw32(reg, ec->tx_max_coalesced_frames);
8031 reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
8032 tw32(reg, ec->tx_max_coalesced_frames_irq);
8033 }
8034 }
8035
8036 for (; i < tp->irq_max - 1; i++) {
8037 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
8038 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
8039 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
8040
8041 if (tg3_flag(tp, ENABLE_TSS)) {
8042 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
8043 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
8044 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
8045 }
8046 }
8047 }
8048
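/* Return the rings to a known post-reset state: disable every send
 * and receive-return ring control block except the first, zero the
 * mailboxes and host status blocks, and reprogram each vector's
 * status block DMA address and the first ring's BDINFO entries.
 */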
8049 /* tp->lock is held. */
8050 static void tg3_rings_reset(struct tg3 *tp)
8051 {
8052 int i;
8053 u32 stblk, txrcb, rxrcb, limit;
8054 struct tg3_napi *tnapi = &tp->napi[0];
8055
8056 /* Disable all transmit rings but the first. */
8057 if (!tg3_flag(tp, 5705_PLUS))
8058 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
8059 else if (tg3_flag(tp, 5717_PLUS))
8060 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
8061 else if (tg3_flag(tp, 57765_CLASS))
8062 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
8063 else
8064 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
8065
8066 for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
8067 txrcb < limit; txrcb += TG3_BDINFO_SIZE)
8068 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
8069 BDINFO_FLAGS_DISABLED);
8070 
8072 /* Disable all receive return rings but the first. */
8073 if (tg3_flag(tp, 5717_PLUS))
8074 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
8075 else if (!tg3_flag(tp, 5705_PLUS))
8076 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
8077 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8078 tg3_flag(tp, 57765_CLASS))
8079 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
8080 else
8081 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
8082
8083 for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
8084 rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
8085 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
8086 BDINFO_FLAGS_DISABLED);
8087
8088 /* Disable interrupts */
8089 tw32_mailbox_f(tp->napi[0].int_mbox, 1);
8090 tp->napi[0].chk_msi_cnt = 0;
8091 tp->napi[0].last_rx_cons = 0;
8092 tp->napi[0].last_tx_cons = 0;
8093
8094 /* Zero mailbox registers. */
8095 if (tg3_flag(tp, SUPPORT_MSIX)) {
8096 for (i = 1; i < tp->irq_max; i++) {
8097 tp->napi[i].tx_prod = 0;
8098 tp->napi[i].tx_cons = 0;
8099 if (tg3_flag(tp, ENABLE_TSS))
8100 tw32_mailbox(tp->napi[i].prodmbox, 0);
8101 tw32_rx_mbox(tp->napi[i].consmbox, 0);
8102 tw32_mailbox_f(tp->napi[i].int_mbox, 1);
8103 tp->napi[i].chk_msi_cnt = 0;
8104 tp->napi[i].last_rx_cons = 0;
8105 tp->napi[i].last_tx_cons = 0;
8106 }
8107 if (!tg3_flag(tp, ENABLE_TSS))
8108 tw32_mailbox(tp->napi[0].prodmbox, 0);
8109 } else {
8110 tp->napi[0].tx_prod = 0;
8111 tp->napi[0].tx_cons = 0;
8112 tw32_mailbox(tp->napi[0].prodmbox, 0);
8113 tw32_rx_mbox(tp->napi[0].consmbox, 0);
8114 }
8115
8116 /* Make sure the NIC-based send BD rings are disabled. */
8117 if (!tg3_flag(tp, 5705_PLUS)) {
8118 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
8119 for (i = 0; i < 16; i++)
8120 tw32_tx_mbox(mbox + i * 8, 0);
8121 }
8122
8123 txrcb = NIC_SRAM_SEND_RCB;
8124 rxrcb = NIC_SRAM_RCV_RET_RCB;
8125
8126 /* Clear status block in ram. */
8127 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8128
8129 /* Set status block DMA address */
8130 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
8131 ((u64) tnapi->status_mapping >> 32));
8132 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
8133 ((u64) tnapi->status_mapping & 0xffffffff));
8134
8135 if (tnapi->tx_ring) {
8136 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
8137 (TG3_TX_RING_SIZE <<
8138 BDINFO_FLAGS_MAXLEN_SHIFT),
8139 NIC_SRAM_TX_BUFFER_DESC);
8140 txrcb += TG3_BDINFO_SIZE;
8141 }
8142
8143 if (tnapi->rx_rcb) {
8144 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
8145 (tp->rx_ret_ring_mask + 1) <<
8146 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
8147 rxrcb += TG3_BDINFO_SIZE;
8148 }
8149
8150 stblk = HOSTCC_STATBLCK_RING1;
8151
8152 for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
8153 u64 mapping = (u64)tnapi->status_mapping;
8154 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
8155 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
8156
8157 /* Clear status block in ram. */
8158 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8159
8160 if (tnapi->tx_ring) {
8161 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
8162 (TG3_TX_RING_SIZE <<
8163 BDINFO_FLAGS_MAXLEN_SHIFT),
8164 NIC_SRAM_TX_BUFFER_DESC);
8165 txrcb += TG3_BDINFO_SIZE;
8166 }
8167
8168 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
8169 ((tp->rx_ret_ring_mask + 1) <<
8170 BDINFO_FLAGS_MAXLEN_SHIFT), 0);
8171
8172 stblk += 8;
8173 rxrcb += TG3_BDINFO_SIZE;
8174 }
8175 }
8176
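/* Set the RX buffer-descriptor replenish thresholds. The value
 * written is the smaller of the NIC-side limit (half the per-chip
 * BD cache, capped at rx_std_max_post) and the host-side limit
 * (rx_pending / 8). 57765+ parts additionally get a replenish
 * low-water mark sized to the BD cache.
 */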
8177 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
8178 {
8179 u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
8180
8181 if (!tg3_flag(tp, 5750_PLUS) ||
8182 tg3_flag(tp, 5780_CLASS) ||
8183 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
8184 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
8185 tg3_flag(tp, 57765_PLUS))
8186 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
8187 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8188 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
8189 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
8190 else
8191 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
8192
8193 nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
8194 host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
8195
8196 val = min(nic_rep_thresh, host_rep_thresh);
8197 tw32(RCVBDI_STD_THRESH, val);
8198
8199 if (tg3_flag(tp, 57765_PLUS))
8200 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
8201
8202 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8203 return;
8204
8205 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
8206
8207 host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
8208
8209 val = min(bdcache_maxcnt / 2, host_rep_thresh);
8210 tw32(RCVBDI_JUMBO_THRESH, val);
8211
8212 if (tg3_flag(tp, 57765_PLUS))
8213 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
8214 }
8215
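/* Fill the RSS indirection table with the ethtool default spread,
 * distributing flows evenly across the irq_cnt - 1 rx queues.
 */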
8216 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp)
8217 {
8218 int i;
8219
8220 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
8221 tp->rss_ind_tbl[i] =
8222 ethtool_rxfh_indir_default(i, tp->irq_cnt - 1);
8223 }
8224
8225 static void tg3_rss_check_indir_tbl(struct tg3 *tp)
8226 {
8227 int i;
8228
8229 if (!tg3_flag(tp, SUPPORT_MSIX))
8230 return;
8231
8232 if (tp->irq_cnt <= 2) {
8233 memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
8234 return;
8235 }
8236
8237 /* Validate table against current IRQ count */
8238 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
8239 if (tp->rss_ind_tbl[i] >= tp->irq_cnt - 1)
8240 break;
8241 }
8242
8243 if (i != TG3_RSS_INDIR_TBL_SIZE)
8244 tg3_rss_init_dflt_indir_tbl(tp);
8245 }
8246
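/* Copy the indirection table into the MAC registers. Entries are
 * 4 bits wide, packed eight per 32-bit register with the first
 * entry in the most significant nibble; e.g. a repeating table
 * {1, 2, 3, 0, 1, 2, 3, 0} is written as 0x12301230.
 */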
8247 static void tg3_rss_write_indir_tbl(struct tg3 *tp)
8248 {
8249 int i = 0;
8250 u32 reg = MAC_RSS_INDIR_TBL_0;
8251
8252 while (i < TG3_RSS_INDIR_TBL_SIZE) {
8253 u32 val = tp->rss_ind_tbl[i];
8254 i++;
8255 for (; i % 8; i++) {
8256 val <<= 4;
8257 val |= tp->rss_ind_tbl[i];
8258 }
8259 tw32(reg, val);
8260 reg += 4;
8261 }
8262 }
8263
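/* Main hardware bring-up path: reset the core, replay the per-ASIC
 * PCIe/CPMU workarounds, initialize the rings, buffer manager and
 * DMA engines, program coalescing and RSS/TSS state, then enable
 * the MAC and (re)configure the PHY.
 */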
8264 /* tp->lock is held. */
8265 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
8266 {
8267 u32 val, rdmac_mode;
8268 int i, err, limit;
8269 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
8270
8271 tg3_disable_ints(tp);
8272
8273 tg3_stop_fw(tp);
8274
8275 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
8276
8277 if (tg3_flag(tp, INIT_COMPLETE))
8278 tg3_abort_hw(tp, 1);
8279
8280 /* Enable MAC control of LPI */
8281 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
8282 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL,
8283 TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
8284 TG3_CPMU_EEE_LNKIDL_UART_IDL);
8285
8286 tw32_f(TG3_CPMU_EEE_CTRL,
8287 TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
8288
8289 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
8290 TG3_CPMU_EEEMD_LPI_IN_TX |
8291 TG3_CPMU_EEEMD_LPI_IN_RX |
8292 TG3_CPMU_EEEMD_EEE_ENABLE;
8293
8294 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8295 val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
8296
8297 if (tg3_flag(tp, ENABLE_APE))
8298 val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
8299
8300 tw32_f(TG3_CPMU_EEE_MODE, val);
8301
8302 tw32_f(TG3_CPMU_EEE_DBTMR1,
8303 TG3_CPMU_DBTMR1_PCIEXIT_2047US |
8304 TG3_CPMU_DBTMR1_LNKIDLE_2047US);
8305
8306 tw32_f(TG3_CPMU_EEE_DBTMR2,
8307 TG3_CPMU_DBTMR2_APE_TX_2047US |
8308 TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
8309 }
8310
8311 if (reset_phy)
8312 tg3_phy_reset(tp);
8313
8314 err = tg3_chip_reset(tp);
8315 if (err)
8316 return err;
8317
8318 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
8319
8320 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
8321 val = tr32(TG3_CPMU_CTRL);
8322 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
8323 tw32(TG3_CPMU_CTRL, val);
8324
8325 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8326 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8327 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8328 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8329
8330 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
8331 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
8332 val |= CPMU_LNK_AWARE_MACCLK_6_25;
8333 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
8334
8335 val = tr32(TG3_CPMU_HST_ACC);
8336 val &= ~CPMU_HST_ACC_MACCLK_MASK;
8337 val |= CPMU_HST_ACC_MACCLK_6_25;
8338 tw32(TG3_CPMU_HST_ACC, val);
8339 }
8340
8341 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
8342 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
8343 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
8344 PCIE_PWR_MGMT_L1_THRESH_4MS;
8345 tw32(PCIE_PWR_MGMT_THRESH, val);
8346
8347 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
8348 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
8349
8350 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
8351
8352 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
8353 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
8354 }
8355
8356 if (tg3_flag(tp, L1PLLPD_EN)) {
8357 u32 grc_mode = tr32(GRC_MODE);
8358
8359 /* Access the lower 1K of PL PCIE block registers. */
8360 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8361 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
8362
8363 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
8364 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
8365 val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
8366
8367 tw32(GRC_MODE, grc_mode);
8368 }
8369
8370 if (tg3_flag(tp, 57765_CLASS)) {
8371 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
8372 u32 grc_mode = tr32(GRC_MODE);
8373
8374 /* Access the lower 1K of PL PCIE block registers. */
8375 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8376 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
8377
8378 val = tr32(TG3_PCIE_TLDLPL_PORT +
8379 TG3_PCIE_PL_LO_PHYCTL5);
8380 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
8381 val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
8382
8383 tw32(GRC_MODE, grc_mode);
8384 }
8385
8386 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_57765_AX) {
8387 u32 grc_mode = tr32(GRC_MODE);
8388
8389 /* Access the lower 1K of DL PCIE block registers. */
8390 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8391 tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
8392
8393 val = tr32(TG3_PCIE_TLDLPL_PORT +
8394 TG3_PCIE_DL_LO_FTSMAX);
8395 val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
8396 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
8397 val | TG3_PCIE_DL_LO_FTSMAX_VAL);
8398
8399 tw32(GRC_MODE, grc_mode);
8400 }
8401
8402 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8403 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8404 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8405 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8406 }
8407
8408 /* This works around an issue with Athlon chipsets on
8409 * B3 tigon3 silicon. This bit has no effect on any
8410 * other revision. But do not set this on PCI Express
8411 * chips and don't even touch the clocks if the CPMU is present.
8412 */
8413 if (!tg3_flag(tp, CPMU_PRESENT)) {
8414 if (!tg3_flag(tp, PCI_EXPRESS))
8415 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
8416 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
8417 }
8418
8419 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
8420 tg3_flag(tp, PCIX_MODE)) {
8421 val = tr32(TG3PCI_PCISTATE);
8422 val |= PCISTATE_RETRY_SAME_DMA;
8423 tw32(TG3PCI_PCISTATE, val);
8424 }
8425
8426 if (tg3_flag(tp, ENABLE_APE)) {
8427 /* Allow reads and writes to the
8428 * APE register and memory space.
8429 */
8430 val = tr32(TG3PCI_PCISTATE);
8431 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8432 PCISTATE_ALLOW_APE_SHMEM_WR |
8433 PCISTATE_ALLOW_APE_PSPACE_WR;
8434 tw32(TG3PCI_PCISTATE, val);
8435 }
8436
8437 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
8438 /* Enable some hw fixes. */
8439 val = tr32(TG3PCI_MSI_DATA);
8440 val |= (1 << 26) | (1 << 28) | (1 << 29);
8441 tw32(TG3PCI_MSI_DATA, val);
8442 }
8443
8444 /* Descriptor ring init may make accesses to the
8445 * NIC SRAM area to setup the TX descriptors, so we
8446 * can only do this after the hardware has been
8447 * successfully reset.
8448 */
8449 err = tg3_init_rings(tp);
8450 if (err)
8451 return err;
8452
8453 if (tg3_flag(tp, 57765_PLUS)) {
8454 val = tr32(TG3PCI_DMA_RW_CTRL) &
8455 ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
8456 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
8457 val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
8458 if (!tg3_flag(tp, 57765_CLASS) &&
8459 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8460 val |= DMA_RWCTRL_TAGGED_STAT_WA;
8461 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
8462 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
8463 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
8464 /* This value is determined during the probe time DMA
8465 * engine test, tg3_test_dma.
8466 */
8467 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
8468 }
8469
8470 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
8471 GRC_MODE_4X_NIC_SEND_RINGS |
8472 GRC_MODE_NO_TX_PHDR_CSUM |
8473 GRC_MODE_NO_RX_PHDR_CSUM);
8474 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
8475
8476 /* Pseudo-header checksum is done by hardware logic and not
8477 	 * the offload processors, so make the chip do the pseudo-
8478 * header checksums on receive. For transmit it is more
8479 * convenient to do the pseudo-header checksum in software
8480 * as Linux does that on transmit for us in all cases.
8481 */
8482 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
8483
8484 tw32(GRC_MODE,
8485 tp->grc_mode |
8486 (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
8487
8488 	/* Setup the timer prescaler register. Clock is always 66 MHz. */
8489 val = tr32(GRC_MISC_CFG);
8490 val &= ~0xff;
8491 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
8492 tw32(GRC_MISC_CFG, val);
8493
8494 /* Initialize MBUF/DESC pool. */
8495 if (tg3_flag(tp, 5750_PLUS)) {
8496 /* Do nothing. */
8497 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
8498 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
8499 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
8500 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
8501 else
8502 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
8503 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
8504 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
8505 } else if (tg3_flag(tp, TSO_CAPABLE)) {
8506 int fw_len;
8507
8508 fw_len = tp->fw_len;
8509 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
8510 tw32(BUFMGR_MB_POOL_ADDR,
8511 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
8512 tw32(BUFMGR_MB_POOL_SIZE,
8513 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
8514 }
8515
8516 if (tp->dev->mtu <= ETH_DATA_LEN) {
8517 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8518 tp->bufmgr_config.mbuf_read_dma_low_water);
8519 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8520 tp->bufmgr_config.mbuf_mac_rx_low_water);
8521 tw32(BUFMGR_MB_HIGH_WATER,
8522 tp->bufmgr_config.mbuf_high_water);
8523 } else {
8524 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8525 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
8526 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8527 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
8528 tw32(BUFMGR_MB_HIGH_WATER,
8529 tp->bufmgr_config.mbuf_high_water_jumbo);
8530 }
8531 tw32(BUFMGR_DMA_LOW_WATER,
8532 tp->bufmgr_config.dma_low_water);
8533 tw32(BUFMGR_DMA_HIGH_WATER,
8534 tp->bufmgr_config.dma_high_water);
8535
8536 val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
8537 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
8538 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
8539 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
8540 tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
8541 tp->pci_chip_rev_id == CHIPREV_ID_5720_A0)
8542 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
8543 tw32(BUFMGR_MODE, val);
8544 for (i = 0; i < 2000; i++) {
8545 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
8546 break;
8547 udelay(10);
8548 }
8549 if (i >= 2000) {
8550 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
8551 return -ENODEV;
8552 }
8553
8554 if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
8555 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
8556
8557 tg3_setup_rxbd_thresholds(tp);
8558
8559 /* Initialize TG3_BDINFO's at:
8560 * RCVDBDI_STD_BD: standard eth size rx ring
8561 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
8562 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
8563 *
8564 * like so:
8565 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
8566 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
8567 * ring attribute flags
8568 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
8569 *
8570 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
8571 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
8572 *
8573 * The size of each ring is fixed in the firmware, but the location is
8574 * configurable.
8575 */
8576 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8577 ((u64) tpr->rx_std_mapping >> 32));
8578 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8579 ((u64) tpr->rx_std_mapping & 0xffffffff));
8580 if (!tg3_flag(tp, 5717_PLUS))
8581 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
8582 NIC_SRAM_RX_BUFFER_DESC);
8583
8584 /* Disable the mini ring */
8585 if (!tg3_flag(tp, 5705_PLUS))
8586 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
8587 BDINFO_FLAGS_DISABLED);
8588
8589 /* Program the jumbo buffer descriptor ring control
8590 * blocks on those devices that have them.
8591 */
8592 if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
8593 (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
8594
8595 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
8596 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8597 ((u64) tpr->rx_jmb_mapping >> 32));
8598 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8599 ((u64) tpr->rx_jmb_mapping & 0xffffffff));
8600 val = TG3_RX_JMB_RING_SIZE(tp) <<
8601 BDINFO_FLAGS_MAXLEN_SHIFT;
8602 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8603 val | BDINFO_FLAGS_USE_EXT_RECV);
8604 if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
8605 tg3_flag(tp, 57765_CLASS))
8606 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
8607 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
8608 } else {
8609 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8610 BDINFO_FLAGS_DISABLED);
8611 }
8612
8613 if (tg3_flag(tp, 57765_PLUS)) {
8614 val = TG3_RX_STD_RING_SIZE(tp);
8615 val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
8616 val |= (TG3_RX_STD_DMA_SZ << 2);
8617 } else
8618 val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
8619 } else
8620 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
8621
8622 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
8623
8624 tpr->rx_std_prod_idx = tp->rx_pending;
8625 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
8626
8627 tpr->rx_jmb_prod_idx =
8628 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
8629 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
8630
8631 tg3_rings_reset(tp);
8632
8633 /* Initialize MAC address and backoff seed. */
8634 __tg3_set_mac_addr(tp, 0);
8635
8636 /* MTU + ethernet header + FCS + optional VLAN tag */
8637 tw32(MAC_RX_MTU_SIZE,
8638 tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
8639
8640 /* The slot time is changed by tg3_setup_phy if we
8641 * run at gigabit with half duplex.
8642 */
8643 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
8644 (6 << TX_LENGTHS_IPG_SHIFT) |
8645 (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
8646
8647 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8648 val |= tr32(MAC_TX_LENGTHS) &
8649 (TX_LENGTHS_JMB_FRM_LEN_MSK |
8650 TX_LENGTHS_CNT_DWN_VAL_MSK);
8651
8652 tw32(MAC_TX_LENGTHS, val);
8653
8654 /* Receive rules. */
8655 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
8656 tw32(RCVLPC_CONFIG, 0x0181);
8657
8658 /* Calculate RDMAC_MODE setting early, we need it to determine
8659 	 * the RCVLPC_STATS_ENABLE mask.
8660 */
8661 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
8662 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
8663 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
8664 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
8665 RDMAC_MODE_LNGREAD_ENAB);
8666
8667 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
8668 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
8669
8670 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8671 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8672 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8673 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
8674 RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
8675 RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
8676
8677 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8678 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
8679 if (tg3_flag(tp, TSO_CAPABLE) &&
8680 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
8681 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
8682 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
8683 !tg3_flag(tp, IS_5788)) {
8684 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8685 }
8686 }
8687
8688 if (tg3_flag(tp, PCI_EXPRESS))
8689 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8690
8691 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
8692 rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR;
8693
8694 if (tg3_flag(tp, HW_TSO_1) ||
8695 tg3_flag(tp, HW_TSO_2) ||
8696 tg3_flag(tp, HW_TSO_3))
8697 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
8698
8699 if (tg3_flag(tp, 57765_PLUS) ||
8700 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8701 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8702 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
8703
8704 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8705 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
8706
8707 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
8708 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8709 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8710 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
8711 tg3_flag(tp, 57765_PLUS)) {
8712 val = tr32(TG3_RDMA_RSRVCTRL_REG);
8713 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8714 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8715 val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
8716 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
8717 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
8718 val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
8719 TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
8720 TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
8721 }
8722 tw32(TG3_RDMA_RSRVCTRL_REG,
8723 val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
8724 }
8725
8726 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8727 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8728 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
8729 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val |
8730 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
8731 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
8732 }
8733
8734 /* Receive/send statistics. */
8735 if (tg3_flag(tp, 5750_PLUS)) {
8736 val = tr32(RCVLPC_STATS_ENABLE);
8737 val &= ~RCVLPC_STATSENAB_DACK_FIX;
8738 tw32(RCVLPC_STATS_ENABLE, val);
8739 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
8740 tg3_flag(tp, TSO_CAPABLE)) {
8741 val = tr32(RCVLPC_STATS_ENABLE);
8742 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
8743 tw32(RCVLPC_STATS_ENABLE, val);
8744 } else {
8745 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
8746 }
8747 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
8748 tw32(SNDDATAI_STATSENAB, 0xffffff);
8749 tw32(SNDDATAI_STATSCTRL,
8750 (SNDDATAI_SCTRL_ENABLE |
8751 SNDDATAI_SCTRL_FASTUPD));
8752
8753 /* Setup host coalescing engine. */
8754 tw32(HOSTCC_MODE, 0);
8755 for (i = 0; i < 2000; i++) {
8756 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
8757 break;
8758 udelay(10);
8759 }
8760
8761 __tg3_set_coalesce(tp, &tp->coal);
8762
8763 if (!tg3_flag(tp, 5705_PLUS)) {
8764 /* Status/statistics block address. See tg3_timer,
8765 * the tg3_periodic_fetch_stats call there, and
8766 * tg3_get_stats to see how this works for 5705/5750 chips.
8767 */
8768 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
8769 ((u64) tp->stats_mapping >> 32));
8770 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
8771 ((u64) tp->stats_mapping & 0xffffffff));
8772 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
8773
8774 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
8775
8776 /* Clear statistics and status block memory areas */
8777 for (i = NIC_SRAM_STATS_BLK;
8778 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
8779 i += sizeof(u32)) {
8780 tg3_write_mem(tp, i, 0);
8781 udelay(40);
8782 }
8783 }
8784
8785 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
8786
8787 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
8788 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
8789 if (!tg3_flag(tp, 5705_PLUS))
8790 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
8791
8792 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
8793 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
8794 		/* reset to prevent losing the first rx packet intermittently */
8795 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8796 udelay(10);
8797 }
8798
8799 tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
8800 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
8801 MAC_MODE_FHDE_ENABLE;
8802 if (tg3_flag(tp, ENABLE_APE))
8803 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
8804 if (!tg3_flag(tp, 5705_PLUS) &&
8805 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
8806 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
8807 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
8808 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
8809 udelay(40);
8810
8811 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
8812 * If TG3_FLAG_IS_NIC is zero, we should read the
8813 * register to preserve the GPIO settings for LOMs. The GPIOs,
8814 * whether used as inputs or outputs, are set by boot code after
8815 * reset.
8816 */
8817 if (!tg3_flag(tp, IS_NIC)) {
8818 u32 gpio_mask;
8819
8820 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
8821 GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
8822 GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
8823
8824 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
8825 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
8826 GRC_LCLCTRL_GPIO_OUTPUT3;
8827
8828 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
8829 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
8830
8831 tp->grc_local_ctrl &= ~gpio_mask;
8832 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
8833
8834 /* GPIO1 must be driven high for eeprom write protect */
8835 if (tg3_flag(tp, EEPROM_WRITE_PROT))
8836 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
8837 GRC_LCLCTRL_GPIO_OUTPUT1);
8838 }
8839 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
8840 udelay(100);
8841
8842 if (tg3_flag(tp, USING_MSIX)) {
8843 val = tr32(MSGINT_MODE);
8844 val |= MSGINT_MODE_ENABLE;
8845 if (tp->irq_cnt > 1)
8846 val |= MSGINT_MODE_MULTIVEC_EN;
8847 if (!tg3_flag(tp, 1SHOT_MSI))
8848 val |= MSGINT_MODE_ONE_SHOT_DISABLE;
8849 tw32(MSGINT_MODE, val);
8850 }
8851
8852 if (!tg3_flag(tp, 5705_PLUS)) {
8853 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
8854 udelay(40);
8855 }
8856
8857 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
8858 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
8859 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
8860 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
8861 WDMAC_MODE_LNGREAD_ENAB);
8862
8863 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8864 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
8865 if (tg3_flag(tp, TSO_CAPABLE) &&
8866 (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
8867 tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
8868 /* nothing */
8869 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
8870 !tg3_flag(tp, IS_5788)) {
8871 val |= WDMAC_MODE_RX_ACCEL;
8872 }
8873 }
8874
8875 /* Enable host coalescing bug fix */
8876 if (tg3_flag(tp, 5755_PLUS))
8877 val |= WDMAC_MODE_STATUS_TAG_FIX;
8878
8879 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
8880 val |= WDMAC_MODE_BURST_ALL_DATA;
8881
8882 tw32_f(WDMAC_MODE, val);
8883 udelay(40);
8884
8885 if (tg3_flag(tp, PCIX_MODE)) {
8886 u16 pcix_cmd;
8887
8888 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8889 &pcix_cmd);
8890 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
8891 pcix_cmd &= ~PCI_X_CMD_MAX_READ;
8892 pcix_cmd |= PCI_X_CMD_READ_2K;
8893 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
8894 pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
8895 pcix_cmd |= PCI_X_CMD_READ_2K;
8896 }
8897 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8898 pcix_cmd);
8899 }
8900
8901 tw32_f(RDMAC_MODE, rdmac_mode);
8902 udelay(40);
8903
8904 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
8905 if (!tg3_flag(tp, 5705_PLUS))
8906 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
8907
8908 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
8909 tw32(SNDDATAC_MODE,
8910 SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
8911 else
8912 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
8913
8914 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
8915 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
8916 val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
8917 if (tg3_flag(tp, LRG_PROD_RING_CAP))
8918 val |= RCVDBDI_MODE_LRG_RING_SZ;
8919 tw32(RCVDBDI_MODE, val);
8920 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
8921 if (tg3_flag(tp, HW_TSO_1) ||
8922 tg3_flag(tp, HW_TSO_2) ||
8923 tg3_flag(tp, HW_TSO_3))
8924 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
8925 val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
8926 if (tg3_flag(tp, ENABLE_TSS))
8927 val |= SNDBDI_MODE_MULTI_TXQ_EN;
8928 tw32(SNDBDI_MODE, val);
8929 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
8930
8931 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
8932 err = tg3_load_5701_a0_firmware_fix(tp);
8933 if (err)
8934 return err;
8935 }
8936
8937 if (tg3_flag(tp, TSO_CAPABLE)) {
8938 err = tg3_load_tso_firmware(tp);
8939 if (err)
8940 return err;
8941 }
8942
8943 tp->tx_mode = TX_MODE_ENABLE;
8944
8945 if (tg3_flag(tp, 5755_PLUS) ||
8946 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
8947 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
8948
8949 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8950 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
8951 tp->tx_mode &= ~val;
8952 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
8953 }
8954
8955 tw32_f(MAC_TX_MODE, tp->tx_mode);
8956 udelay(100);
8957
8958 if (tg3_flag(tp, ENABLE_RSS)) {
8959 tg3_rss_write_indir_tbl(tp);
8960
8961 /* Setup the "secret" hash key. */
8962 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
8963 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
8964 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
8965 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
8966 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
8967 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
8968 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
8969 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
8970 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
8971 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
8972 }
8973
8974 tp->rx_mode = RX_MODE_ENABLE;
8975 if (tg3_flag(tp, 5755_PLUS))
8976 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
8977
8978 if (tg3_flag(tp, ENABLE_RSS))
8979 tp->rx_mode |= RX_MODE_RSS_ENABLE |
8980 RX_MODE_RSS_ITBL_HASH_BITS_7 |
8981 RX_MODE_RSS_IPV6_HASH_EN |
8982 RX_MODE_RSS_TCP_IPV6_HASH_EN |
8983 RX_MODE_RSS_IPV4_HASH_EN |
8984 RX_MODE_RSS_TCP_IPV4_HASH_EN;
8985
8986 tw32_f(MAC_RX_MODE, tp->rx_mode);
8987 udelay(10);
8988
8989 tw32(MAC_LED_CTRL, tp->led_ctrl);
8990
8991 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
8992 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8993 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8994 udelay(10);
8995 }
8996 tw32_f(MAC_RX_MODE, tp->rx_mode);
8997 udelay(10);
8998
8999 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9000 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
9001 !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
9002 /* Set drive transmission level to 1.2V */
9003 /* only if the signal pre-emphasis bit is not set */
9004 val = tr32(MAC_SERDES_CFG);
9005 val &= 0xfffff000;
9006 val |= 0x880;
9007 tw32(MAC_SERDES_CFG, val);
9008 }
9009 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
9010 tw32(MAC_SERDES_CFG, 0x616000);
9011 }
9012
9013 /* Prevent chip from dropping frames when flow control
9014 * is enabled.
9015 */
9016 if (tg3_flag(tp, 57765_CLASS))
9017 val = 1;
9018 else
9019 val = 2;
9020 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
9021
9022 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
9023 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
9024 /* Use hardware link auto-negotiation */
9025 tg3_flag_set(tp, HW_AUTONEG);
9026 }
9027
9028 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
9029 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
9030 u32 tmp;
9031
9032 tmp = tr32(SERDES_RX_CTRL);
9033 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
9034 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
9035 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
9036 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9037 }
9038
9039 if (!tg3_flag(tp, USE_PHYLIB)) {
9040 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
9041 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
9042 tp->link_config.speed = tp->link_config.orig_speed;
9043 tp->link_config.duplex = tp->link_config.orig_duplex;
9044 tp->link_config.autoneg = tp->link_config.orig_autoneg;
9045 }
9046
9047 err = tg3_setup_phy(tp, 0);
9048 if (err)
9049 return err;
9050
9051 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9052 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
9053 u32 tmp;
9054
9055 /* Clear CRC stats. */
9056 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
9057 tg3_writephy(tp, MII_TG3_TEST1,
9058 tmp | MII_TG3_TEST1_CRC_EN);
9059 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
9060 }
9061 }
9062 }
9063
9064 __tg3_set_rx_mode(tp->dev);
9065
9066 /* Initialize receive rules. */
9067 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
9068 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
9069 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
9070 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
9071
9072 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
9073 limit = 8;
9074 else
9075 limit = 16;
9076 if (tg3_flag(tp, ENABLE_ASF))
9077 limit -= 4;
9078 switch (limit) {
9079 case 16:
9080 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
9081 case 15:
9082 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
9083 case 14:
9084 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
9085 case 13:
9086 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
9087 case 12:
9088 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
9089 case 11:
9090 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
9091 case 10:
9092 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
9093 case 9:
9094 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
9095 case 8:
9096 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
9097 case 7:
9098 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
9099 case 6:
9100 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
9101 case 5:
9102 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
9103 case 4:
9104 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
9105 case 3:
9106 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
9107 case 2:
9108 case 1:
9109
9110 default:
9111 break;
9112 }
9113
9114 if (tg3_flag(tp, ENABLE_APE))
9115 /* Write our heartbeat update interval to APE. */
9116 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
9117 APE_HOST_HEARTBEAT_INT_DISABLE);
9118
9119 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
9120
9121 return 0;
9122 }
9123
9124 /* Called at device open time to get the chip ready for
9125 * packet processing. Invoked with tp->lock held.
9126 */
9127 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
9128 {
9129 tg3_switch_clocks(tp);
9130
9131 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
9132
9133 return tg3_reset_hw(tp, reset_phy);
9134 }
9135
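/* Fold a 32-bit hardware statistics register into a 64-bit
 * high/low software counter. Wraparound of the low word is detected
 * by it ending up smaller than the value just added: e.g.
 * low = 0xffffff00 plus __val = 0x200 leaves low = 0x100 < __val,
 * so 1 is carried into the high word.
 */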
9136 #define TG3_STAT_ADD32(PSTAT, REG) \
9137 do { u32 __val = tr32(REG); \
9138 (PSTAT)->low += __val; \
9139 if ((PSTAT)->low < __val) \
9140 (PSTAT)->high += 1; \
9141 } while (0)
9142
9143 static void tg3_periodic_fetch_stats(struct tg3 *tp)
9144 {
9145 struct tg3_hw_stats *sp = tp->hw_stats;
9146
9147 if (!netif_carrier_ok(tp->dev))
9148 return;
9149
9150 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
9151 TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
9152 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
9153 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
9154 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
9155 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
9156 TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
9157 TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
9158 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
9159 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
9160 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
9161 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
9162 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
9163
9164 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
9165 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
9166 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
9167 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
9168 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
9169 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
9170 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
9171 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
9172 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
9173 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
9174 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
9175 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
9176 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
9177 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
9178
9179 TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
9180 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
9181 tp->pci_chip_rev_id != CHIPREV_ID_5719_A0 &&
9182 tp->pci_chip_rev_id != CHIPREV_ID_5720_A0) {
9183 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
9184 } else {
9185 u32 val = tr32(HOSTCC_FLOW_ATTN);
9186 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
9187 if (val) {
9188 tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
9189 sp->rx_discards.low += val;
9190 if (sp->rx_discards.low < val)
9191 sp->rx_discards.high += 1;
9192 }
9193 sp->mbuf_lwm_thresh_hit = sp->rx_discards;
9194 }
9195 TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
9196 }
9197
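/* Workaround for lost MSIs: if a vector still has work pending and
 * its consumer indices have not moved since the last timer tick,
 * assume the interrupt was dropped and invoke the handler by hand.
 * chk_msi_cnt grants one idle tick of grace before firing.
 */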
9198 static void tg3_chk_missed_msi(struct tg3 *tp)
9199 {
9200 u32 i;
9201
9202 for (i = 0; i < tp->irq_cnt; i++) {
9203 struct tg3_napi *tnapi = &tp->napi[i];
9204
9205 if (tg3_has_work(tnapi)) {
9206 if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
9207 tnapi->last_tx_cons == tnapi->tx_cons) {
9208 if (tnapi->chk_msi_cnt < 1) {
9209 tnapi->chk_msi_cnt++;
9210 return;
9211 }
9212 tg3_msi(0, tnapi);
9213 }
9214 }
9215 tnapi->chk_msi_cnt = 0;
9216 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
9217 tnapi->last_tx_cons = tnapi->tx_cons;
9218 }
9219 }
9220
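/* Periodic housekeeping timer: works around the non-tagged status
 * race by prodding the coalescing engine, fetches statistics and
 * polls link state once per second, and sends the ASF heartbeat
 * every other second (see the comment further down).
 */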
9221 static void tg3_timer(unsigned long __opaque)
9222 {
9223 struct tg3 *tp = (struct tg3 *) __opaque;
9224
9225 if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING))
9226 goto restart_timer;
9227
9228 spin_lock(&tp->lock);
9229
9230 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
9231 tg3_flag(tp, 57765_CLASS))
9232 tg3_chk_missed_msi(tp);
9233
9234 if (!tg3_flag(tp, TAGGED_STATUS)) {
9235 		/* All of this garbage is because, when using non-tagged
9236 		 * IRQ status, the mailbox/status_block protocol the chip
9237 		 * uses with the CPU is race prone.
9238 */
9239 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
9240 tw32(GRC_LOCAL_CTRL,
9241 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
9242 } else {
9243 tw32(HOSTCC_MODE, tp->coalesce_mode |
9244 HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
9245 }
9246
9247 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
9248 spin_unlock(&tp->lock);
9249 tg3_reset_task_schedule(tp);
9250 goto restart_timer;
9251 }
9252 }
9253
9254 /* This part only runs once per second. */
9255 if (!--tp->timer_counter) {
9256 if (tg3_flag(tp, 5705_PLUS))
9257 tg3_periodic_fetch_stats(tp);
9258
9259 if (tp->setlpicnt && !--tp->setlpicnt)
9260 tg3_phy_eee_enable(tp);
9261
9262 if (tg3_flag(tp, USE_LINKCHG_REG)) {
9263 u32 mac_stat;
9264 int phy_event;
9265
9266 mac_stat = tr32(MAC_STATUS);
9267
9268 phy_event = 0;
9269 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
9270 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
9271 phy_event = 1;
9272 } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
9273 phy_event = 1;
9274
9275 if (phy_event)
9276 tg3_setup_phy(tp, 0);
9277 } else if (tg3_flag(tp, POLL_SERDES)) {
9278 u32 mac_stat = tr32(MAC_STATUS);
9279 int need_setup = 0;
9280
9281 if (netif_carrier_ok(tp->dev) &&
9282 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
9283 need_setup = 1;
9284 }
9285 if (!netif_carrier_ok(tp->dev) &&
9286 (mac_stat & (MAC_STATUS_PCS_SYNCED |
9287 MAC_STATUS_SIGNAL_DET))) {
9288 need_setup = 1;
9289 }
9290 if (need_setup) {
9291 if (!tp->serdes_counter) {
9292 tw32_f(MAC_MODE,
9293 (tp->mac_mode &
9294 ~MAC_MODE_PORT_MODE_MASK));
9295 udelay(40);
9296 tw32_f(MAC_MODE, tp->mac_mode);
9297 udelay(40);
9298 }
9299 tg3_setup_phy(tp, 0);
9300 }
9301 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
9302 tg3_flag(tp, 5780_CLASS)) {
9303 tg3_serdes_parallel_detect(tp);
9304 }
9305
9306 tp->timer_counter = tp->timer_multiplier;
9307 }
9308
9309 /* Heartbeat is only sent once every 2 seconds.
9310 *
9311 * The heartbeat is to tell the ASF firmware that the host
9312 * driver is still alive. In the event that the OS crashes,
9313 * ASF needs to reset the hardware to free up the FIFO space
9314 * that may be filled with rx packets destined for the host.
9315 * If the FIFO is full, ASF will no longer function properly.
9316 *
9317 * Unintended resets have been reported on real time kernels
9318 * where the timer doesn't run on time. Netpoll will also have
9319 	 * the same problem.
9320 *
9321 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
9322 * to check the ring condition when the heartbeat is expiring
9323 * before doing the reset. This will prevent most unintended
9324 * resets.
9325 */
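	/* The alive message itself is a (command, length, data) triple
	 * written into NIC SRAM, followed by a GRC event to wake the
	 * firmware (tg3_generate_fw_event()).
	 */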
9326 if (!--tp->asf_counter) {
9327 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
9328 tg3_wait_for_event_ack(tp);
9329
9330 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
9331 FWCMD_NICDRV_ALIVE3);
9332 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
9333 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
9334 TG3_FW_UPDATE_TIMEOUT_SEC);
9335
9336 tg3_generate_fw_event(tp);
9337 }
9338 tp->asf_counter = tp->asf_multiplier;
9339 }
9340
9341 spin_unlock(&tp->lock);
9342
9343 restart_timer:
9344 tp->timer.expires = jiffies + tp->timer_offset;
9345 add_timer(&tp->timer);
9346 }
9347
9348 static int tg3_request_irq(struct tg3 *tp, int irq_num)
9349 {
9350 irq_handler_t fn;
9351 unsigned long flags;
9352 char *name;
9353 struct tg3_napi *tnapi = &tp->napi[irq_num];
9354
9355 if (tp->irq_cnt == 1)
9356 name = tp->dev->name;
9357 else {
9358 name = &tnapi->irq_lbl[0];
9359 snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
9360 name[IFNAMSIZ-1] = 0;
9361 }
9362
9363 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
9364 fn = tg3_msi;
9365 if (tg3_flag(tp, 1SHOT_MSI))
9366 fn = tg3_msi_1shot;
9367 flags = 0;
9368 } else {
9369 fn = tg3_interrupt;
9370 if (tg3_flag(tp, TAGGED_STATUS))
9371 fn = tg3_interrupt_tagged;
9372 flags = IRQF_SHARED;
9373 }
9374
9375 return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
9376 }
9377
9378 static int tg3_test_interrupt(struct tg3 *tp)
9379 {
9380 struct tg3_napi *tnapi = &tp->napi[0];
9381 struct net_device *dev = tp->dev;
9382 int err, i, intr_ok = 0;
9383 u32 val;
9384
9385 if (!netif_running(dev))
9386 return -ENODEV;
9387
9388 tg3_disable_ints(tp);
9389
9390 free_irq(tnapi->irq_vec, tnapi);
9391
9392 /*
9393 * Turn off MSI one shot mode. Otherwise this test has no
9394 * observable way to know whether the interrupt was delivered.
9395 */
9396 if (tg3_flag(tp, 57765_PLUS)) {
9397 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
9398 tw32(MSGINT_MODE, val);
9399 }
9400
9401 err = request_irq(tnapi->irq_vec, tg3_test_isr,
9402 IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, tnapi);
9403 if (err)
9404 return err;
9405
9406 tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
9407 tg3_enable_ints(tp);
9408
9409 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
9410 tnapi->coal_now);
9411
9412 for (i = 0; i < 5; i++) {
9413 u32 int_mbox, misc_host_ctrl;
9414
9415 int_mbox = tr32_mailbox(tnapi->int_mbox);
9416 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
9417
9418 if ((int_mbox != 0) ||
9419 (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
9420 intr_ok = 1;
9421 break;
9422 }
9423
9424 if (tg3_flag(tp, 57765_PLUS) &&
9425 tnapi->hw_status->status_tag != tnapi->last_tag)
9426 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
9427
9428 msleep(10);
9429 }
9430
9431 tg3_disable_ints(tp);
9432
9433 free_irq(tnapi->irq_vec, tnapi);
9434
9435 err = tg3_request_irq(tp, 0);
9436
9437 if (err)
9438 return err;
9439
9440 if (intr_ok) {
9441 /* Reenable MSI one shot mode. */
9442 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
9443 val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
9444 tw32(MSGINT_MODE, val);
9445 }
9446 return 0;
9447 }
9448
9449 return -EIO;
9450 }
9451
9452 /* Returns 0 if the MSI test succeeds, or if the MSI test fails but
9453  * INTx mode is successfully restored.
9454 */
9455 static int tg3_test_msi(struct tg3 *tp)
9456 {
9457 int err;
9458 u16 pci_cmd;
9459
9460 if (!tg3_flag(tp, USING_MSI))
9461 return 0;
9462
9463 /* Turn off SERR reporting in case MSI terminates with Master
9464 * Abort.
9465 */
9466 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
9467 pci_write_config_word(tp->pdev, PCI_COMMAND,
9468 pci_cmd & ~PCI_COMMAND_SERR);
9469
9470 err = tg3_test_interrupt(tp);
9471
9472 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
9473
9474 if (!err)
9475 return 0;
9476
9477 /* other failures */
9478 if (err != -EIO)
9479 return err;
9480
9481 /* MSI test failed, go back to INTx mode */
9482 netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
9483 "to INTx mode. Please report this failure to the PCI "
9484 "maintainer and include system chipset information\n");
9485
9486 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
9487
9488 pci_disable_msi(tp->pdev);
9489
9490 tg3_flag_clear(tp, USING_MSI);
9491 tp->napi[0].irq_vec = tp->pdev->irq;
9492
9493 err = tg3_request_irq(tp, 0);
9494 if (err)
9495 return err;
9496
9497 /* Need to reset the chip because the MSI cycle may have terminated
9498 * with Master Abort.
9499 */
9500 tg3_full_lock(tp, 1);
9501
9502 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9503 err = tg3_init_hw(tp, 1);
9504
9505 tg3_full_unlock(tp);
9506
9507 if (err)
9508 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
9509
9510 return err;
9511 }
9512
9513 static int tg3_request_firmware(struct tg3 *tp)
9514 {
9515 const __be32 *fw_data;
9516
9517 if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
9518 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
9519 tp->fw_needed);
9520 return -ENOENT;
9521 }
9522
9523 fw_data = (void *)tp->fw->data;
9524
9525 /* Firmware blob starts with version numbers, followed by
9526 * start address and _full_ length including BSS sections
9527 	 * (which must be longer than the actual data, of course).
9528 */
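	/* Presumed layout, matching the checks below: word 0 holds the
	 * version, word 1 the load address, word 2 the full image
	 * length; the 12-byte header is followed by the text/data.
	 */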
9529
9530 tp->fw_len = be32_to_cpu(fw_data[2]); /* includes bss */
9531 if (tp->fw_len < (tp->fw->size - 12)) {
9532 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
9533 tp->fw_len, tp->fw_needed);
9534 release_firmware(tp->fw);
9535 tp->fw = NULL;
9536 return -EINVAL;
9537 }
9538
9539 /* We no longer need firmware; we have it. */
9540 tp->fw_needed = NULL;
9541 return 0;
9542 }
9543
9544 static bool tg3_enable_msix(struct tg3 *tp)
9545 {
9546 int i, rc;
9547 struct msix_entry msix_ent[tp->irq_max];
9548
9549 tp->irq_cnt = num_online_cpus();
9550 if (tp->irq_cnt > 1) {
9551 /* We want as many rx rings enabled as there are cpus.
9552 * In multiqueue MSI-X mode, the first MSI-X vector
9553 * only deals with link interrupts, etc, so we add
9554 * one to the number of vectors we are requesting.
9555 */
9556 tp->irq_cnt = min_t(unsigned, tp->irq_cnt + 1, tp->irq_max);
9557 }
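	/* E.g. on a four-CPU system with irq_max == 5, five vectors are
	 * requested: one for link/misc events plus four for rx.
	 */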
9558
9559 for (i = 0; i < tp->irq_max; i++) {
9560 msix_ent[i].entry = i;
9561 msix_ent[i].vector = 0;
9562 }
9563
9564 rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
9565 if (rc < 0) {
9566 return false;
9567 } else if (rc != 0) {
9568 if (pci_enable_msix(tp->pdev, msix_ent, rc))
9569 return false;
9570 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
9571 tp->irq_cnt, rc);
9572 tp->irq_cnt = rc;
9573 }
9574
9575 for (i = 0; i < tp->irq_max; i++)
9576 tp->napi[i].irq_vec = msix_ent[i].vector;
9577
9578 netif_set_real_num_tx_queues(tp->dev, 1);
9579 rc = tp->irq_cnt > 1 ? tp->irq_cnt - 1 : 1;
9580 if (netif_set_real_num_rx_queues(tp->dev, rc)) {
9581 pci_disable_msix(tp->pdev);
9582 return false;
9583 }
9584
9585 if (tp->irq_cnt > 1) {
9586 tg3_flag_set(tp, ENABLE_RSS);
9587
9588 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
9589 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
9590 tg3_flag_set(tp, ENABLE_TSS);
9591 netif_set_real_num_tx_queues(tp->dev, tp->irq_cnt - 1);
9592 }
9593 }
9594
9595 return true;
9596 }
9597
9598 static void tg3_ints_init(struct tg3 *tp)
9599 {
9600 if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
9601 !tg3_flag(tp, TAGGED_STATUS)) {
9602 /* All MSI supporting chips should support tagged
9603 * status. Assert that this is the case.
9604 */
9605 netdev_warn(tp->dev,
9606 "MSI without TAGGED_STATUS? Not using MSI\n");
9607 goto defcfg;
9608 }
9609
9610 if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
9611 tg3_flag_set(tp, USING_MSIX);
9612 else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
9613 tg3_flag_set(tp, USING_MSI);
9614
9615 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
9616 u32 msi_mode = tr32(MSGINT_MODE);
9617 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
9618 msi_mode |= MSGINT_MODE_MULTIVEC_EN;
9619 if (!tg3_flag(tp, 1SHOT_MSI))
9620 msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
9621 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
9622 }
9623 defcfg:
9624 if (!tg3_flag(tp, USING_MSIX)) {
9625 tp->irq_cnt = 1;
9626 tp->napi[0].irq_vec = tp->pdev->irq;
9627 netif_set_real_num_tx_queues(tp->dev, 1);
9628 netif_set_real_num_rx_queues(tp->dev, 1);
9629 }
9630 }
9631
9632 static void tg3_ints_fini(struct tg3 *tp)
9633 {
9634 if (tg3_flag(tp, USING_MSIX))
9635 pci_disable_msix(tp->pdev);
9636 else if (tg3_flag(tp, USING_MSI))
9637 pci_disable_msi(tp->pdev);
9638 tg3_flag_clear(tp, USING_MSI);
9639 tg3_flag_clear(tp, USING_MSIX);
9640 tg3_flag_clear(tp, ENABLE_RSS);
9641 tg3_flag_clear(tp, ENABLE_TSS);
9642 }
9643
9644 static int tg3_open(struct net_device *dev)
9645 {
9646 struct tg3 *tp = netdev_priv(dev);
9647 int i, err;
9648
9649 if (tp->fw_needed) {
9650 err = tg3_request_firmware(tp);
9651 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
9652 if (err)
9653 return err;
9654 } else if (err) {
9655 netdev_warn(tp->dev, "TSO capability disabled\n");
9656 tg3_flag_clear(tp, TSO_CAPABLE);
9657 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
9658 netdev_notice(tp->dev, "TSO capability restored\n");
9659 tg3_flag_set(tp, TSO_CAPABLE);
9660 }
9661 }
9662
9663 netif_carrier_off(tp->dev);
9664
9665 err = tg3_power_up(tp);
9666 if (err)
9667 return err;
9668
9669 tg3_full_lock(tp, 0);
9670
9671 tg3_disable_ints(tp);
9672 tg3_flag_clear(tp, INIT_COMPLETE);
9673
9674 tg3_full_unlock(tp);
9675
9676 /*
9677 * Setup interrupts first so we know how
9678 * many NAPI resources to allocate
9679 */
9680 tg3_ints_init(tp);
9681
9682 tg3_rss_check_indir_tbl(tp);
9683
9684 /* The placement of this call is tied
9685 * to the setup and use of Host TX descriptors.
9686 */
9687 err = tg3_alloc_consistent(tp);
9688 if (err)
9689 goto err_out1;
9690
9691 tg3_napi_init(tp);
9692
9693 tg3_napi_enable(tp);
9694
9695 for (i = 0; i < tp->irq_cnt; i++) {
9696 struct tg3_napi *tnapi = &tp->napi[i];
9697 err = tg3_request_irq(tp, i);
9698 if (err) {
9699 for (i--; i >= 0; i--) {
9700 tnapi = &tp->napi[i];
9701 free_irq(tnapi->irq_vec, tnapi);
9702 }
9703 goto err_out2;
9704 }
9705 }
9706
9707 tg3_full_lock(tp, 0);
9708
9709 err = tg3_init_hw(tp, 1);
9710 if (err) {
9711 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9712 tg3_free_rings(tp);
9713 } else {
9714 if (tg3_flag(tp, TAGGED_STATUS) &&
9715 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
9716 !tg3_flag(tp, 57765_CLASS))
9717 tp->timer_offset = HZ;
9718 else
9719 tp->timer_offset = HZ / 10;
9720
9721 BUG_ON(tp->timer_offset > HZ);
9722 tp->timer_counter = tp->timer_multiplier =
9723 (HZ / tp->timer_offset);
9724 tp->asf_counter = tp->asf_multiplier =
9725 ((HZ / tp->timer_offset) * 2);
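		/* E.g. with HZ == 1000 and tagged status, the timer fires
		 * once per second (multiplier 1) and the ASF heartbeat
		 * runs every other firing; in the HZ/10 case the once-
		 * per-second work runs on every tenth firing instead.
		 */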
9726
9727 init_timer(&tp->timer);
9728 tp->timer.expires = jiffies + tp->timer_offset;
9729 tp->timer.data = (unsigned long) tp;
9730 tp->timer.function = tg3_timer;
9731 }
9732
9733 tg3_full_unlock(tp);
9734
9735 if (err)
9736 goto err_out3;
9737
9738 if (tg3_flag(tp, USING_MSI)) {
9739 err = tg3_test_msi(tp);
9740
9741 if (err) {
9742 tg3_full_lock(tp, 0);
9743 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9744 tg3_free_rings(tp);
9745 tg3_full_unlock(tp);
9746
9747 goto err_out2;
9748 }
9749
9750 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
9751 u32 val = tr32(PCIE_TRANSACTION_CFG);
9752
9753 tw32(PCIE_TRANSACTION_CFG,
9754 val | PCIE_TRANS_CFG_1SHOT_MSI);
9755 }
9756 }
9757
9758 tg3_phy_start(tp);
9759
9760 tg3_full_lock(tp, 0);
9761
9762 add_timer(&tp->timer);
9763 tg3_flag_set(tp, INIT_COMPLETE);
9764 tg3_enable_ints(tp);
9765
9766 tg3_full_unlock(tp);
9767
9768 netif_tx_start_all_queues(dev);
9769
9770 /*
9771 	 * Reset the loopback feature if it was turned on while the device was
9772 	 * down; make sure that it's installed properly now.
9773 */
9774 if (dev->features & NETIF_F_LOOPBACK)
9775 tg3_set_loopback(dev, dev->features);
9776
9777 return 0;
9778
9779 err_out3:
9780 for (i = tp->irq_cnt - 1; i >= 0; i--) {
9781 struct tg3_napi *tnapi = &tp->napi[i];
9782 free_irq(tnapi->irq_vec, tnapi);
9783 }
9784
9785 err_out2:
9786 tg3_napi_disable(tp);
9787 tg3_napi_fini(tp);
9788 tg3_free_consistent(tp);
9789
9790 err_out1:
9791 tg3_ints_fini(tp);
9792 tg3_frob_aux_power(tp, false);
9793 pci_set_power_state(tp->pdev, PCI_D3hot);
9794 return err;
9795 }
9796
9797 static int tg3_close(struct net_device *dev)
9798 {
9799 int i;
9800 struct tg3 *tp = netdev_priv(dev);
9801
9802 tg3_napi_disable(tp);
9803 tg3_reset_task_cancel(tp);
9804
9805 netif_tx_stop_all_queues(dev);
9806
9807 del_timer_sync(&tp->timer);
9808
9809 tg3_phy_stop(tp);
9810
9811 tg3_full_lock(tp, 1);
9812
9813 tg3_disable_ints(tp);
9814
9815 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9816 tg3_free_rings(tp);
9817 tg3_flag_clear(tp, INIT_COMPLETE);
9818
9819 tg3_full_unlock(tp);
9820
9821 for (i = tp->irq_cnt - 1; i >= 0; i--) {
9822 struct tg3_napi *tnapi = &tp->napi[i];
9823 free_irq(tnapi->irq_vec, tnapi);
9824 }
9825
9826 tg3_ints_fini(tp);
9827
9828 /* Clear stats across close / open calls */
9829 memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
9830 memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));
9831
9832 tg3_napi_fini(tp);
9833
9834 tg3_free_consistent(tp);
9835
9836 tg3_power_down(tp);
9837
9838 netif_carrier_off(tp->dev);
9839
9840 return 0;
9841 }
9842
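/* Hardware statistics are maintained as {high, low} pairs of 32-bit
 * halves; splice them back together into a u64.
 */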
9843 static inline u64 get_stat64(tg3_stat64_t *val)
9844 {
9845 return ((u64)val->high << 32) | ((u64)val->low);
9846 }
9847
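/* On 5700/5701 with a copper PHY, the MAC's FCS statistic is not used;
 * instead the PHY's receive-error counter is read by setting
 * MII_TG3_TEST1_CRC_EN and then reading MII_TG3_RXR_COUNTERS.  That
 * counter appears to be clear-on-read, hence the accumulation into
 * tp->phy_crc_errors.  All other chips report the MAC's rx_fcs_errors.
 */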
9848 static u64 tg3_calc_crc_errors(struct tg3 *tp)
9849 {
9850 struct tg3_hw_stats *hw_stats = tp->hw_stats;
9851
9852 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9853 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
9854 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
9855 u32 val;
9856
9857 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
9858 tg3_writephy(tp, MII_TG3_TEST1,
9859 val | MII_TG3_TEST1_CRC_EN);
9860 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
9861 } else
9862 val = 0;
9863
9864 tp->phy_crc_errors += val;
9865
9866 return tp->phy_crc_errors;
9867 }
9868
9869 return get_stat64(&hw_stats->rx_fcs_errors);
9870 }
9871
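/* The hardware counters are zeroed by every chip reset, so fold the
 * live 64-bit counter into the snapshot taken before the last reset
 * (tp->estats_prev); the sums are deliberately cleared across
 * close()/open() in tg3_close().
 */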
9872 #define ESTAT_ADD(member) \
9873 estats->member = old_estats->member + \
9874 get_stat64(&hw_stats->member)
9875
9876 static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
9877 {
9878 struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
9879 struct tg3_hw_stats *hw_stats = tp->hw_stats;
9880
9881 if (!hw_stats)
9882 return;
9883
9884 ESTAT_ADD(rx_octets);
9885 ESTAT_ADD(rx_fragments);
9886 ESTAT_ADD(rx_ucast_packets);
9887 ESTAT_ADD(rx_mcast_packets);
9888 ESTAT_ADD(rx_bcast_packets);
9889 ESTAT_ADD(rx_fcs_errors);
9890 ESTAT_ADD(rx_align_errors);
9891 ESTAT_ADD(rx_xon_pause_rcvd);
9892 ESTAT_ADD(rx_xoff_pause_rcvd);
9893 ESTAT_ADD(rx_mac_ctrl_rcvd);
9894 ESTAT_ADD(rx_xoff_entered);
9895 ESTAT_ADD(rx_frame_too_long_errors);
9896 ESTAT_ADD(rx_jabbers);
9897 ESTAT_ADD(rx_undersize_packets);
9898 ESTAT_ADD(rx_in_length_errors);
9899 ESTAT_ADD(rx_out_length_errors);
9900 ESTAT_ADD(rx_64_or_less_octet_packets);
9901 ESTAT_ADD(rx_65_to_127_octet_packets);
9902 ESTAT_ADD(rx_128_to_255_octet_packets);
9903 ESTAT_ADD(rx_256_to_511_octet_packets);
9904 ESTAT_ADD(rx_512_to_1023_octet_packets);
9905 ESTAT_ADD(rx_1024_to_1522_octet_packets);
9906 ESTAT_ADD(rx_1523_to_2047_octet_packets);
9907 ESTAT_ADD(rx_2048_to_4095_octet_packets);
9908 ESTAT_ADD(rx_4096_to_8191_octet_packets);
9909 ESTAT_ADD(rx_8192_to_9022_octet_packets);
9910
9911 ESTAT_ADD(tx_octets);
9912 ESTAT_ADD(tx_collisions);
9913 ESTAT_ADD(tx_xon_sent);
9914 ESTAT_ADD(tx_xoff_sent);
9915 ESTAT_ADD(tx_flow_control);
9916 ESTAT_ADD(tx_mac_errors);
9917 ESTAT_ADD(tx_single_collisions);
9918 ESTAT_ADD(tx_mult_collisions);
9919 ESTAT_ADD(tx_deferred);
9920 ESTAT_ADD(tx_excessive_collisions);
9921 ESTAT_ADD(tx_late_collisions);
9922 ESTAT_ADD(tx_collide_2times);
9923 ESTAT_ADD(tx_collide_3times);
9924 ESTAT_ADD(tx_collide_4times);
9925 ESTAT_ADD(tx_collide_5times);
9926 ESTAT_ADD(tx_collide_6times);
9927 ESTAT_ADD(tx_collide_7times);
9928 ESTAT_ADD(tx_collide_8times);
9929 ESTAT_ADD(tx_collide_9times);
9930 ESTAT_ADD(tx_collide_10times);
9931 ESTAT_ADD(tx_collide_11times);
9932 ESTAT_ADD(tx_collide_12times);
9933 ESTAT_ADD(tx_collide_13times);
9934 ESTAT_ADD(tx_collide_14times);
9935 ESTAT_ADD(tx_collide_15times);
9936 ESTAT_ADD(tx_ucast_packets);
9937 ESTAT_ADD(tx_mcast_packets);
9938 ESTAT_ADD(tx_bcast_packets);
9939 ESTAT_ADD(tx_carrier_sense_errors);
9940 ESTAT_ADD(tx_discards);
9941 ESTAT_ADD(tx_errors);
9942
9943 ESTAT_ADD(dma_writeq_full);
9944 ESTAT_ADD(dma_write_prioq_full);
9945 ESTAT_ADD(rxbds_empty);
9946 ESTAT_ADD(rx_discards);
9947 ESTAT_ADD(rx_errors);
9948 ESTAT_ADD(rx_threshold_hit);
9949
9950 ESTAT_ADD(dma_readq_full);
9951 ESTAT_ADD(dma_read_prioq_full);
9952 ESTAT_ADD(tx_comp_queue_full);
9953
9954 ESTAT_ADD(ring_set_send_prod_index);
9955 ESTAT_ADD(ring_status_update);
9956 ESTAT_ADD(nic_irqs);
9957 ESTAT_ADD(nic_avoided_irqs);
9958 ESTAT_ADD(nic_tx_threshold_hit);
9959
9960 ESTAT_ADD(mbuf_lwm_thresh_hit);
9961 }
9962
9963 static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
9964 {
9965 struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
9966 struct tg3_hw_stats *hw_stats = tp->hw_stats;
9967
9968 stats->rx_packets = old_stats->rx_packets +
9969 get_stat64(&hw_stats->rx_ucast_packets) +
9970 get_stat64(&hw_stats->rx_mcast_packets) +
9971 get_stat64(&hw_stats->rx_bcast_packets);
9972
9973 stats->tx_packets = old_stats->tx_packets +
9974 get_stat64(&hw_stats->tx_ucast_packets) +
9975 get_stat64(&hw_stats->tx_mcast_packets) +
9976 get_stat64(&hw_stats->tx_bcast_packets);
9977
9978 stats->rx_bytes = old_stats->rx_bytes +
9979 get_stat64(&hw_stats->rx_octets);
9980 stats->tx_bytes = old_stats->tx_bytes +
9981 get_stat64(&hw_stats->tx_octets);
9982
9983 stats->rx_errors = old_stats->rx_errors +
9984 get_stat64(&hw_stats->rx_errors);
9985 stats->tx_errors = old_stats->tx_errors +
9986 get_stat64(&hw_stats->tx_errors) +
9987 get_stat64(&hw_stats->tx_mac_errors) +
9988 get_stat64(&hw_stats->tx_carrier_sense_errors) +
9989 get_stat64(&hw_stats->tx_discards);
9990
9991 stats->multicast = old_stats->multicast +
9992 get_stat64(&hw_stats->rx_mcast_packets);
9993 stats->collisions = old_stats->collisions +
9994 get_stat64(&hw_stats->tx_collisions);
9995
9996 stats->rx_length_errors = old_stats->rx_length_errors +
9997 get_stat64(&hw_stats->rx_frame_too_long_errors) +
9998 get_stat64(&hw_stats->rx_undersize_packets);
9999
10000 stats->rx_over_errors = old_stats->rx_over_errors +
10001 get_stat64(&hw_stats->rxbds_empty);
10002 stats->rx_frame_errors = old_stats->rx_frame_errors +
10003 get_stat64(&hw_stats->rx_align_errors);
10004 stats->tx_aborted_errors = old_stats->tx_aborted_errors +
10005 get_stat64(&hw_stats->tx_discards);
10006 stats->tx_carrier_errors = old_stats->tx_carrier_errors +
10007 get_stat64(&hw_stats->tx_carrier_sense_errors);
10008
10009 stats->rx_crc_errors = old_stats->rx_crc_errors +
10010 tg3_calc_crc_errors(tp);
10011
10012 stats->rx_missed_errors = old_stats->rx_missed_errors +
10013 get_stat64(&hw_stats->rx_discards);
10014
10015 stats->rx_dropped = tp->rx_dropped;
10016 stats->tx_dropped = tp->tx_dropped;
10017 }
10018
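/* Standard bit-reflected CRC-32 (polynomial 0xedb88320), computed one
 * bit at a time.  This matches the Ethernet FCS CRC and feeds the
 * 128-bit multicast hash filter in __tg3_set_rx_mode() below.
 */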
10019 static inline u32 calc_crc(unsigned char *buf, int len)
10020 {
10021 u32 reg;
10022 u32 tmp;
10023 int j, k;
10024
10025 reg = 0xffffffff;
10026
10027 for (j = 0; j < len; j++) {
10028 reg ^= buf[j];
10029
10030 for (k = 0; k < 8; k++) {
10031 tmp = reg & 0x01;
10032
10033 reg >>= 1;
10034
10035 if (tmp)
10036 reg ^= 0xedb88320;
10037 }
10038 }
10039
10040 return ~reg;
10041 }
10042
10043 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
10044 {
10045 /* accept or reject all multicast frames */
10046 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
10047 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
10048 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
10049 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
10050 }
10051
10052 static void __tg3_set_rx_mode(struct net_device *dev)
10053 {
10054 struct tg3 *tp = netdev_priv(dev);
10055 u32 rx_mode;
10056
10057 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
10058 RX_MODE_KEEP_VLAN_TAG);
10059
10060 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
10061 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
10062 * flag clear.
10063 */
10064 if (!tg3_flag(tp, ENABLE_ASF))
10065 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
10066 #endif
10067
10068 if (dev->flags & IFF_PROMISC) {
10069 /* Promiscuous mode. */
10070 rx_mode |= RX_MODE_PROMISC;
10071 } else if (dev->flags & IFF_ALLMULTI) {
10072 /* Accept all multicast. */
10073 tg3_set_multi(tp, 1);
10074 } else if (netdev_mc_empty(dev)) {
10075 /* Reject all multicast. */
10076 tg3_set_multi(tp, 0);
10077 } else {
10078 /* Accept one or more multicast(s). */
10079 struct netdev_hw_addr *ha;
10080 u32 mc_filter[4] = { 0, };
10081 u32 regidx;
10082 u32 bit;
10083 u32 crc;
10084
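		/* Hash each address with CRC-32 and use the inverted low
		 * 7 bits of the result to pick one of 128 filter bits
		 * spread across the four MAC_HASH registers: bits 6:5
		 * select the register, bits 4:0 the bit within it.
		 */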
10085 netdev_for_each_mc_addr(ha, dev) {
10086 crc = calc_crc(ha->addr, ETH_ALEN);
10087 bit = ~crc & 0x7f;
10088 regidx = (bit & 0x60) >> 5;
10089 bit &= 0x1f;
10090 mc_filter[regidx] |= (1 << bit);
10091 }
10092
10093 tw32(MAC_HASH_REG_0, mc_filter[0]);
10094 tw32(MAC_HASH_REG_1, mc_filter[1]);
10095 tw32(MAC_HASH_REG_2, mc_filter[2]);
10096 tw32(MAC_HASH_REG_3, mc_filter[3]);
10097 }
10098
10099 if (rx_mode != tp->rx_mode) {
10100 tp->rx_mode = rx_mode;
10101 tw32_f(MAC_RX_MODE, rx_mode);
10102 udelay(10);
10103 }
10104 }
10105
10106 static void tg3_set_rx_mode(struct net_device *dev)
10107 {
10108 struct tg3 *tp = netdev_priv(dev);
10109
10110 if (!netif_running(dev))
10111 return;
10112
10113 tg3_full_lock(tp, 0);
10114 __tg3_set_rx_mode(dev);
10115 tg3_full_unlock(tp);
10116 }
10117
10118 static int tg3_get_regs_len(struct net_device *dev)
10119 {
10120 return TG3_REG_BLK_SIZE;
10121 }
10122
10123 static void tg3_get_regs(struct net_device *dev,
10124 struct ethtool_regs *regs, void *_p)
10125 {
10126 struct tg3 *tp = netdev_priv(dev);
10127
10128 regs->version = 0;
10129
10130 memset(_p, 0, TG3_REG_BLK_SIZE);
10131
10132 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10133 return;
10134
10135 tg3_full_lock(tp, 0);
10136
10137 tg3_dump_legacy_regs(tp, (u32 *)_p);
10138
10139 tg3_full_unlock(tp);
10140 }
10141
10142 static int tg3_get_eeprom_len(struct net_device *dev)
10143 {
10144 struct tg3 *tp = netdev_priv(dev);
10145
10146 return tp->nvram_size;
10147 }
10148
10149 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
10150 {
10151 struct tg3 *tp = netdev_priv(dev);
10152 int ret;
10153 u8 *pd;
10154 u32 i, offset, len, b_offset, b_count;
10155 __be32 val;
10156
10157 if (tg3_flag(tp, NO_NVRAM))
10158 return -EINVAL;
10159
10160 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10161 return -EAGAIN;
10162
10163 offset = eeprom->offset;
10164 len = eeprom->len;
10165 eeprom->len = 0;
10166
10167 eeprom->magic = TG3_EEPROM_MAGIC;
10168
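	/* NVRAM is read one aligned 4-byte word at a time, so an
	 * unaligned request is split into three phases: a partial
	 * leading word, whole aligned words, and a partial trailing
	 * word.  E.g. offset=6 len=9 takes bytes 6-7 from the word at
	 * 4, bytes 8-11 whole, and bytes 12-14 from the word at 12.
	 */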
10169 if (offset & 3) {
10170 /* adjustments to start on required 4 byte boundary */
10171 b_offset = offset & 3;
10172 b_count = 4 - b_offset;
10173 if (b_count > len) {
10174 /* i.e. offset=1 len=2 */
10175 b_count = len;
10176 }
10177 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
10178 if (ret)
10179 return ret;
10180 memcpy(data, ((char *)&val) + b_offset, b_count);
10181 len -= b_count;
10182 offset += b_count;
10183 eeprom->len += b_count;
10184 }
10185
10186 /* read bytes up to the last 4 byte boundary */
10187 pd = &data[eeprom->len];
10188 for (i = 0; i < (len - (len & 3)); i += 4) {
10189 ret = tg3_nvram_read_be32(tp, offset + i, &val);
10190 if (ret) {
10191 eeprom->len += i;
10192 return ret;
10193 }
10194 memcpy(pd + i, &val, 4);
10195 }
10196 eeprom->len += i;
10197
10198 if (len & 3) {
10199 /* read last bytes not ending on 4 byte boundary */
10200 pd = &data[eeprom->len];
10201 b_count = len & 3;
10202 b_offset = offset + len - b_count;
10203 ret = tg3_nvram_read_be32(tp, b_offset, &val);
10204 if (ret)
10205 return ret;
10206 memcpy(pd, &val, b_count);
10207 eeprom->len += b_count;
10208 }
10209 return 0;
10210 }
10211
10212 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
10213
10214 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
10215 {
10216 struct tg3 *tp = netdev_priv(dev);
10217 int ret;
10218 u32 offset, len, b_offset, odd_len;
10219 u8 *buf;
10220 __be32 start, end;
10221
10222 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10223 return -EAGAIN;
10224
10225 if (tg3_flag(tp, NO_NVRAM) ||
10226 eeprom->magic != TG3_EEPROM_MAGIC)
10227 return -EINVAL;
10228
10229 offset = eeprom->offset;
10230 len = eeprom->len;
10231
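	/* NVRAM writes are whole aligned words, so an unaligned window
	 * becomes a read-modify-write of the enclosing aligned span:
	 * fetch the bracketing word(s) first and merge the caller's
	 * data over them below.
	 */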
10232 if ((b_offset = (offset & 3))) {
10233 /* adjustments to start on required 4 byte boundary */
10234 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
10235 if (ret)
10236 return ret;
10237 len += b_offset;
10238 offset &= ~3;
10239 if (len < 4)
10240 len = 4;
10241 }
10242
10243 odd_len = 0;
10244 if (len & 3) {
10245 /* adjustments to end on required 4 byte boundary */
10246 odd_len = 1;
10247 len = (len + 3) & ~3;
10248 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
10249 if (ret)
10250 return ret;
10251 }
10252
10253 buf = data;
10254 if (b_offset || odd_len) {
10255 buf = kmalloc(len, GFP_KERNEL);
10256 if (!buf)
10257 return -ENOMEM;
10258 if (b_offset)
10259 memcpy(buf, &start, 4);
10260 if (odd_len)
10261 memcpy(buf+len-4, &end, 4);
10262 memcpy(buf + b_offset, data, eeprom->len);
10263 }
10264
10265 ret = tg3_nvram_write_block(tp, offset, len, buf);
10266
10267 if (buf != data)
10268 kfree(buf);
10269
10270 return ret;
10271 }
10272
10273 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
10274 {
10275 struct tg3 *tp = netdev_priv(dev);
10276
10277 if (tg3_flag(tp, USE_PHYLIB)) {
10278 struct phy_device *phydev;
10279 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10280 return -EAGAIN;
10281 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10282 return phy_ethtool_gset(phydev, cmd);
10283 }
10284
10285 cmd->supported = (SUPPORTED_Autoneg);
10286
10287 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
10288 cmd->supported |= (SUPPORTED_1000baseT_Half |
10289 SUPPORTED_1000baseT_Full);
10290
10291 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
10292 cmd->supported |= (SUPPORTED_100baseT_Half |
10293 SUPPORTED_100baseT_Full |
10294 SUPPORTED_10baseT_Half |
10295 SUPPORTED_10baseT_Full |
10296 SUPPORTED_TP);
10297 cmd->port = PORT_TP;
10298 } else {
10299 cmd->supported |= SUPPORTED_FIBRE;
10300 cmd->port = PORT_FIBRE;
10301 }
10302
10303 cmd->advertising = tp->link_config.advertising;
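	/* Report flow control with the standard 802.3 pause encoding:
	 * rx+tx -> Pause, rx-only -> Pause|Asym_Pause, and tx-only ->
	 * Asym_Pause.
	 */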
10304 if (tg3_flag(tp, PAUSE_AUTONEG)) {
10305 if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
10306 if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
10307 cmd->advertising |= ADVERTISED_Pause;
10308 } else {
10309 cmd->advertising |= ADVERTISED_Pause |
10310 ADVERTISED_Asym_Pause;
10311 }
10312 } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
10313 cmd->advertising |= ADVERTISED_Asym_Pause;
10314 }
10315 }
10316 if (netif_running(dev) && netif_carrier_ok(dev)) {
10317 ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
10318 cmd->duplex = tp->link_config.active_duplex;
10319 cmd->lp_advertising = tp->link_config.rmt_adv;
10320 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
10321 if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
10322 cmd->eth_tp_mdix = ETH_TP_MDI_X;
10323 else
10324 cmd->eth_tp_mdix = ETH_TP_MDI;
10325 }
10326 } else {
10327 ethtool_cmd_speed_set(cmd, SPEED_INVALID);
10328 cmd->duplex = DUPLEX_INVALID;
10329 cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
10330 }
10331 cmd->phy_address = tp->phy_addr;
10332 cmd->transceiver = XCVR_INTERNAL;
10333 cmd->autoneg = tp->link_config.autoneg;
10334 cmd->maxtxpkt = 0;
10335 cmd->maxrxpkt = 0;
10336 return 0;
10337 }
10338
10339 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
10340 {
10341 struct tg3 *tp = netdev_priv(dev);
10342 u32 speed = ethtool_cmd_speed(cmd);
10343
10344 if (tg3_flag(tp, USE_PHYLIB)) {
10345 struct phy_device *phydev;
10346 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10347 return -EAGAIN;
10348 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10349 return phy_ethtool_sset(phydev, cmd);
10350 }
10351
10352 if (cmd->autoneg != AUTONEG_ENABLE &&
10353 cmd->autoneg != AUTONEG_DISABLE)
10354 return -EINVAL;
10355
10356 if (cmd->autoneg == AUTONEG_DISABLE &&
10357 cmd->duplex != DUPLEX_FULL &&
10358 cmd->duplex != DUPLEX_HALF)
10359 return -EINVAL;
10360
10361 if (cmd->autoneg == AUTONEG_ENABLE) {
10362 u32 mask = ADVERTISED_Autoneg |
10363 ADVERTISED_Pause |
10364 ADVERTISED_Asym_Pause;
10365
10366 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
10367 mask |= ADVERTISED_1000baseT_Half |
10368 ADVERTISED_1000baseT_Full;
10369
10370 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
10371 mask |= ADVERTISED_100baseT_Half |
10372 ADVERTISED_100baseT_Full |
10373 ADVERTISED_10baseT_Half |
10374 ADVERTISED_10baseT_Full |
10375 ADVERTISED_TP;
10376 else
10377 mask |= ADVERTISED_FIBRE;
10378
10379 if (cmd->advertising & ~mask)
10380 return -EINVAL;
10381
10382 mask &= (ADVERTISED_1000baseT_Half |
10383 ADVERTISED_1000baseT_Full |
10384 ADVERTISED_100baseT_Half |
10385 ADVERTISED_100baseT_Full |
10386 ADVERTISED_10baseT_Half |
10387 ADVERTISED_10baseT_Full);
10388
10389 cmd->advertising &= mask;
10390 } else {
10391 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
10392 if (speed != SPEED_1000)
10393 return -EINVAL;
10394
10395 if (cmd->duplex != DUPLEX_FULL)
10396 return -EINVAL;
10397 } else {
10398 if (speed != SPEED_100 &&
10399 speed != SPEED_10)
10400 return -EINVAL;
10401 }
10402 }
10403
10404 tg3_full_lock(tp, 0);
10405
10406 tp->link_config.autoneg = cmd->autoneg;
10407 if (cmd->autoneg == AUTONEG_ENABLE) {
10408 tp->link_config.advertising = (cmd->advertising |
10409 ADVERTISED_Autoneg);
10410 tp->link_config.speed = SPEED_INVALID;
10411 tp->link_config.duplex = DUPLEX_INVALID;
10412 } else {
10413 tp->link_config.advertising = 0;
10414 tp->link_config.speed = speed;
10415 tp->link_config.duplex = cmd->duplex;
10416 }
10417
10418 tp->link_config.orig_speed = tp->link_config.speed;
10419 tp->link_config.orig_duplex = tp->link_config.duplex;
10420 tp->link_config.orig_autoneg = tp->link_config.autoneg;
10421
10422 if (netif_running(dev))
10423 tg3_setup_phy(tp, 1);
10424
10425 tg3_full_unlock(tp);
10426
10427 return 0;
10428 }
10429
10430 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
10431 {
10432 struct tg3 *tp = netdev_priv(dev);
10433
10434 strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
10435 strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
10436 strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
10437 strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
10438 }
10439
10440 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10441 {
10442 struct tg3 *tp = netdev_priv(dev);
10443
10444 if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
10445 wol->supported = WAKE_MAGIC;
10446 else
10447 wol->supported = 0;
10448 wol->wolopts = 0;
10449 if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
10450 wol->wolopts = WAKE_MAGIC;
10451 memset(&wol->sopass, 0, sizeof(wol->sopass));
10452 }
10453
10454 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10455 {
10456 struct tg3 *tp = netdev_priv(dev);
10457 struct device *dp = &tp->pdev->dev;
10458
10459 if (wol->wolopts & ~WAKE_MAGIC)
10460 return -EINVAL;
10461 if ((wol->wolopts & WAKE_MAGIC) &&
10462 !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
10463 return -EINVAL;
10464
10465 device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
10466
10467 spin_lock_bh(&tp->lock);
10468 if (device_may_wakeup(dp))
10469 tg3_flag_set(tp, WOL_ENABLE);
10470 else
10471 tg3_flag_clear(tp, WOL_ENABLE);
10472 spin_unlock_bh(&tp->lock);
10473
10474 return 0;
10475 }
10476
10477 static u32 tg3_get_msglevel(struct net_device *dev)
10478 {
10479 struct tg3 *tp = netdev_priv(dev);
10480 return tp->msg_enable;
10481 }
10482
10483 static void tg3_set_msglevel(struct net_device *dev, u32 value)
10484 {
10485 struct tg3 *tp = netdev_priv(dev);
10486 tp->msg_enable = value;
10487 }
10488
10489 static int tg3_nway_reset(struct net_device *dev)
10490 {
10491 struct tg3 *tp = netdev_priv(dev);
10492 int r;
10493
10494 if (!netif_running(dev))
10495 return -EAGAIN;
10496
10497 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
10498 return -EINVAL;
10499
10500 if (tg3_flag(tp, USE_PHYLIB)) {
10501 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10502 return -EAGAIN;
10503 r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
10504 } else {
10505 u32 bmcr;
10506
10507 spin_lock_bh(&tp->lock);
10508 r = -EINVAL;
10509 tg3_readphy(tp, MII_BMCR, &bmcr);
10510 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
10511 ((bmcr & BMCR_ANENABLE) ||
10512 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
10513 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
10514 BMCR_ANENABLE);
10515 r = 0;
10516 }
10517 spin_unlock_bh(&tp->lock);
10518 }
10519
10520 return r;
10521 }
10522
10523 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
10524 {
10525 struct tg3 *tp = netdev_priv(dev);
10526
10527 ering->rx_max_pending = tp->rx_std_ring_mask;
10528 if (tg3_flag(tp, JUMBO_RING_ENABLE))
10529 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
10530 else
10531 ering->rx_jumbo_max_pending = 0;
10532
10533 ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
10534
10535 ering->rx_pending = tp->rx_pending;
10536 if (tg3_flag(tp, JUMBO_RING_ENABLE))
10537 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
10538 else
10539 ering->rx_jumbo_pending = 0;
10540
10541 ering->tx_pending = tp->napi[0].tx_pending;
10542 }
10543
10544 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
10545 {
10546 struct tg3 *tp = netdev_priv(dev);
10547 int i, irq_sync = 0, err = 0;
10548
10549 if ((ering->rx_pending > tp->rx_std_ring_mask) ||
10550 (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
10551 (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
10552 (ering->tx_pending <= MAX_SKB_FRAGS) ||
10553 (tg3_flag(tp, TSO_BUG) &&
10554 (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
10555 return -EINVAL;
10556
10557 if (netif_running(dev)) {
10558 tg3_phy_stop(tp);
10559 tg3_netif_stop(tp);
10560 irq_sync = 1;
10561 }
10562
10563 tg3_full_lock(tp, irq_sync);
10564
10565 tp->rx_pending = ering->rx_pending;
10566
10567 if (tg3_flag(tp, MAX_RXPEND_64) &&
10568 tp->rx_pending > 63)
10569 tp->rx_pending = 63;
10570 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
10571
10572 for (i = 0; i < tp->irq_max; i++)
10573 tp->napi[i].tx_pending = ering->tx_pending;
10574
10575 if (netif_running(dev)) {
10576 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10577 err = tg3_restart_hw(tp, 1);
10578 if (!err)
10579 tg3_netif_start(tp);
10580 }
10581
10582 tg3_full_unlock(tp);
10583
10584 if (irq_sync && !err)
10585 tg3_phy_start(tp);
10586
10587 return err;
10588 }
10589
10590 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
10591 {
10592 struct tg3 *tp = netdev_priv(dev);
10593
10594 epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
10595
10596 if (tp->link_config.flowctrl & FLOW_CTRL_RX)
10597 epause->rx_pause = 1;
10598 else
10599 epause->rx_pause = 0;
10600
10601 if (tp->link_config.flowctrl & FLOW_CTRL_TX)
10602 epause->tx_pause = 1;
10603 else
10604 epause->tx_pause = 0;
10605 }
10606
10607 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
10608 {
10609 struct tg3 *tp = netdev_priv(dev);
10610 int err = 0;
10611
10612 if (tg3_flag(tp, USE_PHYLIB)) {
10613 u32 newadv;
10614 struct phy_device *phydev;
10615
10616 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10617
10618 if (!(phydev->supported & SUPPORTED_Pause) ||
10619 (!(phydev->supported & SUPPORTED_Asym_Pause) &&
10620 (epause->rx_pause != epause->tx_pause)))
10621 return -EINVAL;
10622
10623 tp->link_config.flowctrl = 0;
10624 if (epause->rx_pause) {
10625 tp->link_config.flowctrl |= FLOW_CTRL_RX;
10626
10627 if (epause->tx_pause) {
10628 tp->link_config.flowctrl |= FLOW_CTRL_TX;
10629 newadv = ADVERTISED_Pause;
10630 } else
10631 newadv = ADVERTISED_Pause |
10632 ADVERTISED_Asym_Pause;
10633 } else if (epause->tx_pause) {
10634 tp->link_config.flowctrl |= FLOW_CTRL_TX;
10635 newadv = ADVERTISED_Asym_Pause;
10636 } else
10637 newadv = 0;
10638
10639 if (epause->autoneg)
10640 tg3_flag_set(tp, PAUSE_AUTONEG);
10641 else
10642 tg3_flag_clear(tp, PAUSE_AUTONEG);
10643
10644 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
10645 u32 oldadv = phydev->advertising &
10646 (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
10647 if (oldadv != newadv) {
10648 phydev->advertising &=
10649 ~(ADVERTISED_Pause |
10650 ADVERTISED_Asym_Pause);
10651 phydev->advertising |= newadv;
10652 if (phydev->autoneg) {
10653 /*
10654 * Always renegotiate the link to
10655 * inform our link partner of our
10656 * flow control settings, even if the
10657 * flow control is forced. Let
10658 * tg3_adjust_link() do the final
10659 * flow control setup.
10660 */
10661 return phy_start_aneg(phydev);
10662 }
10663 }
10664
10665 if (!epause->autoneg)
10666 tg3_setup_flow_control(tp, 0, 0);
10667 } else {
10668 tp->link_config.orig_advertising &=
10669 ~(ADVERTISED_Pause |
10670 ADVERTISED_Asym_Pause);
10671 tp->link_config.orig_advertising |= newadv;
10672 }
10673 } else {
10674 int irq_sync = 0;
10675
10676 if (netif_running(dev)) {
10677 tg3_netif_stop(tp);
10678 irq_sync = 1;
10679 }
10680
10681 tg3_full_lock(tp, irq_sync);
10682
10683 if (epause->autoneg)
10684 tg3_flag_set(tp, PAUSE_AUTONEG);
10685 else
10686 tg3_flag_clear(tp, PAUSE_AUTONEG);
10687 if (epause->rx_pause)
10688 tp->link_config.flowctrl |= FLOW_CTRL_RX;
10689 else
10690 tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
10691 if (epause->tx_pause)
10692 tp->link_config.flowctrl |= FLOW_CTRL_TX;
10693 else
10694 tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
10695
10696 if (netif_running(dev)) {
10697 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10698 err = tg3_restart_hw(tp, 1);
10699 if (!err)
10700 tg3_netif_start(tp);
10701 }
10702
10703 tg3_full_unlock(tp);
10704 }
10705
10706 return err;
10707 }
10708
10709 static int tg3_get_sset_count(struct net_device *dev, int sset)
10710 {
10711 switch (sset) {
10712 case ETH_SS_TEST:
10713 return TG3_NUM_TEST;
10714 case ETH_SS_STATS:
10715 return TG3_NUM_STATS;
10716 default:
10717 return -EOPNOTSUPP;
10718 }
10719 }
10720
10721 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
10722 u32 *rules __always_unused)
10723 {
10724 struct tg3 *tp = netdev_priv(dev);
10725
10726 if (!tg3_flag(tp, SUPPORT_MSIX))
10727 return -EOPNOTSUPP;
10728
10729 switch (info->cmd) {
10730 case ETHTOOL_GRXRINGS:
10731 if (netif_running(tp->dev))
10732 info->data = tp->irq_cnt;
10733 else {
10734 info->data = num_online_cpus();
10735 if (info->data > TG3_IRQ_MAX_VECS_RSS)
10736 info->data = TG3_IRQ_MAX_VECS_RSS;
10737 }
10738
10739 /* The first interrupt vector only
10740 * handles link interrupts.
10741 */
10742 info->data -= 1;
10743 return 0;
10744
10745 default:
10746 return -EOPNOTSUPP;
10747 }
10748 }
10749
10750 static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
10751 {
10752 u32 size = 0;
10753 struct tg3 *tp = netdev_priv(dev);
10754
10755 if (tg3_flag(tp, SUPPORT_MSIX))
10756 size = TG3_RSS_INDIR_TBL_SIZE;
10757
10758 return size;
10759 }
10760
10761 static int tg3_get_rxfh_indir(struct net_device *dev, u32 *indir)
10762 {
10763 struct tg3 *tp = netdev_priv(dev);
10764 int i;
10765
10766 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
10767 indir[i] = tp->rss_ind_tbl[i];
10768
10769 return 0;
10770 }
10771
10772 static int tg3_set_rxfh_indir(struct net_device *dev, const u32 *indir)
10773 {
10774 struct tg3 *tp = netdev_priv(dev);
10775 size_t i;
10776
10777 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
10778 tp->rss_ind_tbl[i] = indir[i];
10779
10780 if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
10781 return 0;
10782
10783 /* It is legal to write the indirection
10784 * table while the device is running.
10785 */
10786 tg3_full_lock(tp, 0);
10787 tg3_rss_write_indir_tbl(tp);
10788 tg3_full_unlock(tp);
10789
10790 return 0;
10791 }
10792
10793 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
10794 {
10795 switch (stringset) {
10796 case ETH_SS_STATS:
10797 		memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
10798 break;
10799 case ETH_SS_TEST:
10800 		memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
10801 break;
10802 default:
10803 WARN_ON(1); /* we need a WARN() */
10804 break;
10805 }
10806 }
10807
10808 static int tg3_set_phys_id(struct net_device *dev,
10809 enum ethtool_phys_id_state state)
10810 {
10811 struct tg3 *tp = netdev_priv(dev);
10812
10813 if (!netif_running(tp->dev))
10814 return -EAGAIN;
10815
10816 switch (state) {
10817 case ETHTOOL_ID_ACTIVE:
10818 return 1; /* cycle on/off once per second */
10819
10820 case ETHTOOL_ID_ON:
10821 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
10822 LED_CTRL_1000MBPS_ON |
10823 LED_CTRL_100MBPS_ON |
10824 LED_CTRL_10MBPS_ON |
10825 LED_CTRL_TRAFFIC_OVERRIDE |
10826 LED_CTRL_TRAFFIC_BLINK |
10827 LED_CTRL_TRAFFIC_LED);
10828 break;
10829
10830 case ETHTOOL_ID_OFF:
10831 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
10832 LED_CTRL_TRAFFIC_OVERRIDE);
10833 break;
10834
10835 case ETHTOOL_ID_INACTIVE:
10836 tw32(MAC_LED_CTRL, tp->led_ctrl);
10837 break;
10838 }
10839
10840 return 0;
10841 }
10842
10843 static void tg3_get_ethtool_stats(struct net_device *dev,
10844 struct ethtool_stats *estats, u64 *tmp_stats)
10845 {
10846 struct tg3 *tp = netdev_priv(dev);
10847
10848 tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
10849 }
10850
10851 static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
10852 {
10853 int i;
10854 __be32 *buf;
10855 u32 offset = 0, len = 0;
10856 u32 magic, val;
10857
10858 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
10859 return NULL;
10860
10861 if (magic == TG3_EEPROM_MAGIC) {
10862 for (offset = TG3_NVM_DIR_START;
10863 offset < TG3_NVM_DIR_END;
10864 offset += TG3_NVM_DIRENT_SIZE) {
10865 if (tg3_nvram_read(tp, offset, &val))
10866 return NULL;
10867
10868 if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
10869 TG3_NVM_DIRTYPE_EXTVPD)
10870 break;
10871 }
10872
10873 if (offset != TG3_NVM_DIR_END) {
10874 len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
10875 if (tg3_nvram_read(tp, offset + 4, &offset))
10876 return NULL;
10877
10878 offset = tg3_nvram_logical_addr(tp, offset);
10879 }
10880 }
10881
10882 if (!offset || !len) {
10883 offset = TG3_NVM_VPD_OFF;
10884 len = TG3_NVM_VPD_LEN;
10885 }
10886
10887 buf = kmalloc(len, GFP_KERNEL);
10888 if (buf == NULL)
10889 return NULL;
10890
10891 if (magic == TG3_EEPROM_MAGIC) {
10892 for (i = 0; i < len; i += 4) {
10893 /* The data is in little-endian format in NVRAM.
10894 * Use the big-endian read routines to preserve
10895 * the byte order as it exists in NVRAM.
10896 */
10897 if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
10898 goto error;
10899 }
10900 } else {
10901 u8 *ptr;
10902 ssize_t cnt;
10903 unsigned int pos = 0;
10904
10905 ptr = (u8 *)&buf[0];
10906 for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
10907 cnt = pci_read_vpd(tp->pdev, pos,
10908 len - pos, ptr);
10909 if (cnt == -ETIMEDOUT || cnt == -EINTR)
10910 cnt = 0;
10911 else if (cnt < 0)
10912 goto error;
10913 }
10914 if (pos != len)
10915 goto error;
10916 }
10917
10918 *vpdlen = len;
10919
10920 return buf;
10921
10922 error:
10923 kfree(buf);
10924 return NULL;
10925 }
10926
10927 #define NVRAM_TEST_SIZE 0x100
10928 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14
10929 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18
10930 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c
10931 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE 0x20
10932 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE 0x24
10933 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE 0x50
10934 #define NVRAM_SELFBOOT_HW_SIZE 0x20
10935 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
10936
10937 static int tg3_test_nvram(struct tg3 *tp)
10938 {
10939 u32 csum, magic, len;
10940 __be32 *buf;
10941 int i, j, k, err = 0, size;
10942
10943 if (tg3_flag(tp, NO_NVRAM))
10944 return 0;
10945
10946 if (tg3_nvram_read(tp, 0, &magic) != 0)
10947 return -EIO;
10948
10949 if (magic == TG3_EEPROM_MAGIC)
10950 size = NVRAM_TEST_SIZE;
10951 else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
10952 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
10953 TG3_EEPROM_SB_FORMAT_1) {
10954 switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
10955 case TG3_EEPROM_SB_REVISION_0:
10956 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
10957 break;
10958 case TG3_EEPROM_SB_REVISION_2:
10959 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
10960 break;
10961 case TG3_EEPROM_SB_REVISION_3:
10962 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
10963 break;
10964 case TG3_EEPROM_SB_REVISION_4:
10965 size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
10966 break;
10967 case TG3_EEPROM_SB_REVISION_5:
10968 size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
10969 break;
10970 case TG3_EEPROM_SB_REVISION_6:
10971 size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
10972 break;
10973 default:
10974 return -EIO;
10975 }
10976 } else
10977 return 0;
10978 } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
10979 size = NVRAM_SELFBOOT_HW_SIZE;
10980 else
10981 return -EIO;
10982
10983 buf = kmalloc(size, GFP_KERNEL);
10984 if (buf == NULL)
10985 return -ENOMEM;
10986
10987 err = -EIO;
10988 for (i = 0, j = 0; i < size; i += 4, j++) {
10989 err = tg3_nvram_read_be32(tp, i, &buf[j]);
10990 if (err)
10991 break;
10992 }
10993 if (i < size)
10994 goto out;
10995
10996 /* Selfboot format */
10997 magic = be32_to_cpu(buf[0]);
10998 if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
10999 TG3_EEPROM_MAGIC_FW) {
11000 u8 *buf8 = (u8 *) buf, csum8 = 0;
11001
11002 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
11003 TG3_EEPROM_SB_REVISION_2) {
11004 /* For rev 2, the csum doesn't include the MBA. */
11005 for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
11006 csum8 += buf8[i];
11007 for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
11008 csum8 += buf8[i];
11009 } else {
11010 for (i = 0; i < size; i++)
11011 csum8 += buf8[i];
11012 }
11013
11014 if (csum8 == 0) {
11015 err = 0;
11016 goto out;
11017 }
11018
11019 err = -EIO;
11020 goto out;
11021 }
11022
11023 if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
11024 TG3_EEPROM_MAGIC_HW) {
11025 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
11026 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
11027 u8 *buf8 = (u8 *) buf;
11028
11029 /* Separate the parity bits and the data bytes. */
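		/* In the 0x20-byte selfboot image, bytes 0, 8, 16 and 17
		 * carry 7+7+6+8 = 28 parity bits, one per remaining data
		 * byte; the check below requires each data byte plus its
		 * parity bit to have odd overall parity.
		 */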
11030 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
11031 if ((i == 0) || (i == 8)) {
11032 int l;
11033 u8 msk;
11034
11035 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
11036 parity[k++] = buf8[i] & msk;
11037 i++;
11038 } else if (i == 16) {
11039 int l;
11040 u8 msk;
11041
11042 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
11043 parity[k++] = buf8[i] & msk;
11044 i++;
11045
11046 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
11047 parity[k++] = buf8[i] & msk;
11048 i++;
11049 }
11050 data[j++] = buf8[i];
11051 }
11052
11053 err = -EIO;
11054 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
11055 u8 hw8 = hweight8(data[i]);
11056
11057 if ((hw8 & 0x1) && parity[i])
11058 goto out;
11059 else if (!(hw8 & 0x1) && !parity[i])
11060 goto out;
11061 }
11062 err = 0;
11063 goto out;
11064 }
11065
11066 err = -EIO;
11067
11068 /* Bootstrap checksum at offset 0x10 */
11069 csum = calc_crc((unsigned char *) buf, 0x10);
11070 if (csum != le32_to_cpu(buf[0x10/4]))
11071 goto out;
11072
11073 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
11074 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
11075 if (csum != le32_to_cpu(buf[0xfc/4]))
11076 goto out;
11077
11078 kfree(buf);
11079
11080 buf = tg3_vpd_readblock(tp, &len);
11081 if (!buf)
11082 return -ENOMEM;
11083
11084 i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
11085 if (i > 0) {
11086 j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
11087 if (j < 0)
11088 goto out;
11089
11090 if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
11091 goto out;
11092
11093 i += PCI_VPD_LRDT_TAG_SIZE;
11094 j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
11095 PCI_VPD_RO_KEYWORD_CHKSUM);
11096 if (j > 0) {
11097 u8 csum8 = 0;
11098
11099 j += PCI_VPD_INFO_FLD_HDR_SIZE;
11100
11101 for (i = 0; i <= j; i++)
11102 csum8 += ((u8 *)buf)[i];
11103
11104 if (csum8)
11105 goto out;
11106 }
11107 }
11108
11109 err = 0;
11110
11111 out:
11112 kfree(buf);
11113 return err;
11114 }
11115
11116 #define TG3_SERDES_TIMEOUT_SEC 2
11117 #define TG3_COPPER_TIMEOUT_SEC 6
11118
11119 static int tg3_test_link(struct tg3 *tp)
11120 {
11121 int i, max;
11122
11123 if (!netif_running(tp->dev))
11124 return -ENODEV;
11125
11126 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
11127 max = TG3_SERDES_TIMEOUT_SEC;
11128 else
11129 max = TG3_COPPER_TIMEOUT_SEC;
11130
11131 for (i = 0; i < max; i++) {
11132 if (netif_carrier_ok(tp->dev))
11133 return 0;
11134
11135 if (msleep_interruptible(1000))
11136 break;
11137 }
11138
11139 return -EIO;
11140 }
11141
11142 /* Only test the commonly used registers */
11143 static int tg3_test_registers(struct tg3 *tp)
11144 {
11145 int i, is_5705, is_5750;
11146 u32 offset, read_mask, write_mask, val, save_val, read_val;
11147 static struct {
11148 u16 offset;
11149 u16 flags;
11150 #define TG3_FL_5705 0x1
11151 #define TG3_FL_NOT_5705 0x2
11152 #define TG3_FL_NOT_5788 0x4
11153 #define TG3_FL_NOT_5750 0x8
11154 u32 read_mask;
11155 u32 write_mask;
11156 } reg_tbl[] = {
11157 /* MAC Control Registers */
11158 { MAC_MODE, TG3_FL_NOT_5705,
11159 0x00000000, 0x00ef6f8c },
11160 { MAC_MODE, TG3_FL_5705,
11161 0x00000000, 0x01ef6b8c },
11162 { MAC_STATUS, TG3_FL_NOT_5705,
11163 0x03800107, 0x00000000 },
11164 { MAC_STATUS, TG3_FL_5705,
11165 0x03800100, 0x00000000 },
11166 { MAC_ADDR_0_HIGH, 0x0000,
11167 0x00000000, 0x0000ffff },
11168 { MAC_ADDR_0_LOW, 0x0000,
11169 0x00000000, 0xffffffff },
11170 { MAC_RX_MTU_SIZE, 0x0000,
11171 0x00000000, 0x0000ffff },
11172 { MAC_TX_MODE, 0x0000,
11173 0x00000000, 0x00000070 },
11174 { MAC_TX_LENGTHS, 0x0000,
11175 0x00000000, 0x00003fff },
11176 { MAC_RX_MODE, TG3_FL_NOT_5705,
11177 0x00000000, 0x000007fc },
11178 { MAC_RX_MODE, TG3_FL_5705,
11179 0x00000000, 0x000007dc },
11180 { MAC_HASH_REG_0, 0x0000,
11181 0x00000000, 0xffffffff },
11182 { MAC_HASH_REG_1, 0x0000,
11183 0x00000000, 0xffffffff },
11184 { MAC_HASH_REG_2, 0x0000,
11185 0x00000000, 0xffffffff },
11186 { MAC_HASH_REG_3, 0x0000,
11187 0x00000000, 0xffffffff },
11188
11189 /* Receive Data and Receive BD Initiator Control Registers. */
11190 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
11191 0x00000000, 0xffffffff },
11192 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
11193 0x00000000, 0xffffffff },
11194 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
11195 0x00000000, 0x00000003 },
11196 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
11197 0x00000000, 0xffffffff },
11198 { RCVDBDI_STD_BD+0, 0x0000,
11199 0x00000000, 0xffffffff },
11200 { RCVDBDI_STD_BD+4, 0x0000,
11201 0x00000000, 0xffffffff },
11202 { RCVDBDI_STD_BD+8, 0x0000,
11203 0x00000000, 0xffff0002 },
11204 { RCVDBDI_STD_BD+0xc, 0x0000,
11205 0x00000000, 0xffffffff },
11206
11207 /* Receive BD Initiator Control Registers. */
11208 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
11209 0x00000000, 0xffffffff },
11210 { RCVBDI_STD_THRESH, TG3_FL_5705,
11211 0x00000000, 0x000003ff },
11212 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
11213 0x00000000, 0xffffffff },
11214
11215 /* Host Coalescing Control Registers. */
11216 { HOSTCC_MODE, TG3_FL_NOT_5705,
11217 0x00000000, 0x00000004 },
11218 { HOSTCC_MODE, TG3_FL_5705,
11219 0x00000000, 0x000000f6 },
11220 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
11221 0x00000000, 0xffffffff },
11222 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
11223 0x00000000, 0x000003ff },
11224 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
11225 0x00000000, 0xffffffff },
11226 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
11227 0x00000000, 0x000003ff },
11228 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
11229 0x00000000, 0xffffffff },
11230 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
11231 0x00000000, 0x000000ff },
11232 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
11233 0x00000000, 0xffffffff },
11234 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
11235 0x00000000, 0x000000ff },
11236 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
11237 0x00000000, 0xffffffff },
11238 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
11239 0x00000000, 0xffffffff },
11240 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
11241 0x00000000, 0xffffffff },
11242 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
11243 0x00000000, 0x000000ff },
11244 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
11245 0x00000000, 0xffffffff },
11246 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
11247 0x00000000, 0x000000ff },
11248 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
11249 0x00000000, 0xffffffff },
11250 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
11251 0x00000000, 0xffffffff },
11252 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
11253 0x00000000, 0xffffffff },
11254 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
11255 0x00000000, 0xffffffff },
11256 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
11257 0x00000000, 0xffffffff },
11258 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
11259 0xffffffff, 0x00000000 },
11260 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
11261 0xffffffff, 0x00000000 },
11262
11263 /* Buffer Manager Control Registers. */
11264 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
11265 0x00000000, 0x007fff80 },
11266 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
11267 0x00000000, 0x007fffff },
11268 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
11269 0x00000000, 0x0000003f },
11270 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
11271 0x00000000, 0x000001ff },
11272 { BUFMGR_MB_HIGH_WATER, 0x0000,
11273 0x00000000, 0x000001ff },
11274 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
11275 0xffffffff, 0x00000000 },
11276 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
11277 0xffffffff, 0x00000000 },
11278
11279 /* Mailbox Registers */
11280 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
11281 0x00000000, 0x000001ff },
11282 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
11283 0x00000000, 0x000001ff },
11284 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
11285 0x00000000, 0x000007ff },
11286 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
11287 0x00000000, 0x000001ff },
11288
11289 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
11290 };
11291
11292 is_5705 = is_5750 = 0;
11293 if (tg3_flag(tp, 5705_PLUS)) {
11294 is_5705 = 1;
11295 if (tg3_flag(tp, 5750_PLUS))
11296 is_5750 = 1;
11297 }
11298
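	/* Walk the table, skipping entries that are gated off for this
	 * chip class; the 0xffff offset entry terminates the table.
	 */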
11299 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
11300 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
11301 continue;
11302
11303 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
11304 continue;
11305
11306 if (tg3_flag(tp, IS_5788) &&
11307 (reg_tbl[i].flags & TG3_FL_NOT_5788))
11308 continue;
11309
11310 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
11311 continue;
11312
11313 offset = (u32) reg_tbl[i].offset;
11314 read_mask = reg_tbl[i].read_mask;
11315 write_mask = reg_tbl[i].write_mask;
11316
11317 /* Save the original register content */
11318 save_val = tr32(offset);
11319
11320 /* Determine the read-only value. */
11321 read_val = save_val & read_mask;
11322
11323 /* Write zero to the register, then make sure the read-only bits
11324 * are not changed and the read/write bits are all zeros.
11325 */
11326 tw32(offset, 0);
11327
11328 val = tr32(offset);
11329
11330 /* Test the read-only and read/write bits. */
11331 if (((val & read_mask) != read_val) || (val & write_mask))
11332 goto out;
11333
11334 		/* Write ones to all the bits defined by the read and write masks, then
11335 * make sure the read-only bits are not changed and the
11336 * read/write bits are all ones.
11337 */
11338 tw32(offset, read_mask | write_mask);
11339
11340 val = tr32(offset);
11341
11342 /* Test the read-only bits. */
11343 if ((val & read_mask) != read_val)
11344 goto out;
11345
11346 /* Test the read/write bits. */
11347 if ((val & write_mask) != write_mask)
11348 goto out;
11349
11350 tw32(offset, save_val);
11351 }
11352
11353 return 0;
11354
11355 out:
11356 if (netif_msg_hw(tp))
11357 netdev_err(tp->dev,
11358 "Register test failed at offset %x\n", offset);
11359 tw32(offset, save_val);
11360 return -EIO;
11361 }
11362
11363 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
11364 {
11365 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
11366 int i;
11367 u32 j;
11368
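	/* For each pattern, write then immediately read back every word
	 * in the window.
	 */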
11369 for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
11370 for (j = 0; j < len; j += 4) {
11371 u32 val;
11372
11373 tg3_write_mem(tp, offset + j, test_pattern[i]);
11374 tg3_read_mem(tp, offset + j, &val);
11375 if (val != test_pattern[i])
11376 return -EIO;
11377 }
11378 }
11379 return 0;
11380 }
11381
11382 static int tg3_test_memory(struct tg3 *tp)
11383 {
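	/* Per-chip-family tables of internal SRAM regions to exercise;
	 * an offset of 0xffffffff terminates each table.
	 */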
11384 static struct mem_entry {
11385 u32 offset;
11386 u32 len;
11387 } mem_tbl_570x[] = {
11388 { 0x00000000, 0x00b50},
11389 { 0x00002000, 0x1c000},
11390 { 0xffffffff, 0x00000}
11391 }, mem_tbl_5705[] = {
11392 { 0x00000100, 0x0000c},
11393 { 0x00000200, 0x00008},
11394 { 0x00004000, 0x00800},
11395 { 0x00006000, 0x01000},
11396 { 0x00008000, 0x02000},
11397 { 0x00010000, 0x0e000},
11398 { 0xffffffff, 0x00000}
11399 }, mem_tbl_5755[] = {
11400 { 0x00000200, 0x00008},
11401 { 0x00004000, 0x00800},
11402 { 0x00006000, 0x00800},
11403 { 0x00008000, 0x02000},
11404 { 0x00010000, 0x0c000},
11405 { 0xffffffff, 0x00000}
11406 }, mem_tbl_5906[] = {
11407 { 0x00000200, 0x00008},
11408 { 0x00004000, 0x00400},
11409 { 0x00006000, 0x00400},
11410 { 0x00008000, 0x01000},
11411 { 0x00010000, 0x01000},
11412 { 0xffffffff, 0x00000}
11413 }, mem_tbl_5717[] = {
11414 { 0x00000200, 0x00008},
11415 { 0x00010000, 0x0a000},
11416 { 0x00020000, 0x13c00},
11417 { 0xffffffff, 0x00000}
11418 }, mem_tbl_57765[] = {
11419 { 0x00000200, 0x00008},
11420 { 0x00004000, 0x00800},
11421 { 0x00006000, 0x09800},
11422 { 0x00010000, 0x0a000},
11423 { 0xffffffff, 0x00000}
11424 };
11425 struct mem_entry *mem_tbl;
11426 int err = 0;
11427 int i;
11428
11429 if (tg3_flag(tp, 5717_PLUS))
11430 mem_tbl = mem_tbl_5717;
11431 else if (tg3_flag(tp, 57765_CLASS))
11432 mem_tbl = mem_tbl_57765;
11433 else if (tg3_flag(tp, 5755_PLUS))
11434 mem_tbl = mem_tbl_5755;
11435 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11436 mem_tbl = mem_tbl_5906;
11437 else if (tg3_flag(tp, 5705_PLUS))
11438 mem_tbl = mem_tbl_5705;
11439 else
11440 mem_tbl = mem_tbl_570x;
11441
11442 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
11443 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
11444 if (err)
11445 break;
11446 }
11447
11448 return err;
11449 }
11450
11451 #define TG3_TSO_MSS 500
11452
11453 #define TG3_TSO_IP_HDR_LEN 20
11454 #define TG3_TSO_TCP_HDR_LEN 20
11455 #define TG3_TSO_TCP_OPT_LEN 12
11456
11457 static const u8 tg3_tso_header[] = {
11458 0x08, 0x00,
11459 0x45, 0x00, 0x00, 0x00,
11460 0x00, 0x00, 0x40, 0x00,
11461 0x40, 0x06, 0x00, 0x00,
11462 0x0a, 0x00, 0x00, 0x01,
11463 0x0a, 0x00, 0x00, 0x02,
11464 0x0d, 0x00, 0xe0, 0x00,
11465 0x00, 0x00, 0x01, 0x00,
11466 0x00, 0x00, 0x02, 0x00,
11467 0x80, 0x10, 0x10, 0x00,
11468 0x14, 0x09, 0x00, 0x00,
11469 0x01, 0x01, 0x08, 0x0a,
11470 0x11, 0x11, 0x11, 0x11,
11471 0x11, 0x11, 0x11, 0x11,
11472 };
11473
11474 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
11475 {
11476 u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
11477 u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
11478 u32 budget;
11479 struct sk_buff *skb;
11480 u8 *tx_data, *rx_data;
11481 dma_addr_t map;
11482 int num_pkts, tx_len, rx_len, i, err;
11483 struct tg3_rx_buffer_desc *desc;
11484 struct tg3_napi *tnapi, *rnapi;
11485 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
11486
11487 tnapi = &tp->napi[0];
11488 rnapi = &tp->napi[0];
11489 if (tp->irq_cnt > 1) {
11490 if (tg3_flag(tp, ENABLE_RSS))
11491 rnapi = &tp->napi[1];
11492 if (tg3_flag(tp, ENABLE_TSS))
11493 tnapi = &tp->napi[1];
11494 }
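	/* With RSS enabled, receive completions land on ring 1; with TSS,
	 * transmit completions do.
	 */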
11495 coal_now = tnapi->coal_now | rnapi->coal_now;
11496
11497 err = -EIO;
11498
11499 tx_len = pktsz;
11500 skb = netdev_alloc_skb(tp->dev, tx_len);
11501 if (!skb)
11502 return -ENOMEM;
11503
11504 tx_data = skb_put(skb, tx_len);
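	/* Address the frame to our own MAC so the looped-back copy passes
	 * the receive address filter.
	 */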
11505 memcpy(tx_data, tp->dev->dev_addr, 6);
11506 memset(tx_data + 6, 0x0, 8);
11507
11508 tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
11509
11510 if (tso_loopback) {
11511 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
11512
11513 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
11514 TG3_TSO_TCP_OPT_LEN;
11515
11516 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
11517 sizeof(tg3_tso_header));
11518 mss = TG3_TSO_MSS;
11519
11520 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
11521 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
11522
11523 /* Set the total length field in the IP header */
11524 iph->tot_len = htons((u16)(mss + hdr_len));
11525
11526 base_flags = (TXD_FLAG_CPU_PRE_DMA |
11527 TXD_FLAG_CPU_POST_DMA);
11528
11529 if (tg3_flag(tp, HW_TSO_1) ||
11530 tg3_flag(tp, HW_TSO_2) ||
11531 tg3_flag(tp, HW_TSO_3)) {
11532 struct tcphdr *th;
11533 val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
11534 th = (struct tcphdr *)&tx_data[val];
11535 th->check = 0;
11536 } else
11537 base_flags |= TXD_FLAG_TCPUDP_CSUM;
11538
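		/* Each hardware TSO generation encodes the header length
		 * differently in the mss field and base flags.
		 */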
11539 if (tg3_flag(tp, HW_TSO_3)) {
11540 mss |= (hdr_len & 0xc) << 12;
11541 if (hdr_len & 0x10)
11542 base_flags |= 0x00000010;
11543 base_flags |= (hdr_len & 0x3e0) << 5;
11544 } else if (tg3_flag(tp, HW_TSO_2))
11545 mss |= hdr_len << 9;
11546 else if (tg3_flag(tp, HW_TSO_1) ||
11547 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
11548 mss |= (TG3_TSO_TCP_OPT_LEN << 9);
11549 } else {
11550 base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
11551 }
11552
11553 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
11554 } else {
11555 num_pkts = 1;
11556 data_off = ETH_HLEN;
11557 }
11558
11559 for (i = data_off; i < tx_len; i++)
11560 tx_data[i] = (u8) (i & 0xff);
11561
11562 map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
11563 if (pci_dma_mapping_error(tp->pdev, map)) {
11564 dev_kfree_skb(skb);
11565 return -EIO;
11566 }
11567
11568 val = tnapi->tx_prod;
11569 tnapi->tx_buffers[val].skb = skb;
11570 dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
11571
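	/* Force a coalescing pass so the status block reflects the
	 * current receive producer index before we transmit.
	 */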
11572 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11573 rnapi->coal_now);
11574
11575 udelay(10);
11576
11577 rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
11578
11579 budget = tg3_tx_avail(tnapi);
11580 if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
11581 base_flags | TXD_FLAG_END, mss, 0)) {
11582 tnapi->tx_buffers[val].skb = NULL;
11583 dev_kfree_skb(skb);
11584 return -EIO;
11585 }
11586
11587 tnapi->tx_prod++;
11588
11589 tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
11590 tr32_mailbox(tnapi->prodmbox);
11591
11592 udelay(10);
11593
11594 	/* Poll up to 350 usec to allow enough time on some 10/100 Mbps devices. */
11595 for (i = 0; i < 35; i++) {
11596 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11597 coal_now);
11598
11599 udelay(10);
11600
11601 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
11602 rx_idx = rnapi->hw_status->idx[0].rx_producer;
11603 if ((tx_idx == tnapi->tx_prod) &&
11604 (rx_idx == (rx_start_idx + num_pkts)))
11605 break;
11606 }
11607
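	/* The test frame has been handed to the hardware; unmap and free
	 * it before checking the results.
	 */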
11608 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
11609 dev_kfree_skb(skb);
11610
11611 if (tx_idx != tnapi->tx_prod)
11612 goto out;
11613
11614 if (rx_idx != rx_start_idx + num_pkts)
11615 goto out;
11616
11617 val = data_off;
11618 while (rx_idx != rx_start_idx) {
11619 desc = &rnapi->rx_rcb[rx_start_idx++];
11620 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
11621 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
11622
11623 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
11624 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
11625 goto out;
11626
11627 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
11628 - ETH_FCS_LEN;
11629
11630 if (!tso_loopback) {
11631 if (rx_len != tx_len)
11632 goto out;
11633
11634 if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
11635 if (opaque_key != RXD_OPAQUE_RING_STD)
11636 goto out;
11637 } else {
11638 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
11639 goto out;
11640 }
11641 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
11642 (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
11643 >> RXD_TCPCSUM_SHIFT != 0xffff) {
11644 goto out;
11645 }
11646
11647 if (opaque_key == RXD_OPAQUE_RING_STD) {
11648 rx_data = tpr->rx_std_buffers[desc_idx].data;
11649 map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
11650 mapping);
11651 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
11652 rx_data = tpr->rx_jmb_buffers[desc_idx].data;
11653 map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
11654 mapping);
11655 } else
11656 goto out;
11657
11658 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
11659 PCI_DMA_FROMDEVICE);
11660
11661 rx_data += TG3_RX_OFFSET(tp);
11662 for (i = data_off; i < rx_len; i++, val++) {
11663 if (*(rx_data + i) != (u8) (val & 0xff))
11664 goto out;
11665 }
11666 }
11667
11668 err = 0;
11669
11670 /* tg3_free_rings will unmap and free the rx_data */
11671 out:
11672 return err;
11673 }
11674
11675 #define TG3_STD_LOOPBACK_FAILED 1
11676 #define TG3_JMB_LOOPBACK_FAILED 2
11677 #define TG3_TSO_LOOPBACK_FAILED 4
11678 #define TG3_LOOPBACK_FAILED \
11679 (TG3_STD_LOOPBACK_FAILED | \
11680 TG3_JMB_LOOPBACK_FAILED | \
11681 TG3_TSO_LOOPBACK_FAILED)
11682
11683 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
11684 {
11685 int err = -EIO;
11686 u32 eee_cap;
11687
11688 eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
11689 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11690
11691 if (!netif_running(tp->dev)) {
11692 data[0] = TG3_LOOPBACK_FAILED;
11693 data[1] = TG3_LOOPBACK_FAILED;
11694 if (do_extlpbk)
11695 data[2] = TG3_LOOPBACK_FAILED;
11696 goto done;
11697 }
11698
11699 err = tg3_reset_hw(tp, 1);
11700 if (err) {
11701 data[0] = TG3_LOOPBACK_FAILED;
11702 data[1] = TG3_LOOPBACK_FAILED;
11703 if (do_extlpbk)
11704 data[2] = TG3_LOOPBACK_FAILED;
11705 goto done;
11706 }
11707
11708 if (tg3_flag(tp, ENABLE_RSS)) {
11709 int i;
11710
11711 /* Reroute all rx packets to the 1st queue */
11712 for (i = MAC_RSS_INDIR_TBL_0;
11713 i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
11714 tw32(i, 0x0);
11715 }
11716
11717 /* HW errata - mac loopback fails in some cases on 5780.
11718 * Normal traffic and PHY loopback are not affected by
11719 * errata. Also, the MAC loopback test is deprecated for
11720 * all newer ASIC revisions.
11721 */
11722 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
11723 !tg3_flag(tp, CPMU_PRESENT)) {
11724 tg3_mac_loopback(tp, true);
11725
11726 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
11727 data[0] |= TG3_STD_LOOPBACK_FAILED;
11728
11729 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
11730 tg3_run_loopback(tp, 9000 + ETH_HLEN, false))
11731 data[0] |= TG3_JMB_LOOPBACK_FAILED;
11732
11733 tg3_mac_loopback(tp, false);
11734 }
11735
11736 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11737 !tg3_flag(tp, USE_PHYLIB)) {
11738 int i;
11739
11740 tg3_phy_lpbk_set(tp, 0, false);
11741
11742 /* Wait for link */
11743 for (i = 0; i < 100; i++) {
11744 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
11745 break;
11746 mdelay(1);
11747 }
11748
11749 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
11750 data[1] |= TG3_STD_LOOPBACK_FAILED;
11751 if (tg3_flag(tp, TSO_CAPABLE) &&
11752 tg3_run_loopback(tp, ETH_FRAME_LEN, true))
11753 data[1] |= TG3_TSO_LOOPBACK_FAILED;
11754 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
11755 tg3_run_loopback(tp, 9000 + ETH_HLEN, false))
11756 data[1] |= TG3_JMB_LOOPBACK_FAILED;
11757
11758 if (do_extlpbk) {
11759 tg3_phy_lpbk_set(tp, 0, true);
11760
11761 /* All link indications report up, but the hardware
11762 * isn't really ready for about 20 msec. Double it
11763 * to be sure.
11764 */
11765 mdelay(40);
11766
11767 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
11768 data[2] |= TG3_STD_LOOPBACK_FAILED;
11769 if (tg3_flag(tp, TSO_CAPABLE) &&
11770 tg3_run_loopback(tp, ETH_FRAME_LEN, true))
11771 data[2] |= TG3_TSO_LOOPBACK_FAILED;
11772 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
11773 tg3_run_loopback(tp, 9000 + ETH_HLEN, false))
11774 data[2] |= TG3_JMB_LOOPBACK_FAILED;
11775 }
11776
11777 /* Re-enable gphy autopowerdown. */
11778 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
11779 tg3_phy_toggle_apd(tp, true);
11780 }
11781
11782 err = (data[0] | data[1] | data[2]) ? -EIO : 0;
11783
11784 done:
11785 tp->phy_flags |= eee_cap;
11786
11787 return err;
11788 }
11789
11790 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
11791 u64 *data)
11792 {
11793 struct tg3 *tp = netdev_priv(dev);
11794 bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
11795
11796 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
11797 tg3_power_up(tp)) {
11798 etest->flags |= ETH_TEST_FL_FAILED;
11799 memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
11800 return;
11801 }
11802
11803 memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
11804
11805 if (tg3_test_nvram(tp) != 0) {
11806 etest->flags |= ETH_TEST_FL_FAILED;
11807 data[0] = 1;
11808 }
11809 if (!doextlpbk && tg3_test_link(tp)) {
11810 etest->flags |= ETH_TEST_FL_FAILED;
11811 data[1] = 1;
11812 }
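	/* The offline tests fill in the remaining slots: data[2] registers,
	 * data[3] memory, data[4..6] loopback and data[7] interrupt.
	 */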
11813 if (etest->flags & ETH_TEST_FL_OFFLINE) {
11814 int err, err2 = 0, irq_sync = 0;
11815
11816 if (netif_running(dev)) {
11817 tg3_phy_stop(tp);
11818 tg3_netif_stop(tp);
11819 irq_sync = 1;
11820 }
11821
11822 tg3_full_lock(tp, irq_sync);
11823
11824 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
11825 err = tg3_nvram_lock(tp);
11826 tg3_halt_cpu(tp, RX_CPU_BASE);
11827 if (!tg3_flag(tp, 5705_PLUS))
11828 tg3_halt_cpu(tp, TX_CPU_BASE);
11829 if (!err)
11830 tg3_nvram_unlock(tp);
11831
11832 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
11833 tg3_phy_reset(tp);
11834
11835 if (tg3_test_registers(tp) != 0) {
11836 etest->flags |= ETH_TEST_FL_FAILED;
11837 data[2] = 1;
11838 }
11839
11840 if (tg3_test_memory(tp) != 0) {
11841 etest->flags |= ETH_TEST_FL_FAILED;
11842 data[3] = 1;
11843 }
11844
11845 if (doextlpbk)
11846 etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
11847
11848 if (tg3_test_loopback(tp, &data[4], doextlpbk))
11849 etest->flags |= ETH_TEST_FL_FAILED;
11850
11851 tg3_full_unlock(tp);
11852
11853 if (tg3_test_interrupt(tp) != 0) {
11854 etest->flags |= ETH_TEST_FL_FAILED;
11855 data[7] = 1;
11856 }
11857
11858 tg3_full_lock(tp, 0);
11859
11860 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11861 if (netif_running(dev)) {
11862 tg3_flag_set(tp, INIT_COMPLETE);
11863 err2 = tg3_restart_hw(tp, 1);
11864 if (!err2)
11865 tg3_netif_start(tp);
11866 }
11867
11868 tg3_full_unlock(tp);
11869
11870 if (irq_sync && !err2)
11871 tg3_phy_start(tp);
11872 }
11873 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11874 tg3_power_down(tp);
11875
11876 }
11877
11878 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
11879 {
11880 struct mii_ioctl_data *data = if_mii(ifr);
11881 struct tg3 *tp = netdev_priv(dev);
11882 int err;
11883
11884 if (tg3_flag(tp, USE_PHYLIB)) {
11885 struct phy_device *phydev;
11886 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11887 return -EAGAIN;
11888 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11889 return phy_mii_ioctl(phydev, ifr, cmd);
11890 }
11891
11892 switch (cmd) {
11893 case SIOCGMIIPHY:
11894 data->phy_id = tp->phy_addr;
11895
11896 /* fallthru */
11897 case SIOCGMIIREG: {
11898 u32 mii_regval;
11899
11900 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11901 break; /* We have no PHY */
11902
11903 if (!netif_running(dev))
11904 return -EAGAIN;
11905
11906 spin_lock_bh(&tp->lock);
11907 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
11908 spin_unlock_bh(&tp->lock);
11909
11910 data->val_out = mii_regval;
11911
11912 return err;
11913 }
11914
11915 case SIOCSMIIREG:
11916 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11917 break; /* We have no PHY */
11918
11919 if (!netif_running(dev))
11920 return -EAGAIN;
11921
11922 spin_lock_bh(&tp->lock);
11923 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
11924 spin_unlock_bh(&tp->lock);
11925
11926 return err;
11927
11928 default:
11929 /* do nothing */
11930 break;
11931 }
11932 return -EOPNOTSUPP;
11933 }
11934
11935 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
11936 {
11937 struct tg3 *tp = netdev_priv(dev);
11938
11939 memcpy(ec, &tp->coal, sizeof(*ec));
11940 return 0;
11941 }
11942
11943 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
11944 {
11945 struct tg3 *tp = netdev_priv(dev);
11946 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
11947 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
11948
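	/* Only pre-5705 hardware implements the irq and stats-block
	 * coalescing parameters; leaving these limits at zero makes the
	 * range check below reject nonzero requests on newer chips.
	 */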
11949 if (!tg3_flag(tp, 5705_PLUS)) {
11950 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
11951 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
11952 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
11953 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
11954 }
11955
11956 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
11957 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
11958 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
11959 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
11960 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
11961 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
11962 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
11963 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
11964 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
11965 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
11966 return -EINVAL;
11967
11968 /* No rx interrupts will be generated if both are zero */
11969 if ((ec->rx_coalesce_usecs == 0) &&
11970 (ec->rx_max_coalesced_frames == 0))
11971 return -EINVAL;
11972
11973 /* No tx interrupts will be generated if both are zero */
11974 if ((ec->tx_coalesce_usecs == 0) &&
11975 (ec->tx_max_coalesced_frames == 0))
11976 return -EINVAL;
11977
11978 /* Only copy relevant parameters, ignore all others. */
11979 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
11980 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
11981 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
11982 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
11983 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
11984 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
11985 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
11986 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
11987 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
11988
11989 if (netif_running(dev)) {
11990 tg3_full_lock(tp, 0);
11991 __tg3_set_coalesce(tp, &tp->coal);
11992 tg3_full_unlock(tp);
11993 }
11994 return 0;
11995 }
11996
11997 static const struct ethtool_ops tg3_ethtool_ops = {
11998 .get_settings = tg3_get_settings,
11999 .set_settings = tg3_set_settings,
12000 .get_drvinfo = tg3_get_drvinfo,
12001 .get_regs_len = tg3_get_regs_len,
12002 .get_regs = tg3_get_regs,
12003 .get_wol = tg3_get_wol,
12004 .set_wol = tg3_set_wol,
12005 .get_msglevel = tg3_get_msglevel,
12006 .set_msglevel = tg3_set_msglevel,
12007 .nway_reset = tg3_nway_reset,
12008 .get_link = ethtool_op_get_link,
12009 .get_eeprom_len = tg3_get_eeprom_len,
12010 .get_eeprom = tg3_get_eeprom,
12011 .set_eeprom = tg3_set_eeprom,
12012 .get_ringparam = tg3_get_ringparam,
12013 .set_ringparam = tg3_set_ringparam,
12014 .get_pauseparam = tg3_get_pauseparam,
12015 .set_pauseparam = tg3_set_pauseparam,
12016 .self_test = tg3_self_test,
12017 .get_strings = tg3_get_strings,
12018 .set_phys_id = tg3_set_phys_id,
12019 .get_ethtool_stats = tg3_get_ethtool_stats,
12020 .get_coalesce = tg3_get_coalesce,
12021 .set_coalesce = tg3_set_coalesce,
12022 .get_sset_count = tg3_get_sset_count,
12023 .get_rxnfc = tg3_get_rxnfc,
12024 .get_rxfh_indir_size = tg3_get_rxfh_indir_size,
12025 .get_rxfh_indir = tg3_get_rxfh_indir,
12026 .set_rxfh_indir = tg3_set_rxfh_indir,
12027 };
12028
12029 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
12030 {
12031 u32 cursize, val, magic;
12032
12033 tp->nvram_size = EEPROM_CHIP_SIZE;
12034
12035 if (tg3_nvram_read(tp, 0, &magic) != 0)
12036 return;
12037
12038 if ((magic != TG3_EEPROM_MAGIC) &&
12039 ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
12040 ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
12041 return;
12042
12043 /*
12044 * Size the chip by reading offsets at increasing powers of two.
12045 * When we encounter our validation signature, we know the addressing
12046 * has wrapped around, and thus have our chip size.
12047 */
12048 cursize = 0x10;
12049
12050 while (cursize < tp->nvram_size) {
12051 if (tg3_nvram_read(tp, cursize, &val) != 0)
12052 return;
12053
12054 if (val == magic)
12055 break;
12056
12057 cursize <<= 1;
12058 }
12059
12060 tp->nvram_size = cursize;
12061 }
12062
12063 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
12064 {
12065 u32 val;
12066
12067 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
12068 return;
12069
12070 /* Selfboot format */
12071 if (val != TG3_EEPROM_MAGIC) {
12072 tg3_get_eeprom_size(tp);
12073 return;
12074 }
12075
12076 if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
12077 if (val != 0) {
12078 /* This is confusing. We want to operate on the
12079 * 16-bit value at offset 0xf2. The tg3_nvram_read()
12080 * call will read from NVRAM and byteswap the data
12081 * according to the byteswapping settings for all
12082 * other register accesses. This ensures the data we
12083 * want will always reside in the lower 16-bits.
12084 * However, the data in NVRAM is in LE format, which
12085 * means the data from the NVRAM read will always be
12086 * opposite the endianness of the CPU. The 16-bit
12087 * byteswap then brings the data to CPU endianness.
12088 */
12089 tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
12090 return;
12091 }
12092 }
12093 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12094 }
12095
12096 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
12097 {
12098 u32 nvcfg1;
12099
12100 nvcfg1 = tr32(NVRAM_CFG1);
12101 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
12102 tg3_flag_set(tp, FLASH);
12103 } else {
12104 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12105 tw32(NVRAM_CFG1, nvcfg1);
12106 }
12107
12108 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
12109 tg3_flag(tp, 5780_CLASS)) {
12110 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
12111 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
12112 tp->nvram_jedecnum = JEDEC_ATMEL;
12113 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
12114 tg3_flag_set(tp, NVRAM_BUFFERED);
12115 break;
12116 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
12117 tp->nvram_jedecnum = JEDEC_ATMEL;
12118 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
12119 break;
12120 case FLASH_VENDOR_ATMEL_EEPROM:
12121 tp->nvram_jedecnum = JEDEC_ATMEL;
12122 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12123 tg3_flag_set(tp, NVRAM_BUFFERED);
12124 break;
12125 case FLASH_VENDOR_ST:
12126 tp->nvram_jedecnum = JEDEC_ST;
12127 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
12128 tg3_flag_set(tp, NVRAM_BUFFERED);
12129 break;
12130 case FLASH_VENDOR_SAIFUN:
12131 tp->nvram_jedecnum = JEDEC_SAIFUN;
12132 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
12133 break;
12134 case FLASH_VENDOR_SST_SMALL:
12135 case FLASH_VENDOR_SST_LARGE:
12136 tp->nvram_jedecnum = JEDEC_SST;
12137 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
12138 break;
12139 }
12140 } else {
12141 tp->nvram_jedecnum = JEDEC_ATMEL;
12142 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
12143 tg3_flag_set(tp, NVRAM_BUFFERED);
12144 }
12145 }
12146
12147 static void __devinit tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
12148 {
12149 switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
12150 case FLASH_5752PAGE_SIZE_256:
12151 tp->nvram_pagesize = 256;
12152 break;
12153 case FLASH_5752PAGE_SIZE_512:
12154 tp->nvram_pagesize = 512;
12155 break;
12156 case FLASH_5752PAGE_SIZE_1K:
12157 tp->nvram_pagesize = 1024;
12158 break;
12159 case FLASH_5752PAGE_SIZE_2K:
12160 tp->nvram_pagesize = 2048;
12161 break;
12162 case FLASH_5752PAGE_SIZE_4K:
12163 tp->nvram_pagesize = 4096;
12164 break;
12165 case FLASH_5752PAGE_SIZE_264:
12166 tp->nvram_pagesize = 264;
12167 break;
12168 case FLASH_5752PAGE_SIZE_528:
12169 tp->nvram_pagesize = 528;
12170 break;
12171 }
12172 }
12173
12174 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
12175 {
12176 u32 nvcfg1;
12177
12178 nvcfg1 = tr32(NVRAM_CFG1);
12179
12180 /* NVRAM protection for TPM */
12181 if (nvcfg1 & (1 << 27))
12182 tg3_flag_set(tp, PROTECTED_NVRAM);
12183
12184 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12185 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
12186 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
12187 tp->nvram_jedecnum = JEDEC_ATMEL;
12188 tg3_flag_set(tp, NVRAM_BUFFERED);
12189 break;
12190 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12191 tp->nvram_jedecnum = JEDEC_ATMEL;
12192 tg3_flag_set(tp, NVRAM_BUFFERED);
12193 tg3_flag_set(tp, FLASH);
12194 break;
12195 case FLASH_5752VENDOR_ST_M45PE10:
12196 case FLASH_5752VENDOR_ST_M45PE20:
12197 case FLASH_5752VENDOR_ST_M45PE40:
12198 tp->nvram_jedecnum = JEDEC_ST;
12199 tg3_flag_set(tp, NVRAM_BUFFERED);
12200 tg3_flag_set(tp, FLASH);
12201 break;
12202 }
12203
12204 if (tg3_flag(tp, FLASH)) {
12205 tg3_nvram_get_pagesize(tp, nvcfg1);
12206 } else {
12207 /* For eeprom, set pagesize to maximum eeprom size */
12208 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12209
12210 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12211 tw32(NVRAM_CFG1, nvcfg1);
12212 }
12213 }
12214
12215 static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
12216 {
12217 u32 nvcfg1, protect = 0;
12218
12219 nvcfg1 = tr32(NVRAM_CFG1);
12220
12221 /* NVRAM protection for TPM */
12222 if (nvcfg1 & (1 << 27)) {
12223 tg3_flag_set(tp, PROTECTED_NVRAM);
12224 protect = 1;
12225 }
12226
12227 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
12228 switch (nvcfg1) {
12229 case FLASH_5755VENDOR_ATMEL_FLASH_1:
12230 case FLASH_5755VENDOR_ATMEL_FLASH_2:
12231 case FLASH_5755VENDOR_ATMEL_FLASH_3:
12232 case FLASH_5755VENDOR_ATMEL_FLASH_5:
12233 tp->nvram_jedecnum = JEDEC_ATMEL;
12234 tg3_flag_set(tp, NVRAM_BUFFERED);
12235 tg3_flag_set(tp, FLASH);
12236 tp->nvram_pagesize = 264;
12237 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
12238 nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
12239 tp->nvram_size = (protect ? 0x3e200 :
12240 TG3_NVRAM_SIZE_512KB);
12241 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
12242 tp->nvram_size = (protect ? 0x1f200 :
12243 TG3_NVRAM_SIZE_256KB);
12244 else
12245 tp->nvram_size = (protect ? 0x1f200 :
12246 TG3_NVRAM_SIZE_128KB);
12247 break;
12248 case FLASH_5752VENDOR_ST_M45PE10:
12249 case FLASH_5752VENDOR_ST_M45PE20:
12250 case FLASH_5752VENDOR_ST_M45PE40:
12251 tp->nvram_jedecnum = JEDEC_ST;
12252 tg3_flag_set(tp, NVRAM_BUFFERED);
12253 tg3_flag_set(tp, FLASH);
12254 tp->nvram_pagesize = 256;
12255 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
12256 tp->nvram_size = (protect ?
12257 TG3_NVRAM_SIZE_64KB :
12258 TG3_NVRAM_SIZE_128KB);
12259 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
12260 tp->nvram_size = (protect ?
12261 TG3_NVRAM_SIZE_64KB :
12262 TG3_NVRAM_SIZE_256KB);
12263 else
12264 tp->nvram_size = (protect ?
12265 TG3_NVRAM_SIZE_128KB :
12266 TG3_NVRAM_SIZE_512KB);
12267 break;
12268 }
12269 }
12270
12271 static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
12272 {
12273 u32 nvcfg1;
12274
12275 nvcfg1 = tr32(NVRAM_CFG1);
12276
12277 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12278 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
12279 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
12280 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
12281 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
12282 tp->nvram_jedecnum = JEDEC_ATMEL;
12283 tg3_flag_set(tp, NVRAM_BUFFERED);
12284 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12285
12286 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12287 tw32(NVRAM_CFG1, nvcfg1);
12288 break;
12289 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12290 case FLASH_5755VENDOR_ATMEL_FLASH_1:
12291 case FLASH_5755VENDOR_ATMEL_FLASH_2:
12292 case FLASH_5755VENDOR_ATMEL_FLASH_3:
12293 tp->nvram_jedecnum = JEDEC_ATMEL;
12294 tg3_flag_set(tp, NVRAM_BUFFERED);
12295 tg3_flag_set(tp, FLASH);
12296 tp->nvram_pagesize = 264;
12297 break;
12298 case FLASH_5752VENDOR_ST_M45PE10:
12299 case FLASH_5752VENDOR_ST_M45PE20:
12300 case FLASH_5752VENDOR_ST_M45PE40:
12301 tp->nvram_jedecnum = JEDEC_ST;
12302 tg3_flag_set(tp, NVRAM_BUFFERED);
12303 tg3_flag_set(tp, FLASH);
12304 tp->nvram_pagesize = 256;
12305 break;
12306 }
12307 }
12308
12309 static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
12310 {
12311 u32 nvcfg1, protect = 0;
12312
12313 nvcfg1 = tr32(NVRAM_CFG1);
12314
12315 /* NVRAM protection for TPM */
12316 if (nvcfg1 & (1 << 27)) {
12317 tg3_flag_set(tp, PROTECTED_NVRAM);
12318 protect = 1;
12319 }
12320
12321 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
12322 switch (nvcfg1) {
12323 case FLASH_5761VENDOR_ATMEL_ADB021D:
12324 case FLASH_5761VENDOR_ATMEL_ADB041D:
12325 case FLASH_5761VENDOR_ATMEL_ADB081D:
12326 case FLASH_5761VENDOR_ATMEL_ADB161D:
12327 case FLASH_5761VENDOR_ATMEL_MDB021D:
12328 case FLASH_5761VENDOR_ATMEL_MDB041D:
12329 case FLASH_5761VENDOR_ATMEL_MDB081D:
12330 case FLASH_5761VENDOR_ATMEL_MDB161D:
12331 tp->nvram_jedecnum = JEDEC_ATMEL;
12332 tg3_flag_set(tp, NVRAM_BUFFERED);
12333 tg3_flag_set(tp, FLASH);
12334 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12335 tp->nvram_pagesize = 256;
12336 break;
12337 case FLASH_5761VENDOR_ST_A_M45PE20:
12338 case FLASH_5761VENDOR_ST_A_M45PE40:
12339 case FLASH_5761VENDOR_ST_A_M45PE80:
12340 case FLASH_5761VENDOR_ST_A_M45PE16:
12341 case FLASH_5761VENDOR_ST_M_M45PE20:
12342 case FLASH_5761VENDOR_ST_M_M45PE40:
12343 case FLASH_5761VENDOR_ST_M_M45PE80:
12344 case FLASH_5761VENDOR_ST_M_M45PE16:
12345 tp->nvram_jedecnum = JEDEC_ST;
12346 tg3_flag_set(tp, NVRAM_BUFFERED);
12347 tg3_flag_set(tp, FLASH);
12348 tp->nvram_pagesize = 256;
12349 break;
12350 }
12351
12352 if (protect) {
12353 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
12354 } else {
12355 switch (nvcfg1) {
12356 case FLASH_5761VENDOR_ATMEL_ADB161D:
12357 case FLASH_5761VENDOR_ATMEL_MDB161D:
12358 case FLASH_5761VENDOR_ST_A_M45PE16:
12359 case FLASH_5761VENDOR_ST_M_M45PE16:
12360 tp->nvram_size = TG3_NVRAM_SIZE_2MB;
12361 break;
12362 case FLASH_5761VENDOR_ATMEL_ADB081D:
12363 case FLASH_5761VENDOR_ATMEL_MDB081D:
12364 case FLASH_5761VENDOR_ST_A_M45PE80:
12365 case FLASH_5761VENDOR_ST_M_M45PE80:
12366 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12367 break;
12368 case FLASH_5761VENDOR_ATMEL_ADB041D:
12369 case FLASH_5761VENDOR_ATMEL_MDB041D:
12370 case FLASH_5761VENDOR_ST_A_M45PE40:
12371 case FLASH_5761VENDOR_ST_M_M45PE40:
12372 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12373 break;
12374 case FLASH_5761VENDOR_ATMEL_ADB021D:
12375 case FLASH_5761VENDOR_ATMEL_MDB021D:
12376 case FLASH_5761VENDOR_ST_A_M45PE20:
12377 case FLASH_5761VENDOR_ST_M_M45PE20:
12378 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12379 break;
12380 }
12381 }
12382 }
12383
12384 static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
12385 {
12386 tp->nvram_jedecnum = JEDEC_ATMEL;
12387 tg3_flag_set(tp, NVRAM_BUFFERED);
12388 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12389 }
12390
12391 static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp)
12392 {
12393 u32 nvcfg1;
12394
12395 nvcfg1 = tr32(NVRAM_CFG1);
12396
12397 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12398 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
12399 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
12400 tp->nvram_jedecnum = JEDEC_ATMEL;
12401 tg3_flag_set(tp, NVRAM_BUFFERED);
12402 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12403
12404 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12405 tw32(NVRAM_CFG1, nvcfg1);
12406 return;
12407 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12408 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
12409 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
12410 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
12411 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
12412 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
12413 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
12414 tp->nvram_jedecnum = JEDEC_ATMEL;
12415 tg3_flag_set(tp, NVRAM_BUFFERED);
12416 tg3_flag_set(tp, FLASH);
12417
12418 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12419 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12420 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
12421 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
12422 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12423 break;
12424 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
12425 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
12426 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12427 break;
12428 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
12429 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
12430 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12431 break;
12432 }
12433 break;
12434 case FLASH_5752VENDOR_ST_M45PE10:
12435 case FLASH_5752VENDOR_ST_M45PE20:
12436 case FLASH_5752VENDOR_ST_M45PE40:
12437 tp->nvram_jedecnum = JEDEC_ST;
12438 tg3_flag_set(tp, NVRAM_BUFFERED);
12439 tg3_flag_set(tp, FLASH);
12440
12441 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12442 case FLASH_5752VENDOR_ST_M45PE10:
12443 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12444 break;
12445 case FLASH_5752VENDOR_ST_M45PE20:
12446 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12447 break;
12448 case FLASH_5752VENDOR_ST_M45PE40:
12449 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12450 break;
12451 }
12452 break;
12453 default:
12454 tg3_flag_set(tp, NO_NVRAM);
12455 return;
12456 }
12457
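	/* Only the 264- and 528-byte (Atmel DataFlash) page geometries
	 * use NVRAM page-address translation; everything else is
	 * flat-addressed.
	 */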
12458 tg3_nvram_get_pagesize(tp, nvcfg1);
12459 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12460 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12461 }
12462
12463
12464 static void __devinit tg3_get_5717_nvram_info(struct tg3 *tp)
12465 {
12466 u32 nvcfg1;
12467
12468 nvcfg1 = tr32(NVRAM_CFG1);
12469
12470 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12471 case FLASH_5717VENDOR_ATMEL_EEPROM:
12472 case FLASH_5717VENDOR_MICRO_EEPROM:
12473 tp->nvram_jedecnum = JEDEC_ATMEL;
12474 tg3_flag_set(tp, NVRAM_BUFFERED);
12475 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12476
12477 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12478 tw32(NVRAM_CFG1, nvcfg1);
12479 return;
12480 case FLASH_5717VENDOR_ATMEL_MDB011D:
12481 case FLASH_5717VENDOR_ATMEL_ADB011B:
12482 case FLASH_5717VENDOR_ATMEL_ADB011D:
12483 case FLASH_5717VENDOR_ATMEL_MDB021D:
12484 case FLASH_5717VENDOR_ATMEL_ADB021B:
12485 case FLASH_5717VENDOR_ATMEL_ADB021D:
12486 case FLASH_5717VENDOR_ATMEL_45USPT:
12487 tp->nvram_jedecnum = JEDEC_ATMEL;
12488 tg3_flag_set(tp, NVRAM_BUFFERED);
12489 tg3_flag_set(tp, FLASH);
12490
12491 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12492 case FLASH_5717VENDOR_ATMEL_MDB021D:
12493 /* Detect size with tg3_nvram_get_size() */
12494 break;
12495 case FLASH_5717VENDOR_ATMEL_ADB021B:
12496 case FLASH_5717VENDOR_ATMEL_ADB021D:
12497 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12498 break;
12499 default:
12500 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12501 break;
12502 }
12503 break;
12504 case FLASH_5717VENDOR_ST_M_M25PE10:
12505 case FLASH_5717VENDOR_ST_A_M25PE10:
12506 case FLASH_5717VENDOR_ST_M_M45PE10:
12507 case FLASH_5717VENDOR_ST_A_M45PE10:
12508 case FLASH_5717VENDOR_ST_M_M25PE20:
12509 case FLASH_5717VENDOR_ST_A_M25PE20:
12510 case FLASH_5717VENDOR_ST_M_M45PE20:
12511 case FLASH_5717VENDOR_ST_A_M45PE20:
12512 case FLASH_5717VENDOR_ST_25USPT:
12513 case FLASH_5717VENDOR_ST_45USPT:
12514 tp->nvram_jedecnum = JEDEC_ST;
12515 tg3_flag_set(tp, NVRAM_BUFFERED);
12516 tg3_flag_set(tp, FLASH);
12517
12518 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12519 case FLASH_5717VENDOR_ST_M_M25PE20:
12520 case FLASH_5717VENDOR_ST_M_M45PE20:
12521 /* Detect size with tg3_nvram_get_size() */
12522 break;
12523 case FLASH_5717VENDOR_ST_A_M25PE20:
12524 case FLASH_5717VENDOR_ST_A_M45PE20:
12525 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12526 break;
12527 default:
12528 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12529 break;
12530 }
12531 break;
12532 default:
12533 tg3_flag_set(tp, NO_NVRAM);
12534 return;
12535 }
12536
12537 tg3_nvram_get_pagesize(tp, nvcfg1);
12538 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12539 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12540 }
12541
12542 static void __devinit tg3_get_5720_nvram_info(struct tg3 *tp)
12543 {
12544 u32 nvcfg1, nvmpinstrp;
12545
12546 nvcfg1 = tr32(NVRAM_CFG1);
12547 nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
12548
12549 switch (nvmpinstrp) {
12550 case FLASH_5720_EEPROM_HD:
12551 case FLASH_5720_EEPROM_LD:
12552 tp->nvram_jedecnum = JEDEC_ATMEL;
12553 tg3_flag_set(tp, NVRAM_BUFFERED);
12554
12555 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12556 tw32(NVRAM_CFG1, nvcfg1);
12557 if (nvmpinstrp == FLASH_5720_EEPROM_HD)
12558 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12559 else
12560 tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
12561 return;
12562 case FLASH_5720VENDOR_M_ATMEL_DB011D:
12563 case FLASH_5720VENDOR_A_ATMEL_DB011B:
12564 case FLASH_5720VENDOR_A_ATMEL_DB011D:
12565 case FLASH_5720VENDOR_M_ATMEL_DB021D:
12566 case FLASH_5720VENDOR_A_ATMEL_DB021B:
12567 case FLASH_5720VENDOR_A_ATMEL_DB021D:
12568 case FLASH_5720VENDOR_M_ATMEL_DB041D:
12569 case FLASH_5720VENDOR_A_ATMEL_DB041B:
12570 case FLASH_5720VENDOR_A_ATMEL_DB041D:
12571 case FLASH_5720VENDOR_M_ATMEL_DB081D:
12572 case FLASH_5720VENDOR_A_ATMEL_DB081D:
12573 case FLASH_5720VENDOR_ATMEL_45USPT:
12574 tp->nvram_jedecnum = JEDEC_ATMEL;
12575 tg3_flag_set(tp, NVRAM_BUFFERED);
12576 tg3_flag_set(tp, FLASH);
12577
12578 switch (nvmpinstrp) {
12579 case FLASH_5720VENDOR_M_ATMEL_DB021D:
12580 case FLASH_5720VENDOR_A_ATMEL_DB021B:
12581 case FLASH_5720VENDOR_A_ATMEL_DB021D:
12582 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12583 break;
12584 case FLASH_5720VENDOR_M_ATMEL_DB041D:
12585 case FLASH_5720VENDOR_A_ATMEL_DB041B:
12586 case FLASH_5720VENDOR_A_ATMEL_DB041D:
12587 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12588 break;
12589 case FLASH_5720VENDOR_M_ATMEL_DB081D:
12590 case FLASH_5720VENDOR_A_ATMEL_DB081D:
12591 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12592 break;
12593 default:
12594 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12595 break;
12596 }
12597 break;
12598 case FLASH_5720VENDOR_M_ST_M25PE10:
12599 case FLASH_5720VENDOR_M_ST_M45PE10:
12600 case FLASH_5720VENDOR_A_ST_M25PE10:
12601 case FLASH_5720VENDOR_A_ST_M45PE10:
12602 case FLASH_5720VENDOR_M_ST_M25PE20:
12603 case FLASH_5720VENDOR_M_ST_M45PE20:
12604 case FLASH_5720VENDOR_A_ST_M25PE20:
12605 case FLASH_5720VENDOR_A_ST_M45PE20:
12606 case FLASH_5720VENDOR_M_ST_M25PE40:
12607 case FLASH_5720VENDOR_M_ST_M45PE40:
12608 case FLASH_5720VENDOR_A_ST_M25PE40:
12609 case FLASH_5720VENDOR_A_ST_M45PE40:
12610 case FLASH_5720VENDOR_M_ST_M25PE80:
12611 case FLASH_5720VENDOR_M_ST_M45PE80:
12612 case FLASH_5720VENDOR_A_ST_M25PE80:
12613 case FLASH_5720VENDOR_A_ST_M45PE80:
12614 case FLASH_5720VENDOR_ST_25USPT:
12615 case FLASH_5720VENDOR_ST_45USPT:
12616 tp->nvram_jedecnum = JEDEC_ST;
12617 tg3_flag_set(tp, NVRAM_BUFFERED);
12618 tg3_flag_set(tp, FLASH);
12619
12620 switch (nvmpinstrp) {
12621 case FLASH_5720VENDOR_M_ST_M25PE20:
12622 case FLASH_5720VENDOR_M_ST_M45PE20:
12623 case FLASH_5720VENDOR_A_ST_M25PE20:
12624 case FLASH_5720VENDOR_A_ST_M45PE20:
12625 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12626 break;
12627 case FLASH_5720VENDOR_M_ST_M25PE40:
12628 case FLASH_5720VENDOR_M_ST_M45PE40:
12629 case FLASH_5720VENDOR_A_ST_M25PE40:
12630 case FLASH_5720VENDOR_A_ST_M45PE40:
12631 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12632 break;
12633 case FLASH_5720VENDOR_M_ST_M25PE80:
12634 case FLASH_5720VENDOR_M_ST_M45PE80:
12635 case FLASH_5720VENDOR_A_ST_M25PE80:
12636 case FLASH_5720VENDOR_A_ST_M45PE80:
12637 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12638 break;
12639 default:
12640 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12641 break;
12642 }
12643 break;
12644 default:
12645 tg3_flag_set(tp, NO_NVRAM);
12646 return;
12647 }
12648
12649 tg3_nvram_get_pagesize(tp, nvcfg1);
12650 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12651 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12652 }
12653
12654 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
12655 static void __devinit tg3_nvram_init(struct tg3 *tp)
12656 {
12657 tw32_f(GRC_EEPROM_ADDR,
12658 (EEPROM_ADDR_FSM_RESET |
12659 (EEPROM_DEFAULT_CLOCK_PERIOD <<
12660 EEPROM_ADDR_CLKPERD_SHIFT)));
12661
12662 msleep(1);
12663
12664 /* Enable seeprom accesses. */
12665 tw32_f(GRC_LOCAL_CTRL,
12666 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
12667 udelay(100);
12668
12669 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
12670 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
12671 tg3_flag_set(tp, NVRAM);
12672
12673 if (tg3_nvram_lock(tp)) {
12674 netdev_warn(tp->dev,
12675 "Cannot get nvram lock, %s failed\n",
12676 __func__);
12677 return;
12678 }
12679 tg3_enable_nvram_access(tp);
12680
12681 tp->nvram_size = 0;
12682
12683 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
12684 tg3_get_5752_nvram_info(tp);
12685 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
12686 tg3_get_5755_nvram_info(tp);
12687 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12688 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12689 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
12690 tg3_get_5787_nvram_info(tp);
12691 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
12692 tg3_get_5761_nvram_info(tp);
12693 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12694 tg3_get_5906_nvram_info(tp);
12695 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
12696 tg3_flag(tp, 57765_CLASS))
12697 tg3_get_57780_nvram_info(tp);
12698 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
12699 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
12700 tg3_get_5717_nvram_info(tp);
12701 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
12702 tg3_get_5720_nvram_info(tp);
12703 else
12704 tg3_get_nvram_info(tp);
12705
12706 if (tp->nvram_size == 0)
12707 tg3_get_nvram_size(tp);
12708
12709 tg3_disable_nvram_access(tp);
12710 tg3_nvram_unlock(tp);
12711
12712 } else {
12713 tg3_flag_clear(tp, NVRAM);
12714 tg3_flag_clear(tp, NVRAM_BUFFERED);
12715
12716 tg3_get_eeprom_size(tp);
12717 }
12718 }
12719
12720 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
12721 u32 offset, u32 len, u8 *buf)
12722 {
12723 int i, j, rc = 0;
12724 u32 val;
12725
12726 for (i = 0; i < len; i += 4) {
12727 u32 addr;
12728 __be32 data;
12729
12730 addr = offset + i;
12731
12732 memcpy(&data, buf + i, 4);
12733
12734 /*
12735 * The SEEPROM interface expects the data to always be opposite
12736 * the native endian format. We accomplish this by reversing
12737 * all the operations that would have been performed on the
12738 * data from a call to tg3_nvram_read_be32().
12739 */
12740 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
12741
12742 val = tr32(GRC_EEPROM_ADDR);
12743 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
12744
12745 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
12746 EEPROM_ADDR_READ);
12747 tw32(GRC_EEPROM_ADDR, val |
12748 (0 << EEPROM_ADDR_DEVID_SHIFT) |
12749 (addr & EEPROM_ADDR_ADDR_MASK) |
12750 EEPROM_ADDR_START |
12751 EEPROM_ADDR_WRITE);
12752
12753 for (j = 0; j < 1000; j++) {
12754 val = tr32(GRC_EEPROM_ADDR);
12755
12756 if (val & EEPROM_ADDR_COMPLETE)
12757 break;
12758 msleep(1);
12759 }
12760 if (!(val & EEPROM_ADDR_COMPLETE)) {
12761 rc = -EBUSY;
12762 break;
12763 }
12764 }
12765
12766 return rc;
12767 }
12768
12769 /* offset and length are dword aligned */
12770 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
12771 u8 *buf)
12772 {
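	/* Unbuffered flash requires a read-modify-write cycle per page:
	 * read the page into a bounce buffer, merge in the new data, then
	 * write-enable, erase and reprogram the page one word at a time.
	 */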
12773 int ret = 0;
12774 u32 pagesize = tp->nvram_pagesize;
12775 u32 pagemask = pagesize - 1;
12776 u32 nvram_cmd;
12777 u8 *tmp;
12778
12779 tmp = kmalloc(pagesize, GFP_KERNEL);
12780 if (tmp == NULL)
12781 return -ENOMEM;
12782
12783 while (len) {
12784 int j;
12785 u32 phy_addr, page_off, size;
12786
12787 phy_addr = offset & ~pagemask;
12788
12789 for (j = 0; j < pagesize; j += 4) {
12790 ret = tg3_nvram_read_be32(tp, phy_addr + j,
12791 (__be32 *) (tmp + j));
12792 if (ret)
12793 break;
12794 }
12795 if (ret)
12796 break;
12797
12798 page_off = offset & pagemask;
12799 size = pagesize;
12800 if (len < size)
12801 size = len;
12802
12803 len -= size;
12804
12805 memcpy(tmp + page_off, buf, size);
12806
12807 offset = offset + (pagesize - page_off);
12808
12809 tg3_enable_nvram_access(tp);
12810
12811 /*
12812 * Before we can erase the flash page, we need
12813 * to issue a special "write enable" command.
12814 */
12815 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12816
12817 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12818 break;
12819
12820 /* Erase the target page */
12821 tw32(NVRAM_ADDR, phy_addr);
12822
12823 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
12824 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
12825
12826 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12827 break;
12828
12829 /* Issue another write enable to start the write. */
12830 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12831
12832 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12833 break;
12834
12835 for (j = 0; j < pagesize; j += 4) {
12836 __be32 data;
12837
12838 data = *((__be32 *) (tmp + j));
12839
12840 tw32(NVRAM_WRDATA, be32_to_cpu(data));
12841
12842 tw32(NVRAM_ADDR, phy_addr + j);
12843
12844 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
12845 NVRAM_CMD_WR;
12846
12847 if (j == 0)
12848 nvram_cmd |= NVRAM_CMD_FIRST;
12849 else if (j == (pagesize - 4))
12850 nvram_cmd |= NVRAM_CMD_LAST;
12851
12852 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
12853 break;
12854 }
12855 if (ret)
12856 break;
12857 }
12858
12859 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12860 tg3_nvram_exec_cmd(tp, nvram_cmd);
12861
12862 kfree(tmp);
12863
12864 return ret;
12865 }
12866
12867 /* offset and length are dword aligned */
12868 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
12869 u8 *buf)
12870 {
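	/* Buffered parts accept word writes directly; just tag the first
	 * and last word of each page (and of the whole transfer) with the
	 * FIRST/LAST commands.
	 */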
12871 int i, ret = 0;
12872
12873 for (i = 0; i < len; i += 4, offset += 4) {
12874 u32 page_off, phy_addr, nvram_cmd;
12875 __be32 data;
12876
12877 memcpy(&data, buf + i, 4);
12878 tw32(NVRAM_WRDATA, be32_to_cpu(data));
12879
12880 page_off = offset % tp->nvram_pagesize;
12881
12882 phy_addr = tg3_nvram_phys_addr(tp, offset);
12883
12884 tw32(NVRAM_ADDR, phy_addr);
12885
12886 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
12887
12888 if (page_off == 0 || i == 0)
12889 nvram_cmd |= NVRAM_CMD_FIRST;
12890 if (page_off == (tp->nvram_pagesize - 4))
12891 nvram_cmd |= NVRAM_CMD_LAST;
12892
12893 if (i == (len - 4))
12894 nvram_cmd |= NVRAM_CMD_LAST;
12895
12896 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
12897 !tg3_flag(tp, 5755_PLUS) &&
12898 (tp->nvram_jedecnum == JEDEC_ST) &&
12899 (nvram_cmd & NVRAM_CMD_FIRST)) {
12900
12901 if ((ret = tg3_nvram_exec_cmd(tp,
12902 NVRAM_CMD_WREN | NVRAM_CMD_GO |
12903 NVRAM_CMD_DONE)))
12904
12905 break;
12906 }
12907 if (!tg3_flag(tp, FLASH)) {
12908 /* We always do complete word writes to eeprom. */
12909 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
12910 }
12911
12912 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
12913 break;
12914 }
12915 return ret;
12916 }
12917
12918 /* offset and length are dword aligned */
12919 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
12920 {
12921 int ret;
12922
12923 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
12924 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
12925 ~GRC_LCLCTRL_GPIO_OUTPUT1);
12926 udelay(40);
12927 }
12928
12929 if (!tg3_flag(tp, NVRAM)) {
12930 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
12931 } else {
12932 u32 grc_mode;
12933
12934 ret = tg3_nvram_lock(tp);
12935 if (ret)
12936 return ret;
12937
12938 tg3_enable_nvram_access(tp);
12939 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
12940 tw32(NVRAM_WRITE1, 0x406);
12941
12942 grc_mode = tr32(GRC_MODE);
12943 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
12944
12945 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
12946 ret = tg3_nvram_write_block_buffered(tp, offset, len,
12947 buf);
12948 } else {
12949 ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
12950 buf);
12951 }
12952
12953 grc_mode = tr32(GRC_MODE);
12954 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
12955
12956 tg3_disable_nvram_access(tp);
12957 tg3_nvram_unlock(tp);
12958 }
12959
12960 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
12961 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
12962 udelay(40);
12963 }
12964
12965 return ret;
12966 }
12967
12968 struct subsys_tbl_ent {
12969 u16 subsys_vendor, subsys_devid;
12970 u32 phy_id;
12971 };
12972
12973 static struct subsys_tbl_ent subsys_id_to_phy_id[] __devinitdata = {
12974 /* Broadcom boards. */
12975 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12976 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
12977 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12978 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
12979 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12980 TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
12981 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12982 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
12983 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12984 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
12985 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12986 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
12987 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12988 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
12989 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12990 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
12991 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12992 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
12993 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12994 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
12995 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12996 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
12997
12998 /* 3com boards. */
12999 { TG3PCI_SUBVENDOR_ID_3COM,
13000 TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
13001 { TG3PCI_SUBVENDOR_ID_3COM,
13002 TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
13003 { TG3PCI_SUBVENDOR_ID_3COM,
13004 TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
13005 { TG3PCI_SUBVENDOR_ID_3COM,
13006 TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
13007 { TG3PCI_SUBVENDOR_ID_3COM,
13008 TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
13009
13010 /* DELL boards. */
13011 { TG3PCI_SUBVENDOR_ID_DELL,
13012 TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
13013 { TG3PCI_SUBVENDOR_ID_DELL,
13014 TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
13015 { TG3PCI_SUBVENDOR_ID_DELL,
13016 TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
13017 { TG3PCI_SUBVENDOR_ID_DELL,
13018 TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
13019
13020 /* Compaq boards. */
13021 { TG3PCI_SUBVENDOR_ID_COMPAQ,
13022 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
13023 { TG3PCI_SUBVENDOR_ID_COMPAQ,
13024 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
13025 { TG3PCI_SUBVENDOR_ID_COMPAQ,
13026 TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
13027 { TG3PCI_SUBVENDOR_ID_COMPAQ,
13028 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
13029 { TG3PCI_SUBVENDOR_ID_COMPAQ,
13030 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
13031
13032 /* IBM boards. */
13033 { TG3PCI_SUBVENDOR_ID_IBM,
13034 TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
13035 };
13036
13037 static struct subsys_tbl_ent * __devinit tg3_lookup_by_subsys(struct tg3 *tp)
13038 {
13039 int i;
13040
13041 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
13042 if ((subsys_id_to_phy_id[i].subsys_vendor ==
13043 tp->pdev->subsystem_vendor) &&
13044 (subsys_id_to_phy_id[i].subsys_devid ==
13045 tp->pdev->subsystem_device))
13046 return &subsys_id_to_phy_id[i];
13047 }
13048 return NULL;
13049 }
13050
13051 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
13052 {
13053 u32 val;
13054
13055 tp->phy_id = TG3_PHY_ID_INVALID;
13056 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13057
13058 /* Assume an onboard device and WOL capable by default. */
13059 tg3_flag_set(tp, EEPROM_WRITE_PROT);
13060 tg3_flag_set(tp, WOL_CAP);
13061
13062 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13063 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
13064 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13065 tg3_flag_set(tp, IS_NIC);
13066 }
13067 val = tr32(VCPU_CFGSHDW);
13068 if (val & VCPU_CFGSHDW_ASPM_DBNC)
13069 tg3_flag_set(tp, ASPM_WORKAROUND);
13070 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
13071 (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
13072 tg3_flag_set(tp, WOL_ENABLE);
13073 device_set_wakeup_enable(&tp->pdev->dev, true);
13074 }
13075 goto done;
13076 }
13077
13078 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
13079 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
13080 u32 nic_cfg, led_cfg;
13081 u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
13082 int eeprom_phy_serdes = 0;
13083
13084 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
13085 tp->nic_sram_data_cfg = nic_cfg;
13086
13087 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
13088 ver >>= NIC_SRAM_DATA_VER_SHIFT;
13089 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13090 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
13091 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703 &&
13092 (ver > 0) && (ver < 0x100))
13093 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
13094
13095 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
13096 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
13097
13098 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
13099 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
13100 eeprom_phy_serdes = 1;
13101
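		/* The PHY ID is stored in SRAM as two packed halves; the
		 * shifts below reassemble the driver's internal TG3_PHY_ID_*
		 * format, mirroring how tg3_phy_probe() builds the same
		 * value from MII_PHYSID1/2, so the two sources compare
		 * directly.
		 */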
13102 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
13103 if (nic_phy_id != 0) {
13104 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
13105 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
13106
13107 eeprom_phy_id = (id1 >> 16) << 10;
13108 eeprom_phy_id |= (id2 & 0xfc00) << 16;
13109 eeprom_phy_id |= (id2 & 0x03ff) << 0;
13110 } else
13111 eeprom_phy_id = 0;
13112
13113 tp->phy_id = eeprom_phy_id;
13114 if (eeprom_phy_serdes) {
13115 if (!tg3_flag(tp, 5705_PLUS))
13116 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13117 else
13118 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
13119 }
13120
13121 if (tg3_flag(tp, 5750_PLUS))
13122 led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
13123 SHASTA_EXT_LED_MODE_MASK);
13124 else
13125 led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
13126
13127 switch (led_cfg) {
13128 default:
13129 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
13130 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13131 break;
13132
13133 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
13134 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
13135 break;
13136
13137 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
13138 tp->led_ctrl = LED_CTRL_MODE_MAC;
13139
13140 			/* Default to PHY_1_MODE if 0 (MAC_MODE) is read,
13141 			 * as happens with some older 5700/5701 bootcode.
13142 			 */
13143 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
13144 ASIC_REV_5700 ||
13145 GET_ASIC_REV(tp->pci_chip_rev_id) ==
13146 ASIC_REV_5701)
13147 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13148
13149 break;
13150
13151 case SHASTA_EXT_LED_SHARED:
13152 tp->led_ctrl = LED_CTRL_MODE_SHARED;
13153 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
13154 tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
13155 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
13156 LED_CTRL_MODE_PHY_2);
13157 break;
13158
13159 case SHASTA_EXT_LED_MAC:
13160 tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
13161 break;
13162
13163 case SHASTA_EXT_LED_COMBO:
13164 tp->led_ctrl = LED_CTRL_MODE_COMBO;
13165 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
13166 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
13167 LED_CTRL_MODE_PHY_2);
13168 break;
13169
13170 }
13171
13172 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13173 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
13174 tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
13175 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
13176
13177 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
13178 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13179
13180 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
13181 tg3_flag_set(tp, EEPROM_WRITE_PROT);
13182 if ((tp->pdev->subsystem_vendor ==
13183 PCI_VENDOR_ID_ARIMA) &&
13184 (tp->pdev->subsystem_device == 0x205a ||
13185 tp->pdev->subsystem_device == 0x2063))
13186 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13187 } else {
13188 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13189 tg3_flag_set(tp, IS_NIC);
13190 }
13191
13192 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
13193 tg3_flag_set(tp, ENABLE_ASF);
13194 if (tg3_flag(tp, 5750_PLUS))
13195 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
13196 }
13197
13198 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
13199 tg3_flag(tp, 5750_PLUS))
13200 tg3_flag_set(tp, ENABLE_APE);
13201
13202 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
13203 !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
13204 tg3_flag_clear(tp, WOL_CAP);
13205
13206 if (tg3_flag(tp, WOL_CAP) &&
13207 (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
13208 tg3_flag_set(tp, WOL_ENABLE);
13209 device_set_wakeup_enable(&tp->pdev->dev, true);
13210 }
13211
13212 if (cfg2 & (1 << 17))
13213 tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
13214
13215 		/* Serdes signal pre-emphasis in register 0x590 is set
13216 		 * by the bootcode if bit 18 is set. */
13217 if (cfg2 & (1 << 18))
13218 tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
13219
13220 if ((tg3_flag(tp, 57765_PLUS) ||
13221 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
13222 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
13223 (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
13224 tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
13225
13226 if (tg3_flag(tp, PCI_EXPRESS) &&
13227 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
13228 !tg3_flag(tp, 57765_PLUS)) {
13229 u32 cfg3;
13230
13231 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
13232 if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
13233 tg3_flag_set(tp, ASPM_WORKAROUND);
13234 }
13235
13236 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
13237 tg3_flag_set(tp, RGMII_INBAND_DISABLE);
13238 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
13239 tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
13240 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
13241 tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
13242 }
13243 done:
13244 if (tg3_flag(tp, WOL_CAP))
13245 device_set_wakeup_enable(&tp->pdev->dev,
13246 tg3_flag(tp, WOL_ENABLE));
13247 else
13248 device_set_wakeup_capable(&tp->pdev->dev, false);
13249 }
13250
13251 static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
13252 {
13253 int i;
13254 u32 val;
13255
13256 tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
13257 tw32(OTP_CTRL, cmd);
13258
13259 /* Wait for up to 1 ms for command to execute. */
13260 for (i = 0; i < 100; i++) {
13261 val = tr32(OTP_STATUS);
13262 if (val & OTP_STATUS_CMD_DONE)
13263 break;
13264 udelay(10);
13265 }
13266
13267 return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
13268 }
13269
13270 /* Read the gphy configuration from the OTP region of the chip. The gphy
13271 * configuration is a 32-bit value that straddles the alignment boundary.
13272 * We do two 32-bit reads and then shift and merge the results.
13273 */
13274 static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
13275 {
13276 u32 bhalf_otp, thalf_otp;
13277
13278 tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
13279
13280 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
13281 return 0;
13282
13283 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
13284
13285 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
13286 return 0;
13287
13288 thalf_otp = tr32(OTP_READ_DATA);
13289
13290 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
13291
13292 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
13293 return 0;
13294
13295 bhalf_otp = tr32(OTP_READ_DATA);
13296
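	/* The upper half of the gphy config is the low 16 bits of the
	 * word at MAGIC1; the lower half is the high 16 bits of the word
	 * at MAGIC2.  Shift and merge accordingly.
	 */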
13297 return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
13298 }
13299
13300 static void __devinit tg3_phy_init_link_config(struct tg3 *tp)
13301 {
13302 u32 adv = ADVERTISED_Autoneg;
13303
13304 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
13305 adv |= ADVERTISED_1000baseT_Half |
13306 ADVERTISED_1000baseT_Full;
13307
13308 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
13309 adv |= ADVERTISED_100baseT_Half |
13310 ADVERTISED_100baseT_Full |
13311 ADVERTISED_10baseT_Half |
13312 ADVERTISED_10baseT_Full |
13313 ADVERTISED_TP;
13314 else
13315 adv |= ADVERTISED_FIBRE;
13316
13317 tp->link_config.advertising = adv;
13318 tp->link_config.speed = SPEED_INVALID;
13319 tp->link_config.duplex = DUPLEX_INVALID;
13320 tp->link_config.autoneg = AUTONEG_ENABLE;
13321 tp->link_config.active_speed = SPEED_INVALID;
13322 tp->link_config.active_duplex = DUPLEX_INVALID;
13323 tp->link_config.orig_speed = SPEED_INVALID;
13324 tp->link_config.orig_duplex = DUPLEX_INVALID;
13325 tp->link_config.orig_autoneg = AUTONEG_INVALID;
13326 }
13327
13328 static int __devinit tg3_phy_probe(struct tg3 *tp)
13329 {
13330 u32 hw_phy_id_1, hw_phy_id_2;
13331 u32 hw_phy_id, hw_phy_id_masked;
13332 int err;
13333
13334 /* flow control autonegotiation is default behavior */
13335 tg3_flag_set(tp, PAUSE_AUTONEG);
13336 tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
13337
13338 if (tg3_flag(tp, USE_PHYLIB))
13339 return tg3_phy_init(tp);
13340
13341 /* Reading the PHY ID register can conflict with ASF
13342 * firmware access to the PHY hardware.
13343 */
13344 err = 0;
13345 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
13346 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
13347 } else {
13348 /* Now read the physical PHY_ID from the chip and verify
13349 * that it is sane. If it doesn't look good, we fall back
13350 		 * to the PHY ID found in the eeprom area or, failing
13351 		 * that, the hard-coded subsystem-ID table.
13352 */
13353 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
13354 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
13355
13356 hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
13357 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
13358 hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
13359
13360 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
13361 }
13362
13363 if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
13364 tp->phy_id = hw_phy_id;
13365 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
13366 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13367 else
13368 tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
13369 } else {
13370 if (tp->phy_id != TG3_PHY_ID_INVALID) {
13371 /* Do nothing, phy ID already set up in
13372 * tg3_get_eeprom_hw_cfg().
13373 */
13374 } else {
13375 struct subsys_tbl_ent *p;
13376
13377 /* No eeprom signature? Try the hardcoded
13378 * subsys device table.
13379 */
13380 p = tg3_lookup_by_subsys(tp);
13381 if (!p)
13382 return -ENODEV;
13383
13384 tp->phy_id = p->phy_id;
13385 if (!tp->phy_id ||
13386 tp->phy_id == TG3_PHY_ID_BCM8002)
13387 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13388 }
13389 }
13390
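	/* Only copper devices advertise EEE support: 5719/5720 parts,
	 * 5718 devices on non-5717-A0 silicon, and post-A0 57765 parts.
	 */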
13391 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
13392 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13393 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
13394 (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 &&
13395 tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) ||
13396 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
13397 tp->pci_chip_rev_id != CHIPREV_ID_57765_A0)))
13398 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
13399
13400 tg3_phy_init_link_config(tp);
13401
13402 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
13403 !tg3_flag(tp, ENABLE_APE) &&
13404 !tg3_flag(tp, ENABLE_ASF)) {
13405 u32 bmsr, dummy;
13406
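		/* MII_BMSR latches link-down events, so read it twice;
		 * only the second read reflects the current link state.
		 */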
13407 tg3_readphy(tp, MII_BMSR, &bmsr);
13408 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
13409 (bmsr & BMSR_LSTATUS))
13410 goto skip_phy_reset;
13411
13412 err = tg3_phy_reset(tp);
13413 if (err)
13414 return err;
13415
13416 tg3_phy_set_wirespeed(tp);
13417
13418 if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
13419 tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
13420 tp->link_config.flowctrl);
13421
13422 tg3_writephy(tp, MII_BMCR,
13423 BMCR_ANENABLE | BMCR_ANRESTART);
13424 }
13425 }
13426
13427 skip_phy_reset:
13428 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
13429 err = tg3_init_5401phy_dsp(tp);
13430 if (err)
13431 return err;
13432
13433 err = tg3_init_5401phy_dsp(tp);
13434 }
13435
13436 return err;
13437 }
13438
13439 static void __devinit tg3_read_vpd(struct tg3 *tp)
13440 {
13441 u8 *vpd_data;
13442 unsigned int block_end, rosize, len;
13443 u32 vpdlen;
13444 int j, i = 0;
13445
13446 vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
13447 if (!vpd_data)
13448 goto out_no_vpd;
13449
13450 i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
13451 if (i < 0)
13452 goto out_not_found;
13453
13454 rosize = pci_vpd_lrdt_size(&vpd_data[i]);
13455 block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
13456 i += PCI_VPD_LRDT_TAG_SIZE;
13457
13458 if (block_end > vpdlen)
13459 goto out_not_found;
13460
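	/* "1028" is Dell's PCI vendor ID (0x1028) in ASCII.  Only on
	 * such boards does the VENDOR0 keyword carry a bootcode version
	 * string worth appending to fw_ver below.
	 */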
13461 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13462 PCI_VPD_RO_KEYWORD_MFR_ID);
13463 if (j > 0) {
13464 len = pci_vpd_info_field_size(&vpd_data[j]);
13465
13466 j += PCI_VPD_INFO_FLD_HDR_SIZE;
13467 if (j + len > block_end || len != 4 ||
13468 memcmp(&vpd_data[j], "1028", 4))
13469 goto partno;
13470
13471 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13472 PCI_VPD_RO_KEYWORD_VENDOR0);
13473 if (j < 0)
13474 goto partno;
13475
13476 len = pci_vpd_info_field_size(&vpd_data[j]);
13477
13478 j += PCI_VPD_INFO_FLD_HDR_SIZE;
13479 if (j + len > block_end)
13480 goto partno;
13481
13482 memcpy(tp->fw_ver, &vpd_data[j], len);
13483 strncat(tp->fw_ver, " bc ", vpdlen - len - 1);
13484 }
13485
13486 partno:
13487 i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13488 PCI_VPD_RO_KEYWORD_PARTNO);
13489 if (i < 0)
13490 goto out_not_found;
13491
13492 len = pci_vpd_info_field_size(&vpd_data[i]);
13493
13494 i += PCI_VPD_INFO_FLD_HDR_SIZE;
13495 if (len > TG3_BPN_SIZE ||
13496 (len + i) > vpdlen)
13497 goto out_not_found;
13498
13499 memcpy(tp->board_part_number, &vpd_data[i], len);
13500
13501 out_not_found:
13502 kfree(vpd_data);
13503 if (tp->board_part_number[0])
13504 return;
13505
13506 out_no_vpd:
13507 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
13508 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717)
13509 strcpy(tp->board_part_number, "BCM5717");
13510 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
13511 strcpy(tp->board_part_number, "BCM5718");
13512 else
13513 goto nomatch;
13514 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
13515 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
13516 strcpy(tp->board_part_number, "BCM57780");
13517 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
13518 strcpy(tp->board_part_number, "BCM57760");
13519 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
13520 strcpy(tp->board_part_number, "BCM57790");
13521 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
13522 strcpy(tp->board_part_number, "BCM57788");
13523 else
13524 goto nomatch;
13525 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
13526 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
13527 strcpy(tp->board_part_number, "BCM57761");
13528 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
13529 strcpy(tp->board_part_number, "BCM57765");
13530 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
13531 strcpy(tp->board_part_number, "BCM57781");
13532 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
13533 strcpy(tp->board_part_number, "BCM57785");
13534 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
13535 strcpy(tp->board_part_number, "BCM57791");
13536 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
13537 strcpy(tp->board_part_number, "BCM57795");
13538 else
13539 goto nomatch;
13540 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766) {
13541 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
13542 strcpy(tp->board_part_number, "BCM57762");
13543 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
13544 strcpy(tp->board_part_number, "BCM57766");
13545 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
13546 strcpy(tp->board_part_number, "BCM57782");
13547 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
13548 strcpy(tp->board_part_number, "BCM57786");
13549 else
13550 goto nomatch;
13551 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13552 strcpy(tp->board_part_number, "BCM95906");
13553 } else {
13554 nomatch:
13555 strcpy(tp->board_part_number, "none");
13556 }
13557 }
13558
13559 static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
13560 {
13561 u32 val;
13562
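	/* A valid firmware image starts with a header word carrying the
	 * 0x0c000000 signature in its top bits, followed by a zero word.
	 */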
13563 if (tg3_nvram_read(tp, offset, &val) ||
13564 (val & 0xfc000000) != 0x0c000000 ||
13565 tg3_nvram_read(tp, offset + 4, &val) ||
13566 val != 0)
13567 return 0;
13568
13569 return 1;
13570 }
13571
13572 static void __devinit tg3_read_bc_ver(struct tg3 *tp)
13573 {
13574 u32 val, offset, start, ver_offset;
13575 int i, dst_off;
13576 bool newver = false;
13577
13578 if (tg3_nvram_read(tp, 0xc, &offset) ||
13579 tg3_nvram_read(tp, 0x4, &start))
13580 return;
13581
13582 offset = tg3_nvram_logical_addr(tp, offset);
13583
13584 if (tg3_nvram_read(tp, offset, &val))
13585 return;
13586
13587 if ((val & 0xfc000000) == 0x0c000000) {
13588 if (tg3_nvram_read(tp, offset + 4, &val))
13589 return;
13590
13591 if (val == 0)
13592 newver = true;
13593 }
13594
13595 dst_off = strlen(tp->fw_ver);
13596
13597 if (newver) {
13598 if (TG3_VER_SIZE - dst_off < 16 ||
13599 tg3_nvram_read(tp, offset + 8, &ver_offset))
13600 return;
13601
13602 offset = offset + ver_offset - start;
13603 for (i = 0; i < 16; i += 4) {
13604 __be32 v;
13605 if (tg3_nvram_read_be32(tp, offset + i, &v))
13606 return;
13607
13608 memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
13609 }
13610 } else {
13611 u32 major, minor;
13612
13613 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
13614 return;
13615
13616 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
13617 TG3_NVM_BCVER_MAJSFT;
13618 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
13619 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
13620 "v%d.%02d", major, minor);
13621 }
13622 }
13623
13624 static void __devinit tg3_read_hwsb_ver(struct tg3 *tp)
13625 {
13626 u32 val, major, minor;
13627
13628 /* Use native endian representation */
13629 if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
13630 return;
13631
13632 major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
13633 TG3_NVM_HWSB_CFG1_MAJSFT;
13634 minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
13635 TG3_NVM_HWSB_CFG1_MINSFT;
13636
13637 snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
13638 }
13639
13640 static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
13641 {
13642 u32 offset, major, minor, build;
13643
13644 strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
13645
13646 if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
13647 return;
13648
13649 switch (val & TG3_EEPROM_SB_REVISION_MASK) {
13650 case TG3_EEPROM_SB_REVISION_0:
13651 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
13652 break;
13653 case TG3_EEPROM_SB_REVISION_2:
13654 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
13655 break;
13656 case TG3_EEPROM_SB_REVISION_3:
13657 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
13658 break;
13659 case TG3_EEPROM_SB_REVISION_4:
13660 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
13661 break;
13662 case TG3_EEPROM_SB_REVISION_5:
13663 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
13664 break;
13665 case TG3_EEPROM_SB_REVISION_6:
13666 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
13667 break;
13668 default:
13669 return;
13670 }
13671
13672 if (tg3_nvram_read(tp, offset, &val))
13673 return;
13674
13675 build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
13676 TG3_EEPROM_SB_EDH_BLD_SHFT;
13677 major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
13678 TG3_EEPROM_SB_EDH_MAJ_SHFT;
13679 minor = val & TG3_EEPROM_SB_EDH_MIN_MASK;
13680
13681 if (minor > 99 || build > 26)
13682 return;
13683
13684 offset = strlen(tp->fw_ver);
13685 snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
13686 " v%d.%02d", major, minor);
13687
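	/* Builds 1-26 map to a single suffix letter 'a'-'z', which is
	 * why larger build numbers were rejected above.
	 */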
13688 if (build > 0) {
13689 offset = strlen(tp->fw_ver);
13690 if (offset < TG3_VER_SIZE - 1)
13691 tp->fw_ver[offset] = 'a' + build - 1;
13692 }
13693 }
13694
13695 static void __devinit tg3_read_mgmtfw_ver(struct tg3 *tp)
13696 {
13697 u32 val, offset, start;
13698 int i, vlen;
13699
13700 for (offset = TG3_NVM_DIR_START;
13701 offset < TG3_NVM_DIR_END;
13702 offset += TG3_NVM_DIRENT_SIZE) {
13703 if (tg3_nvram_read(tp, offset, &val))
13704 return;
13705
13706 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
13707 break;
13708 }
13709
13710 if (offset == TG3_NVM_DIR_END)
13711 return;
13712
13713 if (!tg3_flag(tp, 5705_PLUS))
13714 start = 0x08000000;
13715 else if (tg3_nvram_read(tp, offset - 4, &start))
13716 return;
13717
13718 if (tg3_nvram_read(tp, offset + 4, &offset) ||
13719 !tg3_fw_img_is_valid(tp, offset) ||
13720 tg3_nvram_read(tp, offset + 8, &val))
13721 return;
13722
13723 offset += val - start;
13724
13725 vlen = strlen(tp->fw_ver);
13726
13727 tp->fw_ver[vlen++] = ',';
13728 tp->fw_ver[vlen++] = ' ';
13729
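	/* Append up to 16 bytes of version text, truncating the final
	 * word if it would overflow the fw_ver buffer.
	 */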
13730 for (i = 0; i < 4; i++) {
13731 __be32 v;
13732 if (tg3_nvram_read_be32(tp, offset, &v))
13733 return;
13734
13735 offset += sizeof(v);
13736
13737 if (vlen > TG3_VER_SIZE - sizeof(v)) {
13738 memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
13739 break;
13740 }
13741
13742 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
13743 vlen += sizeof(v);
13744 }
13745 }
13746
13747 static void __devinit tg3_read_dash_ver(struct tg3 *tp)
13748 {
13749 int vlen;
13750 u32 apedata;
13751 char *fwtype;
13752
13753 if (!tg3_flag(tp, ENABLE_APE) || !tg3_flag(tp, ENABLE_ASF))
13754 return;
13755
13756 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
13757 if (apedata != APE_SEG_SIG_MAGIC)
13758 return;
13759
13760 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
13761 if (!(apedata & APE_FW_STATUS_READY))
13762 return;
13763
13764 apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
13765
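	/* The APE can run either NCSI or DASH management firmware; the
	 * feature bit says which, so the version string is labeled
	 * accordingly.
	 */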
13766 if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI) {
13767 tg3_flag_set(tp, APE_HAS_NCSI);
13768 fwtype = "NCSI";
13769 } else {
13770 fwtype = "DASH";
13771 }
13772
13773 vlen = strlen(tp->fw_ver);
13774
13775 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
13776 fwtype,
13777 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
13778 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
13779 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
13780 (apedata & APE_FW_VERSION_BLDMSK));
13781 }
13782
13783 static void __devinit tg3_read_fw_ver(struct tg3 *tp)
13784 {
13785 u32 val;
13786 bool vpd_vers = false;
13787
13788 if (tp->fw_ver[0] != 0)
13789 vpd_vers = true;
13790
13791 if (tg3_flag(tp, NO_NVRAM)) {
13792 strcat(tp->fw_ver, "sb");
13793 return;
13794 }
13795
13796 if (tg3_nvram_read(tp, 0, &val))
13797 return;
13798
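	/* The first NVRAM word identifies the image format: a full
	 * bootcode image, a self-boot (sb) image, or a hardware
	 * self-boot (hwsb) image, each with its own version layout.
	 */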
13799 if (val == TG3_EEPROM_MAGIC)
13800 tg3_read_bc_ver(tp);
13801 else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
13802 tg3_read_sb_ver(tp, val);
13803 else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
13804 tg3_read_hwsb_ver(tp);
13805 else
13806 return;
13807
13808 if (vpd_vers)
13809 goto done;
13810
13811 if (tg3_flag(tp, ENABLE_APE)) {
13812 if (tg3_flag(tp, ENABLE_ASF))
13813 tg3_read_dash_ver(tp);
13814 } else if (tg3_flag(tp, ENABLE_ASF)) {
13815 tg3_read_mgmtfw_ver(tp);
13816 }
13817
13818 done:
13819 tp->fw_ver[TG3_VER_SIZE - 1] = 0;
13820 }
13821
13822 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
13823
13824 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
13825 {
13826 if (tg3_flag(tp, LRG_PROD_RING_CAP))
13827 return TG3_RX_RET_MAX_SIZE_5717;
13828 else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
13829 return TG3_RX_RET_MAX_SIZE_5700;
13830 else
13831 return TG3_RX_RET_MAX_SIZE_5705;
13832 }
13833
13834 static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
13835 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
13836 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
13837 { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
13838 { },
13839 };
13840
13841 static int __devinit tg3_get_invariants(struct tg3 *tp)
13842 {
13843 u32 misc_ctrl_reg;
13844 u32 pci_state_reg, grc_misc_cfg;
13845 u32 val;
13846 u16 pci_cmd;
13847 int err;
13848
13849 /* Force memory write invalidate off. If we leave it on,
13850 * then on 5700_BX chips we have to enable a workaround.
13851 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
13852 	 * to match the cacheline size. The Broadcom driver has this
13853 	 * workaround but turns MWI off all the time and so never uses
13854 	 * it. This seems to suggest that the workaround is insufficient.
13855 */
13856 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
13857 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
13858 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
13859
13860 /* Important! -- Make sure register accesses are byteswapped
13861 * correctly. Also, for those chips that require it, make
13862 * sure that indirect register accesses are enabled before
13863 * the first operation.
13864 */
13865 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
13866 &misc_ctrl_reg);
13867 tp->misc_host_ctrl |= (misc_ctrl_reg &
13868 MISC_HOST_CTRL_CHIPREV);
13869 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
13870 tp->misc_host_ctrl);
13871
13872 tp->pci_chip_rev_id = (misc_ctrl_reg >>
13873 MISC_HOST_CTRL_CHIPREV_SHIFT);
13874 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
13875 u32 prod_id_asic_rev;
13876
13877 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
13878 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
13879 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
13880 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720)
13881 pci_read_config_dword(tp->pdev,
13882 TG3PCI_GEN2_PRODID_ASICREV,
13883 &prod_id_asic_rev);
13884 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
13885 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
13886 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
13887 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
13888 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
13889 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
13890 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
13891 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
13892 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
13893 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
13894 pci_read_config_dword(tp->pdev,
13895 TG3PCI_GEN15_PRODID_ASICREV,
13896 &prod_id_asic_rev);
13897 else
13898 pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
13899 &prod_id_asic_rev);
13900
13901 tp->pci_chip_rev_id = prod_id_asic_rev;
13902 }
13903
13904 /* Wrong chip ID in 5752 A0. This code can be removed later
13905 * as A0 is not in production.
13906 */
13907 if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
13908 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
13909
13910 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
13911 * we need to disable memory and use config. cycles
13912 * only to access all registers. The 5702/03 chips
13913 * can mistakenly decode the special cycles from the
13914 * ICH chipsets as memory write cycles, causing corruption
13915 * of register and memory space. Only certain ICH bridges
13916 * will drive special cycles with non-zero data during the
13917 * address phase which can fall within the 5703's address
13918 * range. This is not an ICH bug as the PCI spec allows
13919 * non-zero address during special cycles. However, only
13920 * these ICH bridges are known to drive non-zero addresses
13921 * during special cycles.
13922 *
13923 * Since special cycles do not cross PCI bridges, we only
13924 * enable this workaround if the 5703 is on the secondary
13925 * bus of these ICH bridges.
13926 */
13927 if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
13928 (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
13929 static struct tg3_dev_id {
13930 u32 vendor;
13931 u32 device;
13932 u32 rev;
13933 } ich_chipsets[] = {
13934 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
13935 PCI_ANY_ID },
13936 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
13937 PCI_ANY_ID },
13938 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
13939 0xa },
13940 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
13941 PCI_ANY_ID },
13942 { },
13943 };
13944 struct tg3_dev_id *pci_id = &ich_chipsets[0];
13945 struct pci_dev *bridge = NULL;
13946
13947 while (pci_id->vendor != 0) {
13948 bridge = pci_get_device(pci_id->vendor, pci_id->device,
13949 bridge);
13950 if (!bridge) {
13951 pci_id++;
13952 continue;
13953 }
13954 if (pci_id->rev != PCI_ANY_ID) {
13955 if (bridge->revision > pci_id->rev)
13956 continue;
13957 }
13958 if (bridge->subordinate &&
13959 (bridge->subordinate->number ==
13960 tp->pdev->bus->number)) {
13961 tg3_flag_set(tp, ICH_WORKAROUND);
13962 pci_dev_put(bridge);
13963 break;
13964 }
13965 }
13966 }
13967
13968 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
13969 static struct tg3_dev_id {
13970 u32 vendor;
13971 u32 device;
13972 } bridge_chipsets[] = {
13973 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
13974 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
13975 { },
13976 };
13977 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
13978 struct pci_dev *bridge = NULL;
13979
13980 while (pci_id->vendor != 0) {
13981 bridge = pci_get_device(pci_id->vendor,
13982 pci_id->device,
13983 bridge);
13984 if (!bridge) {
13985 pci_id++;
13986 continue;
13987 }
13988 if (bridge->subordinate &&
13989 (bridge->subordinate->number <=
13990 tp->pdev->bus->number) &&
13991 (bridge->subordinate->subordinate >=
13992 tp->pdev->bus->number)) {
13993 tg3_flag_set(tp, 5701_DMA_BUG);
13994 pci_dev_put(bridge);
13995 break;
13996 }
13997 }
13998 }
13999
14000 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
14001 	 * DMA addresses > 40-bit. The bridge may have additional
14002 	 * 57xx devices behind it in some 4-port NIC designs, for example.
14003 * Any tg3 device found behind the bridge will also need the 40-bit
14004 * DMA workaround.
14005 */
14006 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
14007 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
14008 tg3_flag_set(tp, 5780_CLASS);
14009 tg3_flag_set(tp, 40BIT_DMA_BUG);
14010 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
14011 } else {
14012 struct pci_dev *bridge = NULL;
14013
14014 do {
14015 bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
14016 PCI_DEVICE_ID_SERVERWORKS_EPB,
14017 bridge);
14018 if (bridge && bridge->subordinate &&
14019 (bridge->subordinate->number <=
14020 tp->pdev->bus->number) &&
14021 (bridge->subordinate->subordinate >=
14022 tp->pdev->bus->number)) {
14023 tg3_flag_set(tp, 40BIT_DMA_BUG);
14024 pci_dev_put(bridge);
14025 break;
14026 }
14027 } while (bridge);
14028 }
14029
14030 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
14031 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
14032 tp->pdev_peer = tg3_find_peer(tp);
14033
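	/* The flag assignments below form a nested family hierarchy:
	 * 5717_PLUS and 57765_CLASS imply 57765_PLUS, which implies
	 * 5755_PLUS, which implies 5750_PLUS, which implies 5705_PLUS.
	 */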
14034 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14035 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14036 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14037 tg3_flag_set(tp, 5717_PLUS);
14038
14039 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 ||
14040 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
14041 tg3_flag_set(tp, 57765_CLASS);
14042
14043 if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS))
14044 tg3_flag_set(tp, 57765_PLUS);
14045
14046 /* Intentionally exclude ASIC_REV_5906 */
14047 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14048 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
14049 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14050 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
14051 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14052 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
14053 tg3_flag(tp, 57765_PLUS))
14054 tg3_flag_set(tp, 5755_PLUS);
14055
14056 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
14057 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
14058 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
14059 tg3_flag(tp, 5755_PLUS) ||
14060 tg3_flag(tp, 5780_CLASS))
14061 tg3_flag_set(tp, 5750_PLUS);
14062
14063 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
14064 tg3_flag(tp, 5750_PLUS))
14065 tg3_flag_set(tp, 5705_PLUS);
14066
14067 /* Determine TSO capabilities */
14068 if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0)
14069 ; /* Do nothing. HW bug. */
14070 else if (tg3_flag(tp, 57765_PLUS))
14071 tg3_flag_set(tp, HW_TSO_3);
14072 else if (tg3_flag(tp, 5755_PLUS) ||
14073 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14074 tg3_flag_set(tp, HW_TSO_2);
14075 else if (tg3_flag(tp, 5750_PLUS)) {
14076 tg3_flag_set(tp, HW_TSO_1);
14077 tg3_flag_set(tp, TSO_BUG);
14078 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 &&
14079 tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
14080 tg3_flag_clear(tp, TSO_BUG);
14081 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14082 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
14083 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
14084 tg3_flag_set(tp, TSO_BUG);
14085 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
14086 tp->fw_needed = FIRMWARE_TG3TSO5;
14087 else
14088 tp->fw_needed = FIRMWARE_TG3TSO;
14089 }
14090
14091 /* Selectively allow TSO based on operating conditions */
14092 if (tg3_flag(tp, HW_TSO_1) ||
14093 tg3_flag(tp, HW_TSO_2) ||
14094 tg3_flag(tp, HW_TSO_3) ||
14095 tp->fw_needed) {
14096 /* For firmware TSO, assume ASF is disabled.
14097 * We'll disable TSO later if we discover ASF
14098 * is enabled in tg3_get_eeprom_hw_cfg().
14099 */
14100 tg3_flag_set(tp, TSO_CAPABLE);
14101 } else {
14102 tg3_flag_clear(tp, TSO_CAPABLE);
14103 tg3_flag_clear(tp, TSO_BUG);
14104 tp->fw_needed = NULL;
14105 }
14106
14107 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
14108 tp->fw_needed = FIRMWARE_TG3;
14109
14110 tp->irq_max = 1;
14111
14112 if (tg3_flag(tp, 5750_PLUS)) {
14113 tg3_flag_set(tp, SUPPORT_MSI);
14114 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
14115 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
14116 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
14117 tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
14118 tp->pdev_peer == tp->pdev))
14119 tg3_flag_clear(tp, SUPPORT_MSI);
14120
14121 if (tg3_flag(tp, 5755_PLUS) ||
14122 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14123 tg3_flag_set(tp, 1SHOT_MSI);
14124 }
14125
14126 if (tg3_flag(tp, 57765_PLUS)) {
14127 tg3_flag_set(tp, SUPPORT_MSIX);
14128 tp->irq_max = TG3_IRQ_MAX_VECS;
14129 tg3_rss_init_dflt_indir_tbl(tp);
14130 }
14131 }
14132
14133 if (tg3_flag(tp, 5755_PLUS))
14134 tg3_flag_set(tp, SHORT_DMA_BUG);
14135
14136 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
14137 tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
14138 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
14139 tp->dma_limit = TG3_TX_BD_DMA_MAX_2K;
14140
14141 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14142 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14143 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14144 tg3_flag_set(tp, LRG_PROD_RING_CAP);
14145
14146 if (tg3_flag(tp, 57765_PLUS) &&
14147 tp->pci_chip_rev_id != CHIPREV_ID_5719_A0)
14148 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
14149
14150 if (!tg3_flag(tp, 5705_PLUS) ||
14151 tg3_flag(tp, 5780_CLASS) ||
14152 tg3_flag(tp, USE_JUMBO_BDFLAG))
14153 tg3_flag_set(tp, JUMBO_CAPABLE);
14154
14155 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
14156 &pci_state_reg);
14157
14158 if (pci_is_pcie(tp->pdev)) {
14159 u16 lnkctl;
14160
14161 tg3_flag_set(tp, PCI_EXPRESS);
14162
14163 if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0) {
14164 int readrq = pcie_get_readrq(tp->pdev);
14165 if (readrq > 2048)
14166 pcie_set_readrq(tp->pdev, 2048);
14167 }
14168
14169 pci_read_config_word(tp->pdev,
14170 pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
14171 &lnkctl);
14172 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
14173 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
14174 ASIC_REV_5906) {
14175 tg3_flag_clear(tp, HW_TSO_2);
14176 tg3_flag_clear(tp, TSO_CAPABLE);
14177 }
14178 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14179 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
14180 tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
14181 tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
14182 tg3_flag_set(tp, CLKREQ_BUG);
14183 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) {
14184 tg3_flag_set(tp, L1PLLPD_EN);
14185 }
14186 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
14187 /* BCM5785 devices are effectively PCIe devices, and should
14188 * follow PCIe codepaths, but do not have a PCIe capabilities
14189 * section.
14190 */
14191 tg3_flag_set(tp, PCI_EXPRESS);
14192 } else if (!tg3_flag(tp, 5705_PLUS) ||
14193 tg3_flag(tp, 5780_CLASS)) {
14194 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
14195 if (!tp->pcix_cap) {
14196 dev_err(&tp->pdev->dev,
14197 "Cannot find PCI-X capability, aborting\n");
14198 return -EIO;
14199 }
14200
14201 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
14202 tg3_flag_set(tp, PCIX_MODE);
14203 }
14204
14205 /* If we have an AMD 762 or VIA K8T800 chipset, write
14206 * reordering to the mailbox registers done by the host
14207 * controller can cause major troubles. We read back from
14208 * every mailbox register write to force the writes to be
14209 * posted to the chip in order.
14210 */
14211 if (pci_dev_present(tg3_write_reorder_chipsets) &&
14212 !tg3_flag(tp, PCI_EXPRESS))
14213 tg3_flag_set(tp, MBOX_WRITE_REORDER);
14214
14215 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
14216 &tp->pci_cacheline_sz);
14217 pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
14218 &tp->pci_lat_timer);
14219 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
14220 tp->pci_lat_timer < 64) {
14221 tp->pci_lat_timer = 64;
14222 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
14223 tp->pci_lat_timer);
14224 }
14225
14226 /* Important! -- It is critical that the PCI-X hw workaround
14227 * situation is decided before the first MMIO register access.
14228 */
14229 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
14230 /* 5700 BX chips need to have their TX producer index
14231 * mailboxes written twice to workaround a bug.
14232 */
14233 tg3_flag_set(tp, TXD_MBOX_HWBUG);
14234
14235 /* If we are in PCI-X mode, enable register write workaround.
14236 *
14237 * The workaround is to use indirect register accesses
14238 * for all chip writes not to mailbox registers.
14239 */
14240 if (tg3_flag(tp, PCIX_MODE)) {
14241 u32 pm_reg;
14242
14243 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
14244
14245 			/* The chip can have its power management PCI config
14246 * space registers clobbered due to this bug.
14247 * So explicitly force the chip into D0 here.
14248 */
14249 pci_read_config_dword(tp->pdev,
14250 tp->pm_cap + PCI_PM_CTRL,
14251 &pm_reg);
14252 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
14253 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
14254 pci_write_config_dword(tp->pdev,
14255 tp->pm_cap + PCI_PM_CTRL,
14256 pm_reg);
14257
14258 /* Also, force SERR#/PERR# in PCI command. */
14259 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14260 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
14261 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14262 }
14263 }
14264
14265 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
14266 tg3_flag_set(tp, PCI_HIGH_SPEED);
14267 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
14268 tg3_flag_set(tp, PCI_32BIT);
14269
14270 /* Chip-specific fixup from Broadcom driver */
14271 if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
14272 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
14273 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
14274 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
14275 }
14276
14277 /* Default fast path register access methods */
14278 tp->read32 = tg3_read32;
14279 tp->write32 = tg3_write32;
14280 tp->read32_mbox = tg3_read32;
14281 tp->write32_mbox = tg3_write32;
14282 tp->write32_tx_mbox = tg3_write32;
14283 tp->write32_rx_mbox = tg3_write32;
14284
14285 /* Various workaround register access methods */
14286 if (tg3_flag(tp, PCIX_TARGET_HWBUG))
14287 tp->write32 = tg3_write_indirect_reg32;
14288 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
14289 (tg3_flag(tp, PCI_EXPRESS) &&
14290 tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
14291 /*
14292 		 * Back-to-back register writes can cause problems on these
14293 		 * chips; the workaround is to read back all reg writes
14294 * except those to mailbox regs.
14295 *
14296 * See tg3_write_indirect_reg32().
14297 */
14298 tp->write32 = tg3_write_flush_reg32;
14299 }
14300
14301 if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
14302 tp->write32_tx_mbox = tg3_write32_tx_mbox;
14303 if (tg3_flag(tp, MBOX_WRITE_REORDER))
14304 tp->write32_rx_mbox = tg3_write_flush_reg32;
14305 }
14306
14307 if (tg3_flag(tp, ICH_WORKAROUND)) {
14308 tp->read32 = tg3_read_indirect_reg32;
14309 tp->write32 = tg3_write_indirect_reg32;
14310 tp->read32_mbox = tg3_read_indirect_mbox;
14311 tp->write32_mbox = tg3_write_indirect_mbox;
14312 tp->write32_tx_mbox = tg3_write_indirect_mbox;
14313 tp->write32_rx_mbox = tg3_write_indirect_mbox;
14314
14315 iounmap(tp->regs);
14316 tp->regs = NULL;
14317
14318 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14319 pci_cmd &= ~PCI_COMMAND_MEMORY;
14320 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14321 }
14322 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14323 tp->read32_mbox = tg3_read32_mbox_5906;
14324 tp->write32_mbox = tg3_write32_mbox_5906;
14325 tp->write32_tx_mbox = tg3_write32_mbox_5906;
14326 tp->write32_rx_mbox = tg3_write32_mbox_5906;
14327 }
14328
14329 if (tp->write32 == tg3_write_indirect_reg32 ||
14330 (tg3_flag(tp, PCIX_MODE) &&
14331 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14332 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
14333 tg3_flag_set(tp, SRAM_USE_CONFIG);
14334
14335 /* The memory arbiter has to be enabled in order for SRAM accesses
14336 * to succeed. Normally on powerup the tg3 chip firmware will make
14337 * sure it is enabled, but other entities such as system netboot
14338 * code might disable it.
14339 */
14340 val = tr32(MEMARB_MODE);
14341 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
14342
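	/* Determine which PCI function this device is.  In PCI-X mode
	 * on 5704/5780-class parts the function number comes from the
	 * PCI-X status register; 5717/5719/5720 parts report it through
	 * the CPMU status word in SRAM, with chip-specific field masks.
	 */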
14343 tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
14344 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
14345 tg3_flag(tp, 5780_CLASS)) {
14346 if (tg3_flag(tp, PCIX_MODE)) {
14347 pci_read_config_dword(tp->pdev,
14348 tp->pcix_cap + PCI_X_STATUS,
14349 &val);
14350 tp->pci_fn = val & 0x7;
14351 }
14352 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
14353 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
14354 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) ==
14355 NIC_SRAM_CPMUSTAT_SIG) {
14356 tp->pci_fn = val & TG3_CPMU_STATUS_FMSK_5717;
14357 tp->pci_fn = tp->pci_fn ? 1 : 0;
14358 }
14359 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14360 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
14361 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
14362 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) ==
14363 NIC_SRAM_CPMUSTAT_SIG) {
14364 tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
14365 TG3_CPMU_STATUS_FSHFT_5719;
14366 }
14367 }
14368
14369 /* Get eeprom hw config before calling tg3_set_power_state().
14370 * In particular, the TG3_FLAG_IS_NIC flag must be
14371 * determined before calling tg3_set_power_state() so that
14372 * we know whether or not to switch out of Vaux power.
14373 * When the flag is set, it means that GPIO1 is used for eeprom
14374 * write protect and also implies that it is a LOM where GPIOs
14375 * are not used to switch power.
14376 */
14377 tg3_get_eeprom_hw_cfg(tp);
14378
14379 if (tp->fw_needed && tg3_flag(tp, ENABLE_ASF)) {
14380 tg3_flag_clear(tp, TSO_CAPABLE);
14381 tg3_flag_clear(tp, TSO_BUG);
14382 tp->fw_needed = NULL;
14383 }
14384
14385 if (tg3_flag(tp, ENABLE_APE)) {
14386 /* Allow reads and writes to the
14387 * APE register and memory space.
14388 */
14389 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
14390 PCISTATE_ALLOW_APE_SHMEM_WR |
14391 PCISTATE_ALLOW_APE_PSPACE_WR;
14392 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
14393 pci_state_reg);
14394
14395 tg3_ape_lock_init(tp);
14396 }
14397
14398 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14399 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
14400 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14401 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
14402 tg3_flag(tp, 57765_PLUS))
14403 tg3_flag_set(tp, CPMU_PRESENT);
14404
14405 /* Set up tp->grc_local_ctrl before calling
14406 * tg3_pwrsrc_switch_to_vmain(). GPIO1 driven high
14407 * will bring 5700's external PHY out of reset.
14408 * It is also used as eeprom write protect on LOMs.
14409 */
14410 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
14411 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14412 tg3_flag(tp, EEPROM_WRITE_PROT))
14413 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
14414 GRC_LCLCTRL_GPIO_OUTPUT1);
14415 /* Unused GPIO3 must be driven as output on 5752 because there
14416 * are no pull-up resistors on unused GPIO pins.
14417 */
14418 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
14419 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
14420
14421 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14422 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
14423 tg3_flag(tp, 57765_CLASS))
14424 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
14425
14426 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
14427 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
14428 /* Turn off the debug UART. */
14429 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
14430 if (tg3_flag(tp, IS_NIC))
14431 /* Keep VMain power. */
14432 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
14433 GRC_LCLCTRL_GPIO_OUTPUT0;
14434 }
14435
14436 /* Switch out of Vaux if it is a NIC */
14437 tg3_pwrsrc_switch_to_vmain(tp);
14438
14439 /* Derive initial jumbo mode from MTU assigned in
14440 * ether_setup() via the alloc_etherdev() call
14441 */
14442 if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
14443 tg3_flag_set(tp, JUMBO_RING_ENABLE);
14444
14445 /* Determine WakeOnLan speed to use. */
14446 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14447 tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
14448 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
14449 tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
14450 tg3_flag_clear(tp, WOL_SPEED_100MB);
14451 } else {
14452 tg3_flag_set(tp, WOL_SPEED_100MB);
14453 }
14454
14455 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14456 tp->phy_flags |= TG3_PHYFLG_IS_FET;
14457
14458 /* A few boards don't want Ethernet@WireSpeed phy feature */
14459 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14460 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14461 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
14462 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
14463 (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
14464 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
14465 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
14466
14467 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
14468 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
14469 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
14470 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
14471 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
14472
14473 if (tg3_flag(tp, 5705_PLUS) &&
14474 !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
14475 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
14476 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
14477 !tg3_flag(tp, 57765_PLUS)) {
14478 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14479 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
14480 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14481 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
14482 if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
14483 tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
14484 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
14485 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
14486 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
14487 } else
14488 tp->phy_flags |= TG3_PHYFLG_BER_BUG;
14489 }
14490
14491 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
14492 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
14493 tp->phy_otp = tg3_read_otp_phycfg(tp);
14494 if (tp->phy_otp == 0)
14495 tp->phy_otp = TG3_OTP_DEFAULT;
14496 }
14497
14498 if (tg3_flag(tp, CPMU_PRESENT))
14499 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
14500 else
14501 tp->mi_mode = MAC_MI_MODE_BASE;
14502
14503 tp->coalesce_mode = 0;
14504 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
14505 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
14506 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
14507
14508 /* Set these bits to enable statistics workaround. */
14509 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14510 tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
14511 tp->pci_chip_rev_id == CHIPREV_ID_5720_A0) {
14512 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
14513 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
14514 }
14515
14516 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14517 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
14518 tg3_flag_set(tp, USE_PHYLIB);
14519
14520 err = tg3_mdio_init(tp);
14521 if (err)
14522 return err;
14523
14524 /* Initialize data/descriptor byte/word swapping. */
14525 val = tr32(GRC_MODE);
14526 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14527 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
14528 GRC_MODE_WORD_SWAP_B2HRX_DATA |
14529 GRC_MODE_B2HRX_ENABLE |
14530 GRC_MODE_HTX2B_ENABLE |
14531 GRC_MODE_HOST_STACKUP);
14532 else
14533 val &= GRC_MODE_HOST_STACKUP;
14534
14535 tw32(GRC_MODE, val | tp->grc_mode);
14536
14537 tg3_switch_clocks(tp);
14538
14539 /* Clear this out for sanity. */
14540 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
14541
14542 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
14543 &pci_state_reg);
14544 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
14545 !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
14546 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
14547
14548 if (chiprevid == CHIPREV_ID_5701_A0 ||
14549 chiprevid == CHIPREV_ID_5701_B0 ||
14550 chiprevid == CHIPREV_ID_5701_B2 ||
14551 chiprevid == CHIPREV_ID_5701_B5) {
14552 void __iomem *sram_base;
14553
14554 /* Write some dummy words into the SRAM status block
14555 			 * area and see if they read back correctly. If the
14556 			 * readback is bad, force-enable the PCIX workaround.
14557 */
14558 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
14559
14560 writel(0x00000000, sram_base);
14561 writel(0x00000000, sram_base + 4);
14562 writel(0xffffffff, sram_base + 4);
14563 if (readl(sram_base) != 0x00000000)
14564 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
14565 }
14566 }
14567
14568 udelay(50);
14569 tg3_nvram_init(tp);
14570
14571 grc_misc_cfg = tr32(GRC_MISC_CFG);
14572 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
14573
14574 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14575 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
14576 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
14577 tg3_flag_set(tp, IS_5788);
14578
14579 if (!tg3_flag(tp, IS_5788) &&
14580 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
14581 tg3_flag_set(tp, TAGGED_STATUS);
14582 if (tg3_flag(tp, TAGGED_STATUS)) {
14583 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
14584 HOSTCC_MODE_CLRTICK_TXBD);
14585
14586 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
14587 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
14588 tp->misc_host_ctrl);
14589 }
14590
14591 /* Preserve the APE MAC_MODE bits */
14592 if (tg3_flag(tp, ENABLE_APE))
14593 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
14594 else
14595 tp->mac_mode = 0;
14596
14597 /* these are limited to 10/100 only */
14598 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
14599 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
14600 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14601 tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14602 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
14603 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
14604 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
14605 (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14606 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
14607 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
14608 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
14609 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 ||
14610 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
14611 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
14612 (tp->phy_flags & TG3_PHYFLG_IS_FET))
14613 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
14614
14615 err = tg3_phy_probe(tp);
14616 if (err) {
14617 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
14618 /* ... but do not return immediately ... */
14619 tg3_mdio_fini(tp);
14620 }
14621
14622 tg3_read_vpd(tp);
14623 tg3_read_fw_ver(tp);
14624
14625 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
14626 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14627 } else {
14628 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14629 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14630 else
14631 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14632 }
14633
14634 /* 5700 {AX,BX} chips have a broken status block link
14635 * change bit implementation, so we must use the
14636 * status register in those cases.
14637 */
14638 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14639 tg3_flag_set(tp, USE_LINKCHG_REG);
14640 else
14641 tg3_flag_clear(tp, USE_LINKCHG_REG);
14642
14643 /* The led_ctrl is set during tg3_phy_probe, here we might
14644 * have to force the link status polling mechanism based
14645 * upon subsystem IDs.
14646 */
14647 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
14648 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14649 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
14650 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14651 tg3_flag_set(tp, USE_LINKCHG_REG);
14652 }
14653
14654 /* For all SERDES we poll the MAC status register. */
14655 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
14656 tg3_flag_set(tp, POLL_SERDES);
14657 else
14658 tg3_flag_clear(tp, POLL_SERDES);
14659
14660 tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
14661 tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
14662 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14663 tg3_flag(tp, PCIX_MODE)) {
14664 tp->rx_offset = NET_SKB_PAD;
14665 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
14666 tp->rx_copy_thresh = ~(u16)0;
14667 #endif
14668 }
14669
14670 tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
14671 tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
14672 tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
14673
14674 tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
14675
14676 /* Increment the rx prod index on the rx std ring by at most
14677 	 * 8 for these chips to work around hw errata.
14678 */
14679 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
14680 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
14681 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
14682 tp->rx_std_max_post = 8;
14683
14684 if (tg3_flag(tp, ASPM_WORKAROUND))
14685 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
14686 PCIE_PWR_MGMT_L1_THRESH_MSK;
14687
14688 return err;
14689 }
14690
14691 #ifdef CONFIG_SPARC
14692 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
14693 {
14694 struct net_device *dev = tp->dev;
14695 struct pci_dev *pdev = tp->pdev;
14696 struct device_node *dp = pci_device_to_OF_node(pdev);
14697 const unsigned char *addr;
14698 int len;
14699
14700 addr = of_get_property(dp, "local-mac-address", &len);
14701 if (addr && len == 6) {
14702 memcpy(dev->dev_addr, addr, 6);
14703 memcpy(dev->perm_addr, dev->dev_addr, 6);
14704 return 0;
14705 }
14706 return -ENODEV;
14707 }
14708
14709 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
14710 {
14711 struct net_device *dev = tp->dev;
14712
14713 memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
14714 memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
14715 return 0;
14716 }
14717 #endif
14718
14719 static int __devinit tg3_get_device_address(struct tg3 *tp)
14720 {
14721 struct net_device *dev = tp->dev;
14722 u32 hi, lo, mac_offset;
14723 int addr_ok = 0;
14724
14725 #ifdef CONFIG_SPARC
14726 if (!tg3_get_macaddr_sparc(tp))
14727 return 0;
14728 #endif
14729
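	/* Select the NVRAM offset of this function's MAC address:
	 * 0x7c by default, 0xcc for the second port on 5704/5780-class
	 * parts and for odd functions on 5717+ (plus 0x18c for
	 * functions above 1), and 0x10 on 5906.
	 */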
14730 mac_offset = 0x7c;
14731 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
14732 tg3_flag(tp, 5780_CLASS)) {
14733 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
14734 mac_offset = 0xcc;
14735 if (tg3_nvram_lock(tp))
14736 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
14737 else
14738 tg3_nvram_unlock(tp);
14739 } else if (tg3_flag(tp, 5717_PLUS)) {
14740 if (tp->pci_fn & 1)
14741 mac_offset = 0xcc;
14742 if (tp->pci_fn > 1)
14743 mac_offset += 0x18c;
14744 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14745 mac_offset = 0x10;
14746
14747 /* First try to get it from MAC address mailbox. */
14748 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
14749 if ((hi >> 16) == 0x484b) {
14750 dev->dev_addr[0] = (hi >> 8) & 0xff;
14751 dev->dev_addr[1] = (hi >> 0) & 0xff;
14752
14753 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
14754 dev->dev_addr[2] = (lo >> 24) & 0xff;
14755 dev->dev_addr[3] = (lo >> 16) & 0xff;
14756 dev->dev_addr[4] = (lo >> 8) & 0xff;
14757 dev->dev_addr[5] = (lo >> 0) & 0xff;
14758
14759 /* Some old bootcode may report a 0 MAC address in SRAM */
14760 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
14761 }
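	/* Worked example with illustrative values: if the bootcode had
	 * left hi = 0x484b0a1b and lo = 0x2c3d4e5f in the mailbox, the
	 * 0x484b signature check passes and the shifts above unpack the
	 * MAC address 0a:1b:2c:3d:4e:5f.
	 */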
14762 if (!addr_ok) {
14763 /* Next, try NVRAM. */
14764 if (!tg3_flag(tp, NO_NVRAM) &&
14765 !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
14766 !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
14767 memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
14768 memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
14769 }
14770 /* Finally just fetch it out of the MAC control regs. */
14771 else {
14772 hi = tr32(MAC_ADDR_0_HIGH);
14773 lo = tr32(MAC_ADDR_0_LOW);
14774
14775 dev->dev_addr[5] = lo & 0xff;
14776 dev->dev_addr[4] = (lo >> 8) & 0xff;
14777 dev->dev_addr[3] = (lo >> 16) & 0xff;
14778 dev->dev_addr[2] = (lo >> 24) & 0xff;
14779 dev->dev_addr[1] = hi & 0xff;
14780 dev->dev_addr[0] = (hi >> 8) & 0xff;
14781 }
14782 }
14783
14784 if (!is_valid_ether_addr(&dev->dev_addr[0])) {
14785 #ifdef CONFIG_SPARC
14786 if (!tg3_get_default_macaddr_sparc(tp))
14787 return 0;
14788 #endif
14789 return -EINVAL;
14790 }
14791 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
14792 return 0;
14793 }
14794
14795 #define BOUNDARY_SINGLE_CACHELINE 1
14796 #define BOUNDARY_MULTI_CACHELINE 2
14797
14798 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
14799 {
14800 int cacheline_size;
14801 u8 byte;
14802 int goal;
14803
14804 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
14805 if (byte == 0)
14806 cacheline_size = 1024;
14807 else
14808 cacheline_size = (int) byte * 4;
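	/* PCI_CACHE_LINE_SIZE is specified in 32-bit words, hence the
	 * multiply by 4: e.g. a register value of 0x10 (16 words) means
	 * a 64-byte cache line. A value of 0 means the size was never
	 * programmed and is pessimistically treated as 1024 bytes above.
	 */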
14809
14810 /* On 5703 and later chips, the boundary bits have no
14811 * effect.
14812 */
14813 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14814 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
14815 !tg3_flag(tp, PCI_EXPRESS))
14816 goto out;
14817
14818 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
14819 goal = BOUNDARY_MULTI_CACHELINE;
14820 #else
14821 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
14822 goal = BOUNDARY_SINGLE_CACHELINE;
14823 #else
14824 goal = 0;
14825 #endif
14826 #endif
14827
14828 if (tg3_flag(tp, 57765_PLUS)) {
14829 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
14830 goto out;
14831 }
14832
14833 if (!goal)
14834 goto out;
14835
14836 /* PCI controllers on most RISC systems tend to disconnect
14837 * when a device tries to burst across a cache-line boundary.
14838 * Therefore, letting tg3 do so just wastes PCI bandwidth.
14839 *
14840 * Unfortunately, for PCI-E there are only limited
14841 * write-side controls for this, and thus for reads
14842 * we will still get the disconnects. We'll also waste
14843 * these PCI cycles for both read and write for chips
14844 * other than 5700 and 5701 which do not implement the
14845 * boundary bits.
14846 */
14847 if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
14848 switch (cacheline_size) {
14849 case 16:
14850 case 32:
14851 case 64:
14852 case 128:
14853 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14854 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
14855 DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
14856 } else {
14857 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
14858 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
14859 }
14860 break;
14861
14862 case 256:
14863 val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
14864 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
14865 break;
14866
14867 default:
14868 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
14869 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
14870 break;
14871 }
14872 } else if (tg3_flag(tp, PCI_EXPRESS)) {
14873 switch (cacheline_size) {
14874 case 16:
14875 case 32:
14876 case 64:
14877 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14878 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
14879 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
14880 break;
14881 }
14882 /* fallthrough */
14883 case 128:
14884 default:
14885 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
14886 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
14887 break;
14888 }
14889 } else {
14890 switch (cacheline_size) {
14891 case 16:
14892 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14893 val |= (DMA_RWCTRL_READ_BNDRY_16 |
14894 DMA_RWCTRL_WRITE_BNDRY_16);
14895 break;
14896 }
14897 /* fallthrough */
14898 case 32:
14899 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14900 val |= (DMA_RWCTRL_READ_BNDRY_32 |
14901 DMA_RWCTRL_WRITE_BNDRY_32);
14902 break;
14903 }
14904 /* fallthrough */
14905 case 64:
14906 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14907 val |= (DMA_RWCTRL_READ_BNDRY_64 |
14908 DMA_RWCTRL_WRITE_BNDRY_64);
14909 break;
14910 }
14911 /* fallthrough */
14912 case 128:
14913 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14914 val |= (DMA_RWCTRL_READ_BNDRY_128 |
14915 DMA_RWCTRL_WRITE_BNDRY_128);
14916 break;
14917 }
14918 /* fallthrough */
14919 case 256:
14920 val |= (DMA_RWCTRL_READ_BNDRY_256 |
14921 DMA_RWCTRL_WRITE_BNDRY_256);
14922 break;
14923 case 512:
14924 val |= (DMA_RWCTRL_READ_BNDRY_512 |
14925 DMA_RWCTRL_WRITE_BNDRY_512);
14926 break;
14927 case 1024:
14928 default:
14929 val |= (DMA_RWCTRL_READ_BNDRY_1024 |
14930 DMA_RWCTRL_WRITE_BNDRY_1024);
14931 break;
14932 }
14933 }
14934
14935 out:
14936 return val;
14937 }
14938
14939 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
14940 {
14941 struct tg3_internal_buffer_desc test_desc;
14942 u32 sram_dma_descs;
14943 int i, ret;
14944
14945 sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
14946
14947 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
14948 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
14949 tw32(RDMAC_STATUS, 0);
14950 tw32(WDMAC_STATUS, 0);
14951
14952 tw32(BUFMGR_MODE, 0);
14953 tw32(FTQ_RESET, 0);
14954
14955 test_desc.addr_hi = ((u64) buf_dma) >> 32;
14956 test_desc.addr_lo = buf_dma & 0xffffffff;
14957 test_desc.nic_mbuf = 0x00002100;
14958 test_desc.len = size;
14959
14960 /*
14961 * HP ZX1 systems were seeing test failures for 5701 cards running
14962 * at 33MHz the *second* time the tg3 driver was loaded after an
14963 * initial scan.
14964 *
14965 * Broadcom tells me:
14966 * ...the DMA engine is connected to the GRC block and a DMA
14967 * reset may affect the GRC block in some unpredictable way...
14968 * The behavior of resets to individual blocks has not been tested.
14969 *
14970 * Broadcom noted the GRC reset will also reset all sub-components.
14971 */
14972 if (to_device) {
14973 test_desc.cqid_sqid = (13 << 8) | 2;
14974
14975 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
14976 udelay(40);
14977 } else {
14978 test_desc.cqid_sqid = (16 << 8) | 7;
14979
14980 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
14981 udelay(40);
14982 }
14983 test_desc.flags = 0x00000005;
14984
14985 for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
14986 u32 val;
14987
14988 val = *(((u32 *)&test_desc) + i);
14989 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
14990 sram_dma_descs + (i * sizeof(u32)));
14991 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
14992 }
14993 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
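	/* Note on the write-out above: TG3PCI_MEM_WIN_BASE_ADDR and
	 * TG3PCI_MEM_WIN_DATA form an indirect window into NIC-local
	 * SRAM; each 32-bit word of the descriptor is stored by pointing
	 * the window at sram_dma_descs + offset and writing the word,
	 * after which the window base is cleared again.
	 */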
14994
14995 if (to_device)
14996 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
14997 else
14998 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
14999
15000 ret = -ENODEV;
15001 for (i = 0; i < 40; i++) {
15002 u32 val;
15003
15004 if (to_device)
15005 val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
15006 else
15007 val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
15008 if ((val & 0xffff) == sram_dma_descs) {
15009 ret = 0;
15010 break;
15011 }
15012
15013 udelay(100);
15014 }
15015
15016 return ret;
15017 }
15018
15019 #define TEST_BUFFER_SIZE 0x2000
15020
15021 static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
15022 { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
15023 { },
15024 };
15025
15026 static int __devinit tg3_test_dma(struct tg3 *tp)
15027 {
15028 dma_addr_t buf_dma;
15029 u32 *buf, saved_dma_rwctrl;
15030 int ret = 0;
15031
15032 buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
15033 &buf_dma, GFP_KERNEL);
15034 if (!buf) {
15035 ret = -ENOMEM;
15036 goto out_nofree;
15037 }
15038
15039 tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
15040 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
15041
15042 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
15043
15044 if (tg3_flag(tp, 57765_PLUS))
15045 goto out;
15046
15047 if (tg3_flag(tp, PCI_EXPRESS)) {
15048 /* DMA read watermark not used on PCIE */
15049 tp->dma_rwctrl |= 0x00180000;
15050 } else if (!tg3_flag(tp, PCIX_MODE)) {
15051 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
15052 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
15053 tp->dma_rwctrl |= 0x003f0000;
15054 else
15055 tp->dma_rwctrl |= 0x003f000f;
15056 } else {
15057 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
15058 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
15059 u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
15060 u32 read_water = 0x7;
15061
15062 /* If the 5704 is behind the EPB bridge, we can
15063 * do the less restrictive ONE_DMA workaround for
15064 * better performance.
15065 */
15066 if (tg3_flag(tp, 40BIT_DMA_BUG) &&
15067 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
15068 tp->dma_rwctrl |= 0x8000;
15069 else if (ccval == 0x6 || ccval == 0x7)
15070 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
15071
15072 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
15073 read_water = 4;
15074 /* Set bit 23 to enable PCIX hw bug fix */
15075 tp->dma_rwctrl |=
15076 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
15077 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
15078 (1 << 23);
15079 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
15080 /* 5780 always in PCIX mode */
15081 tp->dma_rwctrl |= 0x00144000;
15082 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
15083 /* 5714 always in PCIX mode */
15084 tp->dma_rwctrl |= 0x00148000;
15085 } else {
15086 tp->dma_rwctrl |= 0x001b000f;
15087 }
15088 }
15089
15090 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
15091 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
15092 tp->dma_rwctrl &= 0xfffffff0;
15093
15094 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
15095 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
15096 /* Remove this if it causes problems for some boards. */
15097 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
15098
15099 /* On 5700/5701 chips, we need to set this bit.
15100 * Otherwise the chip will issue cacheline transactions
15101 * to streamable DMA memory with not all the byte
15102 * enables turned on. This is an error on several
15103 * RISC PCI controllers, in particular sparc64.
15104 *
15105 * On 5703/5704 chips, this bit has been reassigned
15106 * a different meaning. In particular, it is used
15107 * on those chips to enable a PCI-X workaround.
15108 */
15109 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
15110 }
15111
15112 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15113
15114 #if 0
15115 /* Unneeded, already done by tg3_get_invariants. */
15116 tg3_switch_clocks(tp);
15117 #endif
15118
15119 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
15120 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
15121 goto out;
15122
15123 /* It is best to perform DMA test with maximum write burst size
15124 * to expose the 5700/5701 write DMA bug.
15125 */
15126 saved_dma_rwctrl = tp->dma_rwctrl;
15127 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
15128 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15129
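	/* The loop below is a plain pattern test, roughly:
	 *
	 *	fill buf with p[i] = i
	 *	DMA buf to NIC SRAM	(tg3_do_test_dma(..., 1))
	 *	DMA NIC SRAM to buf	(tg3_do_test_dma(..., 0))
	 *	verify p[i] == i
	 *
	 * On corruption it retries once with the write boundary clamped
	 * to 16 bytes before declaring the device unusable.
	 */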
15130 while (1) {
15131 u32 *p = buf, i;
15132
15133 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
15134 p[i] = i;
15135
15136 /* Send the buffer to the chip. */
15137 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
15138 if (ret) {
15139 dev_err(&tp->pdev->dev,
15140 "%s: Buffer write failed. err = %d\n",
15141 __func__, ret);
15142 break;
15143 }
15144
15145 #if 0
15146 /* validate data reached card RAM correctly. */
15147 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
15148 u32 val;
15149 tg3_read_mem(tp, 0x2100 + (i*4), &val);
15150 if (le32_to_cpu(val) != p[i]) {
15151 dev_err(&tp->pdev->dev,
15152 "%s: Buffer corrupted on device! "
15153 "(%d != %d)\n", __func__, val, i);
15154 /* ret = -ENODEV here? */
15155 }
15156 p[i] = 0;
15157 }
15158 #endif
15159 /* Now read it back. */
15160 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
15161 if (ret) {
15162 dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
15163 "err = %d\n", __func__, ret);
15164 break;
15165 }
15166
15167 /* Verify it. */
15168 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
15169 if (p[i] == i)
15170 continue;
15171
15172 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
15173 DMA_RWCTRL_WRITE_BNDRY_16) {
15174 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
15175 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
15176 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15177 break;
15178 } else {
15179 dev_err(&tp->pdev->dev,
15180 "%s: Buffer corrupted on read back! "
15181 "(%d != %d)\n", __func__, p[i], i);
15182 ret = -ENODEV;
15183 goto out;
15184 }
15185 }
15186
15187 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
15188 /* Success. */
15189 ret = 0;
15190 break;
15191 }
15192 }
15193 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
15194 DMA_RWCTRL_WRITE_BNDRY_16) {
15195 /* DMA test passed without adjusting DMA boundary;
15196 * now look for chipsets that are known to expose the
15197 * DMA bug without failing the test.
15198 */
15199 if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
15200 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
15201 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
15202 } else {
15203 /* Safe to use the calculated DMA boundary. */
15204 tp->dma_rwctrl = saved_dma_rwctrl;
15205 }
15206
15207 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15208 }
15209
15210 out:
15211 dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
15212 out_nofree:
15213 return ret;
15214 }
15215
15216 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
15217 {
15218 if (tg3_flag(tp, 57765_PLUS)) {
15219 tp->bufmgr_config.mbuf_read_dma_low_water =
15220 DEFAULT_MB_RDMA_LOW_WATER_5705;
15221 tp->bufmgr_config.mbuf_mac_rx_low_water =
15222 DEFAULT_MB_MACRX_LOW_WATER_57765;
15223 tp->bufmgr_config.mbuf_high_water =
15224 DEFAULT_MB_HIGH_WATER_57765;
15225
15226 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15227 DEFAULT_MB_RDMA_LOW_WATER_5705;
15228 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15229 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
15230 tp->bufmgr_config.mbuf_high_water_jumbo =
15231 DEFAULT_MB_HIGH_WATER_JUMBO_57765;
15232 } else if (tg3_flag(tp, 5705_PLUS)) {
15233 tp->bufmgr_config.mbuf_read_dma_low_water =
15234 DEFAULT_MB_RDMA_LOW_WATER_5705;
15235 tp->bufmgr_config.mbuf_mac_rx_low_water =
15236 DEFAULT_MB_MACRX_LOW_WATER_5705;
15237 tp->bufmgr_config.mbuf_high_water =
15238 DEFAULT_MB_HIGH_WATER_5705;
15239 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
15240 tp->bufmgr_config.mbuf_mac_rx_low_water =
15241 DEFAULT_MB_MACRX_LOW_WATER_5906;
15242 tp->bufmgr_config.mbuf_high_water =
15243 DEFAULT_MB_HIGH_WATER_5906;
15244 }
15245
15246 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15247 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
15248 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15249 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
15250 tp->bufmgr_config.mbuf_high_water_jumbo =
15251 DEFAULT_MB_HIGH_WATER_JUMBO_5780;
15252 } else {
15253 tp->bufmgr_config.mbuf_read_dma_low_water =
15254 DEFAULT_MB_RDMA_LOW_WATER;
15255 tp->bufmgr_config.mbuf_mac_rx_low_water =
15256 DEFAULT_MB_MACRX_LOW_WATER;
15257 tp->bufmgr_config.mbuf_high_water =
15258 DEFAULT_MB_HIGH_WATER;
15259
15260 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15261 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
15262 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15263 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
15264 tp->bufmgr_config.mbuf_high_water_jumbo =
15265 DEFAULT_MB_HIGH_WATER_JUMBO;
15266 }
15267
15268 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
15269 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
15270 }
15271
15272 static char * __devinit tg3_phy_string(struct tg3 *tp)
15273 {
15274 switch (tp->phy_id & TG3_PHY_ID_MASK) {
15275 case TG3_PHY_ID_BCM5400: return "5400";
15276 case TG3_PHY_ID_BCM5401: return "5401";
15277 case TG3_PHY_ID_BCM5411: return "5411";
15278 case TG3_PHY_ID_BCM5701: return "5701";
15279 case TG3_PHY_ID_BCM5703: return "5703";
15280 case TG3_PHY_ID_BCM5704: return "5704";
15281 case TG3_PHY_ID_BCM5705: return "5705";
15282 case TG3_PHY_ID_BCM5750: return "5750";
15283 case TG3_PHY_ID_BCM5752: return "5752";
15284 case TG3_PHY_ID_BCM5714: return "5714";
15285 case TG3_PHY_ID_BCM5780: return "5780";
15286 case TG3_PHY_ID_BCM5755: return "5755";
15287 case TG3_PHY_ID_BCM5787: return "5787";
15288 case TG3_PHY_ID_BCM5784: return "5784";
15289 case TG3_PHY_ID_BCM5756: return "5722/5756";
15290 case TG3_PHY_ID_BCM5906: return "5906";
15291 case TG3_PHY_ID_BCM5761: return "5761";
15292 case TG3_PHY_ID_BCM5718C: return "5718C";
15293 case TG3_PHY_ID_BCM5718S: return "5718S";
15294 case TG3_PHY_ID_BCM57765: return "57765";
15295 case TG3_PHY_ID_BCM5719C: return "5719C";
15296 case TG3_PHY_ID_BCM5720C: return "5720C";
15297 case TG3_PHY_ID_BCM8002: return "8002/serdes";
15298 case 0: return "serdes";
15299 default: return "unknown";
15300 }
15301 }
15302
15303 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
15304 {
15305 if (tg3_flag(tp, PCI_EXPRESS)) {
15306 strcpy(str, "PCI Express");
15307 return str;
15308 } else if (tg3_flag(tp, PCIX_MODE)) {
15309 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
15310
15311 strcpy(str, "PCIX:");
15312
15313 if ((clock_ctrl == 7) ||
15314 ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
15315 GRC_MISC_CFG_BOARD_ID_5704CIOBE))
15316 strcat(str, "133MHz");
15317 else if (clock_ctrl == 0)
15318 strcat(str, "33MHz");
15319 else if (clock_ctrl == 2)
15320 strcat(str, "50MHz");
15321 else if (clock_ctrl == 4)
15322 strcat(str, "66MHz");
15323 else if (clock_ctrl == 6)
15324 strcat(str, "100MHz");
15325 } else {
15326 strcpy(str, "PCI:");
15327 if (tg3_flag(tp, PCI_HIGH_SPEED))
15328 strcat(str, "66MHz");
15329 else
15330 strcat(str, "33MHz");
15331 }
15332 if (tg3_flag(tp, PCI_32BIT))
15333 strcat(str, ":32-bit");
15334 else
15335 strcat(str, ":64-bit");
15336 return str;
15337 }
15338
15339 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
15340 {
15341 struct pci_dev *peer;
15342 unsigned int func, devnr = tp->pdev->devfn & ~7;
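	/* devfn packs the PCI slot and function as (slot << 3) | func
	 * (cf. the PCI_SLOT()/PCI_FUNC() macros), so masking off the low
	 * three bits gives function 0 of our own slot and the loop below
	 * probes all eight possible sibling functions.
	 */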
15343
15344 for (func = 0; func < 8; func++) {
15345 peer = pci_get_slot(tp->pdev->bus, devnr | func);
15346 if (peer && peer != tp->pdev)
15347 break;
15348 pci_dev_put(peer);
15349 }
15350 /* 5704 can be configured in single-port mode; set peer to
15351 * tp->pdev in that case.
15352 */
15353 if (!peer) {
15354 peer = tp->pdev;
15355 return peer;
15356 }
15357
15358 /*
15359 * We don't need to keep the refcount elevated; there's no way
15360 * to remove one half of this device without removing the other.
15361 */
15362 pci_dev_put(peer);
15363
15364 return peer;
15365 }
15366
15367 static void __devinit tg3_init_coal(struct tg3 *tp)
15368 {
15369 struct ethtool_coalesce *ec = &tp->coal;
15370
15371 memset(ec, 0, sizeof(*ec));
15372 ec->cmd = ETHTOOL_GCOALESCE;
15373 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
15374 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
15375 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
15376 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
15377 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
15378 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
15379 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
15380 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
15381 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
15382
15383 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
15384 HOSTCC_MODE_CLRTICK_TXBD)) {
15385 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
15386 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
15387 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
15388 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
15389 }
15390
15391 if (tg3_flag(tp, 5705_PLUS)) {
15392 ec->rx_coalesce_usecs_irq = 0;
15393 ec->tx_coalesce_usecs_irq = 0;
15394 ec->stats_block_coalesce_usecs = 0;
15395 }
15396 }
15397
15398 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
15399 struct rtnl_link_stats64 *stats)
15400 {
15401 struct tg3 *tp = netdev_priv(dev);
15402
15403 if (!tp->hw_stats)
15404 return &tp->net_stats_prev;
15405
15406 spin_lock_bh(&tp->lock);
15407 tg3_get_nstats(tp, stats);
15408 spin_unlock_bh(&tp->lock);
15409
15410 return stats;
15411 }
15412
15413 static const struct net_device_ops tg3_netdev_ops = {
15414 .ndo_open = tg3_open,
15415 .ndo_stop = tg3_close,
15416 .ndo_start_xmit = tg3_start_xmit,
15417 .ndo_get_stats64 = tg3_get_stats64,
15418 .ndo_validate_addr = eth_validate_addr,
15419 .ndo_set_rx_mode = tg3_set_rx_mode,
15420 .ndo_set_mac_address = tg3_set_mac_addr,
15421 .ndo_do_ioctl = tg3_ioctl,
15422 .ndo_tx_timeout = tg3_tx_timeout,
15423 .ndo_change_mtu = tg3_change_mtu,
15424 .ndo_fix_features = tg3_fix_features,
15425 .ndo_set_features = tg3_set_features,
15426 #ifdef CONFIG_NET_POLL_CONTROLLER
15427 .ndo_poll_controller = tg3_poll_controller,
15428 #endif
15429 };
15430
15431 static int __devinit tg3_init_one(struct pci_dev *pdev,
15432 const struct pci_device_id *ent)
15433 {
15434 struct net_device *dev;
15435 struct tg3 *tp;
15436 int i, err, pm_cap;
15437 u32 sndmbx, rcvmbx, intmbx;
15438 char str[40];
15439 u64 dma_mask, persist_dma_mask;
15440 netdev_features_t features = 0;
15441
15442 printk_once(KERN_INFO "%s\n", version);
15443
15444 err = pci_enable_device(pdev);
15445 if (err) {
15446 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
15447 return err;
15448 }
15449
15450 err = pci_request_regions(pdev, DRV_MODULE_NAME);
15451 if (err) {
15452 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
15453 goto err_out_disable_pdev;
15454 }
15455
15456 pci_set_master(pdev);
15457
15458 /* Find power-management capability. */
15459 pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
15460 if (pm_cap == 0) {
15461 dev_err(&pdev->dev,
15462 "Cannot find Power Management capability, aborting\n");
15463 err = -EIO;
15464 goto err_out_free_res;
15465 }
15466
15467 err = pci_set_power_state(pdev, PCI_D0);
15468 if (err) {
15469 dev_err(&pdev->dev, "Transition to D0 failed, aborting\n");
15470 goto err_out_free_res;
15471 }
15472
15473 dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
15474 if (!dev) {
15475 dev_err(&pdev->dev, "Etherdev alloc failed, aborting\n");
15476 err = -ENOMEM;
15477 goto err_out_power_down;
15478 }
15479
15480 SET_NETDEV_DEV(dev, &pdev->dev);
15481
15482 tp = netdev_priv(dev);
15483 tp->pdev = pdev;
15484 tp->dev = dev;
15485 tp->pm_cap = pm_cap;
15486 tp->rx_mode = TG3_DEF_RX_MODE;
15487 tp->tx_mode = TG3_DEF_TX_MODE;
15488
15489 if (tg3_debug > 0)
15490 tp->msg_enable = tg3_debug;
15491 else
15492 tp->msg_enable = TG3_DEF_MSG_ENABLE;
15493
15494 /* The word/byte swap controls here govern register access byte
15495 * swapping. DMA data byte swapping is controlled in the GRC_MODE
15496 * setting below.
15497 */
15498 tp->misc_host_ctrl =
15499 MISC_HOST_CTRL_MASK_PCI_INT |
15500 MISC_HOST_CTRL_WORD_SWAP |
15501 MISC_HOST_CTRL_INDIR_ACCESS |
15502 MISC_HOST_CTRL_PCISTATE_RW;
15503
15504 /* The NONFRM (non-frame) byte/word swap controls take effect
15505 * on descriptor entries, anything which isn't packet data.
15506 *
15507 * The StrongARM chips on the board (one for tx, one for rx)
15508 * are running in big-endian mode.
15509 */
15510 tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
15511 GRC_MODE_WSWAP_NONFRM_DATA);
15512 #ifdef __BIG_ENDIAN
15513 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
15514 #endif
15515 spin_lock_init(&tp->lock);
15516 spin_lock_init(&tp->indirect_lock);
15517 INIT_WORK(&tp->reset_task, tg3_reset_task);
15518
15519 tp->regs = pci_ioremap_bar(pdev, BAR_0);
15520 if (!tp->regs) {
15521 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
15522 err = -ENOMEM;
15523 goto err_out_free_dev;
15524 }
15525
15526 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
15527 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
15528 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
15529 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
15530 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15531 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
15532 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
15533 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720) {
15534 tg3_flag_set(tp, ENABLE_APE);
15535 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
15536 if (!tp->aperegs) {
15537 dev_err(&pdev->dev,
15538 "Cannot map APE registers, aborting\n");
15539 err = -ENOMEM;
15540 goto err_out_iounmap;
15541 }
15542 }
15543
15544 tp->rx_pending = TG3_DEF_RX_RING_PENDING;
15545 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
15546
15547 dev->ethtool_ops = &tg3_ethtool_ops;
15548 dev->watchdog_timeo = TG3_TX_TIMEOUT;
15549 dev->netdev_ops = &tg3_netdev_ops;
15550 dev->irq = pdev->irq;
15551
15552 err = tg3_get_invariants(tp);
15553 if (err) {
15554 dev_err(&pdev->dev,
15555 "Problem fetching invariants of chip, aborting\n");
15556 goto err_out_apeunmap;
15557 }
15558
15559 /* The EPB bridge inside 5714, 5715, and 5780 and any
15560 * device behind the EPB cannot support DMA addresses > 40-bit.
15561 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
15562 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
15563 * do DMA address check in tg3_start_xmit().
15564 */
15565 if (tg3_flag(tp, IS_5788))
15566 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
15567 else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
15568 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
15569 #ifdef CONFIG_HIGHMEM
15570 dma_mask = DMA_BIT_MASK(64);
15571 #endif
15572 } else
15573 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
15574
15575 /* Configure DMA attributes. */
15576 if (dma_mask > DMA_BIT_MASK(32)) {
15577 err = pci_set_dma_mask(pdev, dma_mask);
15578 if (!err) {
15579 features |= NETIF_F_HIGHDMA;
15580 err = pci_set_consistent_dma_mask(pdev,
15581 persist_dma_mask);
15582 if (err < 0) {
15583 dev_err(&pdev->dev, "Unable to obtain 64 bit "
15584 "DMA for consistent allocations\n");
15585 goto err_out_apeunmap;
15586 }
15587 }
15588 }
15589 if (err || dma_mask == DMA_BIT_MASK(32)) {
15590 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
15591 if (err) {
15592 dev_err(&pdev->dev,
15593 "No usable DMA configuration, aborting\n");
15594 goto err_out_apeunmap;
15595 }
15596 }
15597
15598 tg3_init_bufmgr_config(tp);
15599
15600 features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
15601
15602 /* 5700 B0 chips do not support checksumming correctly due
15603 * to hardware bugs.
15604 */
15605 if (tp->pci_chip_rev_id != CHIPREV_ID_5700_B0) {
15606 features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
15607
15608 if (tg3_flag(tp, 5755_PLUS))
15609 features |= NETIF_F_IPV6_CSUM;
15610 }
15611
15612 /* TSO is on by default on chips that support hardware TSO.
15613 * Firmware TSO on older chips gives lower performance, so it
15614 * is off by default, but can be enabled using ethtool.
15615 */
15616 if ((tg3_flag(tp, HW_TSO_1) ||
15617 tg3_flag(tp, HW_TSO_2) ||
15618 tg3_flag(tp, HW_TSO_3)) &&
15619 (features & NETIF_F_IP_CSUM))
15620 features |= NETIF_F_TSO;
15621 if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
15622 if (features & NETIF_F_IPV6_CSUM)
15623 features |= NETIF_F_TSO6;
15624 if (tg3_flag(tp, HW_TSO_3) ||
15625 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
15626 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
15627 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
15628 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
15629 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
15630 features |= NETIF_F_TSO_ECN;
15631 }
15632
15633 dev->features |= features;
15634 dev->vlan_features |= features;
15635
15636 /*
15637 * Add loopback capability only for a subset of devices that support
15638 * MAC-LOOPBACK. Eventually this needs to be enhanced to allow INT-PHY
15639 * loopback for the remaining devices.
15640 */
15641 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
15642 !tg3_flag(tp, CPMU_PRESENT))
15643 /* Add the loopback capability */
15644 features |= NETIF_F_LOOPBACK;
15645
15646 dev->hw_features |= features;
15647
15648 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
15649 !tg3_flag(tp, TSO_CAPABLE) &&
15650 !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
15651 tg3_flag_set(tp, MAX_RXPEND_64);
15652 tp->rx_pending = 63;
15653 }
15654
15655 err = tg3_get_device_address(tp);
15656 if (err) {
15657 dev_err(&pdev->dev,
15658 "Could not obtain valid ethernet address, aborting\n");
15659 goto err_out_apeunmap;
15660 }
15661
15662 /*
15663 * Reset the chip in case a UNDI or EFI driver did not shut it
15664 * down; otherwise the DMA self test will enable WDMAC and we'll
15665 * see (spurious) pending DMA on the PCI bus at that point.
15666 */
15667 if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
15668 (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
15669 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
15670 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
15671 }
15672
15673 err = tg3_test_dma(tp);
15674 if (err) {
15675 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
15676 goto err_out_apeunmap;
15677 }
15678
15679 intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
15680 rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
15681 sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
15682 for (i = 0; i < tp->irq_max; i++) {
15683 struct tg3_napi *tnapi = &tp->napi[i];
15684
15685 tnapi->tp = tp;
15686 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
15687
15688 tnapi->int_mbox = intmbx;
15689 if (i <= 4)
15690 intmbx += 0x8;
15691 else
15692 intmbx += 0x4;
15693
15694 tnapi->consmbox = rcvmbx;
15695 tnapi->prodmbox = sndmbx;
15696
15697 if (i)
15698 tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
15699 else
15700 tnapi->coal_now = HOSTCC_MODE_NOW;
15701
15702 if (!tg3_flag(tp, SUPPORT_MSIX))
15703 break;
15704
15705 /*
15706 * If we support MSIX, we'll be using RSS. If we're using
15707 * RSS, the first vector only handles link interrupts and the
15708 * remaining vectors handle rx and tx interrupts. Reuse the
15709 * mailbox values for the next iteration. The values we setup
15710 * above are still useful for the single vectored mode.
15711 */
15712 if (!i)
15713 continue;
15714
15715 rcvmbx += 0x8;
15716
15717 if (sndmbx & 0x4)
15718 sndmbx -= 0x4;
15719 else
15720 sndmbx += 0xc;
15721 }
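	/* Illustrative trace of the assignments above in the MSI-X case:
	 * the interrupt mailbox advances by 0x8 per vector (0x4 once
	 * past vector 4), while vector 1 deliberately reuses vector 0's
	 * rx/tx mailboxes since vector 0 then services only link
	 * interrupts; from vector 2 on, rcvmbx steps by 0x8 and sndmbx
	 * alternates between -0x4 and +0xc as coded above.
	 */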
15722
15723 tg3_init_coal(tp);
15724
15725 pci_set_drvdata(pdev, dev);
15726
15727 if (tg3_flag(tp, 5717_PLUS)) {
15728 /* Resume a low-power mode */
15729 tg3_frob_aux_power(tp, false);
15730 }
15731
15732 err = register_netdev(dev);
15733 if (err) {
15734 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
15735 goto err_out_apeunmap;
15736 }
15737
15738 netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
15739 tp->board_part_number,
15740 tp->pci_chip_rev_id,
15741 tg3_bus_string(tp, str),
15742 dev->dev_addr);
15743
15744 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
15745 struct phy_device *phydev;
15746 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
15747 netdev_info(dev,
15748 "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
15749 phydev->drv->name, dev_name(&phydev->dev));
15750 } else {
15751 char *ethtype;
15752
15753 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
15754 ethtype = "10/100Base-TX";
15755 else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
15756 ethtype = "1000Base-SX";
15757 else
15758 ethtype = "10/100/1000Base-T";
15759
15760 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
15761 "(WireSpeed[%d], EEE[%d])\n",
15762 tg3_phy_string(tp), ethtype,
15763 (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
15764 (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
15765 }
15766
15767 netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
15768 (dev->features & NETIF_F_RXCSUM) != 0,
15769 tg3_flag(tp, USE_LINKCHG_REG) != 0,
15770 (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
15771 tg3_flag(tp, ENABLE_ASF) != 0,
15772 tg3_flag(tp, TSO_CAPABLE) != 0);
15773 netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
15774 tp->dma_rwctrl,
15775 pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
15776 ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
15777
15778 pci_save_state(pdev);
15779
15780 return 0;
15781
15782 err_out_apeunmap:
15783 if (tp->aperegs) {
15784 iounmap(tp->aperegs);
15785 tp->aperegs = NULL;
15786 }
15787
15788 err_out_iounmap:
15789 if (tp->regs) {
15790 iounmap(tp->regs);
15791 tp->regs = NULL;
15792 }
15793
15794 err_out_free_dev:
15795 free_netdev(dev);
15796
15797 err_out_power_down:
15798 pci_set_power_state(pdev, PCI_D3hot);
15799
15800 err_out_free_res:
15801 pci_release_regions(pdev);
15802
15803 err_out_disable_pdev:
15804 pci_disable_device(pdev);
15805 pci_set_drvdata(pdev, NULL);
15806 return err;
15807 }
15808
15809 static void __devexit tg3_remove_one(struct pci_dev *pdev)
15810 {
15811 struct net_device *dev = pci_get_drvdata(pdev);
15812
15813 if (dev) {
15814 struct tg3 *tp = netdev_priv(dev);
15815
15816 if (tp->fw)
15817 release_firmware(tp->fw);
15818
15819 tg3_reset_task_cancel(tp);
15820
15821 if (tg3_flag(tp, USE_PHYLIB)) {
15822 tg3_phy_fini(tp);
15823 tg3_mdio_fini(tp);
15824 }
15825
15826 unregister_netdev(dev);
15827 if (tp->aperegs) {
15828 iounmap(tp->aperegs);
15829 tp->aperegs = NULL;
15830 }
15831 if (tp->regs) {
15832 iounmap(tp->regs);
15833 tp->regs = NULL;
15834 }
15835 free_netdev(dev);
15836 pci_release_regions(pdev);
15837 pci_disable_device(pdev);
15838 pci_set_drvdata(pdev, NULL);
15839 }
15840 }
15841
15842 #ifdef CONFIG_PM_SLEEP
15843 static int tg3_suspend(struct device *device)
15844 {
15845 struct pci_dev *pdev = to_pci_dev(device);
15846 struct net_device *dev = pci_get_drvdata(pdev);
15847 struct tg3 *tp = netdev_priv(dev);
15848 int err;
15849
15850 if (!netif_running(dev))
15851 return 0;
15852
15853 tg3_reset_task_cancel(tp);
15854 tg3_phy_stop(tp);
15855 tg3_netif_stop(tp);
15856
15857 del_timer_sync(&tp->timer);
15858
15859 tg3_full_lock(tp, 1);
15860 tg3_disable_ints(tp);
15861 tg3_full_unlock(tp);
15862
15863 netif_device_detach(dev);
15864
15865 tg3_full_lock(tp, 0);
15866 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
15867 tg3_flag_clear(tp, INIT_COMPLETE);
15868 tg3_full_unlock(tp);
15869
15870 err = tg3_power_down_prepare(tp);
15871 if (err) {
15872 int err2;
15873
15874 tg3_full_lock(tp, 0);
15875
15876 tg3_flag_set(tp, INIT_COMPLETE);
15877 err2 = tg3_restart_hw(tp, 1);
15878 if (err2)
15879 goto out;
15880
15881 tp->timer.expires = jiffies + tp->timer_offset;
15882 add_timer(&tp->timer);
15883
15884 netif_device_attach(dev);
15885 tg3_netif_start(tp);
15886
15887 out:
15888 tg3_full_unlock(tp);
15889
15890 if (!err2)
15891 tg3_phy_start(tp);
15892 }
15893
15894 return err;
15895 }
15896
15897 static int tg3_resume(struct device *device)
15898 {
15899 struct pci_dev *pdev = to_pci_dev(device);
15900 struct net_device *dev = pci_get_drvdata(pdev);
15901 struct tg3 *tp = netdev_priv(dev);
15902 int err;
15903
15904 if (!netif_running(dev))
15905 return 0;
15906
15907 netif_device_attach(dev);
15908
15909 tg3_full_lock(tp, 0);
15910
15911 tg3_flag_set(tp, INIT_COMPLETE);
15912 err = tg3_restart_hw(tp, 1);
15913 if (err)
15914 goto out;
15915
15916 tp->timer.expires = jiffies + tp->timer_offset;
15917 add_timer(&tp->timer);
15918
15919 tg3_netif_start(tp);
15920
15921 out:
15922 tg3_full_unlock(tp);
15923
15924 if (!err)
15925 tg3_phy_start(tp);
15926
15927 return err;
15928 }
15929
15930 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
15931 #define TG3_PM_OPS (&tg3_pm_ops)
15932
15933 #else
15934
15935 #define TG3_PM_OPS NULL
15936
15937 #endif /* CONFIG_PM_SLEEP */
15938
15939 /**
15940 * tg3_io_error_detected - called when PCI error is detected
15941 * @pdev: Pointer to PCI device
15942 * @state: The current pci connection state
15943 *
15944 * This function is called after a PCI bus error affecting
15945 * this device has been detected.
15946 */
15947 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
15948 pci_channel_state_t state)
15949 {
15950 struct net_device *netdev = pci_get_drvdata(pdev);
15951 struct tg3 *tp = netdev_priv(netdev);
15952 pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
15953
15954 netdev_info(netdev, "PCI I/O error detected\n");
15955
15956 rtnl_lock();
15957
15958 if (!netif_running(netdev))
15959 goto done;
15960
15961 tg3_phy_stop(tp);
15962
15963 tg3_netif_stop(tp);
15964
15965 del_timer_sync(&tp->timer);
15966
15967 /* Want to make sure that the reset task doesn't run */
15968 tg3_reset_task_cancel(tp);
15969 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
15970
15971 netif_device_detach(netdev);
15972
15973 /* Clean up software state, even if MMIO is blocked */
15974 tg3_full_lock(tp, 0);
15975 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
15976 tg3_full_unlock(tp);
15977
15978 done:
15979 if (state == pci_channel_io_perm_failure)
15980 err = PCI_ERS_RESULT_DISCONNECT;
15981 else
15982 pci_disable_device(pdev);
15983
15984 rtnl_unlock();
15985
15986 return err;
15987 }
15988
15989 /**
15990 * tg3_io_slot_reset - called after the pci bus has been reset.
15991 * @pdev: Pointer to PCI device
15992 *
15993 * Restart the card from scratch, as if from a cold-boot.
15994 * At this point, the card has experienced a hard reset,
15995 * followed by fixups by BIOS, and has its config space
15996 * set up identically to what it was at cold boot.
15997 */
15998 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
15999 {
16000 struct net_device *netdev = pci_get_drvdata(pdev);
16001 struct tg3 *tp = netdev_priv(netdev);
16002 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
16003 int err;
16004
16005 rtnl_lock();
16006
16007 if (pci_enable_device(pdev)) {
16008 netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
16009 goto done;
16010 }
16011
16012 pci_set_master(pdev);
16013 pci_restore_state(pdev);
16014 pci_save_state(pdev);
16015
16016 if (!netif_running(netdev)) {
16017 rc = PCI_ERS_RESULT_RECOVERED;
16018 goto done;
16019 }
16020
16021 err = tg3_power_up(tp);
16022 if (err)
16023 goto done;
16024
16025 rc = PCI_ERS_RESULT_RECOVERED;
16026
16027 done:
16028 rtnl_unlock();
16029
16030 return rc;
16031 }
16032
16033 /**
16034 * tg3_io_resume - called when traffic can start flowing again.
16035 * @pdev: Pointer to PCI device
16036 *
16037 * This callback is called when the error recovery driver tells
16038 * us that it's OK to resume normal operation.
16039 */
16040 static void tg3_io_resume(struct pci_dev *pdev)
16041 {
16042 struct net_device *netdev = pci_get_drvdata(pdev);
16043 struct tg3 *tp = netdev_priv(netdev);
16044 int err;
16045
16046 rtnl_lock();
16047
16048 if (!netif_running(netdev))
16049 goto done;
16050
16051 tg3_full_lock(tp, 0);
16052 tg3_flag_set(tp, INIT_COMPLETE);
16053 err = tg3_restart_hw(tp, 1);
16054 tg3_full_unlock(tp);
16055 if (err) {
16056 netdev_err(netdev, "Cannot restart hardware after reset.\n");
16057 goto done;
16058 }
16059
16060 netif_device_attach(netdev);
16061
16062 tp->timer.expires = jiffies + tp->timer_offset;
16063 add_timer(&tp->timer);
16064
16065 tg3_netif_start(tp);
16066
16067 tg3_phy_start(tp);
16068
16069 done:
16070 rtnl_unlock();
16071 }
16072
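/* Summary of the AER recovery flow driving the callbacks bundled below:
 * on a bus error the PCI core first calls .error_detected so the driver
 * can quiesce, then (if the slot is recoverable) resets the slot and
 * calls .slot_reset to reinitialize, and finally .resume once traffic
 * may flow again.
 */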
16073 static struct pci_error_handlers tg3_err_handler = {
16074 .error_detected = tg3_io_error_detected,
16075 .slot_reset = tg3_io_slot_reset,
16076 .resume = tg3_io_resume
16077 };
16078
16079 static struct pci_driver tg3_driver = {
16080 .name = DRV_MODULE_NAME,
16081 .id_table = tg3_pci_tbl,
16082 .probe = tg3_init_one,
16083 .remove = __devexit_p(tg3_remove_one),
16084 .err_handler = &tg3_err_handler,
16085 .driver.pm = TG3_PM_OPS,
16086 };
16087
16088 static int __init tg3_init(void)
16089 {
16090 return pci_register_driver(&tg3_driver);
16091 }
16092
16093 static void __exit tg3_cleanup(void)
16094 {
16095 pci_unregister_driver(&tg3_driver);
16096 }
16097
16098 module_init(tg3_init);
16099 module_exit(tg3_cleanup);
16100