1 /*
2 * tg3.c: Broadcom Tigon3 ethernet driver.
3 *
4 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6 * Copyright (C) 2004 Sun Microsystems Inc.
7 * Copyright (C) 2005-2016 Broadcom Corporation.
8 * Copyright (C) 2016-2017 Broadcom Limited.
9 * Copyright (C) 2018 Broadcom. All Rights Reserved. The term "Broadcom"
10 * refers to Broadcom Inc. and/or its subsidiaries.
11 *
12 * Firmware is:
13 * Derived from proprietary unpublished source code,
14 * Copyright (C) 2000-2016 Broadcom Corporation.
15 * Copyright (C) 2016-2017 Broadcom Ltd.
16 * Copyright (C) 2018 Broadcom. All Rights Reserved. The term "Broadcom"
17 * refers to Broadcom Inc. and/or its subsidiaries.
18 *
19 * Permission is hereby granted for the distribution of this firmware
20 * data in hexadecimal or equivalent format, provided this copyright
21 * notice is accompanying it.
22 */
23
24
25 #include <linux/module.h>
26 #include <linux/moduleparam.h>
27 #include <linux/stringify.h>
28 #include <linux/kernel.h>
29 #include <linux/sched/signal.h>
30 #include <linux/types.h>
31 #include <linux/compiler.h>
32 #include <linux/slab.h>
33 #include <linux/delay.h>
34 #include <linux/in.h>
35 #include <linux/interrupt.h>
36 #include <linux/ioport.h>
37 #include <linux/pci.h>
38 #include <linux/netdevice.h>
39 #include <linux/etherdevice.h>
40 #include <linux/skbuff.h>
41 #include <linux/ethtool.h>
42 #include <linux/mdio.h>
43 #include <linux/mii.h>
44 #include <linux/phy.h>
45 #include <linux/brcmphy.h>
46 #include <linux/if.h>
47 #include <linux/if_vlan.h>
48 #include <linux/ip.h>
49 #include <linux/tcp.h>
50 #include <linux/workqueue.h>
51 #include <linux/prefetch.h>
52 #include <linux/dma-mapping.h>
53 #include <linux/firmware.h>
54 #include <linux/ssb/ssb_driver_gige.h>
55 #include <linux/hwmon.h>
56 #include <linux/hwmon-sysfs.h>
57 #include <linux/crc32poly.h>
58
59 #include <net/checksum.h>
60 #include <net/gso.h>
61 #include <net/ip.h>
62
63 #include <linux/io.h>
64 #include <asm/byteorder.h>
65 #include <linux/uaccess.h>
66
67 #include <uapi/linux/net_tstamp.h>
68 #include <linux/ptp_clock_kernel.h>
69
70 #define BAR_0 0
71 #define BAR_2 2
72
73 #include "tg3.h"
74
75 /* Functions & macros to verify TG3_FLAGS types */
76
77 static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
78 {
79 return test_bit(flag, bits);
80 }
81
82 static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
83 {
84 set_bit(flag, bits);
85 }
86
87 static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
88 {
89 clear_bit(flag, bits);
90 }
91
92 #define tg3_flag(tp, flag) \
93 _tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
94 #define tg3_flag_set(tp, flag) \
95 _tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
96 #define tg3_flag_clear(tp, flag) \
97 _tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
98
99 #define DRV_MODULE_NAME "tg3"
100 /* DO NOT UPDATE TG3_*_NUM defines */
101 #define TG3_MAJ_NUM 3
102 #define TG3_MIN_NUM 137
103
104 #define RESET_KIND_SHUTDOWN 0
105 #define RESET_KIND_INIT 1
106 #define RESET_KIND_SUSPEND 2
107
108 #define TG3_DEF_RX_MODE 0
109 #define TG3_DEF_TX_MODE 0
110 #define TG3_DEF_MSG_ENABLE \
111 (NETIF_MSG_DRV | \
112 NETIF_MSG_PROBE | \
113 NETIF_MSG_LINK | \
114 NETIF_MSG_TIMER | \
115 NETIF_MSG_IFDOWN | \
116 NETIF_MSG_IFUP | \
117 NETIF_MSG_RX_ERR | \
118 NETIF_MSG_TX_ERR)
119
120 #define TG3_GRC_LCLCTL_PWRSW_DELAY 100
121
122 /* length of time before we decide the hardware is borked,
123 * and dev->tx_timeout() should be called to fix the problem
124 */
125
126 #define TG3_TX_TIMEOUT (5 * HZ)
127
128 /* hardware minimum and maximum for a single frame's data payload */
129 #define TG3_MIN_MTU ETH_ZLEN
130 #define TG3_MAX_MTU(tp) \
131 (tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)
132
133 /* These numbers seem to be hard coded in the NIC firmware somehow.
134 * You can't change the ring sizes, but you can change where you place
135 * them in the NIC onboard memory.
136 */
137 #define TG3_RX_STD_RING_SIZE(tp) \
138 (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
139 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
140 #define TG3_DEF_RX_RING_PENDING 200
141 #define TG3_RX_JMB_RING_SIZE(tp) \
142 (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
143 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
144 #define TG3_DEF_RX_JUMBO_RING_PENDING 100
145
146 /* Do not place this n-ring entries value into the tp struct itself;
147 * we really want to expose these constants to GCC so that modulo et
148 * al. operations are done with shifts and masks instead of with
149 * hw multiply/modulo instructions. Another solution would be to
150 * replace things like '% foo' with '& (foo - 1)'.
151 */
152
153 #define TG3_TX_RING_SIZE 512
154 #define TG3_DEF_TX_RING_PENDING (TG3_TX_RING_SIZE - 1)
155
156 #define TG3_RX_STD_RING_BYTES(tp) \
157 (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
158 #define TG3_RX_JMB_RING_BYTES(tp) \
159 (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
160 #define TG3_RX_RCB_RING_BYTES(tp) \
161 (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
162 #define TG3_TX_RING_BYTES (sizeof(struct tg3_tx_buffer_desc) * \
163 TG3_TX_RING_SIZE)
164 #define NEXT_TX(N) (((N) + 1) & (TG3_TX_RING_SIZE - 1))
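
/* Because TG3_TX_RING_SIZE is a power of two, the ring-index advance
 * compiles to a mask instead of a divide: with a ring size of 512,
 * NEXT_TX(510) == 511 and NEXT_TX(511) == 0.
 */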
165
166 #define TG3_DMA_BYTE_ENAB 64
167
168 #define TG3_RX_STD_DMA_SZ 1536
169 #define TG3_RX_JMB_DMA_SZ 9046
170
171 #define TG3_RX_DMA_TO_MAP_SZ(x) ((x) + TG3_DMA_BYTE_ENAB)
172
173 #define TG3_RX_STD_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
174 #define TG3_RX_JMB_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)
175
176 #define TG3_RX_STD_BUFF_RING_SIZE(tp) \
177 (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))
178
179 #define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
180 (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))
181
182 /* Due to a hardware bug, the 5701 can only DMA to memory addresses
183 * that are at least dword aligned when used in PCIX mode. The driver
184 * works around this bug by double copying the packet. This workaround
185 * is built into the normal double copy length check for efficiency.
186 *
187 * However, the double copy is only necessary on those architectures
188 * where unaligned memory accesses are inefficient. For those architectures
189 * where unaligned memory accesses incur little penalty, we can reintegrate
190 * the 5701 in the normal rx path. Doing so saves a device structure
191 * dereference by hardcoding the double copy threshold in place.
192 */
193 #define TG3_RX_COPY_THRESHOLD 256
194 #if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
195 #define TG3_RX_COPY_THRESH(tp) TG3_RX_COPY_THRESHOLD
196 #else
197 #define TG3_RX_COPY_THRESH(tp) ((tp)->rx_copy_thresh)
198 #endif
199
200 #if (NET_IP_ALIGN != 0)
201 #define TG3_RX_OFFSET(tp) ((tp)->rx_offset)
202 #else
203 #define TG3_RX_OFFSET(tp) (NET_SKB_PAD)
204 #endif
205
206 /* minimum number of free TX descriptors required to wake up TX process */
207 #define TG3_TX_WAKEUP_THRESH(tnapi) ((tnapi)->tx_pending / 4)
208 #define TG3_TX_BD_DMA_MAX_2K 2048
209 #define TG3_TX_BD_DMA_MAX_4K 4096
210
211 #define TG3_RAW_IP_ALIGN 2
212
213 #define TG3_MAX_UCAST_ADDR(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 3)
214 #define TG3_UCAST_ADDR_IDX(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 1)
215
216 #define TG3_FW_UPDATE_TIMEOUT_SEC 5
217 #define TG3_FW_UPDATE_FREQ_SEC (TG3_FW_UPDATE_TIMEOUT_SEC / 2)
218
219 #define FIRMWARE_TG3 "tigon/tg3.bin"
220 #define FIRMWARE_TG357766 "tigon/tg357766.bin"
221 #define FIRMWARE_TG3TSO "tigon/tg3_tso.bin"
222 #define FIRMWARE_TG3TSO5 "tigon/tg3_tso5.bin"
223
224 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
225 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
226 MODULE_LICENSE("GPL");
227 MODULE_FIRMWARE(FIRMWARE_TG3);
228 MODULE_FIRMWARE(FIRMWARE_TG357766);
229 MODULE_FIRMWARE(FIRMWARE_TG3TSO);
230 MODULE_FIRMWARE(FIRMWARE_TG3TSO5);
231
232 static int tg3_debug = -1; /* -1 == use TG3_DEF_MSG_ENABLE as value */
233 module_param(tg3_debug, int, 0);
234 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
235
236 #define TG3_DRV_DATA_FLAG_10_100_ONLY 0x0001
237 #define TG3_DRV_DATA_FLAG_5705_10_100 0x0002
238
239 static const struct pci_device_id tg3_pci_tbl[] = {
240 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
241 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
242 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
243 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
244 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
245 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
246 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
247 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
248 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
249 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
250 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
251 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
252 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
253 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
254 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
255 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
256 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
257 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
258 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
259 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
260 TG3_DRV_DATA_FLAG_5705_10_100},
261 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
262 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
263 TG3_DRV_DATA_FLAG_5705_10_100},
264 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
265 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
266 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
267 TG3_DRV_DATA_FLAG_5705_10_100},
268 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
269 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
270 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
271 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
272 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
273 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
274 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
275 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
276 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
277 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
278 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
279 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
280 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
281 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
282 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
283 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
284 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
285 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
286 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
287 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
288 {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
289 PCI_VENDOR_ID_LENOVO,
290 TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
291 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
292 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
293 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
294 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
295 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
296 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
297 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
298 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
299 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
300 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
301 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
302 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
303 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
304 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
305 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
306 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
307 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
308 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
309 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
310 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
311 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
312 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
313 {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
314 PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
315 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
316 {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
317 PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
318 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
319 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
320 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
321 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
322 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
323 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
324 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
325 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
326 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
327 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
328 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
329 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
330 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
331 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
332 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
333 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
334 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
335 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
336 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
337 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
338 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57766)},
339 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
340 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
341 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
342 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57764)},
343 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57767)},
344 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57787)},
345 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57782)},
346 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57786)},
347 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
348 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
349 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
350 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
351 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
352 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
353 {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
354 {PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
355 {}
356 };
357
358 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
359
360 static const struct {
361 const char string[ETH_GSTRING_LEN];
362 } ethtool_stats_keys[] = {
363 { "rx_octets" },
364 { "rx_fragments" },
365 { "rx_ucast_packets" },
366 { "rx_mcast_packets" },
367 { "rx_bcast_packets" },
368 { "rx_fcs_errors" },
369 { "rx_align_errors" },
370 { "rx_xon_pause_rcvd" },
371 { "rx_xoff_pause_rcvd" },
372 { "rx_mac_ctrl_rcvd" },
373 { "rx_xoff_entered" },
374 { "rx_frame_too_long_errors" },
375 { "rx_jabbers" },
376 { "rx_undersize_packets" },
377 { "rx_in_length_errors" },
378 { "rx_out_length_errors" },
379 { "rx_64_or_less_octet_packets" },
380 { "rx_65_to_127_octet_packets" },
381 { "rx_128_to_255_octet_packets" },
382 { "rx_256_to_511_octet_packets" },
383 { "rx_512_to_1023_octet_packets" },
384 { "rx_1024_to_1522_octet_packets" },
385 { "rx_1523_to_2047_octet_packets" },
386 { "rx_2048_to_4095_octet_packets" },
387 { "rx_4096_to_8191_octet_packets" },
388 { "rx_8192_to_9022_octet_packets" },
389
390 { "tx_octets" },
391 { "tx_collisions" },
392
393 { "tx_xon_sent" },
394 { "tx_xoff_sent" },
395 { "tx_flow_control" },
396 { "tx_mac_errors" },
397 { "tx_single_collisions" },
398 { "tx_mult_collisions" },
399 { "tx_deferred" },
400 { "tx_excessive_collisions" },
401 { "tx_late_collisions" },
402 { "tx_collide_2times" },
403 { "tx_collide_3times" },
404 { "tx_collide_4times" },
405 { "tx_collide_5times" },
406 { "tx_collide_6times" },
407 { "tx_collide_7times" },
408 { "tx_collide_8times" },
409 { "tx_collide_9times" },
410 { "tx_collide_10times" },
411 { "tx_collide_11times" },
412 { "tx_collide_12times" },
413 { "tx_collide_13times" },
414 { "tx_collide_14times" },
415 { "tx_collide_15times" },
416 { "tx_ucast_packets" },
417 { "tx_mcast_packets" },
418 { "tx_bcast_packets" },
419 { "tx_carrier_sense_errors" },
420 { "tx_discards" },
421 { "tx_errors" },
422
423 { "dma_writeq_full" },
424 { "dma_write_prioq_full" },
425 { "rxbds_empty" },
426 { "rx_discards" },
427 { "rx_errors" },
428 { "rx_threshold_hit" },
429
430 { "dma_readq_full" },
431 { "dma_read_prioq_full" },
432 { "tx_comp_queue_full" },
433
434 { "ring_set_send_prod_index" },
435 { "ring_status_update" },
436 { "nic_irqs" },
437 { "nic_avoided_irqs" },
438 { "nic_tx_threshold_hit" },
439
440 { "mbuf_lwm_thresh_hit" },
441 };
442
443 #define TG3_NUM_STATS ARRAY_SIZE(ethtool_stats_keys)
444 #define TG3_NVRAM_TEST 0
445 #define TG3_LINK_TEST 1
446 #define TG3_REGISTER_TEST 2
447 #define TG3_MEMORY_TEST 3
448 #define TG3_MAC_LOOPB_TEST 4
449 #define TG3_PHY_LOOPB_TEST 5
450 #define TG3_EXT_LOOPB_TEST 6
451 #define TG3_INTERRUPT_TEST 7
452
453
454 static const struct {
455 const char string[ETH_GSTRING_LEN];
456 } ethtool_test_keys[] = {
457 [TG3_NVRAM_TEST] = { "nvram test (online) " },
458 [TG3_LINK_TEST] = { "link test (online) " },
459 [TG3_REGISTER_TEST] = { "register test (offline)" },
460 [TG3_MEMORY_TEST] = { "memory test (offline)" },
461 [TG3_MAC_LOOPB_TEST] = { "mac loopback test (offline)" },
462 [TG3_PHY_LOOPB_TEST] = { "phy loopback test (offline)" },
463 [TG3_EXT_LOOPB_TEST] = { "ext loopback test (offline)" },
464 [TG3_INTERRUPT_TEST] = { "interrupt test (offline)" },
465 };
466
467 #define TG3_NUM_TEST ARRAY_SIZE(ethtool_test_keys)
468
469
470 static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
471 {
472 writel(val, tp->regs + off);
473 }
474
475 static u32 tg3_read32(struct tg3 *tp, u32 off)
476 {
477 return readl(tp->regs + off);
478 }
479
480 static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
481 {
482 writel(val, tp->aperegs + off);
483 }
484
485 static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
486 {
487 return readl(tp->aperegs + off);
488 }
489
490 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
491 {
492 unsigned long flags;
493
494 spin_lock_irqsave(&tp->indirect_lock, flags);
495 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
496 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
497 spin_unlock_irqrestore(&tp->indirect_lock, flags);
498 }
499
500 static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
501 {
502 writel(val, tp->regs + off);
503 readl(tp->regs + off);
504 }
505
506 static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
507 {
508 unsigned long flags;
509 u32 val;
510
511 spin_lock_irqsave(&tp->indirect_lock, flags);
512 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
513 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
514 spin_unlock_irqrestore(&tp->indirect_lock, flags);
515 return val;
516 }
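
/* The two indirect helpers above share one pattern: the register offset
 * is staged in the TG3PCI_REG_BASE_ADDR config word and the data moves
 * through TG3PCI_REG_DATA. indirect_lock must cover both config-space
 * accesses; otherwise a concurrent caller could retarget the window
 * between the address write and the data transfer.
 */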
517
518 static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
519 {
520 unsigned long flags;
521
522 if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
523 pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
524 TG3_64BIT_REG_LOW, val);
525 return;
526 }
527 if (off == TG3_RX_STD_PROD_IDX_REG) {
528 pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
529 TG3_64BIT_REG_LOW, val);
530 return;
531 }
532
533 spin_lock_irqsave(&tp->indirect_lock, flags);
534 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
535 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
536 spin_unlock_irqrestore(&tp->indirect_lock, flags);
537
538 /* In indirect mode when disabling interrupts, we also need
539 * to clear the interrupt bit in the GRC local ctrl register.
540 */
541 if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
542 (val == 0x1)) {
543 pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
544 tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
545 }
546 }
547
548 static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
549 {
550 unsigned long flags;
551 u32 val;
552
553 spin_lock_irqsave(&tp->indirect_lock, flags);
554 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
555 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
556 spin_unlock_irqrestore(&tp->indirect_lock, flags);
557 return val;
558 }
559
560 /* usec_wait specifies the wait time in usec when writing to certain registers
561 * where it is unsafe to read back the register without some delay.
562 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
563 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
564 */
565 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
566 {
567 if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
568 /* Non-posted methods */
569 tp->write32(tp, off, val);
570 else {
571 /* Posted method */
572 tg3_write32(tp, off, val);
573 if (usec_wait)
574 udelay(usec_wait);
575 tp->read32(tp, off);
576 }
577 /* Wait again after the read for the posted method to guarantee that
578 * the wait time is met.
579 */
580 if (usec_wait)
581 udelay(usec_wait);
582 }
583
584 static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
585 {
586 tp->write32_mbox(tp, off, val);
587 if (tg3_flag(tp, FLUSH_POSTED_WRITES) ||
588 (!tg3_flag(tp, MBOX_WRITE_REORDER) &&
589 !tg3_flag(tp, ICH_WORKAROUND)))
590 tp->read32_mbox(tp, off);
591 }
592
593 static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
594 {
595 void __iomem *mbox = tp->regs + off;
596 writel(val, mbox);
597 if (tg3_flag(tp, TXD_MBOX_HWBUG))
598 writel(val, mbox);
599 if (tg3_flag(tp, MBOX_WRITE_REORDER) ||
600 tg3_flag(tp, FLUSH_POSTED_WRITES))
601 readl(mbox);
602 }
603
604 static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
605 {
606 return readl(tp->regs + off + GRCMBOX_BASE);
607 }
608
609 static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
610 {
611 writel(val, tp->regs + off + GRCMBOX_BASE);
612 }
613
614 #define tw32_mailbox(reg, val) tp->write32_mbox(tp, reg, val)
615 #define tw32_mailbox_f(reg, val) tw32_mailbox_flush(tp, (reg), (val))
616 #define tw32_rx_mbox(reg, val) tp->write32_rx_mbox(tp, reg, val)
617 #define tw32_tx_mbox(reg, val) tp->write32_tx_mbox(tp, reg, val)
618 #define tr32_mailbox(reg) tp->read32_mbox(tp, reg)
619
620 #define tw32(reg, val) tp->write32(tp, reg, val)
621 #define tw32_f(reg, val) _tw32_flush(tp, (reg), (val), 0)
622 #define tw32_wait_f(reg, val, us) _tw32_flush(tp, (reg), (val), (us))
623 #define tr32(reg) tp->read32(tp, reg)
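
/* A minimal usage sketch of the flush variants (both forms appear later
 * in this file): tw32_f() posts the write and immediately reads the
 * register back, while tw32_wait_f() additionally enforces a settling
 * delay in microseconds, e.g.
 *
 *	tw32_f(MAC_MI_COM, frame_val);
 *	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
 */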
624
625 static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
626 {
627 unsigned long flags;
628
629 if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
630 (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
631 return;
632
633 spin_lock_irqsave(&tp->indirect_lock, flags);
634 if (tg3_flag(tp, SRAM_USE_CONFIG)) {
635 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
636 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
637
638 /* Always leave this as zero. */
639 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
640 } else {
641 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
642 tw32_f(TG3PCI_MEM_WIN_DATA, val);
643
644 /* Always leave this as zero. */
645 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
646 }
647 spin_unlock_irqrestore(&tp->indirect_lock, flags);
648 }
649
650 static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
651 {
652 unsigned long flags;
653
654 if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
655 (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
656 *val = 0;
657 return;
658 }
659
660 spin_lock_irqsave(&tp->indirect_lock, flags);
661 if (tg3_flag(tp, SRAM_USE_CONFIG)) {
662 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
663 pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
664
665 /* Always leave this as zero. */
666 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
667 } else {
668 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
669 *val = tr32(TG3PCI_MEM_WIN_DATA);
670
671 /* Always leave this as zero. */
672 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
673 }
674 spin_unlock_irqrestore(&tp->indirect_lock, flags);
675 }
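
/* Both memory-window helpers above park TG3PCI_MEM_WIN_BASE_ADDR back at
 * zero on exit ("Always leave this as zero."), presumably so that any
 * other access to the window finds it pointing at the base of NIC SRAM
 * rather than at a stale offset. The source does not state the reason
 * explicitly.
 */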
676
677 static void tg3_ape_lock_init(struct tg3 *tp)
678 {
679 int i;
680 u32 regbase, bit;
681
682 if (tg3_asic_rev(tp) == ASIC_REV_5761)
683 regbase = TG3_APE_LOCK_GRANT;
684 else
685 regbase = TG3_APE_PER_LOCK_GRANT;
686
687 /* Make sure the driver doesn't hold any stale locks. */
688 for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
689 switch (i) {
690 case TG3_APE_LOCK_PHY0:
691 case TG3_APE_LOCK_PHY1:
692 case TG3_APE_LOCK_PHY2:
693 case TG3_APE_LOCK_PHY3:
694 bit = APE_LOCK_GRANT_DRIVER;
695 break;
696 default:
697 if (!tp->pci_fn)
698 bit = APE_LOCK_GRANT_DRIVER;
699 else
700 bit = 1 << tp->pci_fn;
701 }
702 tg3_ape_write32(tp, regbase + 4 * i, bit);
703 }
704
705 }
706
707 static int tg3_ape_lock(struct tg3 *tp, int locknum)
708 {
709 int i, off;
710 int ret = 0;
711 u32 status, req, gnt, bit;
712
713 if (!tg3_flag(tp, ENABLE_APE))
714 return 0;
715
716 switch (locknum) {
717 case TG3_APE_LOCK_GPIO:
718 if (tg3_asic_rev(tp) == ASIC_REV_5761)
719 return 0;
720 fallthrough;
721 case TG3_APE_LOCK_GRC:
722 case TG3_APE_LOCK_MEM:
723 if (!tp->pci_fn)
724 bit = APE_LOCK_REQ_DRIVER;
725 else
726 bit = 1 << tp->pci_fn;
727 break;
728 case TG3_APE_LOCK_PHY0:
729 case TG3_APE_LOCK_PHY1:
730 case TG3_APE_LOCK_PHY2:
731 case TG3_APE_LOCK_PHY3:
732 bit = APE_LOCK_REQ_DRIVER;
733 break;
734 default:
735 return -EINVAL;
736 }
737
738 if (tg3_asic_rev(tp) == ASIC_REV_5761) {
739 req = TG3_APE_LOCK_REQ;
740 gnt = TG3_APE_LOCK_GRANT;
741 } else {
742 req = TG3_APE_PER_LOCK_REQ;
743 gnt = TG3_APE_PER_LOCK_GRANT;
744 }
745
746 off = 4 * locknum;
747
748 tg3_ape_write32(tp, req + off, bit);
749
750 /* Wait for up to 1 millisecond to acquire lock. */
751 for (i = 0; i < 100; i++) {
752 status = tg3_ape_read32(tp, gnt + off);
753 if (status == bit)
754 break;
755 if (pci_channel_offline(tp->pdev))
756 break;
757
758 udelay(10);
759 }
760
761 if (status != bit) {
762 /* Revoke the lock request. */
763 tg3_ape_write32(tp, gnt + off, bit);
764 ret = -EBUSY;
765 }
766
767 return ret;
768 }
769
770 static void tg3_ape_unlock(struct tg3 *tp, int locknum)
771 {
772 u32 gnt, bit;
773
774 if (!tg3_flag(tp, ENABLE_APE))
775 return;
776
777 switch (locknum) {
778 case TG3_APE_LOCK_GPIO:
779 if (tg3_asic_rev(tp) == ASIC_REV_5761)
780 return;
781 fallthrough;
782 case TG3_APE_LOCK_GRC:
783 case TG3_APE_LOCK_MEM:
784 if (!tp->pci_fn)
785 bit = APE_LOCK_GRANT_DRIVER;
786 else
787 bit = 1 << tp->pci_fn;
788 break;
789 case TG3_APE_LOCK_PHY0:
790 case TG3_APE_LOCK_PHY1:
791 case TG3_APE_LOCK_PHY2:
792 case TG3_APE_LOCK_PHY3:
793 bit = APE_LOCK_GRANT_DRIVER;
794 break;
795 default:
796 return;
797 }
798
799 if (tg3_asic_rev(tp) == ASIC_REV_5761)
800 gnt = TG3_APE_LOCK_GRANT;
801 else
802 gnt = TG3_APE_PER_LOCK_GRANT;
803
804 tg3_ape_write32(tp, gnt + 4 * locknum, bit);
805 }
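
/* Typical lock/unlock pairing, as used by the PHY accessors later in
 * this file (sketch):
 *
 *	tg3_ape_lock(tp, tp->phy_ape_lock);
 *	...MDIO transaction...
 *	tg3_ape_unlock(tp, tp->phy_ape_lock);
 *
 * tg3_ape_lock() can return -EBUSY after its 1 ms poll (100 loops of
 * udelay(10)); callers that must not race the APE firmware, such as
 * tg3_ape_event_lock() below, check the return value.
 */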
806
807 static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
808 {
809 u32 apedata;
810
811 while (timeout_us) {
812 if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
813 return -EBUSY;
814
815 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
816 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
817 break;
818
819 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
820
821 udelay(10);
822 timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
823 }
824
825 return timeout_us ? 0 : -EBUSY;
826 }
827
828 #ifdef CONFIG_TIGON3_HWMON
829 static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
830 {
831 u32 i, apedata;
832
833 for (i = 0; i < timeout_us / 10; i++) {
834 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
835
836 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
837 break;
838
839 udelay(10);
840 }
841
842 return i == timeout_us / 10;
843 }
844
845 static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
846 u32 len)
847 {
848 int err;
849 u32 i, bufoff, msgoff, maxlen, apedata;
850
851 if (!tg3_flag(tp, APE_HAS_NCSI))
852 return 0;
853
854 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
855 if (apedata != APE_SEG_SIG_MAGIC)
856 return -ENODEV;
857
858 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
859 if (!(apedata & APE_FW_STATUS_READY))
860 return -EAGAIN;
861
862 bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
863 TG3_APE_SHMEM_BASE;
864 msgoff = bufoff + 2 * sizeof(u32);
865 maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);
866
867 while (len) {
868 u32 length;
869
870 /* Cap xfer sizes to scratchpad limits. */
871 length = (len > maxlen) ? maxlen : len;
872 len -= length;
873
874 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
875 if (!(apedata & APE_FW_STATUS_READY))
876 return -EAGAIN;
877
878 /* Wait for up to 1 msec for APE to service previous event. */
879 err = tg3_ape_event_lock(tp, 1000);
880 if (err)
881 return err;
882
883 apedata = APE_EVENT_STATUS_DRIVER_EVNT |
884 APE_EVENT_STATUS_SCRTCHPD_READ |
885 APE_EVENT_STATUS_EVENT_PENDING;
886 tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);
887
888 tg3_ape_write32(tp, bufoff, base_off);
889 tg3_ape_write32(tp, bufoff + sizeof(u32), length);
890
891 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
892 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
893
894 base_off += length;
895
896 if (tg3_ape_wait_for_event(tp, 30000))
897 return -EAGAIN;
898
899 for (i = 0; length; i += 4, length -= 4) {
900 u32 val = tg3_ape_read32(tp, msgoff + i);
901 memcpy(data, &val, sizeof(u32));
902 data++;
903 }
904 }
905
906 return 0;
907 }
908 #endif
909
910 static int tg3_ape_send_event(struct tg3 *tp, u32 event)
911 {
912 int err;
913 u32 apedata;
914
915 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
916 if (apedata != APE_SEG_SIG_MAGIC)
917 return -EAGAIN;
918
919 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
920 if (!(apedata & APE_FW_STATUS_READY))
921 return -EAGAIN;
922
923 /* Wait for up to 20 milliseconds for APE to service previous event. */
924 err = tg3_ape_event_lock(tp, 20000);
925 if (err)
926 return err;
927
928 tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
929 event | APE_EVENT_STATUS_EVENT_PENDING);
930
931 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
932 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
933
934 return 0;
935 }
936
937 static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
938 {
939 u32 event;
940 u32 apedata;
941
942 if (!tg3_flag(tp, ENABLE_APE))
943 return;
944
945 switch (kind) {
946 case RESET_KIND_INIT:
947 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_COUNT, tp->ape_hb++);
948 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
949 APE_HOST_SEG_SIG_MAGIC);
950 tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
951 APE_HOST_SEG_LEN_MAGIC);
952 apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
953 tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
954 tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
955 APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
956 tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
957 APE_HOST_BEHAV_NO_PHYLOCK);
958 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
959 TG3_APE_HOST_DRVR_STATE_START);
960
961 event = APE_EVENT_STATUS_STATE_START;
962 break;
963 case RESET_KIND_SHUTDOWN:
964 if (device_may_wakeup(&tp->pdev->dev) &&
965 tg3_flag(tp, WOL_ENABLE)) {
966 tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
967 TG3_APE_HOST_WOL_SPEED_AUTO);
968 apedata = TG3_APE_HOST_DRVR_STATE_WOL;
969 } else
970 apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;
971
972 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);
973
974 event = APE_EVENT_STATUS_STATE_UNLOAD;
975 break;
976 default:
977 return;
978 }
979
980 event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
981
982 tg3_ape_send_event(tp, event);
983 }
984
985 static void tg3_send_ape_heartbeat(struct tg3 *tp,
986 unsigned long interval)
987 {
988 /* Check if the heartbeat interval has elapsed */
989 if (!tg3_flag(tp, ENABLE_APE) ||
990 time_before(jiffies, tp->ape_hb_jiffies + interval))
991 return;
992
993 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_COUNT, tp->ape_hb++);
994 tp->ape_hb_jiffies = jiffies;
995 }
996
997 static void tg3_disable_ints(struct tg3 *tp)
998 {
999 int i;
1000
1001 tw32(TG3PCI_MISC_HOST_CTRL,
1002 (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
1003 for (i = 0; i < tp->irq_max; i++)
1004 tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
1005 }
1006
1007 static void tg3_enable_ints(struct tg3 *tp)
1008 {
1009 int i;
1010
1011 tp->irq_sync = 0;
1012 wmb();
1013
1014 tw32(TG3PCI_MISC_HOST_CTRL,
1015 (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
1016
1017 tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
1018 for (i = 0; i < tp->irq_cnt; i++) {
1019 struct tg3_napi *tnapi = &tp->napi[i];
1020
1021 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
1022 if (tg3_flag(tp, 1SHOT_MSI))
1023 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
1024
1025 tp->coal_now |= tnapi->coal_now;
1026 }
1027
1028 /* Force an initial interrupt */
1029 if (!tg3_flag(tp, TAGGED_STATUS) &&
1030 (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
1031 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
1032 else
1033 tw32(HOSTCC_MODE, tp->coal_now);
1034
1035 tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
1036 }
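
/* Interrupt mailbox convention as used above: writing 0x00000001 to a
 * vector's int_mbox masks it (tg3_disable_ints), while writing the last
 * processed status tag shifted into bits 31:24 both unmasks the vector
 * and acknowledges work up to that tag. The doubled write in the
 * 1SHOT_MSI case appears to be a hardware quirk workaround; the source
 * does not spell out the reason.
 */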
1037
1038 static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
1039 {
1040 struct tg3 *tp = tnapi->tp;
1041 struct tg3_hw_status *sblk = tnapi->hw_status;
1042 unsigned int work_exists = 0;
1043
1044 /* check for phy events */
1045 if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
1046 if (sblk->status & SD_STATUS_LINK_CHG)
1047 work_exists = 1;
1048 }
1049
1050 /* check for TX work to do */
1051 if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
1052 work_exists = 1;
1053
1054 /* check for RX work to do */
1055 if (tnapi->rx_rcb_prod_idx &&
1056 *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
1057 work_exists = 1;
1058
1059 return work_exists;
1060 }
1061
1062 /* tg3_int_reenable
1063 * similar to tg3_enable_ints, but it accurately determines whether there
1064 * is new work pending and can return without flushing the PIO write
1065 * which reenables interrupts
1066 */
1067 static void tg3_int_reenable(struct tg3_napi *tnapi)
1068 {
1069 struct tg3 *tp = tnapi->tp;
1070
1071 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
1072
1073 /* When doing tagged status, this work check is unnecessary.
1074 * The last_tag we write above tells the chip which piece of
1075 * work we've completed.
1076 */
1077 if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
1078 tw32(HOSTCC_MODE, tp->coalesce_mode |
1079 HOSTCC_MODE_ENABLE | tnapi->coal_now);
1080 }
1081
1082 static void tg3_switch_clocks(struct tg3 *tp)
1083 {
1084 u32 clock_ctrl;
1085 u32 orig_clock_ctrl;
1086
1087 if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
1088 return;
1089
1090 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
1091
1092 orig_clock_ctrl = clock_ctrl;
1093 clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
1094 CLOCK_CTRL_CLKRUN_OENABLE |
1095 0x1f);
1096 tp->pci_clock_ctrl = clock_ctrl;
1097
1098 if (tg3_flag(tp, 5705_PLUS)) {
1099 if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
1100 tw32_wait_f(TG3PCI_CLOCK_CTRL,
1101 clock_ctrl | CLOCK_CTRL_625_CORE, 40);
1102 }
1103 } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
1104 tw32_wait_f(TG3PCI_CLOCK_CTRL,
1105 clock_ctrl |
1106 (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
1107 40);
1108 tw32_wait_f(TG3PCI_CLOCK_CTRL,
1109 clock_ctrl | (CLOCK_CTRL_ALTCLK),
1110 40);
1111 }
1112 tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
1113 }
1114
1115 #define PHY_BUSY_LOOPS 5000
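
/* Worst-case MDIO wait implied by this budget: the read/write loops
 * below poll MI_COM_BUSY once per udelay(10), so 5000 iterations bound
 * a transaction at roughly 50 ms before -EBUSY is returned.
 */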
1116
1117 static int __tg3_readphy(struct tg3 *tp, unsigned int phy_addr, int reg,
1118 u32 *val)
1119 {
1120 u32 frame_val;
1121 unsigned int loops;
1122 int ret;
1123
1124 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1125 tw32_f(MAC_MI_MODE,
1126 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
1127 udelay(80);
1128 }
1129
1130 tg3_ape_lock(tp, tp->phy_ape_lock);
1131
1132 *val = 0x0;
1133
1134 frame_val = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
1135 MI_COM_PHY_ADDR_MASK);
1136 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
1137 MI_COM_REG_ADDR_MASK);
1138 frame_val |= (MI_COM_CMD_READ | MI_COM_START);
1139
1140 tw32_f(MAC_MI_COM, frame_val);
1141
1142 loops = PHY_BUSY_LOOPS;
1143 while (loops != 0) {
1144 udelay(10);
1145 frame_val = tr32(MAC_MI_COM);
1146
1147 if ((frame_val & MI_COM_BUSY) == 0) {
1148 udelay(5);
1149 frame_val = tr32(MAC_MI_COM);
1150 break;
1151 }
1152 loops -= 1;
1153 }
1154
1155 ret = -EBUSY;
1156 if (loops != 0) {
1157 *val = frame_val & MI_COM_DATA_MASK;
1158 ret = 0;
1159 }
1160
1161 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1162 tw32_f(MAC_MI_MODE, tp->mi_mode);
1163 udelay(80);
1164 }
1165
1166 tg3_ape_unlock(tp, tp->phy_ape_lock);
1167
1168 return ret;
1169 }
1170
1171 static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
1172 {
1173 return __tg3_readphy(tp, tp->phy_addr, reg, val);
1174 }
1175
1176 static int __tg3_writephy(struct tg3 *tp, unsigned int phy_addr, int reg,
1177 u32 val)
1178 {
1179 u32 frame_val;
1180 unsigned int loops;
1181 int ret;
1182
1183 if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
1184 (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
1185 return 0;
1186
1187 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1188 tw32_f(MAC_MI_MODE,
1189 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
1190 udelay(80);
1191 }
1192
1193 tg3_ape_lock(tp, tp->phy_ape_lock);
1194
1195 frame_val = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
1196 MI_COM_PHY_ADDR_MASK);
1197 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
1198 MI_COM_REG_ADDR_MASK);
1199 frame_val |= (val & MI_COM_DATA_MASK);
1200 frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
1201
1202 tw32_f(MAC_MI_COM, frame_val);
1203
1204 loops = PHY_BUSY_LOOPS;
1205 while (loops != 0) {
1206 udelay(10);
1207 frame_val = tr32(MAC_MI_COM);
1208 if ((frame_val & MI_COM_BUSY) == 0) {
1209 udelay(5);
1210 frame_val = tr32(MAC_MI_COM);
1211 break;
1212 }
1213 loops -= 1;
1214 }
1215
1216 ret = -EBUSY;
1217 if (loops != 0)
1218 ret = 0;
1219
1220 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1221 tw32_f(MAC_MI_MODE, tp->mi_mode);
1222 udelay(80);
1223 }
1224
1225 tg3_ape_unlock(tp, tp->phy_ape_lock);
1226
1227 return ret;
1228 }
1229
1230 static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
1231 {
1232 return __tg3_writephy(tp, tp->phy_addr, reg, val);
1233 }
1234
1235 static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
1236 {
1237 int err;
1238
1239 err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1240 if (err)
1241 goto done;
1242
1243 err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1244 if (err)
1245 goto done;
1246
1247 err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1248 MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1249 if (err)
1250 goto done;
1251
1252 err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);
1253
1254 done:
1255 return err;
1256 }
1257
1258 static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
1259 {
1260 int err;
1261
1262 err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1263 if (err)
1264 goto done;
1265
1266 err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1267 if (err)
1268 goto done;
1269
1270 err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1271 MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1272 if (err)
1273 goto done;
1274
1275 err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);
1276
1277 done:
1278 return err;
1279 }
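
/* Both Clause 45 helpers above drive the same indirect sequence over
 * Clause 22 registers: select the MMD device in MII_TG3_MMD_CTRL, write
 * the register address to MII_TG3_MMD_ADDRESS, switch MII_TG3_MMD_CTRL
 * to no-post-increment data mode, then move the data word through
 * MII_TG3_MMD_ADDRESS. This is the standard Clause 22 MMD indirect
 * access method for PHYs without a native Clause 45 interface.
 */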
1280
1281 static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
1282 {
1283 int err;
1284
1285 err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1286 if (!err)
1287 err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);
1288
1289 return err;
1290 }
1291
1292 static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
1293 {
1294 int err;
1295
1296 err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1297 if (!err)
1298 err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
1299
1300 return err;
1301 }
1302
1303 static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
1304 {
1305 int err;
1306
1307 err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
1308 (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
1309 MII_TG3_AUXCTL_SHDWSEL_MISC);
1310 if (!err)
1311 err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);
1312
1313 return err;
1314 }
1315
1316 static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
1317 {
1318 if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
1319 set |= MII_TG3_AUXCTL_MISC_WREN;
1320
1321 return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
1322 }
1323
1324 static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
1325 {
1326 u32 val;
1327 int err;
1328
1329 err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
1330
1331 if (err)
1332 return err;
1333
1334 if (enable)
1335 val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
1336 else
1337 val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
1338
1339 err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
1340 val | MII_TG3_AUXCTL_ACTL_TX_6DB);
1341
1342 return err;
1343 }
1344
1345 static int tg3_phy_shdw_write(struct tg3 *tp, int reg, u32 val)
1346 {
1347 return tg3_writephy(tp, MII_TG3_MISC_SHDW,
1348 reg | val | MII_TG3_MISC_SHDW_WREN);
1349 }
1350
1351 static int tg3_bmcr_reset(struct tg3 *tp)
1352 {
1353 u32 phy_control;
1354 int limit, err;
1355
1356 /* OK, reset it, and poll the BMCR_RESET bit until it
1357 * clears or we time out.
1358 */
1359 phy_control = BMCR_RESET;
1360 err = tg3_writephy(tp, MII_BMCR, phy_control);
1361 if (err != 0)
1362 return -EBUSY;
1363
1364 limit = 5000;
1365 while (limit--) {
1366 err = tg3_readphy(tp, MII_BMCR, &phy_control);
1367 if (err != 0)
1368 return -EBUSY;
1369
1370 if ((phy_control & BMCR_RESET) == 0) {
1371 udelay(40);
1372 break;
1373 }
1374 udelay(10);
1375 }
1376 if (limit < 0)
1377 return -EBUSY;
1378
1379 return 0;
1380 }
1381
1382 static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
1383 {
1384 struct tg3 *tp = bp->priv;
1385 u32 val;
1386
1387 spin_lock_bh(&tp->lock);
1388
1389 if (__tg3_readphy(tp, mii_id, reg, &val))
1390 val = -EIO;
1391
1392 spin_unlock_bh(&tp->lock);
1393
1394 return val;
1395 }
1396
1397 static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
1398 {
1399 struct tg3 *tp = bp->priv;
1400 u32 ret = 0;
1401
1402 spin_lock_bh(&tp->lock);
1403
1404 if (__tg3_writephy(tp, mii_id, reg, val))
1405 ret = -EIO;
1406
1407 spin_unlock_bh(&tp->lock);
1408
1409 return ret;
1410 }
1411
1412 static void tg3_mdio_config_5785(struct tg3 *tp)
1413 {
1414 u32 val;
1415 struct phy_device *phydev;
1416
1417 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
1418 switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1419 case PHY_ID_BCM50610:
1420 case PHY_ID_BCM50610M:
1421 val = MAC_PHYCFG2_50610_LED_MODES;
1422 break;
1423 case PHY_ID_BCMAC131:
1424 val = MAC_PHYCFG2_AC131_LED_MODES;
1425 break;
1426 case PHY_ID_RTL8211C:
1427 val = MAC_PHYCFG2_RTL8211C_LED_MODES;
1428 break;
1429 case PHY_ID_RTL8201E:
1430 val = MAC_PHYCFG2_RTL8201E_LED_MODES;
1431 break;
1432 default:
1433 return;
1434 }
1435
1436 if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
1437 tw32(MAC_PHYCFG2, val);
1438
1439 val = tr32(MAC_PHYCFG1);
1440 val &= ~(MAC_PHYCFG1_RGMII_INT |
1441 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
1442 val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
1443 tw32(MAC_PHYCFG1, val);
1444
1445 return;
1446 }
1447
1448 if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
1449 val |= MAC_PHYCFG2_EMODE_MASK_MASK |
1450 MAC_PHYCFG2_FMODE_MASK_MASK |
1451 MAC_PHYCFG2_GMODE_MASK_MASK |
1452 MAC_PHYCFG2_ACT_MASK_MASK |
1453 MAC_PHYCFG2_QUAL_MASK_MASK |
1454 MAC_PHYCFG2_INBAND_ENABLE;
1455
1456 tw32(MAC_PHYCFG2, val);
1457
1458 val = tr32(MAC_PHYCFG1);
1459 val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
1460 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
1461 if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1462 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1463 val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
1464 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1465 val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
1466 }
1467 val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
1468 MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
1469 tw32(MAC_PHYCFG1, val);
1470
1471 val = tr32(MAC_EXT_RGMII_MODE);
1472 val &= ~(MAC_RGMII_MODE_RX_INT_B |
1473 MAC_RGMII_MODE_RX_QUALITY |
1474 MAC_RGMII_MODE_RX_ACTIVITY |
1475 MAC_RGMII_MODE_RX_ENG_DET |
1476 MAC_RGMII_MODE_TX_ENABLE |
1477 MAC_RGMII_MODE_TX_LOWPWR |
1478 MAC_RGMII_MODE_TX_RESET);
1479 if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1480 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1481 val |= MAC_RGMII_MODE_RX_INT_B |
1482 MAC_RGMII_MODE_RX_QUALITY |
1483 MAC_RGMII_MODE_RX_ACTIVITY |
1484 MAC_RGMII_MODE_RX_ENG_DET;
1485 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1486 val |= MAC_RGMII_MODE_TX_ENABLE |
1487 MAC_RGMII_MODE_TX_LOWPWR |
1488 MAC_RGMII_MODE_TX_RESET;
1489 }
1490 tw32(MAC_EXT_RGMII_MODE, val);
1491 }
1492
1493 static void tg3_mdio_start(struct tg3 *tp)
1494 {
1495 tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
1496 tw32_f(MAC_MI_MODE, tp->mi_mode);
1497 udelay(80);
1498
1499 if (tg3_flag(tp, MDIOBUS_INITED) &&
1500 tg3_asic_rev(tp) == ASIC_REV_5785)
1501 tg3_mdio_config_5785(tp);
1502 }
1503
1504 static int tg3_mdio_init(struct tg3 *tp)
1505 {
1506 int i;
1507 u32 reg;
1508 struct phy_device *phydev;
1509
1510 if (tg3_flag(tp, 5717_PLUS)) {
1511 u32 is_serdes;
1512
1513 tp->phy_addr = tp->pci_fn + 1;
1514
1515 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0)
1516 is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
1517 else
1518 is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
1519 TG3_CPMU_PHY_STRAP_IS_SERDES;
1520 if (is_serdes)
1521 tp->phy_addr += 7;
1522 } else if (tg3_flag(tp, IS_SSB_CORE) && tg3_flag(tp, ROBOSWITCH)) {
1523 int addr;
1524
1525 addr = ssb_gige_get_phyaddr(tp->pdev);
1526 if (addr < 0)
1527 return addr;
1528 tp->phy_addr = addr;
1529 } else
1530 tp->phy_addr = TG3_PHY_MII_ADDR;
1531
1532 tg3_mdio_start(tp);
1533
1534 if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
1535 return 0;
1536
1537 tp->mdio_bus = mdiobus_alloc();
1538 if (tp->mdio_bus == NULL)
1539 return -ENOMEM;
1540
1541 tp->mdio_bus->name = "tg3 mdio bus";
1542 snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x", pci_dev_id(tp->pdev));
1543 tp->mdio_bus->priv = tp;
1544 tp->mdio_bus->parent = &tp->pdev->dev;
1545 tp->mdio_bus->read = &tg3_mdio_read;
1546 tp->mdio_bus->write = &tg3_mdio_write;
1547 tp->mdio_bus->phy_mask = ~(1 << tp->phy_addr);
1548
1549 /* The bus registration will look for all the PHYs on the mdio bus.
1550 * Unfortunately, it does not ensure the PHY is powered up before
1551 * accessing the PHY ID registers. A chip reset is the
1552 * quickest way to bring the device back to an operational state.
1553 */
1554 if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
1555 tg3_bmcr_reset(tp);
1556
1557 i = mdiobus_register(tp->mdio_bus);
1558 if (i) {
1559 dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
1560 mdiobus_free(tp->mdio_bus);
1561 return i;
1562 }
1563
1564 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
1565
1566 if (!phydev || !phydev->drv) {
1567 dev_warn(&tp->pdev->dev, "No PHY devices\n");
1568 mdiobus_unregister(tp->mdio_bus);
1569 mdiobus_free(tp->mdio_bus);
1570 return -ENODEV;
1571 }
1572
1573 switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1574 case PHY_ID_BCM57780:
1575 phydev->interface = PHY_INTERFACE_MODE_GMII;
1576 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1577 break;
1578 case PHY_ID_BCM50610:
1579 case PHY_ID_BCM50610M:
1580 phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
1581 PHY_BRCM_RX_REFCLK_UNUSED |
1582 PHY_BRCM_DIS_TXCRXC_NOENRGY |
1583 PHY_BRCM_AUTO_PWRDWN_ENABLE;
1584 fallthrough;
1585 case PHY_ID_RTL8211C:
1586 phydev->interface = PHY_INTERFACE_MODE_RGMII;
1587 break;
1588 case PHY_ID_RTL8201E:
1589 case PHY_ID_BCMAC131:
1590 phydev->interface = PHY_INTERFACE_MODE_MII;
1591 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1592 tp->phy_flags |= TG3_PHYFLG_IS_FET;
1593 break;
1594 }
1595
1596 tg3_flag_set(tp, MDIOBUS_INITED);
1597
1598 if (tg3_asic_rev(tp) == ASIC_REV_5785)
1599 tg3_mdio_config_5785(tp);
1600
1601 return 0;
1602 }
1603
1604 static void tg3_mdio_fini(struct tg3 *tp)
1605 {
1606 if (tg3_flag(tp, MDIOBUS_INITED)) {
1607 tg3_flag_clear(tp, MDIOBUS_INITED);
1608 mdiobus_unregister(tp->mdio_bus);
1609 mdiobus_free(tp->mdio_bus);
1610 }
1611 }
1612
1613 /* tp->lock is held. */
1614 static inline void tg3_generate_fw_event(struct tg3 *tp)
1615 {
1616 u32 val;
1617
1618 val = tr32(GRC_RX_CPU_EVENT);
1619 val |= GRC_RX_CPU_DRIVER_EVENT;
1620 tw32_f(GRC_RX_CPU_EVENT, val);
1621
1622 tp->last_event_jiffies = jiffies;
1623 }
1624
1625 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
1626
1627 /* tp->lock is held. */
1628 static void tg3_wait_for_event_ack(struct tg3 *tp)
1629 {
1630 int i;
1631 unsigned int delay_cnt;
1632 long time_remain;
1633
1634 /* If enough time has passed, no wait is necessary. */
1635 time_remain = (long)(tp->last_event_jiffies + 1 +
1636 usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
1637 (long)jiffies;
1638 if (time_remain < 0)
1639 return;
1640
1641 /* Check if we can shorten the wait time. */
1642 delay_cnt = jiffies_to_usecs(time_remain);
1643 if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
1644 delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
1645 delay_cnt = (delay_cnt >> 3) + 1;
1646
1647 for (i = 0; i < delay_cnt; i++) {
1648 if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
1649 break;
1650 if (pci_channel_offline(tp->pdev))
1651 break;
1652
1653 udelay(8);
1654 }
1655 }
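
/* Driver-to-firmware event handshake used by the callers below
 * (sketch; cmd stands in for one of the FWCMD_NICDRV_* values): the
 * command and its arguments are staged in NIC SRAM, then
 * GRC_RX_CPU_DRIVER_EVENT is raised and the RX CPU firmware clears it
 * once the mailbox has been consumed:
 *
 *	tg3_wait_for_event_ack(tp);
 *	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, cmd);
 *	tg3_generate_fw_event(tp);
 */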
1656
1657 /* tp->lock is held. */
1658 static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
1659 {
1660 u32 reg, val;
1661
1662 val = 0;
1663 if (!tg3_readphy(tp, MII_BMCR, &reg))
1664 val = reg << 16;
1665 if (!tg3_readphy(tp, MII_BMSR, &reg))
1666 val |= (reg & 0xffff);
1667 *data++ = val;
1668
1669 val = 0;
1670 if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
1671 val = reg << 16;
1672 if (!tg3_readphy(tp, MII_LPA, &reg))
1673 val |= (reg & 0xffff);
1674 *data++ = val;
1675
1676 val = 0;
1677 if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
1678 if (!tg3_readphy(tp, MII_CTRL1000, &reg))
1679 val = reg << 16;
1680 if (!tg3_readphy(tp, MII_STAT1000, &reg))
1681 val |= (reg & 0xffff);
1682 }
1683 *data++ = val;
1684
1685 if (!tg3_readphy(tp, MII_PHYADDR, &reg))
1686 val = reg << 16;
1687 else
1688 val = 0;
1689 *data++ = val;
1690 }
1691
1692 /* tp->lock is held. */
1693 static void tg3_ump_link_report(struct tg3 *tp)
1694 {
1695 u32 data[4];
1696
1697 if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
1698 return;
1699
1700 tg3_phy_gather_ump_data(tp, data);
1701
1702 tg3_wait_for_event_ack(tp);
1703
1704 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1705 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1706 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
1707 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
1708 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
1709 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);
1710
1711 tg3_generate_fw_event(tp);
1712 }
1713
1714 /* tp->lock is held. */
1715 static void tg3_stop_fw(struct tg3 *tp)
1716 {
1717 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
1718 /* Wait for RX cpu to ACK the previous event. */
1719 tg3_wait_for_event_ack(tp);
1720
1721 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
1722
1723 tg3_generate_fw_event(tp);
1724
1725 /* Wait for RX cpu to ACK this event. */
1726 tg3_wait_for_event_ack(tp);
1727 }
1728 }
1729
1730 /* tp->lock is held. */
1731 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
1732 {
1733 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
1734 NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
1735
1736 if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1737 switch (kind) {
1738 case RESET_KIND_INIT:
1739 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1740 DRV_STATE_START);
1741 break;
1742
1743 case RESET_KIND_SHUTDOWN:
1744 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1745 DRV_STATE_UNLOAD);
1746 break;
1747
1748 case RESET_KIND_SUSPEND:
1749 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1750 DRV_STATE_SUSPEND);
1751 break;
1752
1753 default:
1754 break;
1755 }
1756 }
1757 }
1758
1759 /* tp->lock is held. */
1760 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
1761 {
1762 if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1763 switch (kind) {
1764 case RESET_KIND_INIT:
1765 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1766 DRV_STATE_START_DONE);
1767 break;
1768
1769 case RESET_KIND_SHUTDOWN:
1770 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1771 DRV_STATE_UNLOAD_DONE);
1772 break;
1773
1774 default:
1775 break;
1776 }
1777 }
1778 }
1779
1780 /* tp->lock is held. */
1781 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
1782 {
1783 if (tg3_flag(tp, ENABLE_ASF)) {
1784 switch (kind) {
1785 case RESET_KIND_INIT:
1786 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1787 DRV_STATE_START);
1788 break;
1789
1790 case RESET_KIND_SHUTDOWN:
1791 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1792 DRV_STATE_UNLOAD);
1793 break;
1794
1795 case RESET_KIND_SUSPEND:
1796 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1797 DRV_STATE_SUSPEND);
1798 break;
1799
1800 default:
1801 break;
1802 }
1803 }
1804 }
1805
1806 static int tg3_poll_fw(struct tg3 *tp)
1807 {
1808 int i;
1809 u32 val;
1810
1811 if (tg3_flag(tp, NO_FWARE_REPORTED))
1812 return 0;
1813
1814 if (tg3_flag(tp, IS_SSB_CORE)) {
1815 /* We don't use firmware. */
1816 return 0;
1817 }
1818
1819 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
1820 /* Wait up to 20ms for init done. */
1821 for (i = 0; i < 200; i++) {
1822 if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
1823 return 0;
1824 if (pci_channel_offline(tp->pdev))
1825 return -ENODEV;
1826
1827 udelay(100);
1828 }
1829 return -ENODEV;
1830 }
1831
1832 /* Wait for firmware initialization to complete. */
1833 for (i = 0; i < 100000; i++) {
1834 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
1835 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1836 break;
1837 if (pci_channel_offline(tp->pdev)) {
1838 if (!tg3_flag(tp, NO_FWARE_REPORTED)) {
1839 tg3_flag_set(tp, NO_FWARE_REPORTED);
1840 netdev_info(tp->dev, "No firmware running\n");
1841 }
1842
1843 break;
1844 }
1845
1846 udelay(10);
1847 }
1848
1849 /* Chip might not be fitted with firmware. Some Sun onboard
1850 * parts are configured like that. So don't signal the timeout
1851 * of the above loop as an error, but do report the lack of
1852 * running firmware once.
1853 */
1854 if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
1855 tg3_flag_set(tp, NO_FWARE_REPORTED);
1856
1857 netdev_info(tp->dev, "No firmware running\n");
1858 }
1859
1860 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
1861 /* The 57765 A0 needs a little more
1862 * time to do some important work.
1863 */
1864 mdelay(10);
1865 }
1866
1867 return 0;
1868 }
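/* The handshake polled above works in two halves: tg3_write_sig_pre_reset()
 * posts NIC_SRAM_FIRMWARE_MBOX_MAGIC1 to the firmware mailbox before a
 * reset, and the bootcode writes back the one's complement of that magic
 * once its initialization completes. At 100000 polls of 10 usec each,
 * tg3_poll_fw() therefore allows the bootcode roughly one second before
 * deciding that no firmware is running.
 */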
1869
1870 static void tg3_link_report(struct tg3 *tp)
1871 {
1872 if (!netif_carrier_ok(tp->dev)) {
1873 netif_info(tp, link, tp->dev, "Link is down\n");
1874 tg3_ump_link_report(tp);
1875 } else if (netif_msg_link(tp)) {
1876 netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1877 (tp->link_config.active_speed == SPEED_1000 ?
1878 1000 :
1879 (tp->link_config.active_speed == SPEED_100 ?
1880 100 : 10)),
1881 (tp->link_config.active_duplex == DUPLEX_FULL ?
1882 "full" : "half"));
1883
1884 netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1885 (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1886 "on" : "off",
1887 (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1888 "on" : "off");
1889
1890 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
1891 netdev_info(tp->dev, "EEE is %s\n",
1892 tp->setlpicnt ? "enabled" : "disabled");
1893
1894 tg3_ump_link_report(tp);
1895 }
1896
1897 tp->link_up = netif_carrier_ok(tp->dev);
1898 }
1899
1900 static u32 tg3_decode_flowctrl_1000T(u32 adv)
1901 {
1902 u32 flowctrl = 0;
1903
1904 if (adv & ADVERTISE_PAUSE_CAP) {
1905 flowctrl |= FLOW_CTRL_RX;
1906 if (!(adv & ADVERTISE_PAUSE_ASYM))
1907 flowctrl |= FLOW_CTRL_TX;
1908 } else if (adv & ADVERTISE_PAUSE_ASYM)
1909 flowctrl |= FLOW_CTRL_TX;
1910
1911 return flowctrl;
1912 }
1913
1914 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1915 {
1916 u16 miireg;
1917
1918 if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1919 miireg = ADVERTISE_1000XPAUSE;
1920 else if (flow_ctrl & FLOW_CTRL_TX)
1921 miireg = ADVERTISE_1000XPSE_ASYM;
1922 else if (flow_ctrl & FLOW_CTRL_RX)
1923 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1924 else
1925 miireg = 0;
1926
1927 return miireg;
1928 }
1929
1930 static u32 tg3_decode_flowctrl_1000X(u32 adv)
1931 {
1932 u32 flowctrl = 0;
1933
1934 if (adv & ADVERTISE_1000XPAUSE) {
1935 flowctrl |= FLOW_CTRL_RX;
1936 if (!(adv & ADVERTISE_1000XPSE_ASYM))
1937 flowctrl |= FLOW_CTRL_TX;
1938 } else if (adv & ADVERTISE_1000XPSE_ASYM)
1939 flowctrl |= FLOW_CTRL_TX;
1940
1941 return flowctrl;
1942 }
1943
1944 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1945 {
1946 u8 cap = 0;
1947
1948 if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
1949 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1950 } else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
1951 if (lcladv & ADVERTISE_1000XPAUSE)
1952 cap = FLOW_CTRL_RX;
1953 if (rmtadv & ADVERTISE_1000XPAUSE)
1954 cap = FLOW_CTRL_TX;
1955 }
1956
1957 return cap;
1958 }
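/* The resolution above follows the usual 802.3 pause priority: symmetric
 * pause wins, otherwise asymmetric pause applies in whichever direction
 * both ends agree on. Illustrative truth table (PAUSE =
 * ADVERTISE_1000XPAUSE, ASYM = ADVERTISE_1000XPSE_ASYM):
 *
 *   lcladv        rmtadv        resolved cap
 *   PAUSE|ASYM    PAUSE         FLOW_CTRL_TX | FLOW_CTRL_RX
 *   PAUSE|ASYM    ASYM          FLOW_CTRL_RX
 *   ASYM          PAUSE|ASYM    FLOW_CTRL_TX
 *   PAUSE         ASYM          0
 */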
1959
1960 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1961 {
1962 u8 autoneg;
1963 u8 flowctrl = 0;
1964 u32 old_rx_mode = tp->rx_mode;
1965 u32 old_tx_mode = tp->tx_mode;
1966
1967 if (tg3_flag(tp, USE_PHYLIB))
1968 autoneg = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr)->autoneg;
1969 else
1970 autoneg = tp->link_config.autoneg;
1971
1972 if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
1973 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1974 flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1975 else
1976 flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1977 } else
1978 flowctrl = tp->link_config.flowctrl;
1979
1980 tp->link_config.active_flowctrl = flowctrl;
1981
1982 if (flowctrl & FLOW_CTRL_RX)
1983 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1984 else
1985 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1986
1987 if (old_rx_mode != tp->rx_mode)
1988 tw32_f(MAC_RX_MODE, tp->rx_mode);
1989
1990 if (flowctrl & FLOW_CTRL_TX)
1991 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1992 else
1993 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1994
1995 if (old_tx_mode != tp->tx_mode)
1996 tw32_f(MAC_TX_MODE, tp->tx_mode);
1997 }
1998
1999 static void tg3_adjust_link(struct net_device *dev)
2000 {
2001 u8 oldflowctrl, linkmesg = 0;
2002 u32 mac_mode, lcl_adv, rmt_adv;
2003 struct tg3 *tp = netdev_priv(dev);
2004 struct phy_device *phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
2005
2006 spin_lock_bh(&tp->lock);
2007
2008 mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
2009 MAC_MODE_HALF_DUPLEX);
2010
2011 oldflowctrl = tp->link_config.active_flowctrl;
2012
2013 if (phydev->link) {
2014 lcl_adv = 0;
2015 rmt_adv = 0;
2016
2017 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
2018 mac_mode |= MAC_MODE_PORT_MODE_MII;
2019 else if (phydev->speed == SPEED_1000 ||
2020 tg3_asic_rev(tp) != ASIC_REV_5785)
2021 mac_mode |= MAC_MODE_PORT_MODE_GMII;
2022 else
2023 mac_mode |= MAC_MODE_PORT_MODE_MII;
2024
2025 if (phydev->duplex == DUPLEX_HALF)
2026 mac_mode |= MAC_MODE_HALF_DUPLEX;
2027 else {
2028 lcl_adv = mii_advertise_flowctrl(
2029 tp->link_config.flowctrl);
2030
2031 if (phydev->pause)
2032 rmt_adv = LPA_PAUSE_CAP;
2033 if (phydev->asym_pause)
2034 rmt_adv |= LPA_PAUSE_ASYM;
2035 }
2036
2037 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
2038 } else
2039 mac_mode |= MAC_MODE_PORT_MODE_GMII;
2040
2041 if (mac_mode != tp->mac_mode) {
2042 tp->mac_mode = mac_mode;
2043 tw32_f(MAC_MODE, tp->mac_mode);
2044 udelay(40);
2045 }
2046
2047 if (tg3_asic_rev(tp) == ASIC_REV_5785) {
2048 if (phydev->speed == SPEED_10)
2049 tw32(MAC_MI_STAT,
2050 MAC_MI_STAT_10MBPS_MODE |
2051 MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2052 else
2053 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2054 }
2055
2056 if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
2057 tw32(MAC_TX_LENGTHS,
2058 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2059 (6 << TX_LENGTHS_IPG_SHIFT) |
2060 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2061 else
2062 tw32(MAC_TX_LENGTHS,
2063 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2064 (6 << TX_LENGTHS_IPG_SHIFT) |
2065 (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2066
2067 if (phydev->link != tp->old_link ||
2068 phydev->speed != tp->link_config.active_speed ||
2069 phydev->duplex != tp->link_config.active_duplex ||
2070 oldflowctrl != tp->link_config.active_flowctrl)
2071 linkmesg = 1;
2072
2073 tp->old_link = phydev->link;
2074 tp->link_config.active_speed = phydev->speed;
2075 tp->link_config.active_duplex = phydev->duplex;
2076
2077 spin_unlock_bh(&tp->lock);
2078
2079 if (linkmesg)
2080 tg3_link_report(tp);
2081 }
2082
2083 static int tg3_phy_init(struct tg3 *tp)
2084 {
2085 struct phy_device *phydev;
2086
2087 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
2088 return 0;
2089
2090 /* Bring the PHY back to a known state. */
2091 tg3_bmcr_reset(tp);
2092
2093 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
2094
2095 /* Attach the MAC to the PHY. */
2096 phydev = phy_connect(tp->dev, phydev_name(phydev),
2097 tg3_adjust_link, phydev->interface);
2098 if (IS_ERR(phydev)) {
2099 dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
2100 return PTR_ERR(phydev);
2101 }
2102
2103 /* Mask with MAC supported features. */
2104 switch (phydev->interface) {
2105 case PHY_INTERFACE_MODE_GMII:
2106 case PHY_INTERFACE_MODE_RGMII:
2107 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
2108 phy_set_max_speed(phydev, SPEED_1000);
2109 phy_support_asym_pause(phydev);
2110 break;
2111 }
2112 fallthrough;
2113 case PHY_INTERFACE_MODE_MII:
2114 phy_set_max_speed(phydev, SPEED_100);
2115 phy_support_asym_pause(phydev);
2116 break;
2117 default:
2118 phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
2119 return -EINVAL;
2120 }
2121
2122 tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
2123
2124 phy_attached_info(phydev);
2125
2126 return 0;
2127 }
2128
2129 static void tg3_phy_start(struct tg3 *tp)
2130 {
2131 struct phy_device *phydev;
2132
2133 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2134 return;
2135
2136 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
2137
2138 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
2139 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
2140 phydev->speed = tp->link_config.speed;
2141 phydev->duplex = tp->link_config.duplex;
2142 phydev->autoneg = tp->link_config.autoneg;
2143 ethtool_convert_legacy_u32_to_link_mode(
2144 phydev->advertising, tp->link_config.advertising);
2145 }
2146
2147 phy_start(phydev);
2148
2149 phy_start_aneg(phydev);
2150 }
2151
2152 static void tg3_phy_stop(struct tg3 *tp)
2153 {
2154 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2155 return;
2156
2157 phy_stop(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
2158 }
2159
2160 static void tg3_phy_fini(struct tg3 *tp)
2161 {
2162 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
2163 phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
2164 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
2165 }
2166 }
2167
2168 static int tg3_phy_set_extloopbk(struct tg3 *tp)
2169 {
2170 int err;
2171 u32 val;
2172
2173 if (tp->phy_flags & TG3_PHYFLG_IS_FET)
2174 return 0;
2175
2176 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2177 /* Cannot do read-modify-write on 5401 */
2178 err = tg3_phy_auxctl_write(tp,
2179 MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2180 MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
2181 0x4c20);
2182 goto done;
2183 }
2184
2185 err = tg3_phy_auxctl_read(tp,
2186 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2187 if (err)
2188 return err;
2189
2190 val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
2191 err = tg3_phy_auxctl_write(tp,
2192 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
2193
2194 done:
2195 return err;
2196 }
2197
2198 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
2199 {
2200 u32 phytest;
2201
2202 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2203 u32 phy;
2204
2205 tg3_writephy(tp, MII_TG3_FET_TEST,
2206 phytest | MII_TG3_FET_SHADOW_EN);
2207 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
2208 if (enable)
2209 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
2210 else
2211 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
2212 tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
2213 }
2214 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2215 }
2216 }
2217
2218 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
2219 {
2220 u32 reg;
2221
2222 if (!tg3_flag(tp, 5705_PLUS) ||
2223 (tg3_flag(tp, 5717_PLUS) &&
2224 (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2225 return;
2226
2227 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2228 tg3_phy_fet_toggle_apd(tp, enable);
2229 return;
2230 }
2231
2232 reg = MII_TG3_MISC_SHDW_SCR5_LPED |
2233 MII_TG3_MISC_SHDW_SCR5_DLPTLM |
2234 MII_TG3_MISC_SHDW_SCR5_SDTL |
2235 MII_TG3_MISC_SHDW_SCR5_C125OE;
2236 if (tg3_asic_rev(tp) != ASIC_REV_5784 || !enable)
2237 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
2238
2239 tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_SCR5_SEL, reg);
2240
2242 reg = MII_TG3_MISC_SHDW_APD_WKTM_84MS;
2243 if (enable)
2244 reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
2245
2246 tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_APD_SEL, reg);
2247 }
2248
2249 static void tg3_phy_toggle_automdix(struct tg3 *tp, bool enable)
2250 {
2251 u32 phy;
2252
2253 if (!tg3_flag(tp, 5705_PLUS) ||
2254 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
2255 return;
2256
2257 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2258 u32 ephy;
2259
2260 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
2261 u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
2262
2263 tg3_writephy(tp, MII_TG3_FET_TEST,
2264 ephy | MII_TG3_FET_SHADOW_EN);
2265 if (!tg3_readphy(tp, reg, &phy)) {
2266 if (enable)
2267 phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2268 else
2269 phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2270 tg3_writephy(tp, reg, phy);
2271 }
2272 tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
2273 }
2274 } else {
2275 int ret;
2276
2277 ret = tg3_phy_auxctl_read(tp,
2278 MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
2279 if (!ret) {
2280 if (enable)
2281 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2282 else
2283 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2284 tg3_phy_auxctl_write(tp,
2285 MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
2286 }
2287 }
2288 }
2289
2290 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2291 {
2292 int ret;
2293 u32 val;
2294
2295 if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2296 return;
2297
2298 ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2299 if (!ret)
2300 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2301 val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2302 }
2303
2304 static void tg3_phy_apply_otp(struct tg3 *tp)
2305 {
2306 u32 otp, phy;
2307
2308 if (!tp->phy_otp)
2309 return;
2310
2311 otp = tp->phy_otp;
2312
2313 if (tg3_phy_toggle_auxctl_smdsp(tp, true))
2314 return;
2315
2316 phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
2317 phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
2318 tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
2319
2320 phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
2321 ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
2322 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
2323
2324 phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
2325 phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
2326 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
2327
2328 phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
2329 tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
2330
2331 phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
2332 tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
2333
2334 phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
2335 ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
2336 tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
2337
2338 tg3_phy_toggle_auxctl_smdsp(tp, false);
2339 }
2340
2341 static void tg3_eee_pull_config(struct tg3 *tp, struct ethtool_eee *eee)
2342 {
2343 u32 val;
2344 struct ethtool_eee *dest = &tp->eee;
2345
2346 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2347 return;
2348
2349 if (eee)
2350 dest = eee;
2351
2352 if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, TG3_CL45_D7_EEERES_STAT, &val))
2353 return;
2354
2355 /* Pull eee_active */
2356 if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
2357 val == TG3_CL45_D7_EEERES_STAT_LP_100TX) {
2358 dest->eee_active = 1;
2359 } else
2360 dest->eee_active = 0;
2361
2362 /* Pull lp advertised settings */
2363 if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE, &val))
2364 return;
2365 dest->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(val);
2366
2367 /* Pull advertised and eee_enabled settings */
2368 if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, &val))
2369 return;
2370 dest->eee_enabled = !!val;
2371 dest->advertised = mmd_eee_adv_to_ethtool_adv_t(val);
2372
2373 /* Pull tx_lpi_enabled */
2374 val = tr32(TG3_CPMU_EEE_MODE);
2375 dest->tx_lpi_enabled = !!(val & TG3_CPMU_EEEMD_LPI_IN_TX);
2376
2377 /* Pull lpi timer value */
2378 dest->tx_lpi_timer = tr32(TG3_CPMU_EEE_DBTMR1) & 0xffff;
2379 }
2380
2381 static void tg3_phy_eee_adjust(struct tg3 *tp, bool current_link_up)
2382 {
2383 u32 val;
2384
2385 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2386 return;
2387
2388 tp->setlpicnt = 0;
2389
2390 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2391 current_link_up &&
2392 tp->link_config.active_duplex == DUPLEX_FULL &&
2393 (tp->link_config.active_speed == SPEED_100 ||
2394 tp->link_config.active_speed == SPEED_1000)) {
2395 u32 eeectl;
2396
2397 if (tp->link_config.active_speed == SPEED_1000)
2398 eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
2399 else
2400 eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
2401
2402 tw32(TG3_CPMU_EEE_CTRL, eeectl);
2403
2404 tg3_eee_pull_config(tp, NULL);
2405 if (tp->eee.eee_active)
2406 tp->setlpicnt = 2;
2407 }
2408
2409 if (!tp->setlpicnt) {
2410 if (current_link_up &&
2411 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2412 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
2413 tg3_phy_toggle_auxctl_smdsp(tp, false);
2414 }
2415
2416 val = tr32(TG3_CPMU_EEE_MODE);
2417 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2418 }
2419 }
2420
2421 static void tg3_phy_eee_enable(struct tg3 *tp)
2422 {
2423 u32 val;
2424
2425 if (tp->link_config.active_speed == SPEED_1000 &&
2426 (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2427 tg3_asic_rev(tp) == ASIC_REV_5719 ||
2428 tg3_flag(tp, 57765_CLASS)) &&
2429 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2430 val = MII_TG3_DSP_TAP26_ALNOKO |
2431 MII_TG3_DSP_TAP26_RMRXSTO;
2432 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2433 tg3_phy_toggle_auxctl_smdsp(tp, false);
2434 }
2435
2436 val = tr32(TG3_CPMU_EEE_MODE);
2437 tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2438 }
2439
2440 static int tg3_wait_macro_done(struct tg3 *tp)
2441 {
2442 int limit = 100;
2443
2444 while (limit--) {
2445 u32 tmp32;
2446
2447 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2448 if ((tmp32 & 0x1000) == 0)
2449 break;
2450 }
2451 }
2452 if (limit < 0)
2453 return -EBUSY;
2454
2455 return 0;
2456 }
2457
2458 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
2459 {
2460 static const u32 test_pat[4][6] = {
2461 { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
2462 { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
2463 { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
2464 { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
2465 };
2466 int chan;
2467
2468 for (chan = 0; chan < 4; chan++) {
2469 int i;
2470
2471 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2472 (chan * 0x2000) | 0x0200);
2473 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2474
2475 for (i = 0; i < 6; i++)
2476 tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
2477 test_pat[chan][i]);
2478
2479 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2480 if (tg3_wait_macro_done(tp)) {
2481 *resetp = 1;
2482 return -EBUSY;
2483 }
2484
2485 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2486 (chan * 0x2000) | 0x0200);
2487 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
2488 if (tg3_wait_macro_done(tp)) {
2489 *resetp = 1;
2490 return -EBUSY;
2491 }
2492
2493 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
2494 if (tg3_wait_macro_done(tp)) {
2495 *resetp = 1;
2496 return -EBUSY;
2497 }
2498
2499 for (i = 0; i < 6; i += 2) {
2500 u32 low, high;
2501
2502 if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
2503 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
2504 tg3_wait_macro_done(tp)) {
2505 *resetp = 1;
2506 return -EBUSY;
2507 }
2508 low &= 0x7fff;
2509 high &= 0x000f;
2510 if (low != test_pat[chan][i] ||
2511 high != test_pat[chan][i+1]) {
2512 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
2513 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
2514 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
2515
2516 return -EBUSY;
2517 }
2518 }
2519 }
2520
2521 return 0;
2522 }
2523
2524 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2525 {
2526 int chan;
2527
2528 for (chan = 0; chan < 4; chan++) {
2529 int i;
2530
2531 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2532 (chan * 0x2000) | 0x0200);
2533 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2534 for (i = 0; i < 6; i++)
2535 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
2536 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2537 if (tg3_wait_macro_done(tp))
2538 return -EBUSY;
2539 }
2540
2541 return 0;
2542 }
2543
2544 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2545 {
2546 u32 reg32, phy9_orig;
2547 int retries, do_phy_reset, err;
2548
2549 retries = 10;
2550 do_phy_reset = 1;
2551 do {
2552 if (do_phy_reset) {
2553 err = tg3_bmcr_reset(tp);
2554 if (err)
2555 return err;
2556 do_phy_reset = 0;
2557 }
2558
2559 /* Disable transmitter and interrupt. */
2560 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2561 continue;
2562
2563 reg32 |= 0x3000;
2564 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2565
2566 /* Set full-duplex, 1000 mbps. */
2567 tg3_writephy(tp, MII_BMCR,
2568 BMCR_FULLDPLX | BMCR_SPEED1000);
2569
2570 /* Set to master mode. */
2571 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2572 continue;
2573
2574 tg3_writephy(tp, MII_CTRL1000,
2575 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2576
2577 err = tg3_phy_toggle_auxctl_smdsp(tp, true);
2578 if (err)
2579 return err;
2580
2581 /* Block the PHY control access. */
2582 tg3_phydsp_write(tp, 0x8005, 0x0800);
2583
2584 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2585 if (!err)
2586 break;
2587 } while (--retries);
2588
2589 err = tg3_phy_reset_chanpat(tp);
2590 if (err)
2591 return err;
2592
2593 tg3_phydsp_write(tp, 0x8005, 0x0000);
2594
2595 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2596 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2597
2598 tg3_phy_toggle_auxctl_smdsp(tp, false);
2599
2600 tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2601
2602 err = tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32);
2603 if (err)
2604 return err;
2605
2606 reg32 &= ~0x3000;
2607 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2608
2609 return 0;
2610 }
2611
2612 static void tg3_carrier_off(struct tg3 *tp)
2613 {
2614 netif_carrier_off(tp->dev);
2615 tp->link_up = false;
2616 }
2617
2618 static void tg3_warn_mgmt_link_flap(struct tg3 *tp)
2619 {
2620 if (tg3_flag(tp, ENABLE_ASF))
2621 netdev_warn(tp->dev,
2622 "Management side-band traffic will be interrupted during phy settings change\n");
2623 }
2624
2625 /* Reset the tigon3 PHY and reapply the chip- and revision-specific
2626 * workarounds. Reports the resulting link loss if the device was up.
2627 */
2628 static int tg3_phy_reset(struct tg3 *tp)
2629 {
2630 u32 val, cpmuctrl;
2631 int err;
2632
2633 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2634 val = tr32(GRC_MISC_CFG);
2635 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2636 udelay(40);
2637 }
2638 err = tg3_readphy(tp, MII_BMSR, &val);
2639 err |= tg3_readphy(tp, MII_BMSR, &val);
2640 if (err != 0)
2641 return -EBUSY;
2642
2643 if (netif_running(tp->dev) && tp->link_up) {
2644 netif_carrier_off(tp->dev);
2645 tg3_link_report(tp);
2646 }
2647
2648 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
2649 tg3_asic_rev(tp) == ASIC_REV_5704 ||
2650 tg3_asic_rev(tp) == ASIC_REV_5705) {
2651 err = tg3_phy_reset_5703_4_5(tp);
2652 if (err)
2653 return err;
2654 goto out;
2655 }
2656
2657 cpmuctrl = 0;
2658 if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
2659 tg3_chip_rev(tp) != CHIPREV_5784_AX) {
2660 cpmuctrl = tr32(TG3_CPMU_CTRL);
2661 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2662 tw32(TG3_CPMU_CTRL,
2663 cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2664 }
2665
2666 err = tg3_bmcr_reset(tp);
2667 if (err)
2668 return err;
2669
2670 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2671 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2672 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2673
2674 tw32(TG3_CPMU_CTRL, cpmuctrl);
2675 }
2676
2677 if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
2678 tg3_chip_rev(tp) == CHIPREV_5761_AX) {
2679 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2680 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2681 CPMU_LSPD_1000MB_MACCLK_12_5) {
2682 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2683 udelay(40);
2684 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2685 }
2686 }
2687
2688 if (tg3_flag(tp, 5717_PLUS) &&
2689 (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2690 return 0;
2691
2692 tg3_phy_apply_otp(tp);
2693
2694 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2695 tg3_phy_toggle_apd(tp, true);
2696 else
2697 tg3_phy_toggle_apd(tp, false);
2698
2699 out:
2700 if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2701 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2702 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2703 tg3_phydsp_write(tp, 0x000a, 0x0323);
2704 tg3_phy_toggle_auxctl_smdsp(tp, false);
2705 }
2706
2707 if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2708 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2709 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2710 }
2711
2712 if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2713 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2714 tg3_phydsp_write(tp, 0x000a, 0x310b);
2715 tg3_phydsp_write(tp, 0x201f, 0x9506);
2716 tg3_phydsp_write(tp, 0x401f, 0x14e2);
2717 tg3_phy_toggle_auxctl_smdsp(tp, false);
2718 }
2719 } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2720 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2721 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2722 if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2723 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2724 tg3_writephy(tp, MII_TG3_TEST1,
2725 MII_TG3_TEST1_TRIM_EN | 0x4);
2726 } else
2727 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2728
2729 tg3_phy_toggle_auxctl_smdsp(tp, false);
2730 }
2731 }
2732
2733 /* Set Extended packet length bit (bit 14) on all chips that */
2734 /* support jumbo frames */
2735 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2736 /* Cannot do read-modify-write on 5401 */
2737 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2738 } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2739 /* Set bit 14 with read-modify-write to preserve other bits */
2740 err = tg3_phy_auxctl_read(tp,
2741 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2742 if (!err)
2743 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2744 val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2745 }
2746
2747 /* Set phy register 0x10 bit 0 to high fifo elasticity to support
2748 * jumbo frames transmission.
2749 */
2750 if (tg3_flag(tp, JUMBO_CAPABLE)) {
2751 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2752 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2753 val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2754 }
2755
2756 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2757 /* adjust output voltage */
2758 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2759 }
2760
2761 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5762_A0)
2762 tg3_phydsp_write(tp, 0xffb, 0x4000);
2763
2764 tg3_phy_toggle_automdix(tp, true);
2765 tg3_phy_set_wirespeed(tp);
2766 return 0;
2767 }
2768
2769 #define TG3_GPIO_MSG_DRVR_PRES 0x00000001
2770 #define TG3_GPIO_MSG_NEED_VAUX 0x00000002
2771 #define TG3_GPIO_MSG_MASK (TG3_GPIO_MSG_DRVR_PRES | \
2772 TG3_GPIO_MSG_NEED_VAUX)
2773 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2774 ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2775 (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2776 (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2777 (TG3_GPIO_MSG_DRVR_PRES << 12))
2778
2779 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2780 ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2781 (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2782 (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2783 (TG3_GPIO_MSG_NEED_VAUX << 12))
2784
2785 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2786 {
2787 u32 status, shift;
2788
2789 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2790 tg3_asic_rev(tp) == ASIC_REV_5719)
2791 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2792 else
2793 status = tr32(TG3_CPMU_DRV_STATUS);
2794
2795 shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2796 status &= ~(TG3_GPIO_MSG_MASK << shift);
2797 status |= (newstat << shift);
2798
2799 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2800 tg3_asic_rev(tp) == ASIC_REV_5719)
2801 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2802 else
2803 tw32(TG3_CPMU_DRV_STATUS, status);
2804
2805 return status >> TG3_APE_GPIO_MSG_SHIFT;
2806 }
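/* Each PCI function owns a 4-bit slot in the shared status word, so for
 * example pci_fn = 2 with newstat = TG3_GPIO_MSG_NEED_VAUX rewrites the
 * two message bits at offset TG3_APE_GPIO_MSG_SHIFT + 8. The aggregate
 * is returned shifted back down so that callers such as
 * tg3_frob_aux_power_5717() can test it directly against the
 * TG3_GPIO_MSG_ALL_* masks defined above.
 */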
2807
2808 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2809 {
2810 if (!tg3_flag(tp, IS_NIC))
2811 return 0;
2812
2813 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2814 tg3_asic_rev(tp) == ASIC_REV_5719 ||
2815 tg3_asic_rev(tp) == ASIC_REV_5720) {
2816 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2817 return -EIO;
2818
2819 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2820
2821 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2822 TG3_GRC_LCLCTL_PWRSW_DELAY);
2823
2824 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2825 } else {
2826 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2827 TG3_GRC_LCLCTL_PWRSW_DELAY);
2828 }
2829
2830 return 0;
2831 }
2832
2833 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2834 {
2835 u32 grc_local_ctrl;
2836
2837 if (!tg3_flag(tp, IS_NIC) ||
2838 tg3_asic_rev(tp) == ASIC_REV_5700 ||
2839 tg3_asic_rev(tp) == ASIC_REV_5701)
2840 return;
2841
2842 grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2843
2844 tw32_wait_f(GRC_LOCAL_CTRL,
2845 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2846 TG3_GRC_LCLCTL_PWRSW_DELAY);
2847
2848 tw32_wait_f(GRC_LOCAL_CTRL,
2849 grc_local_ctrl,
2850 TG3_GRC_LCLCTL_PWRSW_DELAY);
2851
2852 tw32_wait_f(GRC_LOCAL_CTRL,
2853 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2854 TG3_GRC_LCLCTL_PWRSW_DELAY);
2855 }
2856
2857 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2858 {
2859 if (!tg3_flag(tp, IS_NIC))
2860 return;
2861
2862 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
2863 tg3_asic_rev(tp) == ASIC_REV_5701) {
2864 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2865 (GRC_LCLCTRL_GPIO_OE0 |
2866 GRC_LCLCTRL_GPIO_OE1 |
2867 GRC_LCLCTRL_GPIO_OE2 |
2868 GRC_LCLCTRL_GPIO_OUTPUT0 |
2869 GRC_LCLCTRL_GPIO_OUTPUT1),
2870 TG3_GRC_LCLCTL_PWRSW_DELAY);
2871 } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2872 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2873 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2874 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2875 GRC_LCLCTRL_GPIO_OE1 |
2876 GRC_LCLCTRL_GPIO_OE2 |
2877 GRC_LCLCTRL_GPIO_OUTPUT0 |
2878 GRC_LCLCTRL_GPIO_OUTPUT1 |
2879 tp->grc_local_ctrl;
2880 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2881 TG3_GRC_LCLCTL_PWRSW_DELAY);
2882
2883 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2884 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2885 TG3_GRC_LCLCTL_PWRSW_DELAY);
2886
2887 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2888 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2889 TG3_GRC_LCLCTL_PWRSW_DELAY);
2890 } else {
2891 u32 no_gpio2;
2892 u32 grc_local_ctrl = 0;
2893
2894 /* Workaround to prevent overdrawing Amps. */
2895 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
2896 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2897 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2898 grc_local_ctrl,
2899 TG3_GRC_LCLCTL_PWRSW_DELAY);
2900 }
2901
2902 /* On 5753 and variants, GPIO2 cannot be used. */
2903 no_gpio2 = tp->nic_sram_data_cfg &
2904 NIC_SRAM_DATA_CFG_NO_GPIO2;
2905
2906 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2907 GRC_LCLCTRL_GPIO_OE1 |
2908 GRC_LCLCTRL_GPIO_OE2 |
2909 GRC_LCLCTRL_GPIO_OUTPUT1 |
2910 GRC_LCLCTRL_GPIO_OUTPUT2;
2911 if (no_gpio2) {
2912 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2913 GRC_LCLCTRL_GPIO_OUTPUT2);
2914 }
2915 tw32_wait_f(GRC_LOCAL_CTRL,
2916 tp->grc_local_ctrl | grc_local_ctrl,
2917 TG3_GRC_LCLCTL_PWRSW_DELAY);
2918
2919 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2920
2921 tw32_wait_f(GRC_LOCAL_CTRL,
2922 tp->grc_local_ctrl | grc_local_ctrl,
2923 TG3_GRC_LCLCTL_PWRSW_DELAY);
2924
2925 if (!no_gpio2) {
2926 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2927 tw32_wait_f(GRC_LOCAL_CTRL,
2928 tp->grc_local_ctrl | grc_local_ctrl,
2929 TG3_GRC_LCLCTL_PWRSW_DELAY);
2930 }
2931 }
2932 }
2933
2934 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2935 {
2936 u32 msg = 0;
2937
2938 /* Serialize power state transitions */
2939 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2940 return;
2941
2942 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2943 msg = TG3_GPIO_MSG_NEED_VAUX;
2944
2945 msg = tg3_set_function_status(tp, msg);
2946
2947 if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2948 goto done;
2949
2950 if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2951 tg3_pwrsrc_switch_to_vaux(tp);
2952 else
2953 tg3_pwrsrc_die_with_vmain(tp);
2954
2955 done:
2956 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2957 }
2958
2959 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2960 {
2961 bool need_vaux = false;
2962
2963 /* The GPIOs do something completely different on 57765. */
2964 if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
2965 return;
2966
2967 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2968 tg3_asic_rev(tp) == ASIC_REV_5719 ||
2969 tg3_asic_rev(tp) == ASIC_REV_5720) {
2970 tg3_frob_aux_power_5717(tp, include_wol ?
2971 tg3_flag(tp, WOL_ENABLE) != 0 : 0);
2972 return;
2973 }
2974
2975 if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2976 struct net_device *dev_peer;
2977
2978 dev_peer = pci_get_drvdata(tp->pdev_peer);
2979
2980 /* remove_one() may have been run on the peer. */
2981 if (dev_peer) {
2982 struct tg3 *tp_peer = netdev_priv(dev_peer);
2983
2984 if (tg3_flag(tp_peer, INIT_COMPLETE))
2985 return;
2986
2987 if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
2988 tg3_flag(tp_peer, ENABLE_ASF))
2989 need_vaux = true;
2990 }
2991 }
2992
2993 if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
2994 tg3_flag(tp, ENABLE_ASF))
2995 need_vaux = true;
2996
2997 if (need_vaux)
2998 tg3_pwrsrc_switch_to_vaux(tp);
2999 else
3000 tg3_pwrsrc_die_with_vmain(tp);
3001 }
3002
3003 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
3004 {
3005 if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
3006 return 1;
3007 else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
3008 if (speed != SPEED_10)
3009 return 1;
3010 } else if (speed == SPEED_10)
3011 return 1;
3012
3013 return 0;
3014 }
3015
3016 static bool tg3_phy_power_bug(struct tg3 *tp)
3017 {
3018 switch (tg3_asic_rev(tp)) {
3019 case ASIC_REV_5700:
3020 case ASIC_REV_5704:
3021 return true;
3022 case ASIC_REV_5780:
3023 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
3024 return true;
3025 return false;
3026 case ASIC_REV_5717:
3027 if (!tp->pci_fn)
3028 return true;
3029 return false;
3030 case ASIC_REV_5719:
3031 case ASIC_REV_5720:
3032 if ((tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
3033 !tp->pci_fn)
3034 return true;
3035 return false;
3036 }
3037
3038 return false;
3039 }
3040
3041 static bool tg3_phy_led_bug(struct tg3 *tp)
3042 {
3043 switch (tg3_asic_rev(tp)) {
3044 case ASIC_REV_5719:
3045 case ASIC_REV_5720:
3046 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
3047 !tp->pci_fn)
3048 return true;
3049 return false;
3050 }
3051
3052 return false;
3053 }
3054
3055 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
3056 {
3057 u32 val;
3058
3059 if (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)
3060 return;
3061
3062 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
3063 if (tg3_asic_rev(tp) == ASIC_REV_5704) {
3064 u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
3065 u32 serdes_cfg = tr32(MAC_SERDES_CFG);
3066
3067 sg_dig_ctrl |=
3068 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
3069 tw32(SG_DIG_CTRL, sg_dig_ctrl);
3070 tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
3071 }
3072 return;
3073 }
3074
3075 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3076 tg3_bmcr_reset(tp);
3077 val = tr32(GRC_MISC_CFG);
3078 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
3079 udelay(40);
3080 return;
3081 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3082 u32 phytest;
3083 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
3084 u32 phy;
3085
3086 tg3_writephy(tp, MII_ADVERTISE, 0);
3087 tg3_writephy(tp, MII_BMCR,
3088 BMCR_ANENABLE | BMCR_ANRESTART);
3089
3090 tg3_writephy(tp, MII_TG3_FET_TEST,
3091 phytest | MII_TG3_FET_SHADOW_EN);
3092 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
3093 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
3094 tg3_writephy(tp,
3095 MII_TG3_FET_SHDW_AUXMODE4,
3096 phy);
3097 }
3098 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
3099 }
3100 return;
3101 } else if (do_low_power) {
3102 if (!tg3_phy_led_bug(tp))
3103 tg3_writephy(tp, MII_TG3_EXT_CTRL,
3104 MII_TG3_EXT_CTRL_FORCE_LED_OFF);
3105
3106 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
3107 MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
3108 MII_TG3_AUXCTL_PCTL_VREG_11V;
3109 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
3110 }
3111
3112 /* The PHY should not be powered down on some chips because
3113 * of bugs.
3114 */
3115 if (tg3_phy_power_bug(tp))
3116 return;
3117
3118 if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
3119 tg3_chip_rev(tp) == CHIPREV_5761_AX) {
3120 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
3121 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
3122 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
3123 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
3124 }
3125
3126 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
3127 }
3128
3129 /* tp->lock is held. */
3130 static int tg3_nvram_lock(struct tg3 *tp)
3131 {
3132 if (tg3_flag(tp, NVRAM)) {
3133 int i;
3134
3135 if (tp->nvram_lock_cnt == 0) {
3136 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
3137 for (i = 0; i < 8000; i++) {
3138 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
3139 break;
3140 udelay(20);
3141 }
3142 if (i == 8000) {
3143 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
3144 return -ENODEV;
3145 }
3146 }
3147 tp->nvram_lock_cnt++;
3148 }
3149 return 0;
3150 }
3151
3152 /* tp->lock is held. */
3153 static void tg3_nvram_unlock(struct tg3 *tp)
3154 {
3155 if (tg3_flag(tp, NVRAM)) {
3156 if (tp->nvram_lock_cnt > 0)
3157 tp->nvram_lock_cnt--;
3158 if (tp->nvram_lock_cnt == 0)
3159 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
3160 }
3161 }
3162
3163 /* tp->lock is held. */
3164 static void tg3_enable_nvram_access(struct tg3 *tp)
3165 {
3166 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3167 u32 nvaccess = tr32(NVRAM_ACCESS);
3168
3169 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
3170 }
3171 }
3172
3173 /* tp->lock is held. */
3174 static void tg3_disable_nvram_access(struct tg3 *tp)
3175 {
3176 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3177 u32 nvaccess = tr32(NVRAM_ACCESS);
3178
3179 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
3180 }
3181 }
3182
3183 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
3184 u32 offset, u32 *val)
3185 {
3186 u32 tmp;
3187 int i;
3188
3189 if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
3190 return -EINVAL;
3191
3192 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
3193 EEPROM_ADDR_DEVID_MASK |
3194 EEPROM_ADDR_READ);
3195 tw32(GRC_EEPROM_ADDR,
3196 tmp |
3197 (0 << EEPROM_ADDR_DEVID_SHIFT) |
3198 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
3199 EEPROM_ADDR_ADDR_MASK) |
3200 EEPROM_ADDR_READ | EEPROM_ADDR_START);
3201
3202 for (i = 0; i < 1000; i++) {
3203 tmp = tr32(GRC_EEPROM_ADDR);
3204
3205 if (tmp & EEPROM_ADDR_COMPLETE)
3206 break;
3207 msleep(1);
3208 }
3209 if (!(tmp & EEPROM_ADDR_COMPLETE))
3210 return -EBUSY;
3211
3212 tmp = tr32(GRC_EEPROM_DATA);
3213
3214 /*
3215 * The data will always be opposite the native endian
3216 * format. Perform a blind byteswap to compensate.
3217 */
3218 *val = swab32(tmp);
3219
3220 return 0;
3221 }
3222
3223 #define NVRAM_CMD_TIMEOUT 10000
3224
3225 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
3226 {
3227 int i;
3228
3229 tw32(NVRAM_CMD, nvram_cmd);
3230 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
3231 usleep_range(10, 40);
3232 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
3233 udelay(10);
3234 break;
3235 }
3236 }
3237
3238 if (i == NVRAM_CMD_TIMEOUT)
3239 return -EBUSY;
3240
3241 return 0;
3242 }
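/* An NVRAM command is a bitmask written to NVRAM_CMD and polled for
 * NVRAM_CMD_DONE. For example, the single-word read issued by
 * tg3_nvram_read() below is composed as
 *
 *   NVRAM_CMD_RD | NVRAM_CMD_GO | NVRAM_CMD_FIRST |
 *   NVRAM_CMD_LAST | NVRAM_CMD_DONE
 *
 * since a one-word transfer is both the first and the last word of its
 * burst. With NVRAM_CMD_TIMEOUT polls of 10-40 usec each, a command has
 * on the order of 100-400 ms to finish before -EBUSY is returned.
 */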
3243
3244 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
3245 {
3246 if (tg3_flag(tp, NVRAM) &&
3247 tg3_flag(tp, NVRAM_BUFFERED) &&
3248 tg3_flag(tp, FLASH) &&
3249 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3250 (tp->nvram_jedecnum == JEDEC_ATMEL))
3251
3252 addr = ((addr / tp->nvram_pagesize) <<
3253 ATMEL_AT45DB0X1B_PAGE_POS) +
3254 (addr % tp->nvram_pagesize);
3255
3256 return addr;
3257 }
3258
3259 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
3260 {
3261 if (tg3_flag(tp, NVRAM) &&
3262 tg3_flag(tp, NVRAM_BUFFERED) &&
3263 tg3_flag(tp, FLASH) &&
3264 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3265 (tp->nvram_jedecnum == JEDEC_ATMEL))
3266
3267 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
3268 tp->nvram_pagesize) +
3269 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
3270
3271 return addr;
3272 }
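/* Worked example of the Atmel translation, assuming the 264-byte page
 * size used for AT45DB0X1B parts (ATMEL_AT45DB0X1B_PAGE_POS = 9):
 *
 *   linear addr 1000  ->  page 3, byte 208 within the page
 *   physical addr     =   (3 << 9) + 208 = 1744
 *
 * and tg3_nvram_logical_addr() inverts it:
 *
 *   (1744 >> 9) * 264 + (1744 & 511) = 3 * 264 + 208 = 1000
 *
 * The page number sits above bit 9 because a 264-byte page does not
 * pack into a power-of-two address range.
 */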
3273
3274 /* NOTE: Data read in from NVRAM is byteswapped according to
3275 * the byteswapping settings for all other register accesses.
3276 * tg3 devices are BE devices, so on a BE machine, the data
3277 * returned will be exactly as it is seen in NVRAM. On a LE
3278 * machine, the 32-bit value will be byteswapped.
3279 */
3280 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
3281 {
3282 int ret;
3283
3284 if (!tg3_flag(tp, NVRAM))
3285 return tg3_nvram_read_using_eeprom(tp, offset, val);
3286
3287 offset = tg3_nvram_phys_addr(tp, offset);
3288
3289 if (offset > NVRAM_ADDR_MSK)
3290 return -EINVAL;
3291
3292 ret = tg3_nvram_lock(tp);
3293 if (ret)
3294 return ret;
3295
3296 tg3_enable_nvram_access(tp);
3297
3298 tw32(NVRAM_ADDR, offset);
3299 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
3300 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
3301
3302 if (ret == 0)
3303 *val = tr32(NVRAM_RDDATA);
3304
3305 tg3_disable_nvram_access(tp);
3306
3307 tg3_nvram_unlock(tp);
3308
3309 return ret;
3310 }
3311
3312 /* Ensures NVRAM data is in bytestream format. */
3313 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
3314 {
3315 u32 v;
3316 int res = tg3_nvram_read(tp, offset, &v);
3317 if (!res)
3318 *val = cpu_to_be32(v);
3319 return res;
3320 }
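/* Endianness example for the two readers above: if NVRAM holds the bytes
 * aa bb cc dd at some offset, tg3_nvram_read() returns the value
 * 0xaabbccdd on either host, so its in-memory byte layout matches NVRAM
 * only on a big-endian machine. The cpu_to_be32() in
 * tg3_nvram_read_be32() fixes the layout so that storing *val back to
 * memory reproduces aa bb cc dd on both, which is what callers treating
 * NVRAM as a bytestream expect.
 */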
3321
3322 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
3323 u32 offset, u32 len, u8 *buf)
3324 {
3325 int i, j, rc = 0;
3326 u32 val;
3327
3328 for (i = 0; i < len; i += 4) {
3329 u32 addr;
3330 __be32 data;
3331
3332 addr = offset + i;
3333
3334 memcpy(&data, buf + i, 4);
3335
3336 /*
3337 * The SEEPROM interface expects the data to always be opposite
3338 * the native endian format. We accomplish this by reversing
3339 * all the operations that would have been performed on the
3340 * data from a call to tg3_nvram_read_be32().
3341 */
3342 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
3343
3344 val = tr32(GRC_EEPROM_ADDR);
3345 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
3346
3347 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
3348 EEPROM_ADDR_READ);
3349 tw32(GRC_EEPROM_ADDR, val |
3350 (0 << EEPROM_ADDR_DEVID_SHIFT) |
3351 (addr & EEPROM_ADDR_ADDR_MASK) |
3352 EEPROM_ADDR_START |
3353 EEPROM_ADDR_WRITE);
3354
3355 for (j = 0; j < 1000; j++) {
3356 val = tr32(GRC_EEPROM_ADDR);
3357
3358 if (val & EEPROM_ADDR_COMPLETE)
3359 break;
3360 msleep(1);
3361 }
3362 if (!(val & EEPROM_ADDR_COMPLETE)) {
3363 rc = -EBUSY;
3364 break;
3365 }
3366 }
3367
3368 return rc;
3369 }
3370
3371 /* offset and length are dword aligned */
3372 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
3373 u8 *buf)
3374 {
3375 int ret = 0;
3376 u32 pagesize = tp->nvram_pagesize;
3377 u32 pagemask = pagesize - 1;
3378 u32 nvram_cmd;
3379 u8 *tmp;
3380
3381 tmp = kmalloc(pagesize, GFP_KERNEL);
3382 if (tmp == NULL)
3383 return -ENOMEM;
3384
3385 while (len) {
3386 int j;
3387 u32 phy_addr, page_off, size;
3388
3389 phy_addr = offset & ~pagemask;
3390
3391 for (j = 0; j < pagesize; j += 4) {
3392 ret = tg3_nvram_read_be32(tp, phy_addr + j,
3393 (__be32 *) (tmp + j));
3394 if (ret)
3395 break;
3396 }
3397 if (ret)
3398 break;
3399
3400 page_off = offset & pagemask;
3401 size = pagesize;
3402 if (len < size)
3403 size = len;
3404
3405 len -= size;
3406
3407 memcpy(tmp + page_off, buf, size);
3408
3409 offset = offset + (pagesize - page_off);
3410
3411 tg3_enable_nvram_access(tp);
3412
3413 /*
3414 * Before we can erase the flash page, we need
3415 * to issue a special "write enable" command.
3416 */
3417 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3418
3419 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3420 break;
3421
3422 /* Erase the target page */
3423 tw32(NVRAM_ADDR, phy_addr);
3424
3425 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
3426 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
3427
3428 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3429 break;
3430
3431 /* Issue another write enable to start the write. */
3432 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3433
3434 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3435 break;
3436
3437 for (j = 0; j < pagesize; j += 4) {
3438 __be32 data;
3439
3440 data = *((__be32 *) (tmp + j));
3441
3442 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3443
3444 tw32(NVRAM_ADDR, phy_addr + j);
3445
3446 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
3447 NVRAM_CMD_WR;
3448
3449 if (j == 0)
3450 nvram_cmd |= NVRAM_CMD_FIRST;
3451 else if (j == (pagesize - 4))
3452 nvram_cmd |= NVRAM_CMD_LAST;
3453
3454 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3455 if (ret)
3456 break;
3457 }
3458 if (ret)
3459 break;
3460 }
3461
3462 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3463 tg3_nvram_exec_cmd(tp, nvram_cmd);
3464
3465 kfree(tmp);
3466
3467 return ret;
3468 }
3469
3470 /* offset and length are dword aligned */
3471 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
3472 u8 *buf)
3473 {
3474 int i, ret = 0;
3475
3476 for (i = 0; i < len; i += 4, offset += 4) {
3477 u32 page_off, phy_addr, nvram_cmd;
3478 __be32 data;
3479
3480 memcpy(&data, buf + i, 4);
3481 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3482
3483 page_off = offset % tp->nvram_pagesize;
3484
3485 phy_addr = tg3_nvram_phys_addr(tp, offset);
3486
3487 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
3488
3489 if (page_off == 0 || i == 0)
3490 nvram_cmd |= NVRAM_CMD_FIRST;
3491 if (page_off == (tp->nvram_pagesize - 4))
3492 nvram_cmd |= NVRAM_CMD_LAST;
3493
3494 if (i == (len - 4))
3495 nvram_cmd |= NVRAM_CMD_LAST;
3496
3497 if ((nvram_cmd & NVRAM_CMD_FIRST) ||
3498 !tg3_flag(tp, FLASH) ||
3499 !tg3_flag(tp, 57765_PLUS))
3500 tw32(NVRAM_ADDR, phy_addr);
3501
3502 if (tg3_asic_rev(tp) != ASIC_REV_5752 &&
3503 !tg3_flag(tp, 5755_PLUS) &&
3504 (tp->nvram_jedecnum == JEDEC_ST) &&
3505 (nvram_cmd & NVRAM_CMD_FIRST)) {
3506 u32 cmd;
3507
3508 cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3509 ret = tg3_nvram_exec_cmd(tp, cmd);
3510 if (ret)
3511 break;
3512 }
3513 if (!tg3_flag(tp, FLASH)) {
3514 /* We always do complete word writes to eeprom. */
3515 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
3516 }
3517
3518 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3519 if (ret)
3520 break;
3521 }
3522 return ret;
3523 }
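/* Sketch of the FIRST/LAST bracketing above, assuming a 256-byte flash
 * page: writing len = 8 at offset 252 issues two one-word commands.
 * Word 0 (page_off = 252 = pagesize - 4) is flagged FIRST (i == 0) and
 * LAST (it ends its page); word 1 (page_off = 0) is flagged FIRST (new
 * page) and LAST (i == len - 4). The burst is thus split cleanly at the
 * page boundary, and NVRAM_ADDR is reloaded for each FIRST word.
 */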
3524
3525 /* offset and length are dword aligned */
3526 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
3527 {
3528 int ret;
3529
3530 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3531 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
3532 ~GRC_LCLCTRL_GPIO_OUTPUT1);
3533 udelay(40);
3534 }
3535
3536 if (!tg3_flag(tp, NVRAM)) {
3537 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
3538 } else {
3539 u32 grc_mode;
3540
3541 ret = tg3_nvram_lock(tp);
3542 if (ret)
3543 return ret;
3544
3545 tg3_enable_nvram_access(tp);
3546 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
3547 tw32(NVRAM_WRITE1, 0x406);
3548
3549 grc_mode = tr32(GRC_MODE);
3550 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
3551
3552 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
3553 ret = tg3_nvram_write_block_buffered(tp, offset, len,
3554 buf);
3555 } else {
3556 ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
3557 buf);
3558 }
3559
3560 grc_mode = tr32(GRC_MODE);
3561 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
3562
3563 tg3_disable_nvram_access(tp);
3564 tg3_nvram_unlock(tp);
3565 }
3566
3567 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3568 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
3569 udelay(40);
3570 }
3571
3572 return ret;
3573 }
3574
3575 #define RX_CPU_SCRATCH_BASE 0x30000
3576 #define RX_CPU_SCRATCH_SIZE 0x04000
3577 #define TX_CPU_SCRATCH_BASE 0x34000
3578 #define TX_CPU_SCRATCH_SIZE 0x04000
3579
3580 /* tp->lock is held. */
3581 static int tg3_pause_cpu(struct tg3 *tp, u32 cpu_base)
3582 {
3583 int i;
3584 const int iters = 10000;
3585
3586 for (i = 0; i < iters; i++) {
3587 tw32(cpu_base + CPU_STATE, 0xffffffff);
3588 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
3589 if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT)
3590 break;
3591 if (pci_channel_offline(tp->pdev))
3592 return -EBUSY;
3593 }
3594
3595 return (i == iters) ? -EBUSY : 0;
3596 }
3597
3598 /* tp->lock is held. */
3599 static int tg3_rxcpu_pause(struct tg3 *tp)
3600 {
3601 int rc = tg3_pause_cpu(tp, RX_CPU_BASE);
3602
3603 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3604 tw32_f(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
3605 udelay(10);
3606
3607 return rc;
3608 }
3609
3610 /* tp->lock is held. */
3611 static int tg3_txcpu_pause(struct tg3 *tp)
3612 {
3613 return tg3_pause_cpu(tp, TX_CPU_BASE);
3614 }
3615
3616 /* tp->lock is held. */
3617 static void tg3_resume_cpu(struct tg3 *tp, u32 cpu_base)
3618 {
3619 tw32(cpu_base + CPU_STATE, 0xffffffff);
3620 tw32_f(cpu_base + CPU_MODE, 0x00000000);
3621 }
3622
3623 /* tp->lock is held. */
3624 static void tg3_rxcpu_resume(struct tg3 *tp)
3625 {
3626 tg3_resume_cpu(tp, RX_CPU_BASE);
3627 }
3628
3629 /* tp->lock is held. */
3630 static int tg3_halt_cpu(struct tg3 *tp, u32 cpu_base)
3631 {
3632 int rc;
3633
3634 BUG_ON(cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
3635
3636 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3637 u32 val = tr32(GRC_VCPU_EXT_CTRL);
3638
3639 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
3640 return 0;
3641 }
3642 if (cpu_base == RX_CPU_BASE) {
3643 rc = tg3_rxcpu_pause(tp);
3644 } else {
3645 /*
3646 * There is only an Rx CPU for the 5750 derivative in the
3647 * BCM4785.
3648 */
3649 if (tg3_flag(tp, IS_SSB_CORE))
3650 return 0;
3651
3652 rc = tg3_txcpu_pause(tp);
3653 }
3654
3655 if (rc) {
3656 netdev_err(tp->dev, "%s timed out, %s CPU\n",
3657 __func__, cpu_base == RX_CPU_BASE ? "RX" : "TX");
3658 return -ENODEV;
3659 }
3660
3661 /* Clear firmware's nvram arbitration. */
3662 if (tg3_flag(tp, NVRAM))
3663 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
3664 return 0;
3665 }
3666
3667 static int tg3_fw_data_len(struct tg3 *tp,
3668 const struct tg3_firmware_hdr *fw_hdr)
3669 {
3670 int fw_len;
3671
3672 /* Non fragmented firmware have one firmware header followed by a
3673 * contiguous chunk of data to be written. The length field in that
3674 * header is not the length of data to be written but the complete
3675 * length of the bss. The data length is determined based on
3676 * tp->fw->size minus headers.
3677 *
3678 * Fragmented firmware have a main header followed by multiple
3679 * fragments. Each fragment is identical to non fragmented firmware
3680 * with a firmware header followed by a contiguous chunk of data. In
3681 * the main header, the length field is unused and set to 0xffffffff.
3682 * In each fragment header the length is the entire size of that
3683 * fragment i.e. fragment data + header length. Data length is
3684 * therefore length field in the header minus TG3_FW_HDR_LEN.
3685 */
3686 if (tp->fw_len == 0xffffffff)
3687 fw_len = be32_to_cpu(fw_hdr->len);
3688 else
3689 fw_len = tp->fw->size;
3690
3691 return (fw_len - TG3_FW_HDR_LEN) / sizeof(u32);
3692 }
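/* Illustrative numbers for the two layouts described above: a
 * non-fragmented blob with tp->fw->size = 2060 bytes and a 12-byte
 * TG3_FW_HDR_LEN yields (2060 - 12) / 4 = 512 data words, while a
 * fragment whose header reports len = 0x40c yields
 * (0x40c - 12) / 4 = 256 words of fragment data.
 */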
3693
3694 /* tp->lock is held. */
3695 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3696 u32 cpu_scratch_base, int cpu_scratch_size,
3697 const struct tg3_firmware_hdr *fw_hdr)
3698 {
3699 int err, i;
3700 void (*write_op)(struct tg3 *, u32, u32);
3701 int total_len = tp->fw->size;
3702
3703 if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
3704 netdev_err(tp->dev,
3705 "%s: Trying to load TX cpu firmware which is 5705\n",
3706 __func__);
3707 return -EINVAL;
3708 }
3709
3710 if (tg3_flag(tp, 5705_PLUS) && tg3_asic_rev(tp) != ASIC_REV_57766)
3711 write_op = tg3_write_mem;
3712 else
3713 write_op = tg3_write_indirect_reg32;
3714
3715 if (tg3_asic_rev(tp) != ASIC_REV_57766) {
3716 /* It is possible that bootcode is still loading at this point.
3717 * Get the nvram lock first before halting the cpu.
3718 */
3719 int lock_err = tg3_nvram_lock(tp);
3720 err = tg3_halt_cpu(tp, cpu_base);
3721 if (!lock_err)
3722 tg3_nvram_unlock(tp);
3723 if (err)
3724 goto out;
3725
3726 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3727 write_op(tp, cpu_scratch_base + i, 0);
3728 tw32(cpu_base + CPU_STATE, 0xffffffff);
3729 tw32(cpu_base + CPU_MODE,
3730 tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
3731 } else {
3732 /* Subtract additional main header for fragmented firmware and
3733 * advance to the first fragment
3734 */
3735 total_len -= TG3_FW_HDR_LEN;
3736 fw_hdr++;
3737 }
3738
3739 do {
3740 u32 *fw_data = (u32 *)(fw_hdr + 1);
3741 for (i = 0; i < tg3_fw_data_len(tp, fw_hdr); i++)
3742 write_op(tp, cpu_scratch_base +
3743 (be32_to_cpu(fw_hdr->base_addr) & 0xffff) +
3744 (i * sizeof(u32)),
3745 be32_to_cpu(fw_data[i]));
3746
3747 total_len -= be32_to_cpu(fw_hdr->len);
3748
3749 /* Advance to next fragment */
3750 fw_hdr = (struct tg3_firmware_hdr *)
3751 ((void *)fw_hdr + be32_to_cpu(fw_hdr->len));
3752 } while (total_len > 0);
3753
3754 err = 0;
3755
3756 out:
3757 return err;
3758 }
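/* A minimal sketch (not called anywhere) of the fragment walk performed
 * by the loop above: count the fragments of a fragmented blob. It
 * assumes the three-word tg3_firmware_hdr layout from tg3.h and is for
 * illustration only, not part of the driver.
 */
static inline int tg3_fw_count_fragments(const struct tg3_firmware_hdr *fw_hdr,
					 int total_len)
{
	int nfrags = 0;

	/* Skip the main header; its len field is 0xffffffff and unused. */
	total_len -= TG3_FW_HDR_LEN;
	fw_hdr++;

	while (total_len > 0) {
		/* Each fragment's len covers its header plus its data. */
		total_len -= be32_to_cpu(fw_hdr->len);
		fw_hdr = (const struct tg3_firmware_hdr *)
			 ((const void *)fw_hdr + be32_to_cpu(fw_hdr->len));
		nfrags++;
	}

	return nfrags;
}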
3759
3760 /* tp->lock is held. */
3761 static int tg3_pause_cpu_and_set_pc(struct tg3 *tp, u32 cpu_base, u32 pc)
3762 {
3763 int i;
3764 const int iters = 5;
3765
3766 tw32(cpu_base + CPU_STATE, 0xffffffff);
3767 tw32_f(cpu_base + CPU_PC, pc);
3768
3769 for (i = 0; i < iters; i++) {
3770 if (tr32(cpu_base + CPU_PC) == pc)
3771 break;
3772 tw32(cpu_base + CPU_STATE, 0xffffffff);
3773 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
3774 tw32_f(cpu_base + CPU_PC, pc);
3775 udelay(1000);
3776 }
3777
3778 return (i == iters) ? -EBUSY : 0;
3779 }
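/* Typical usage, as in the 5701 A0 fix below: point the halted CPU at
 * the firmware entry point, then let it run:
 *
 *	err = tg3_pause_cpu_and_set_pc(tp, RX_CPU_BASE,
 *				       be32_to_cpu(fw_hdr->base_addr));
 *	if (!err)
 *		tg3_rxcpu_resume(tp);
 */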
3780
3781 /* tp->lock is held. */
3782 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3783 {
3784 const struct tg3_firmware_hdr *fw_hdr;
3785 int err;
3786
3787 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3788
3789 /* The firmware blob starts with version numbers, followed by
3790 start address and length. The length field holds the complete
3791 length: length = end_address_of_bss - start_address_of_text.
3792 The remainder is the blob to be loaded contiguously
3793 from the start address. */
3794
3795 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3796 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3797 fw_hdr);
3798 if (err)
3799 return err;
3800
3801 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3802 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3803 fw_hdr);
3804 if (err)
3805 return err;
3806
3807 /* Now startup only the RX cpu. */
3808 err = tg3_pause_cpu_and_set_pc(tp, RX_CPU_BASE,
3809 be32_to_cpu(fw_hdr->base_addr));
3810 if (err) {
3811 netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
3812 "should be %08x\n", __func__,
3813 tr32(RX_CPU_BASE + CPU_PC),
3814 be32_to_cpu(fw_hdr->base_addr));
3815 return -ENODEV;
3816 }
3817
3818 tg3_rxcpu_resume(tp);
3819
3820 return 0;
3821 }
3822
3823 static int tg3_validate_rxcpu_state(struct tg3 *tp)
3824 {
3825 const int iters = 1000;
3826 int i;
3827 u32 val;
3828
3829 /* Wait for the boot code to complete initialization and enter the
3830 * service loop. It is then safe to download service patches.
3831 */
3832 for (i = 0; i < iters; i++) {
3833 if (tr32(RX_CPU_HWBKPT) == TG3_SBROM_IN_SERVICE_LOOP)
3834 break;
3835
3836 udelay(10);
3837 }
3838
3839 if (i == iters) {
3840 netdev_err(tp->dev, "Boot code not ready for service patches\n");
3841 return -EBUSY;
3842 }
3843
3844 val = tg3_read_indirect_reg32(tp, TG3_57766_FW_HANDSHAKE);
3845 if (val & 0xff) {
3846 netdev_warn(tp->dev,
3847 "Other patches exist. Not downloading EEE patch\n");
3848 return -EEXIST;
3849 }
3850
3851 return 0;
3852 }
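/* The poll above bounds the wait at iters * udelay(10), i.e.
 * 1000 * 10 us = 10 ms, before giving up with -EBUSY.
 */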
3853
3854 /* tp->lock is held. */
3855 static void tg3_load_57766_firmware(struct tg3 *tp)
3856 {
3857 struct tg3_firmware_hdr *fw_hdr;
3858
3859 if (!tg3_flag(tp, NO_NVRAM))
3860 return;
3861
3862 if (tg3_validate_rxcpu_state(tp))
3863 return;
3864
3865 if (!tp->fw)
3866 return;
3867
3868 /* This firmware blob has a different format than older firmware
3869 * releases, as described below. The main difference is that the
3870 * data is fragmented and written to non-contiguous locations.
3871 *
3872 * In the beginning we have a firmware header identical to other
3873 * firmware, consisting of version, base addr and length. The length
3874 * here is unused and set to 0xffffffff.
3875 *
3876 * This is followed by a series of firmware fragments, each of which
3877 * is individually identical to the previous firmware format, i.e. a
3878 * firmware header followed by the data for that fragment. The
3879 * version field of the individual fragment header is unused.
3880 */
3881
3882 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3883 if (be32_to_cpu(fw_hdr->base_addr) != TG3_57766_FW_BASE_ADDR)
3884 return;
3885
3886 if (tg3_rxcpu_pause(tp))
3887 return;
3888
3889 /* tg3_load_firmware_cpu() will always succeed for the 57766 */
3890 tg3_load_firmware_cpu(tp, 0, TG3_57766_FW_BASE_ADDR, 0, fw_hdr);
3891
3892 tg3_rxcpu_resume(tp);
3893 }
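/* Illustrative layout of the 57766 service-patch blob described above
 * (field widths not to scale):
 *
 *	+----------------------------------------+
 *	| main header:  version                  |
 *	|               base_addr                |
 *	|               len = 0xffffffff (unused)|
 *	+----------------------------------------+
 *	| frag header:  version (unused)         |
 *	|               base_addr                |
 *	|               len = header + data      |
 *	+----------------------------------------+
 *	| fragment data ...                      |
 *	+----------------------------------------+
 *	| next fragment header/data, repeating   |
 *	| until tp->fw->size is consumed         |
 *	+----------------------------------------+
 */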
3894
3895 /* tp->lock is held. */
3896 static int tg3_load_tso_firmware(struct tg3 *tp)
3897 {
3898 const struct tg3_firmware_hdr *fw_hdr;
3899 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
3900 int err;
3901
3902 if (!tg3_flag(tp, FW_TSO))
3903 return 0;
3904
3905 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3906
3907 /* The firmware blob starts with version numbers, followed by
3908 start address and length. The length field holds the complete
3909 length: length = end_address_of_bss - start_address_of_text.
3910 The remainder is the blob to be loaded contiguously
3911 from the start address. */
3912
3913 cpu_scratch_size = tp->fw_len;
3914
3915 if (tg3_asic_rev(tp) == ASIC_REV_5705) {
3916 cpu_base = RX_CPU_BASE;
3917 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
3918 } else {
3919 cpu_base = TX_CPU_BASE;
3920 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
3921 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
3922 }
3923
3924 err = tg3_load_firmware_cpu(tp, cpu_base,
3925 cpu_scratch_base, cpu_scratch_size,
3926 fw_hdr);
3927 if (err)
3928 return err;
3929
3930 /* Now startup the cpu. */
3931 err = tg3_pause_cpu_and_set_pc(tp, cpu_base,
3932 be32_to_cpu(fw_hdr->base_addr));
3933 if (err) {
3934 netdev_err(tp->dev,
3935 "%s fails to set CPU PC, is %08x should be %08x\n",
3936 __func__, tr32(cpu_base + CPU_PC),
3937 be32_to_cpu(fw_hdr->base_addr));
3938 return -ENODEV;
3939 }
3940
3941 tg3_resume_cpu(tp, cpu_base);
3942 return 0;
3943 }
3944
3945 /* tp->lock is held. */
3946 static void __tg3_set_one_mac_addr(struct tg3 *tp, const u8 *mac_addr,
3947 int index)
3948 {
3949 u32 addr_high, addr_low;
3950
3951 addr_high = ((mac_addr[0] << 8) | mac_addr[1]);
3952 addr_low = ((mac_addr[2] << 24) | (mac_addr[3] << 16) |
3953 (mac_addr[4] << 8) | mac_addr[5]);
3954
3955 if (index < 4) {
3956 tw32(MAC_ADDR_0_HIGH + (index * 8), addr_high);
3957 tw32(MAC_ADDR_0_LOW + (index * 8), addr_low);
3958 } else {
3959 index -= 4;
3960 tw32(MAC_EXTADDR_0_HIGH + (index * 8), addr_high);
3961 tw32(MAC_EXTADDR_0_LOW + (index * 8), addr_low);
3962 }
3963 }
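/* Example of the register packing above: for the (made-up) address
 * 00:10:18:aa:bb:cc at index 0,
 *
 *	addr_high = (0x00 << 8) | 0x10                          = 0x00000010
 *	addr_low  = (0x18 << 24) | (0xaa << 16) | (0xbb << 8) | 0xcc
 *	                                                        = 0x18aabbcc
 *
 * written to MAC_ADDR_0_HIGH and MAC_ADDR_0_LOW respectively.
 */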
3964
3965 /* tp->lock is held. */
3966 static void __tg3_set_mac_addr(struct tg3 *tp, bool skip_mac_1)
3967 {
3968 u32 addr_high;
3969 int i;
3970
3971 for (i = 0; i < 4; i++) {
3972 if (i == 1 && skip_mac_1)
3973 continue;
3974 __tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
3975 }
3976
3977 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
3978 tg3_asic_rev(tp) == ASIC_REV_5704) {
3979 for (i = 4; i < 16; i++)
3980 __tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
3981 }
3982
3983 addr_high = (tp->dev->dev_addr[0] +
3984 tp->dev->dev_addr[1] +
3985 tp->dev->dev_addr[2] +
3986 tp->dev->dev_addr[3] +
3987 tp->dev->dev_addr[4] +
3988 tp->dev->dev_addr[5]) &
3989 TX_BACKOFF_SEED_MASK;
3990 tw32(MAC_TX_BACKOFF_SEED, addr_high);
3991 }
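/* The backoff seed above is just the byte-wise sum of the station
 * address, masked with TX_BACKOFF_SEED_MASK. For the (made-up) address
 * 00:10:18:aa:bb:cc the sum is 0x259, which is then masked and written
 * to MAC_TX_BACKOFF_SEED.
 */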
3992
3993 static void tg3_enable_register_access(struct tg3 *tp)
3994 {
3995 /*
3996 * Make sure register accesses (indirect or otherwise) will function
3997 * correctly.
3998 */
3999 pci_write_config_dword(tp->pdev,
4000 TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
4001 }
4002
4003 static int tg3_power_up(struct tg3 *tp)
4004 {
4005 int err;
4006
4007 tg3_enable_register_access(tp);
4008
4009 err = pci_set_power_state(tp->pdev, PCI_D0);
4010 if (!err) {
4011 /* Switch out of Vaux if it is a NIC */
4012 tg3_pwrsrc_switch_to_vmain(tp);
4013 } else {
4014 netdev_err(tp->dev, "Transition to D0 failed\n");
4015 }
4016
4017 return err;
4018 }
4019
4020 static int tg3_setup_phy(struct tg3 *, bool);
4021
4022 static int tg3_power_down_prepare(struct tg3 *tp)
4023 {
4024 u32 misc_host_ctrl;
4025 bool device_should_wake, do_low_power;
4026
4027 tg3_enable_register_access(tp);
4028
4029 /* Restore the CLKREQ setting. */
4030 if (tg3_flag(tp, CLKREQ_BUG))
4031 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
4032 PCI_EXP_LNKCTL_CLKREQ_EN);
4033
4034 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
4035 tw32(TG3PCI_MISC_HOST_CTRL,
4036 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
4037
4038 device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
4039 tg3_flag(tp, WOL_ENABLE);
4040
4041 if (tg3_flag(tp, USE_PHYLIB)) {
4042 do_low_power = false;
4043 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
4044 !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4045 __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising) = { 0, };
4046 struct phy_device *phydev;
4047 u32 phyid;
4048
4049 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
4050
4051 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4052
4053 tp->link_config.speed = phydev->speed;
4054 tp->link_config.duplex = phydev->duplex;
4055 tp->link_config.autoneg = phydev->autoneg;
4056 ethtool_convert_link_mode_to_legacy_u32(
4057 &tp->link_config.advertising,
4058 phydev->advertising);
4059
4060 linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, advertising);
4061 linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT,
4062 advertising);
4063 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
4064 advertising);
4065 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT,
4066 advertising);
4067
4068 if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
4069 if (tg3_flag(tp, WOL_SPEED_100MB)) {
4070 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
4071 advertising);
4072 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
4073 advertising);
4074 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT,
4075 advertising);
4076 } else {
4077 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT,
4078 advertising);
4079 }
4080 }
4081
4082 linkmode_copy(phydev->advertising, advertising);
4083 phy_start_aneg(phydev);
4084
4085 phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
4086 if (phyid != PHY_ID_BCMAC131) {
4087 phyid &= PHY_BCM_OUI_MASK;
4088 if (phyid == PHY_BCM_OUI_1 ||
4089 phyid == PHY_BCM_OUI_2 ||
4090 phyid == PHY_BCM_OUI_3)
4091 do_low_power = true;
4092 }
4093 }
4094 } else {
4095 do_low_power = true;
4096
4097 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
4098 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4099
4100 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
4101 tg3_setup_phy(tp, false);
4102 }
4103
4104 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
4105 u32 val;
4106
4107 val = tr32(GRC_VCPU_EXT_CTRL);
4108 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
4109 } else if (!tg3_flag(tp, ENABLE_ASF)) {
4110 int i;
4111 u32 val;
4112
4113 for (i = 0; i < 200; i++) {
4114 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
4115 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4116 break;
4117 msleep(1);
4118 }
4119 }
4120 if (tg3_flag(tp, WOL_CAP))
4121 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
4122 WOL_DRV_STATE_SHUTDOWN |
4123 WOL_DRV_WOL |
4124 WOL_SET_MAGIC_PKT);
4125
4126 if (device_should_wake) {
4127 u32 mac_mode;
4128
4129 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
4130 if (do_low_power &&
4131 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
4132 tg3_phy_auxctl_write(tp,
4133 MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
4134 MII_TG3_AUXCTL_PCTL_WOL_EN |
4135 MII_TG3_AUXCTL_PCTL_100TX_LPWR |
4136 MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
4137 udelay(40);
4138 }
4139
4140 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4141 mac_mode = MAC_MODE_PORT_MODE_GMII;
4142 else if (tp->phy_flags &
4143 TG3_PHYFLG_KEEP_LINK_ON_PWRDN) {
4144 if (tp->link_config.active_speed == SPEED_1000)
4145 mac_mode = MAC_MODE_PORT_MODE_GMII;
4146 else
4147 mac_mode = MAC_MODE_PORT_MODE_MII;
4148 } else
4149 mac_mode = MAC_MODE_PORT_MODE_MII;
4150
4151 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
4152 if (tg3_asic_rev(tp) == ASIC_REV_5700) {
4153 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
4154 SPEED_100 : SPEED_10;
4155 if (tg3_5700_link_polarity(tp, speed))
4156 mac_mode |= MAC_MODE_LINK_POLARITY;
4157 else
4158 mac_mode &= ~MAC_MODE_LINK_POLARITY;
4159 }
4160 } else {
4161 mac_mode = MAC_MODE_PORT_MODE_TBI;
4162 }
4163
4164 if (!tg3_flag(tp, 5750_PLUS))
4165 tw32(MAC_LED_CTRL, tp->led_ctrl);
4166
4167 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
4168 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
4169 (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
4170 mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
4171
4172 if (tg3_flag(tp, ENABLE_APE))
4173 mac_mode |= MAC_MODE_APE_TX_EN |
4174 MAC_MODE_APE_RX_EN |
4175 MAC_MODE_TDE_ENABLE;
4176
4177 tw32_f(MAC_MODE, mac_mode);
4178 udelay(100);
4179
4180 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
4181 udelay(10);
4182 }
4183
4184 if (!tg3_flag(tp, WOL_SPEED_100MB) &&
4185 (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4186 tg3_asic_rev(tp) == ASIC_REV_5701)) {
4187 u32 base_val;
4188
4189 base_val = tp->pci_clock_ctrl;
4190 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
4191 CLOCK_CTRL_TXCLK_DISABLE);
4192
4193 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
4194 CLOCK_CTRL_PWRDOWN_PLL133, 40);
4195 } else if (tg3_flag(tp, 5780_CLASS) ||
4196 tg3_flag(tp, CPMU_PRESENT) ||
4197 tg3_asic_rev(tp) == ASIC_REV_5906) {
4198 /* do nothing */
4199 } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
4200 u32 newbits1, newbits2;
4201
4202 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4203 tg3_asic_rev(tp) == ASIC_REV_5701) {
4204 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
4205 CLOCK_CTRL_TXCLK_DISABLE |
4206 CLOCK_CTRL_ALTCLK);
4207 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4208 } else if (tg3_flag(tp, 5705_PLUS)) {
4209 newbits1 = CLOCK_CTRL_625_CORE;
4210 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
4211 } else {
4212 newbits1 = CLOCK_CTRL_ALTCLK;
4213 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4214 }
4215
4216 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
4217 40);
4218
4219 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
4220 40);
4221
4222 if (!tg3_flag(tp, 5705_PLUS)) {
4223 u32 newbits3;
4224
4225 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4226 tg3_asic_rev(tp) == ASIC_REV_5701) {
4227 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
4228 CLOCK_CTRL_TXCLK_DISABLE |
4229 CLOCK_CTRL_44MHZ_CORE);
4230 } else {
4231 newbits3 = CLOCK_CTRL_44MHZ_CORE;
4232 }
4233
4234 tw32_wait_f(TG3PCI_CLOCK_CTRL,
4235 tp->pci_clock_ctrl | newbits3, 40);
4236 }
4237 }
4238
4239 if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
4240 tg3_power_down_phy(tp, do_low_power);
4241
4242 tg3_frob_aux_power(tp, true);
4243
4244 /* Workaround for unstable PLL clock */
4245 if ((!tg3_flag(tp, IS_SSB_CORE)) &&
4246 ((tg3_chip_rev(tp) == CHIPREV_5750_AX) ||
4247 (tg3_chip_rev(tp) == CHIPREV_5750_BX))) {
4248 u32 val = tr32(0x7d00);
4249
4250 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
4251 tw32(0x7d00, val);
4252 if (!tg3_flag(tp, ENABLE_ASF)) {
4253 int err;
4254
4255 err = tg3_nvram_lock(tp);
4256 tg3_halt_cpu(tp, RX_CPU_BASE);
4257 if (!err)
4258 tg3_nvram_unlock(tp);
4259 }
4260 }
4261
4262 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
4263
4264 tg3_ape_driver_state_change(tp, RESET_KIND_SHUTDOWN);
4265
4266 return 0;
4267 }
4268
4269 static void tg3_power_down(struct tg3 *tp)
4270 {
4271 pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
4272 pci_set_power_state(tp->pdev, PCI_D3hot);
4273 }
4274
4275 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u32 *speed, u8 *duplex)
4276 {
4277 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
4278 case MII_TG3_AUX_STAT_10HALF:
4279 *speed = SPEED_10;
4280 *duplex = DUPLEX_HALF;
4281 break;
4282
4283 case MII_TG3_AUX_STAT_10FULL:
4284 *speed = SPEED_10;
4285 *duplex = DUPLEX_FULL;
4286 break;
4287
4288 case MII_TG3_AUX_STAT_100HALF:
4289 *speed = SPEED_100;
4290 *duplex = DUPLEX_HALF;
4291 break;
4292
4293 case MII_TG3_AUX_STAT_100FULL:
4294 *speed = SPEED_100;
4295 *duplex = DUPLEX_FULL;
4296 break;
4297
4298 case MII_TG3_AUX_STAT_1000HALF:
4299 *speed = SPEED_1000;
4300 *duplex = DUPLEX_HALF;
4301 break;
4302
4303 case MII_TG3_AUX_STAT_1000FULL:
4304 *speed = SPEED_1000;
4305 *duplex = DUPLEX_FULL;
4306 break;
4307
4308 default:
4309 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4310 *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
4311 SPEED_10;
4312 *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
4313 DUPLEX_HALF;
4314 break;
4315 }
4316 *speed = SPEED_UNKNOWN;
4317 *duplex = DUPLEX_UNKNOWN;
4318 break;
4319 }
4320 }
4321
4322 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
4323 {
4324 int err = 0;
4325 u32 val, new_adv;
4326
4327 new_adv = ADVERTISE_CSMA;
4328 new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
4329 new_adv |= mii_advertise_flowctrl(flowctrl);
4330
4331 err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
4332 if (err)
4333 goto done;
4334
4335 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4336 new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
4337
4338 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4339 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)
4340 new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4341
4342 err = tg3_writephy(tp, MII_CTRL1000, new_adv);
4343 if (err)
4344 goto done;
4345 }
4346
4347 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4348 goto done;
4349
4350 tw32(TG3_CPMU_EEE_MODE,
4351 tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
4352
4353 err = tg3_phy_toggle_auxctl_smdsp(tp, true);
4354 if (!err) {
4355 u32 err2;
4356
4357 val = 0;
4358 /* Advertise 100-BaseTX EEE ability */
4359 if (advertise & ADVERTISED_100baseT_Full)
4360 val |= MDIO_AN_EEE_ADV_100TX;
4361 /* Advertise 1000-BaseT EEE ability */
4362 if (advertise & ADVERTISED_1000baseT_Full)
4363 val |= MDIO_AN_EEE_ADV_1000T;
4364
4365 if (!tp->eee.eee_enabled) {
4366 val = 0;
4367 tp->eee.advertised = 0;
4368 } else {
4369 tp->eee.advertised = advertise &
4370 (ADVERTISED_100baseT_Full |
4371 ADVERTISED_1000baseT_Full);
4372 }
4373
4374 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
4375 if (err)
4376 val = 0;
4377
4378 switch (tg3_asic_rev(tp)) {
4379 case ASIC_REV_5717:
4380 case ASIC_REV_57765:
4381 case ASIC_REV_57766:
4382 case ASIC_REV_5719:
4383 /* If we advertised any EEE abilities above... */
4384 if (val)
4385 val = MII_TG3_DSP_TAP26_ALNOKO |
4386 MII_TG3_DSP_TAP26_RMRXSTO |
4387 MII_TG3_DSP_TAP26_OPCSINPT;
4388 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
4389 fallthrough;
4390 case ASIC_REV_5720:
4391 case ASIC_REV_5762:
4392 if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
4393 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
4394 MII_TG3_DSP_CH34TP2_HIBW01);
4395 }
4396
4397 err2 = tg3_phy_toggle_auxctl_smdsp(tp, false);
4398 if (!err)
4399 err = err2;
4400 }
4401
4402 done:
4403 return err;
4404 }
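/* Example of the advertisement encoding above, assuming a caller asks
 * for 100/full + 1000/full with symmetric flow control:
 *
 *	advertise = ADVERTISED_100baseT_Full | ADVERTISED_1000baseT_Full;
 *	flowctrl  = FLOW_CTRL_TX | FLOW_CTRL_RX;
 *
 * MII_ADVERTISE then carries ADVERTISE_CSMA | ADVERTISE_100FULL plus
 * the pause bits from mii_advertise_flowctrl(), while MII_CTRL1000
 * carries ADVERTISE_1000FULL; the 1000BASE-T bits never fit in
 * MII_ADVERTISE and live in MII_CTRL1000 instead.
 */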
4405
4406 static void tg3_phy_copper_begin(struct tg3 *tp)
4407 {
4408 if (tp->link_config.autoneg == AUTONEG_ENABLE ||
4409 (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4410 u32 adv, fc;
4411
4412 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4413 !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4414 adv = ADVERTISED_10baseT_Half |
4415 ADVERTISED_10baseT_Full;
4416 if (tg3_flag(tp, WOL_SPEED_100MB))
4417 adv |= ADVERTISED_100baseT_Half |
4418 ADVERTISED_100baseT_Full;
4419 if (tp->phy_flags & TG3_PHYFLG_1G_ON_VAUX_OK) {
4420 if (!(tp->phy_flags &
4421 TG3_PHYFLG_DISABLE_1G_HD_ADV))
4422 adv |= ADVERTISED_1000baseT_Half;
4423 adv |= ADVERTISED_1000baseT_Full;
4424 }
4425
4426 fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
4427 } else {
4428 adv = tp->link_config.advertising;
4429 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
4430 adv &= ~(ADVERTISED_1000baseT_Half |
4431 ADVERTISED_1000baseT_Full);
4432
4433 fc = tp->link_config.flowctrl;
4434 }
4435
4436 tg3_phy_autoneg_cfg(tp, adv, fc);
4437
4438 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4439 (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4440 /* Normally during power down we want to autonegotiate
4441 * the lowest possible speed for WOL. However, to avoid
4442 * link flap, we leave it untouched.
4443 */
4444 return;
4445 }
4446
4447 tg3_writephy(tp, MII_BMCR,
4448 BMCR_ANENABLE | BMCR_ANRESTART);
4449 } else {
4450 int i;
4451 u32 bmcr, orig_bmcr;
4452
4453 tp->link_config.active_speed = tp->link_config.speed;
4454 tp->link_config.active_duplex = tp->link_config.duplex;
4455
4456 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
4457 /* With autoneg disabled, 5715 only links up when the
4458 * advertisement register has the configured speed
4459 * enabled.
4460 */
4461 tg3_writephy(tp, MII_ADVERTISE, ADVERTISE_ALL);
4462 }
4463
4464 bmcr = 0;
4465 switch (tp->link_config.speed) {
4466 default:
4467 case SPEED_10:
4468 break;
4469
4470 case SPEED_100:
4471 bmcr |= BMCR_SPEED100;
4472 break;
4473
4474 case SPEED_1000:
4475 bmcr |= BMCR_SPEED1000;
4476 break;
4477 }
4478
4479 if (tp->link_config.duplex == DUPLEX_FULL)
4480 bmcr |= BMCR_FULLDPLX;
4481
4482 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
4483 (bmcr != orig_bmcr)) {
4484 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
4485 for (i = 0; i < 1500; i++) {
4486 u32 tmp;
4487
4488 udelay(10);
4489 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
4490 tg3_readphy(tp, MII_BMSR, &tmp))
4491 continue;
4492 if (!(tmp & BMSR_LSTATUS)) {
4493 udelay(40);
4494 break;
4495 }
4496 }
4497 tg3_writephy(tp, MII_BMCR, bmcr);
4498 udelay(40);
4499 }
4500 }
4501 }
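/* In the forced-speed path above, BMCR is composed directly; e.g. a
 * requested 1000/full configuration yields
 *
 *	bmcr = BMCR_SPEED1000 | BMCR_FULLDPLX;
 *
 * while 10/half leaves bmcr == 0, since 10 Mbps half duplex is the
 * all-bits-clear default of the basic mode control register.
 */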
4502
4503 static int tg3_phy_pull_config(struct tg3 *tp)
4504 {
4505 int err;
4506 u32 val;
4507
4508 err = tg3_readphy(tp, MII_BMCR, &val);
4509 if (err)
4510 goto done;
4511
4512 if (!(val & BMCR_ANENABLE)) {
4513 tp->link_config.autoneg = AUTONEG_DISABLE;
4514 tp->link_config.advertising = 0;
4515 tg3_flag_clear(tp, PAUSE_AUTONEG);
4516
4517 err = -EIO;
4518
4519 switch (val & (BMCR_SPEED1000 | BMCR_SPEED100)) {
4520 case 0:
4521 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4522 goto done;
4523
4524 tp->link_config.speed = SPEED_10;
4525 break;
4526 case BMCR_SPEED100:
4527 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4528 goto done;
4529
4530 tp->link_config.speed = SPEED_100;
4531 break;
4532 case BMCR_SPEED1000:
4533 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4534 tp->link_config.speed = SPEED_1000;
4535 break;
4536 }
4537 fallthrough;
4538 default:
4539 goto done;
4540 }
4541
4542 if (val & BMCR_FULLDPLX)
4543 tp->link_config.duplex = DUPLEX_FULL;
4544 else
4545 tp->link_config.duplex = DUPLEX_HALF;
4546
4547 tp->link_config.flowctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
4548
4549 err = 0;
4550 goto done;
4551 }
4552
4553 tp->link_config.autoneg = AUTONEG_ENABLE;
4554 tp->link_config.advertising = ADVERTISED_Autoneg;
4555 tg3_flag_set(tp, PAUSE_AUTONEG);
4556
4557 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4558 u32 adv;
4559
4560 err = tg3_readphy(tp, MII_ADVERTISE, &val);
4561 if (err)
4562 goto done;
4563
4564 adv = mii_adv_to_ethtool_adv_t(val & ADVERTISE_ALL);
4565 tp->link_config.advertising |= adv | ADVERTISED_TP;
4566
4567 tp->link_config.flowctrl = tg3_decode_flowctrl_1000T(val);
4568 } else {
4569 tp->link_config.advertising |= ADVERTISED_FIBRE;
4570 }
4571
4572 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4573 u32 adv;
4574
4575 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4576 err = tg3_readphy(tp, MII_CTRL1000, &val);
4577 if (err)
4578 goto done;
4579
4580 adv = mii_ctrl1000_to_ethtool_adv_t(val);
4581 } else {
4582 err = tg3_readphy(tp, MII_ADVERTISE, &val);
4583 if (err)
4584 goto done;
4585
4586 adv = tg3_decode_flowctrl_1000X(val);
4587 tp->link_config.flowctrl = adv;
4588
4589 val &= (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL);
4590 adv = mii_adv_to_ethtool_adv_x(val);
4591 }
4592
4593 tp->link_config.advertising |= adv;
4594 }
4595
4596 done:
4597 return err;
4598 }
4599
4600 static int tg3_init_5401phy_dsp(struct tg3 *tp)
4601 {
4602 int err;
4603
4604 /* Turn off tap power management. */
4605 /* Set Extended packet length bit */
4606 err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
4607
4608 err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
4609 err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
4610 err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
4611 err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
4612 err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
4613
4614 udelay(40);
4615
4616 return err;
4617 }
4618
4619 static bool tg3_phy_eee_config_ok(struct tg3 *tp)
4620 {
4621 struct ethtool_eee eee;
4622
4623 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4624 return true;
4625
4626 tg3_eee_pull_config(tp, &eee);
4627
4628 if (tp->eee.eee_enabled) {
4629 if (tp->eee.advertised != eee.advertised ||
4630 tp->eee.tx_lpi_timer != eee.tx_lpi_timer ||
4631 tp->eee.tx_lpi_enabled != eee.tx_lpi_enabled)
4632 return false;
4633 } else {
4634 /* EEE is disabled but we're advertising */
4635 if (eee.advertised)
4636 return false;
4637 }
4638
4639 return true;
4640 }
4641
4642 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
4643 {
4644 u32 advmsk, tgtadv, advertising;
4645
4646 advertising = tp->link_config.advertising;
4647 tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
4648
4649 advmsk = ADVERTISE_ALL;
4650 if (tp->link_config.active_duplex == DUPLEX_FULL) {
4651 tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
4652 advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4653 }
4654
4655 if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
4656 return false;
4657
4658 if ((*lcladv & advmsk) != tgtadv)
4659 return false;
4660
4661 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4662 u32 tg3_ctrl;
4663
4664 tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
4665
4666 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
4667 return false;
4668
4669 if (tgtadv &&
4670 (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4671 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)) {
4672 tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4673 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
4674 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
4675 } else {
4676 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
4677 }
4678
4679 if (tg3_ctrl != tgtadv)
4680 return false;
4681 }
4682
4683 return true;
4684 }
4685
4686 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4687 {
4688 u32 lpeth = 0;
4689
4690 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4691 u32 val;
4692
4693 if (tg3_readphy(tp, MII_STAT1000, &val))
4694 return false;
4695
4696 lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4697 }
4698
4699 if (tg3_readphy(tp, MII_LPA, rmtadv))
4700 return false;
4701
4702 lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4703 tp->link_config.rmt_adv = lpeth;
4704
4705 return true;
4706 }
4707
4708 static bool tg3_test_and_report_link_chg(struct tg3 *tp, bool curr_link_up)
4709 {
4710 if (curr_link_up != tp->link_up) {
4711 if (curr_link_up) {
4712 netif_carrier_on(tp->dev);
4713 } else {
4714 netif_carrier_off(tp->dev);
4715 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4716 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4717 }
4718
4719 tg3_link_report(tp);
4720 return true;
4721 }
4722
4723 return false;
4724 }
4725
4726 static void tg3_clear_mac_status(struct tg3 *tp)
4727 {
4728 tw32(MAC_EVENT, 0);
4729
4730 tw32_f(MAC_STATUS,
4731 MAC_STATUS_SYNC_CHANGED |
4732 MAC_STATUS_CFG_CHANGED |
4733 MAC_STATUS_MI_COMPLETION |
4734 MAC_STATUS_LNKSTATE_CHANGED);
4735 udelay(40);
4736 }
4737
4738 static void tg3_setup_eee(struct tg3 *tp)
4739 {
4740 u32 val;
4741
4742 val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
4743 TG3_CPMU_EEE_LNKIDL_UART_IDL;
4744 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
4745 val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT;
4746
4747 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val);
4748
4749 tw32_f(TG3_CPMU_EEE_CTRL,
4750 TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
4751
4752 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
4753 (tp->eee.tx_lpi_enabled ? TG3_CPMU_EEEMD_LPI_IN_TX : 0) |
4754 TG3_CPMU_EEEMD_LPI_IN_RX |
4755 TG3_CPMU_EEEMD_EEE_ENABLE;
4756
4757 if (tg3_asic_rev(tp) != ASIC_REV_5717)
4758 val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
4759
4760 if (tg3_flag(tp, ENABLE_APE))
4761 val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
4762
4763 tw32_f(TG3_CPMU_EEE_MODE, tp->eee.eee_enabled ? val : 0);
4764
4765 tw32_f(TG3_CPMU_EEE_DBTMR1,
4766 TG3_CPMU_DBTMR1_PCIEXIT_2047US |
4767 (tp->eee.tx_lpi_timer & 0xffff));
4768
4769 tw32_f(TG3_CPMU_EEE_DBTMR2,
4770 TG3_CPMU_DBTMR2_APE_TX_2047US |
4771 TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
4772 }
4773
4774 static int tg3_setup_copper_phy(struct tg3 *tp, bool force_reset)
4775 {
4776 bool current_link_up;
4777 u32 bmsr, val;
4778 u32 lcl_adv, rmt_adv;
4779 u32 current_speed;
4780 u8 current_duplex;
4781 int i, err;
4782
4783 tg3_clear_mac_status(tp);
4784
4785 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
4786 tw32_f(MAC_MI_MODE,
4787 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
4788 udelay(80);
4789 }
4790
4791 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
4792
4793 /* Some third-party PHYs need to be reset on link going
4794 * down.
4795 */
4796 if ((tg3_asic_rev(tp) == ASIC_REV_5703 ||
4797 tg3_asic_rev(tp) == ASIC_REV_5704 ||
4798 tg3_asic_rev(tp) == ASIC_REV_5705) &&
4799 tp->link_up) {
4800 tg3_readphy(tp, MII_BMSR, &bmsr);
4801 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4802 !(bmsr & BMSR_LSTATUS))
4803 force_reset = true;
4804 }
4805 if (force_reset)
4806 tg3_phy_reset(tp);
4807
4808 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
4809 tg3_readphy(tp, MII_BMSR, &bmsr);
4810 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
4811 !tg3_flag(tp, INIT_COMPLETE))
4812 bmsr = 0;
4813
4814 if (!(bmsr & BMSR_LSTATUS)) {
4815 err = tg3_init_5401phy_dsp(tp);
4816 if (err)
4817 return err;
4818
4819 tg3_readphy(tp, MII_BMSR, &bmsr);
4820 for (i = 0; i < 1000; i++) {
4821 udelay(10);
4822 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4823 (bmsr & BMSR_LSTATUS)) {
4824 udelay(40);
4825 break;
4826 }
4827 }
4828
4829 if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
4830 TG3_PHY_REV_BCM5401_B0 &&
4831 !(bmsr & BMSR_LSTATUS) &&
4832 tp->link_config.active_speed == SPEED_1000) {
4833 err = tg3_phy_reset(tp);
4834 if (!err)
4835 err = tg3_init_5401phy_dsp(tp);
4836 if (err)
4837 return err;
4838 }
4839 }
4840 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4841 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0) {
4842 /* 5701 {A0,B0} CRC bug workaround */
4843 tg3_writephy(tp, 0x15, 0x0a75);
4844 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4845 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
4846 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4847 }
4848
4849 /* Clear pending interrupts... */
4850 tg3_readphy(tp, MII_TG3_ISTAT, &val);
4851 tg3_readphy(tp, MII_TG3_ISTAT, &val);
4852
4853 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
4854 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
4855 else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
4856 tg3_writephy(tp, MII_TG3_IMASK, ~0);
4857
4858 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4859 tg3_asic_rev(tp) == ASIC_REV_5701) {
4860 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
4861 tg3_writephy(tp, MII_TG3_EXT_CTRL,
4862 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
4863 else
4864 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
4865 }
4866
4867 current_link_up = false;
4868 current_speed = SPEED_UNKNOWN;
4869 current_duplex = DUPLEX_UNKNOWN;
4870 tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
4871 tp->link_config.rmt_adv = 0;
4872
4873 if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
4874 err = tg3_phy_auxctl_read(tp,
4875 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4876 &val);
4877 if (!err && !(val & (1 << 10))) {
4878 tg3_phy_auxctl_write(tp,
4879 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4880 val | (1 << 10));
4881 goto relink;
4882 }
4883 }
4884
4885 bmsr = 0;
4886 for (i = 0; i < 100; i++) {
4887 tg3_readphy(tp, MII_BMSR, &bmsr);
4888 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4889 (bmsr & BMSR_LSTATUS))
4890 break;
4891 udelay(40);
4892 }
4893
4894 if (bmsr & BMSR_LSTATUS) {
4895 u32 aux_stat, bmcr;
4896
4897 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
4898 for (i = 0; i < 2000; i++) {
4899 udelay(10);
4900 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
4901 aux_stat)
4902 break;
4903 }
4904
4905 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
4906 &current_speed,
4907 &current_duplex);
4908
4909 bmcr = 0;
4910 for (i = 0; i < 200; i++) {
4911 tg3_readphy(tp, MII_BMCR, &bmcr);
4912 if (tg3_readphy(tp, MII_BMCR, &bmcr))
4913 continue;
4914 if (bmcr && bmcr != 0x7fff)
4915 break;
4916 udelay(10);
4917 }
4918
4919 lcl_adv = 0;
4920 rmt_adv = 0;
4921
4922 tp->link_config.active_speed = current_speed;
4923 tp->link_config.active_duplex = current_duplex;
4924
4925 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4926 bool eee_config_ok = tg3_phy_eee_config_ok(tp);
4927
4928 if ((bmcr & BMCR_ANENABLE) &&
4929 eee_config_ok &&
4930 tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
4931 tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
4932 current_link_up = true;
4933
4934 /* Changes to the EEE settings take effect only after a phy
4935 * reset. If we have skipped a reset due to Link Flap
4936 * Avoidance being enabled, do it now.
4937 */
4938 if (!eee_config_ok &&
4939 (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
4940 !force_reset) {
4941 tg3_setup_eee(tp);
4942 tg3_phy_reset(tp);
4943 }
4944 } else {
4945 if (!(bmcr & BMCR_ANENABLE) &&
4946 tp->link_config.speed == current_speed &&
4947 tp->link_config.duplex == current_duplex) {
4948 current_link_up = true;
4949 }
4950 }
4951
4952 if (current_link_up &&
4953 tp->link_config.active_duplex == DUPLEX_FULL) {
4954 u32 reg, bit;
4955
4956 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4957 reg = MII_TG3_FET_GEN_STAT;
4958 bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
4959 } else {
4960 reg = MII_TG3_EXT_STAT;
4961 bit = MII_TG3_EXT_STAT_MDIX;
4962 }
4963
4964 if (!tg3_readphy(tp, reg, &val) && (val & bit))
4965 tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
4966
4967 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4968 }
4969 }
4970
4971 relink:
4972 if (!current_link_up || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4973 tg3_phy_copper_begin(tp);
4974
4975 if (tg3_flag(tp, ROBOSWITCH)) {
4976 current_link_up = true;
4977 /* FIXME: when BCM5325 switch is used use 100 MBit/s */
4978 current_speed = SPEED_1000;
4979 current_duplex = DUPLEX_FULL;
4980 tp->link_config.active_speed = current_speed;
4981 tp->link_config.active_duplex = current_duplex;
4982 }
4983
4984 tg3_readphy(tp, MII_BMSR, &bmsr);
4985 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4986 (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4987 current_link_up = true;
4988 }
4989
4990 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4991 if (current_link_up) {
4992 if (tp->link_config.active_speed == SPEED_100 ||
4993 tp->link_config.active_speed == SPEED_10)
4994 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4995 else
4996 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4997 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
4998 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4999 else
5000 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5001
5002 /* In order for the 5750 core in the BCM4785 chip to work properly
5003 * in RGMII mode, the LED Control Register must be set up.
5004 */
5005 if (tg3_flag(tp, RGMII_MODE)) {
5006 u32 led_ctrl = tr32(MAC_LED_CTRL);
5007 led_ctrl &= ~(LED_CTRL_1000MBPS_ON | LED_CTRL_100MBPS_ON);
5008
5009 if (tp->link_config.active_speed == SPEED_10)
5010 led_ctrl |= LED_CTRL_LNKLED_OVERRIDE;
5011 else if (tp->link_config.active_speed == SPEED_100)
5012 led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
5013 LED_CTRL_100MBPS_ON);
5014 else if (tp->link_config.active_speed == SPEED_1000)
5015 led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
5016 LED_CTRL_1000MBPS_ON);
5017
5018 tw32(MAC_LED_CTRL, led_ctrl);
5019 udelay(40);
5020 }
5021
5022 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5023 if (tp->link_config.active_duplex == DUPLEX_HALF)
5024 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5025
5026 if (tg3_asic_rev(tp) == ASIC_REV_5700) {
5027 if (current_link_up &&
5028 tg3_5700_link_polarity(tp, tp->link_config.active_speed))
5029 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
5030 else
5031 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
5032 }
5033
5034 /* ??? Without this setting Netgear GA302T PHY does not
5035 * ??? send/receive packets...
5036 */
5037 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
5038 tg3_chip_rev_id(tp) == CHIPREV_ID_5700_ALTIMA) {
5039 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
5040 tw32_f(MAC_MI_MODE, tp->mi_mode);
5041 udelay(80);
5042 }
5043
5044 tw32_f(MAC_MODE, tp->mac_mode);
5045 udelay(40);
5046
5047 tg3_phy_eee_adjust(tp, current_link_up);
5048
5049 if (tg3_flag(tp, USE_LINKCHG_REG)) {
5050 /* Polled via timer. */
5051 tw32_f(MAC_EVENT, 0);
5052 } else {
5053 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5054 }
5055 udelay(40);
5056
5057 if (tg3_asic_rev(tp) == ASIC_REV_5700 &&
5058 current_link_up &&
5059 tp->link_config.active_speed == SPEED_1000 &&
5060 (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
5061 udelay(120);
5062 tw32_f(MAC_STATUS,
5063 (MAC_STATUS_SYNC_CHANGED |
5064 MAC_STATUS_CFG_CHANGED));
5065 udelay(40);
5066 tg3_write_mem(tp,
5067 NIC_SRAM_FIRMWARE_MBOX,
5068 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
5069 }
5070
5071 /* Prevent send BD corruption. */
5072 if (tg3_flag(tp, CLKREQ_BUG)) {
5073 if (tp->link_config.active_speed == SPEED_100 ||
5074 tp->link_config.active_speed == SPEED_10)
5075 pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL,
5076 PCI_EXP_LNKCTL_CLKREQ_EN);
5077 else
5078 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
5079 PCI_EXP_LNKCTL_CLKREQ_EN);
5080 }
5081
5082 tg3_test_and_report_link_chg(tp, current_link_up);
5083
5084 return 0;
5085 }
5086
5087 struct tg3_fiber_aneginfo {
5088 int state;
5089 #define ANEG_STATE_UNKNOWN 0
5090 #define ANEG_STATE_AN_ENABLE 1
5091 #define ANEG_STATE_RESTART_INIT 2
5092 #define ANEG_STATE_RESTART 3
5093 #define ANEG_STATE_DISABLE_LINK_OK 4
5094 #define ANEG_STATE_ABILITY_DETECT_INIT 5
5095 #define ANEG_STATE_ABILITY_DETECT 6
5096 #define ANEG_STATE_ACK_DETECT_INIT 7
5097 #define ANEG_STATE_ACK_DETECT 8
5098 #define ANEG_STATE_COMPLETE_ACK_INIT 9
5099 #define ANEG_STATE_COMPLETE_ACK 10
5100 #define ANEG_STATE_IDLE_DETECT_INIT 11
5101 #define ANEG_STATE_IDLE_DETECT 12
5102 #define ANEG_STATE_LINK_OK 13
5103 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
5104 #define ANEG_STATE_NEXT_PAGE_WAIT 15
5105
5106 u32 flags;
5107 #define MR_AN_ENABLE 0x00000001
5108 #define MR_RESTART_AN 0x00000002
5109 #define MR_AN_COMPLETE 0x00000004
5110 #define MR_PAGE_RX 0x00000008
5111 #define MR_NP_LOADED 0x00000010
5112 #define MR_TOGGLE_TX 0x00000020
5113 #define MR_LP_ADV_FULL_DUPLEX 0x00000040
5114 #define MR_LP_ADV_HALF_DUPLEX 0x00000080
5115 #define MR_LP_ADV_SYM_PAUSE 0x00000100
5116 #define MR_LP_ADV_ASYM_PAUSE 0x00000200
5117 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
5118 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
5119 #define MR_LP_ADV_NEXT_PAGE 0x00001000
5120 #define MR_TOGGLE_RX 0x00002000
5121 #define MR_NP_RX 0x00004000
5122
5123 #define MR_LINK_OK 0x80000000
5124
5125 unsigned long link_time, cur_time;
5126
5127 u32 ability_match_cfg;
5128 int ability_match_count;
5129
5130 char ability_match, idle_match, ack_match;
5131
5132 u32 txconfig, rxconfig;
5133 #define ANEG_CFG_NP 0x00000080
5134 #define ANEG_CFG_ACK 0x00000040
5135 #define ANEG_CFG_RF2 0x00000020
5136 #define ANEG_CFG_RF1 0x00000010
5137 #define ANEG_CFG_PS2 0x00000001
5138 #define ANEG_CFG_PS1 0x00008000
5139 #define ANEG_CFG_HD 0x00004000
5140 #define ANEG_CFG_FD 0x00002000
5141 #define ANEG_CFG_INVAL 0x00001f06
5142
5143 };
5144 #define ANEG_OK 0
5145 #define ANEG_DONE 1
5146 #define ANEG_TIMER_ENAB 2
5147 #define ANEG_FAILED -1
5148
5149 #define ANEG_STATE_SETTLE_TIME 10000
5150
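/* Happy-path traversal of the state machine below, for reference:
 *
 *	AN_ENABLE -> RESTART_INIT -> RESTART
 *	  -> ABILITY_DETECT_INIT -> ABILITY_DETECT
 *	  -> ACK_DETECT_INIT -> ACK_DETECT
 *	  -> COMPLETE_ACK_INIT -> COMPLETE_ACK
 *	  -> IDLE_DETECT_INIT -> IDLE_DETECT -> LINK_OK
 *
 * Seeing a stable all-zeroes config word (ability_match set while
 * rxconfig == 0) drops the machine back to AN_ENABLE, and a config
 * word with ANEG_CFG_INVAL bits fails the negotiation.
 */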
5151 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
5152 struct tg3_fiber_aneginfo *ap)
5153 {
5154 u16 flowctrl;
5155 unsigned long delta;
5156 u32 rx_cfg_reg;
5157 int ret;
5158
5159 if (ap->state == ANEG_STATE_UNKNOWN) {
5160 ap->rxconfig = 0;
5161 ap->link_time = 0;
5162 ap->cur_time = 0;
5163 ap->ability_match_cfg = 0;
5164 ap->ability_match_count = 0;
5165 ap->ability_match = 0;
5166 ap->idle_match = 0;
5167 ap->ack_match = 0;
5168 }
5169 ap->cur_time++;
5170
5171 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
5172 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
5173
5174 if (rx_cfg_reg != ap->ability_match_cfg) {
5175 ap->ability_match_cfg = rx_cfg_reg;
5176 ap->ability_match = 0;
5177 ap->ability_match_count = 0;
5178 } else {
5179 if (++ap->ability_match_count > 1) {
5180 ap->ability_match = 1;
5181 ap->ability_match_cfg = rx_cfg_reg;
5182 }
5183 }
5184 if (rx_cfg_reg & ANEG_CFG_ACK)
5185 ap->ack_match = 1;
5186 else
5187 ap->ack_match = 0;
5188
5189 ap->idle_match = 0;
5190 } else {
5191 ap->idle_match = 1;
5192 ap->ability_match_cfg = 0;
5193 ap->ability_match_count = 0;
5194 ap->ability_match = 0;
5195 ap->ack_match = 0;
5196
5197 rx_cfg_reg = 0;
5198 }
5199
5200 ap->rxconfig = rx_cfg_reg;
5201 ret = ANEG_OK;
5202
5203 switch (ap->state) {
5204 case ANEG_STATE_UNKNOWN:
5205 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
5206 ap->state = ANEG_STATE_AN_ENABLE;
5207
5208 fallthrough;
5209 case ANEG_STATE_AN_ENABLE:
5210 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
5211 if (ap->flags & MR_AN_ENABLE) {
5212 ap->link_time = 0;
5213 ap->cur_time = 0;
5214 ap->ability_match_cfg = 0;
5215 ap->ability_match_count = 0;
5216 ap->ability_match = 0;
5217 ap->idle_match = 0;
5218 ap->ack_match = 0;
5219
5220 ap->state = ANEG_STATE_RESTART_INIT;
5221 } else {
5222 ap->state = ANEG_STATE_DISABLE_LINK_OK;
5223 }
5224 break;
5225
5226 case ANEG_STATE_RESTART_INIT:
5227 ap->link_time = ap->cur_time;
5228 ap->flags &= ~(MR_NP_LOADED);
5229 ap->txconfig = 0;
5230 tw32(MAC_TX_AUTO_NEG, 0);
5231 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5232 tw32_f(MAC_MODE, tp->mac_mode);
5233 udelay(40);
5234
5235 ret = ANEG_TIMER_ENAB;
5236 ap->state = ANEG_STATE_RESTART;
5237
5238 fallthrough;
5239 case ANEG_STATE_RESTART:
5240 delta = ap->cur_time - ap->link_time;
5241 if (delta > ANEG_STATE_SETTLE_TIME)
5242 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
5243 else
5244 ret = ANEG_TIMER_ENAB;
5245 break;
5246
5247 case ANEG_STATE_DISABLE_LINK_OK:
5248 ret = ANEG_DONE;
5249 break;
5250
5251 case ANEG_STATE_ABILITY_DETECT_INIT:
5252 ap->flags &= ~(MR_TOGGLE_TX);
5253 ap->txconfig = ANEG_CFG_FD;
5254 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5255 if (flowctrl & ADVERTISE_1000XPAUSE)
5256 ap->txconfig |= ANEG_CFG_PS1;
5257 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5258 ap->txconfig |= ANEG_CFG_PS2;
5259 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5260 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5261 tw32_f(MAC_MODE, tp->mac_mode);
5262 udelay(40);
5263
5264 ap->state = ANEG_STATE_ABILITY_DETECT;
5265 break;
5266
5267 case ANEG_STATE_ABILITY_DETECT:
5268 if (ap->ability_match != 0 && ap->rxconfig != 0)
5269 ap->state = ANEG_STATE_ACK_DETECT_INIT;
5270 break;
5271
5272 case ANEG_STATE_ACK_DETECT_INIT:
5273 ap->txconfig |= ANEG_CFG_ACK;
5274 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5275 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5276 tw32_f(MAC_MODE, tp->mac_mode);
5277 udelay(40);
5278
5279 ap->state = ANEG_STATE_ACK_DETECT;
5280
5281 fallthrough;
5282 case ANEG_STATE_ACK_DETECT:
5283 if (ap->ack_match != 0) {
5284 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
5285 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
5286 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
5287 } else {
5288 ap->state = ANEG_STATE_AN_ENABLE;
5289 }
5290 } else if (ap->ability_match != 0 &&
5291 ap->rxconfig == 0) {
5292 ap->state = ANEG_STATE_AN_ENABLE;
5293 }
5294 break;
5295
5296 case ANEG_STATE_COMPLETE_ACK_INIT:
5297 if (ap->rxconfig & ANEG_CFG_INVAL) {
5298 ret = ANEG_FAILED;
5299 break;
5300 }
5301 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
5302 MR_LP_ADV_HALF_DUPLEX |
5303 MR_LP_ADV_SYM_PAUSE |
5304 MR_LP_ADV_ASYM_PAUSE |
5305 MR_LP_ADV_REMOTE_FAULT1 |
5306 MR_LP_ADV_REMOTE_FAULT2 |
5307 MR_LP_ADV_NEXT_PAGE |
5308 MR_TOGGLE_RX |
5309 MR_NP_RX);
5310 if (ap->rxconfig & ANEG_CFG_FD)
5311 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
5312 if (ap->rxconfig & ANEG_CFG_HD)
5313 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
5314 if (ap->rxconfig & ANEG_CFG_PS1)
5315 ap->flags |= MR_LP_ADV_SYM_PAUSE;
5316 if (ap->rxconfig & ANEG_CFG_PS2)
5317 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
5318 if (ap->rxconfig & ANEG_CFG_RF1)
5319 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
5320 if (ap->rxconfig & ANEG_CFG_RF2)
5321 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
5322 if (ap->rxconfig & ANEG_CFG_NP)
5323 ap->flags |= MR_LP_ADV_NEXT_PAGE;
5324
5325 ap->link_time = ap->cur_time;
5326
5327 ap->flags ^= (MR_TOGGLE_TX);
5328 if (ap->rxconfig & 0x0008)
5329 ap->flags |= MR_TOGGLE_RX;
5330 if (ap->rxconfig & ANEG_CFG_NP)
5331 ap->flags |= MR_NP_RX;
5332 ap->flags |= MR_PAGE_RX;
5333
5334 ap->state = ANEG_STATE_COMPLETE_ACK;
5335 ret = ANEG_TIMER_ENAB;
5336 break;
5337
5338 case ANEG_STATE_COMPLETE_ACK:
5339 if (ap->ability_match != 0 &&
5340 ap->rxconfig == 0) {
5341 ap->state = ANEG_STATE_AN_ENABLE;
5342 break;
5343 }
5344 delta = ap->cur_time - ap->link_time;
5345 if (delta > ANEG_STATE_SETTLE_TIME) {
5346 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
5347 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5348 } else {
5349 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
5350 !(ap->flags & MR_NP_RX)) {
5351 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5352 } else {
5353 ret = ANEG_FAILED;
5354 }
5355 }
5356 }
5357 break;
5358
5359 case ANEG_STATE_IDLE_DETECT_INIT:
5360 ap->link_time = ap->cur_time;
5361 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5362 tw32_f(MAC_MODE, tp->mac_mode);
5363 udelay(40);
5364
5365 ap->state = ANEG_STATE_IDLE_DETECT;
5366 ret = ANEG_TIMER_ENAB;
5367 break;
5368
5369 case ANEG_STATE_IDLE_DETECT:
5370 if (ap->ability_match != 0 &&
5371 ap->rxconfig == 0) {
5372 ap->state = ANEG_STATE_AN_ENABLE;
5373 break;
5374 }
5375 delta = ap->cur_time - ap->link_time;
5376 if (delta > ANEG_STATE_SETTLE_TIME) {
5377 /* XXX another gem from the Broadcom driver :( */
5378 ap->state = ANEG_STATE_LINK_OK;
5379 }
5380 break;
5381
5382 case ANEG_STATE_LINK_OK:
5383 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
5384 ret = ANEG_DONE;
5385 break;
5386
5387 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
5388 /* ??? unimplemented */
5389 break;
5390
5391 case ANEG_STATE_NEXT_PAGE_WAIT:
5392 /* ??? unimplemented */
5393 break;
5394
5395 default:
5396 ret = ANEG_FAILED;
5397 break;
5398 }
5399
5400 return ret;
5401 }
5402
5403 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
5404 {
5405 int res = 0;
5406 struct tg3_fiber_aneginfo aninfo;
5407 int status = ANEG_FAILED;
5408 unsigned int tick;
5409 u32 tmp;
5410
5411 tw32_f(MAC_TX_AUTO_NEG, 0);
5412
5413 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
5414 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
5415 udelay(40);
5416
5417 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
5418 udelay(40);
5419
5420 memset(&aninfo, 0, sizeof(aninfo));
5421 aninfo.flags |= MR_AN_ENABLE;
5422 aninfo.state = ANEG_STATE_UNKNOWN;
5423 aninfo.cur_time = 0;
5424 tick = 0;
5425 while (++tick < 195000) {
5426 status = tg3_fiber_aneg_smachine(tp, &aninfo);
5427 if (status == ANEG_DONE || status == ANEG_FAILED)
5428 break;
5429
5430 udelay(1);
5431 }
5432
5433 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5434 tw32_f(MAC_MODE, tp->mac_mode);
5435 udelay(40);
5436
5437 *txflags = aninfo.txconfig;
5438 *rxflags = aninfo.flags;
5439
5440 if (status == ANEG_DONE &&
5441 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
5442 MR_LP_ADV_FULL_DUPLEX)))
5443 res = 1;
5444
5445 return res;
5446 }
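/* fiber_autoneg() ticks the state machine once per microsecond, so the
 * 195000-iteration bound above is roughly a 195 ms negotiation budget,
 * and ANEG_STATE_SETTLE_TIME (10000 ticks) is about a 10 ms settle.
 */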
5447
5448 static void tg3_init_bcm8002(struct tg3 *tp)
5449 {
5450 u32 mac_status = tr32(MAC_STATUS);
5451 int i;
5452
5453 /* Reset when initializing for the first time or when we have a link. */
5454 if (tg3_flag(tp, INIT_COMPLETE) &&
5455 !(mac_status & MAC_STATUS_PCS_SYNCED))
5456 return;
5457
5458 /* Set PLL lock range. */
5459 tg3_writephy(tp, 0x16, 0x8007);
5460
5461 /* SW reset */
5462 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
5463
5464 /* Wait for reset to complete. */
5465 /* XXX schedule_timeout() ... */
5466 for (i = 0; i < 500; i++)
5467 udelay(10);
5468
5469 /* Config mode; select PMA/Ch 1 regs. */
5470 tg3_writephy(tp, 0x10, 0x8411);
5471
5472 /* Enable auto-lock and comdet, select txclk for tx. */
5473 tg3_writephy(tp, 0x11, 0x0a10);
5474
5475 tg3_writephy(tp, 0x18, 0x00a0);
5476 tg3_writephy(tp, 0x16, 0x41ff);
5477
5478 /* Assert and deassert POR. */
5479 tg3_writephy(tp, 0x13, 0x0400);
5480 udelay(40);
5481 tg3_writephy(tp, 0x13, 0x0000);
5482
5483 tg3_writephy(tp, 0x11, 0x0a50);
5484 udelay(40);
5485 tg3_writephy(tp, 0x11, 0x0a10);
5486
5487 /* Wait for signal to stabilize */
5488 /* XXX schedule_timeout() ... */
5489 for (i = 0; i < 15000; i++)
5490 udelay(10);
5491
5492 /* Deselect the channel register so we can read the PHYID
5493 * later.
5494 */
5495 tg3_writephy(tp, 0x10, 0x8011);
5496 }
5497
5498 static bool tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
5499 {
5500 u16 flowctrl;
5501 bool current_link_up;
5502 u32 sg_dig_ctrl, sg_dig_status;
5503 u32 serdes_cfg, expected_sg_dig_ctrl;
5504 int workaround, port_a;
5505
5506 serdes_cfg = 0;
5507 workaround = 0;
5508 port_a = 1;
5509 current_link_up = false;
5510
5511 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A0 &&
5512 tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A1) {
5513 workaround = 1;
5514 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
5515 port_a = 0;
5516
5517 /* preserve bits 0-11,13,14 for signal pre-emphasis */
5518 /* preserve bits 20-23 for voltage regulator */
5519 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
5520 }
5521
5522 sg_dig_ctrl = tr32(SG_DIG_CTRL);
5523
5524 if (tp->link_config.autoneg != AUTONEG_ENABLE) {
5525 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
5526 if (workaround) {
5527 u32 val = serdes_cfg;
5528
5529 if (port_a)
5530 val |= 0xc010000;
5531 else
5532 val |= 0x4010000;
5533 tw32_f(MAC_SERDES_CFG, val);
5534 }
5535
5536 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5537 }
5538 if (mac_status & MAC_STATUS_PCS_SYNCED) {
5539 tg3_setup_flow_control(tp, 0, 0);
5540 current_link_up = true;
5541 }
5542 goto out;
5543 }
5544
5545 /* Want auto-negotiation. */
5546 expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
5547
5548 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5549 if (flowctrl & ADVERTISE_1000XPAUSE)
5550 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
5551 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5552 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
5553
5554 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
5555 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
5556 tp->serdes_counter &&
5557 ((mac_status & (MAC_STATUS_PCS_SYNCED |
5558 MAC_STATUS_RCVD_CFG)) ==
5559 MAC_STATUS_PCS_SYNCED)) {
5560 tp->serdes_counter--;
5561 current_link_up = true;
5562 goto out;
5563 }
5564 restart_autoneg:
5565 if (workaround)
5566 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
5567 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
5568 udelay(5);
5569 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
5570
5571 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5572 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5573 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
5574 MAC_STATUS_SIGNAL_DET)) {
5575 sg_dig_status = tr32(SG_DIG_STATUS);
5576 mac_status = tr32(MAC_STATUS);
5577
5578 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
5579 (mac_status & MAC_STATUS_PCS_SYNCED)) {
5580 u32 local_adv = 0, remote_adv = 0;
5581
5582 if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
5583 local_adv |= ADVERTISE_1000XPAUSE;
5584 if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
5585 local_adv |= ADVERTISE_1000XPSE_ASYM;
5586
5587 if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
5588 remote_adv |= LPA_1000XPAUSE;
5589 if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
5590 remote_adv |= LPA_1000XPAUSE_ASYM;
5591
5592 tp->link_config.rmt_adv =
5593 mii_adv_to_ethtool_adv_x(remote_adv);
5594
5595 tg3_setup_flow_control(tp, local_adv, remote_adv);
5596 current_link_up = true;
5597 tp->serdes_counter = 0;
5598 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5599 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
5600 if (tp->serdes_counter)
5601 tp->serdes_counter--;
5602 else {
5603 if (workaround) {
5604 u32 val = serdes_cfg;
5605
5606 if (port_a)
5607 val |= 0xc010000;
5608 else
5609 val |= 0x4010000;
5610
5611 tw32_f(MAC_SERDES_CFG, val);
5612 }
5613
5614 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5615 udelay(40);
5616
5617 /* Link parallel detection - link is up only if
5618 * we have PCS_SYNC and are not receiving
5619 * config code words */
5620 mac_status = tr32(MAC_STATUS);
5621 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
5622 !(mac_status & MAC_STATUS_RCVD_CFG)) {
5623 tg3_setup_flow_control(tp, 0, 0);
5624 current_link_up = true;
5625 tp->phy_flags |=
5626 TG3_PHYFLG_PARALLEL_DETECT;
5627 tp->serdes_counter =
5628 SERDES_PARALLEL_DET_TIMEOUT;
5629 } else
5630 goto restart_autoneg;
5631 }
5632 }
5633 } else {
5634 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5635 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5636 }
5637
5638 out:
5639 return current_link_up;
5640 }
5641
5642 static bool tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
5643 {
5644 bool current_link_up = false;
5645
5646 if (!(mac_status & MAC_STATUS_PCS_SYNCED))
5647 goto out;
5648
5649 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5650 u32 txflags, rxflags;
5651 int i;
5652
5653 if (fiber_autoneg(tp, &txflags, &rxflags)) {
5654 u32 local_adv = 0, remote_adv = 0;
5655
5656 if (txflags & ANEG_CFG_PS1)
5657 local_adv |= ADVERTISE_1000XPAUSE;
5658 if (txflags & ANEG_CFG_PS2)
5659 local_adv |= ADVERTISE_1000XPSE_ASYM;
5660
5661 if (rxflags & MR_LP_ADV_SYM_PAUSE)
5662 remote_adv |= LPA_1000XPAUSE;
5663 if (rxflags & MR_LP_ADV_ASYM_PAUSE)
5664 remote_adv |= LPA_1000XPAUSE_ASYM;
5665
5666 tp->link_config.rmt_adv =
5667 mii_adv_to_ethtool_adv_x(remote_adv);
5668
5669 tg3_setup_flow_control(tp, local_adv, remote_adv);
5670
5671 current_link_up = true;
5672 }
5673 for (i = 0; i < 30; i++) {
5674 udelay(20);
5675 tw32_f(MAC_STATUS,
5676 (MAC_STATUS_SYNC_CHANGED |
5677 MAC_STATUS_CFG_CHANGED));
5678 udelay(40);
5679 if ((tr32(MAC_STATUS) &
5680 (MAC_STATUS_SYNC_CHANGED |
5681 MAC_STATUS_CFG_CHANGED)) == 0)
5682 break;
5683 }
5684
5685 mac_status = tr32(MAC_STATUS);
5686 if (!current_link_up &&
5687 (mac_status & MAC_STATUS_PCS_SYNCED) &&
5688 !(mac_status & MAC_STATUS_RCVD_CFG))
5689 current_link_up = true;
5690 } else {
5691 tg3_setup_flow_control(tp, 0, 0);
5692
5693 /* Forcing 1000FD link up. */
5694 current_link_up = true;
5695
5696 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
5697 udelay(40);
5698
5699 tw32_f(MAC_MODE, tp->mac_mode);
5700 udelay(40);
5701 }
5702
5703 out:
5704 return current_link_up;
5705 }
5706
5707 static int tg3_setup_fiber_phy(struct tg3 *tp, bool force_reset)
5708 {
5709 u32 orig_pause_cfg;
5710 u32 orig_active_speed;
5711 u8 orig_active_duplex;
5712 u32 mac_status;
5713 bool current_link_up;
5714 int i;
5715
5716 orig_pause_cfg = tp->link_config.active_flowctrl;
5717 orig_active_speed = tp->link_config.active_speed;
5718 orig_active_duplex = tp->link_config.active_duplex;
5719
5720 if (!tg3_flag(tp, HW_AUTONEG) &&
5721 tp->link_up &&
5722 tg3_flag(tp, INIT_COMPLETE)) {
5723 mac_status = tr32(MAC_STATUS);
5724 mac_status &= (MAC_STATUS_PCS_SYNCED |
5725 MAC_STATUS_SIGNAL_DET |
5726 MAC_STATUS_CFG_CHANGED |
5727 MAC_STATUS_RCVD_CFG);
5728 if (mac_status == (MAC_STATUS_PCS_SYNCED |
5729 MAC_STATUS_SIGNAL_DET)) {
5730 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5731 MAC_STATUS_CFG_CHANGED));
5732 return 0;
5733 }
5734 }
5735
5736 tw32_f(MAC_TX_AUTO_NEG, 0);
5737
5738 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
5739 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
5740 tw32_f(MAC_MODE, tp->mac_mode);
5741 udelay(40);
5742
5743 if (tp->phy_id == TG3_PHY_ID_BCM8002)
5744 tg3_init_bcm8002(tp);
5745
5746 /* Enable link change event even when serdes polling. */
5747 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5748 udelay(40);
5749
5750 tp->link_config.rmt_adv = 0;
5751 mac_status = tr32(MAC_STATUS);
5752
5753 if (tg3_flag(tp, HW_AUTONEG))
5754 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
5755 else
5756 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
5757
5758 tp->napi[0].hw_status->status =
5759 (SD_STATUS_UPDATED |
5760 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
5761
5762 for (i = 0; i < 100; i++) {
5763 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5764 MAC_STATUS_CFG_CHANGED));
5765 udelay(5);
5766 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
5767 MAC_STATUS_CFG_CHANGED |
5768 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
5769 break;
5770 }
5771
5772 mac_status = tr32(MAC_STATUS);
5773 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
5774 current_link_up = false;
5775 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
5776 tp->serdes_counter == 0) {
5777 tw32_f(MAC_MODE, (tp->mac_mode |
5778 MAC_MODE_SEND_CONFIGS));
5779 udelay(1);
5780 tw32_f(MAC_MODE, tp->mac_mode);
5781 }
5782 }
5783
5784 if (current_link_up) {
5785 tp->link_config.active_speed = SPEED_1000;
5786 tp->link_config.active_duplex = DUPLEX_FULL;
5787 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5788 LED_CTRL_LNKLED_OVERRIDE |
5789 LED_CTRL_1000MBPS_ON));
5790 } else {
5791 tp->link_config.active_speed = SPEED_UNKNOWN;
5792 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
5793 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5794 LED_CTRL_LNKLED_OVERRIDE |
5795 LED_CTRL_TRAFFIC_OVERRIDE));
5796 }
5797
5798 if (!tg3_test_and_report_link_chg(tp, current_link_up)) {
5799 u32 now_pause_cfg = tp->link_config.active_flowctrl;
5800 if (orig_pause_cfg != now_pause_cfg ||
5801 orig_active_speed != tp->link_config.active_speed ||
5802 orig_active_duplex != tp->link_config.active_duplex)
5803 tg3_link_report(tp);
5804 }
5805
5806 return 0;
5807 }
5808
5809 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, bool force_reset)
5810 {
5811 int err = 0;
5812 u32 bmsr, bmcr;
5813 u32 current_speed = SPEED_UNKNOWN;
5814 u8 current_duplex = DUPLEX_UNKNOWN;
5815 bool current_link_up = false;
5816 u32 local_adv, remote_adv, sgsr;
5817
5818 if ((tg3_asic_rev(tp) == ASIC_REV_5719 ||
5819 tg3_asic_rev(tp) == ASIC_REV_5720) &&
5820 !tg3_readphy(tp, SERDES_TG3_1000X_STATUS, &sgsr) &&
5821 (sgsr & SERDES_TG3_SGMII_MODE)) {
5822
5823 if (force_reset)
5824 tg3_phy_reset(tp);
5825
5826 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
5827
5828 if (!(sgsr & SERDES_TG3_LINK_UP)) {
5829 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5830 } else {
5831 current_link_up = true;
5832 if (sgsr & SERDES_TG3_SPEED_1000) {
5833 current_speed = SPEED_1000;
5834 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5835 } else if (sgsr & SERDES_TG3_SPEED_100) {
5836 current_speed = SPEED_100;
5837 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5838 } else {
5839 current_speed = SPEED_10;
5840 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5841 }
5842
5843 if (sgsr & SERDES_TG3_FULL_DUPLEX)
5844 current_duplex = DUPLEX_FULL;
5845 else
5846 current_duplex = DUPLEX_HALF;
5847 }
5848
5849 tw32_f(MAC_MODE, tp->mac_mode);
5850 udelay(40);
5851
5852 tg3_clear_mac_status(tp);
5853
5854 goto fiber_setup_done;
5855 }
5856
5857 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5858 tw32_f(MAC_MODE, tp->mac_mode);
5859 udelay(40);
5860
5861 tg3_clear_mac_status(tp);
5862
5863 if (force_reset)
5864 tg3_phy_reset(tp);
5865
5866 tp->link_config.rmt_adv = 0;
5867
5868 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5869 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5870 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5871 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5872 bmsr |= BMSR_LSTATUS;
5873 else
5874 bmsr &= ~BMSR_LSTATUS;
5875 }
5876
5877 err |= tg3_readphy(tp, MII_BMCR, &bmcr);
5878
5879 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
5880 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5881 /* do nothing, just check for link up at the end */
5882 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5883 u32 adv, newadv;
5884
5885 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5886 newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
5887 ADVERTISE_1000XPAUSE |
5888 ADVERTISE_1000XPSE_ASYM |
5889 ADVERTISE_SLCT);
5890
5891 newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5892 newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
5893
5894 if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
5895 tg3_writephy(tp, MII_ADVERTISE, newadv);
5896 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
5897 tg3_writephy(tp, MII_BMCR, bmcr);
5898
5899 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5900 tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
5901 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5902
5903 return err;
5904 }
5905 } else {
5906 u32 new_bmcr;
5907
5908 bmcr &= ~BMCR_SPEED1000;
5909 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
5910
5911 if (tp->link_config.duplex == DUPLEX_FULL)
5912 new_bmcr |= BMCR_FULLDPLX;
5913
5914 if (new_bmcr != bmcr) {
5915 /* BMCR_SPEED1000 is a reserved bit that needs
5916 * to be set on write.
5917 */
5918 new_bmcr |= BMCR_SPEED1000;
5919
5920 /* Force a linkdown */
5921 if (tp->link_up) {
5922 u32 adv;
5923
5924 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5925 adv &= ~(ADVERTISE_1000XFULL |
5926 ADVERTISE_1000XHALF |
5927 ADVERTISE_SLCT);
5928 tg3_writephy(tp, MII_ADVERTISE, adv);
5929 tg3_writephy(tp, MII_BMCR, bmcr |
5930 BMCR_ANRESTART |
5931 BMCR_ANENABLE);
5932 udelay(10);
5933 tg3_carrier_off(tp);
5934 }
5935 tg3_writephy(tp, MII_BMCR, new_bmcr);
5936 bmcr = new_bmcr;
5937 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5938 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5939 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5940 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5941 bmsr |= BMSR_LSTATUS;
5942 else
5943 bmsr &= ~BMSR_LSTATUS;
5944 }
5945 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5946 }
5947 }
5948
5949 if (bmsr & BMSR_LSTATUS) {
5950 current_speed = SPEED_1000;
5951 current_link_up = true;
5952 if (bmcr & BMCR_FULLDPLX)
5953 current_duplex = DUPLEX_FULL;
5954 else
5955 current_duplex = DUPLEX_HALF;
5956
5957 local_adv = 0;
5958 remote_adv = 0;
5959
5960 if (bmcr & BMCR_ANENABLE) {
5961 u32 common;
5962
5963 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
5964 err |= tg3_readphy(tp, MII_LPA, &remote_adv);
5965 common = local_adv & remote_adv;
5966 if (common & (ADVERTISE_1000XHALF |
5967 ADVERTISE_1000XFULL)) {
5968 if (common & ADVERTISE_1000XFULL)
5969 current_duplex = DUPLEX_FULL;
5970 else
5971 current_duplex = DUPLEX_HALF;
5972
5973 tp->link_config.rmt_adv =
5974 mii_adv_to_ethtool_adv_x(remote_adv);
5975 } else if (!tg3_flag(tp, 5780_CLASS)) {
5976 /* Link is up via parallel detect */
5977 } else {
5978 current_link_up = false;
5979 }
5980 }
5981 }
5982
5983 fiber_setup_done:
5984 if (current_link_up && current_duplex == DUPLEX_FULL)
5985 tg3_setup_flow_control(tp, local_adv, remote_adv);
5986
5987 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5988 if (tp->link_config.active_duplex == DUPLEX_HALF)
5989 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5990
5991 tw32_f(MAC_MODE, tp->mac_mode);
5992 udelay(40);
5993
5994 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5995
5996 tp->link_config.active_speed = current_speed;
5997 tp->link_config.active_duplex = current_duplex;
5998
5999 tg3_test_and_report_link_chg(tp, current_link_up);
6000 return err;
6001 }
6002
6003 static void tg3_serdes_parallel_detect(struct tg3 *tp)
6004 {
6005 if (tp->serdes_counter) {
6006 /* Give autoneg time to complete. */
6007 tp->serdes_counter--;
6008 return;
6009 }
6010
6011 if (!tp->link_up &&
6012 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
6013 u32 bmcr;
6014
6015 tg3_readphy(tp, MII_BMCR, &bmcr);
6016 if (bmcr & BMCR_ANENABLE) {
6017 u32 phy1, phy2;
6018
6019 /* Select shadow register 0x1f */
6020 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
6021 tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
6022
6023 /* Select expansion interrupt status register */
6024 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
6025 MII_TG3_DSP_EXP1_INT_STAT);
6026 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6027 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6028
6029 if ((phy1 & 0x10) && !(phy2 & 0x20)) {
6030 /* We have signal detect but are not receiving
6031 * config code words, so the link is up by parallel
6032 * detection.
6033 */
6034
6035 bmcr &= ~BMCR_ANENABLE;
6036 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
6037 tg3_writephy(tp, MII_BMCR, bmcr);
6038 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
6039 }
6040 }
6041 } else if (tp->link_up &&
6042 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
6043 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
6044 u32 phy2;
6045
6046 /* Select expansion interrupt status register */
6047 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
6048 MII_TG3_DSP_EXP1_INT_STAT);
6049 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6050 if (phy2 & 0x20) {
6051 u32 bmcr;
6052
6053 /* Config code words received, turn on autoneg. */
6054 tg3_readphy(tp, MII_BMCR, &bmcr);
6055 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
6056
6057 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
6058
6059 }
6060 }
6061 }
6062
6063 static int tg3_setup_phy(struct tg3 *tp, bool force_reset)
6064 {
6065 u32 val;
6066 int err;
6067
6068 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
6069 err = tg3_setup_fiber_phy(tp, force_reset);
6070 else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
6071 err = tg3_setup_fiber_mii_phy(tp, force_reset);
6072 else
6073 err = tg3_setup_copper_phy(tp, force_reset);
6074
6075 if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
6076 u32 scale;
6077
6078 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
6079 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
6080 scale = 65;
6081 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
6082 scale = 6;
6083 else
6084 scale = 12;
6085
6086 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
6087 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
6088 tw32(GRC_MISC_CFG, val);
6089 }
6090
6091 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
6092 (6 << TX_LENGTHS_IPG_SHIFT);
6093 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
6094 tg3_asic_rev(tp) == ASIC_REV_5762)
6095 val |= tr32(MAC_TX_LENGTHS) &
6096 (TX_LENGTHS_JMB_FRM_LEN_MSK |
6097 TX_LENGTHS_CNT_DWN_VAL_MSK);
6098
6099 if (tp->link_config.active_speed == SPEED_1000 &&
6100 tp->link_config.active_duplex == DUPLEX_HALF)
6101 tw32(MAC_TX_LENGTHS, val |
6102 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
6103 else
6104 tw32(MAC_TX_LENGTHS, val |
6105 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
6106
6107 if (!tg3_flag(tp, 5705_PLUS)) {
6108 if (tp->link_up) {
6109 tw32(HOSTCC_STAT_COAL_TICKS,
6110 tp->coal.stats_block_coalesce_usecs);
6111 } else {
6112 tw32(HOSTCC_STAT_COAL_TICKS, 0);
6113 }
6114 }
6115
6116 if (tg3_flag(tp, ASPM_WORKAROUND)) {
6117 val = tr32(PCIE_PWR_MGMT_THRESH);
6118 if (!tp->link_up)
6119 val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
6120 tp->pwrmgmt_thresh;
6121 else
6122 val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
6123 tw32(PCIE_PWR_MGMT_THRESH, val);
6124 }
6125
6126 return err;
6127 }
6128
6129 /* tp->lock must be held */
6130 static u64 tg3_refclk_read(struct tg3 *tp, struct ptp_system_timestamp *sts)
6131 {
6132 u64 stamp;
6133
6134 ptp_read_system_prets(sts);
6135 stamp = tr32(TG3_EAV_REF_CLCK_LSB);
6136 ptp_read_system_postts(sts);
6137 stamp |= (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;
6138
6139 return stamp;
6140 }
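
/* The ptp_read_system_prets()/ptp_read_system_postts() pair in
 * tg3_refclk_read() deliberately brackets only the LSB read --
 * presumably the access on which the counter value is sampled -- so
 * PTP_SYS_OFFSET_EXTENDED style measurements attribute their latency
 * to that single register access.
 */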
6141
6142 /* tp->lock must be held */
6143 static void tg3_refclk_write(struct tg3 *tp, u64 newval)
6144 {
6145 u32 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
6146
6147 tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_STOP);
6148 tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff);
6149 tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32);
6150 tw32_f(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_RESUME);
6151 }
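
/* The STOP/RESUME bracketing above keeps the 64-bit load coherent: the
 * counter is halted, both 32-bit halves are written, and only then is
 * counting resumed, so the clock never runs while the two halves are
 * inconsistent.
 */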
6152
6153 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
6154 static inline void tg3_full_unlock(struct tg3 *tp);
6155 static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
6156 {
6157 struct tg3 *tp = netdev_priv(dev);
6158
6159 info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
6160 SOF_TIMESTAMPING_RX_SOFTWARE |
6161 SOF_TIMESTAMPING_SOFTWARE;
6162
6163 if (tg3_flag(tp, PTP_CAPABLE)) {
6164 info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
6165 SOF_TIMESTAMPING_RX_HARDWARE |
6166 SOF_TIMESTAMPING_RAW_HARDWARE;
6167 }
6168
6169 if (tp->ptp_clock)
6170 info->phc_index = ptp_clock_index(tp->ptp_clock);
6171 else
6172 info->phc_index = -1;
6173
6174 info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
6175
6176 info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
6177 (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
6178 (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
6179 (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
6180 return 0;
6181 }
6182
6183 static int tg3_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
6184 {
6185 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6186 u64 correction;
6187 bool neg_adj;
6188
6189 /* Frequency adjustment is performed using hardware with a 24 bit
6190 * accumulator and a programmable correction value. On each clock
6191 * cycle, the correction value gets added to the accumulator and when it
6192 * overflows, the time counter is incremented/decremented.
6193 */
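/* Worked example (illustrative numbers, not from the datasheet):
 * scaled_ppm carries ppm scaled by 2^16, so a request of +1 ppm is
 * scaled_ppm = 65536, and diff_by_scaled_ppm(1 << 24, ...) computes
 * correction = (2^24 * 65536) / (10^6 * 2^16) ~= 16, i.e. the
 * accumulator overflows and the counter gains one extra tick roughly
 * every 2^24 / 16 = 2^20 clocks.
 */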
6194 neg_adj = diff_by_scaled_ppm(1 << 24, scaled_ppm, &correction);
6195
6196 tg3_full_lock(tp, 0);
6197
6198 if (correction)
6199 tw32(TG3_EAV_REF_CLK_CORRECT_CTL,
6200 TG3_EAV_REF_CLK_CORRECT_EN |
6201 (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) |
6202 ((u32)correction & TG3_EAV_REF_CLK_CORRECT_MASK));
6203 else
6204 tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0);
6205
6206 tg3_full_unlock(tp);
6207
6208 return 0;
6209 }
6210
6211 static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
6212 {
6213 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6214
6215 tg3_full_lock(tp, 0);
6216 tp->ptp_adjust += delta;
6217 tg3_full_unlock(tp);
6218
6219 return 0;
6220 }
6221
6222 static int tg3_ptp_gettimex(struct ptp_clock_info *ptp, struct timespec64 *ts,
6223 struct ptp_system_timestamp *sts)
6224 {
6225 u64 ns;
6226 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6227
6228 tg3_full_lock(tp, 0);
6229 ns = tg3_refclk_read(tp, sts);
6230 ns += tp->ptp_adjust;
6231 tg3_full_unlock(tp);
6232
6233 *ts = ns_to_timespec64(ns);
6234
6235 return 0;
6236 }
6237
6238 static int tg3_ptp_settime(struct ptp_clock_info *ptp,
6239 const struct timespec64 *ts)
6240 {
6241 u64 ns;
6242 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6243
6244 ns = timespec64_to_ns(ts);
6245
6246 tg3_full_lock(tp, 0);
6247 tg3_refclk_write(tp, ns);
6248 tp->ptp_adjust = 0;
6249 tg3_full_unlock(tp);
6250
6251 return 0;
6252 }
6253
6254 static int tg3_ptp_enable(struct ptp_clock_info *ptp,
6255 struct ptp_clock_request *rq, int on)
6256 {
6257 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6258 u32 clock_ctl;
6259 int rval = 0;
6260
6261 switch (rq->type) {
6262 case PTP_CLK_REQ_PEROUT:
6263 /* Reject requests with unsupported flags */
6264 if (rq->perout.flags)
6265 return -EOPNOTSUPP;
6266
6267 if (rq->perout.index != 0)
6268 return -EINVAL;
6269
6270 tg3_full_lock(tp, 0);
6271 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
6272 clock_ctl &= ~TG3_EAV_CTL_TSYNC_GPIO_MASK;
6273
6274 if (on) {
6275 u64 nsec;
6276
6277 nsec = rq->perout.start.sec * 1000000000ULL +
6278 rq->perout.start.nsec;
6279
6280 if (rq->perout.period.sec || rq->perout.period.nsec) {
6281 netdev_warn(tp->dev,
6282 "Device supports only a one-shot timesync output, period must be 0\n");
6283 rval = -EINVAL;
6284 goto err_out;
6285 }
6286
6287 if (nsec & (1ULL << 63)) {
6288 netdev_warn(tp->dev,
6289 "Start value (nsec) is over limit. Maximum size of start is only 63 bits\n");
6290 rval = -EINVAL;
6291 goto err_out;
6292 }
6293
6294 tw32(TG3_EAV_WATCHDOG0_LSB, (nsec & 0xffffffff));
6295 tw32(TG3_EAV_WATCHDOG0_MSB,
6296 TG3_EAV_WATCHDOG0_EN |
6297 ((nsec >> 32) & TG3_EAV_WATCHDOG_MSB_MASK));
6298
6299 tw32(TG3_EAV_REF_CLCK_CTL,
6300 clock_ctl | TG3_EAV_CTL_TSYNC_WDOG0);
6301 } else {
6302 tw32(TG3_EAV_WATCHDOG0_MSB, 0);
6303 tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl);
6304 }
6305
6306 err_out:
6307 tg3_full_unlock(tp);
6308 return rval;
6309
6310 default:
6311 break;
6312 }
6313
6314 return -EOPNOTSUPP;
6315 }
6316
6317 static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock,
6318 struct skb_shared_hwtstamps *timestamp)
6319 {
6320 memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps));
6321 timestamp->hwtstamp = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) +
6322 tp->ptp_adjust);
6323 }
6324
6325 static void tg3_read_tx_tstamp(struct tg3 *tp, u64 *hwclock)
6326 {
6327 *hwclock = tr32(TG3_TX_TSTAMP_LSB);
6328 *hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32;
6329 }
6330
6331 static long tg3_ptp_ts_aux_work(struct ptp_clock_info *ptp)
6332 {
6333 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6334 struct skb_shared_hwtstamps timestamp;
6335 u64 hwclock;
6336
6337 if (tp->ptp_txts_retrycnt > 2)
6338 goto done;
6339
6340 tg3_read_tx_tstamp(tp, &hwclock);
6341
6342 if (hwclock != tp->pre_tx_ts) {
6343 tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);
6344 skb_tstamp_tx(tp->tx_tstamp_skb, &timestamp);
6345 goto done;
6346 }
6347 tp->ptp_txts_retrycnt++;
6348 return HZ / 10;
6349 done:
6350 dev_consume_skb_any(tp->tx_tstamp_skb);
6351 tp->tx_tstamp_skb = NULL;
6352 tp->ptp_txts_retrycnt = 0;
6353 tp->pre_tx_ts = 0;
6354 return -1;
6355 }
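
/* A PTP aux worker's return value is the delay in jiffies before it
 * runs again, with a negative value meaning "do not reschedule", so the
 * function above re-polls for a late TX timestamp roughly every 100 ms
 * (HZ / 10) and gives up after a few unsuccessful reads.
 */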
6356
6357 static const struct ptp_clock_info tg3_ptp_caps = {
6358 .owner = THIS_MODULE,
6359 .name = "tg3 clock",
6360 .max_adj = 250000000,
6361 .n_alarm = 0,
6362 .n_ext_ts = 0,
6363 .n_per_out = 1,
6364 .n_pins = 0,
6365 .pps = 0,
6366 .adjfine = tg3_ptp_adjfine,
6367 .adjtime = tg3_ptp_adjtime,
6368 .do_aux_work = tg3_ptp_ts_aux_work,
6369 .gettimex64 = tg3_ptp_gettimex,
6370 .settime64 = tg3_ptp_settime,
6371 .enable = tg3_ptp_enable,
6372 };
6373
6374 /* tp->lock must be held */
6375 static void tg3_ptp_init(struct tg3 *tp)
6376 {
6377 if (!tg3_flag(tp, PTP_CAPABLE))
6378 return;
6379
6380 /* Initialize the hardware clock to the system time. */
6381 tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()));
6382 tp->ptp_adjust = 0;
6383 tp->ptp_info = tg3_ptp_caps;
6384 }
6385
6386 /* tp->lock must be held */
6387 static void tg3_ptp_resume(struct tg3 *tp)
6388 {
6389 if (!tg3_flag(tp, PTP_CAPABLE))
6390 return;
6391
6392 tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust);
6393 tp->ptp_adjust = 0;
6394 }
6395
6396 static void tg3_ptp_fini(struct tg3 *tp)
6397 {
6398 if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock)
6399 return;
6400
6401 ptp_clock_unregister(tp->ptp_clock);
6402 tp->ptp_clock = NULL;
6403 tp->ptp_adjust = 0;
6404 dev_consume_skb_any(tp->tx_tstamp_skb);
6405 tp->tx_tstamp_skb = NULL;
6406 }
6407
6408 static inline int tg3_irq_sync(struct tg3 *tp)
6409 {
6410 return tp->irq_sync;
6411 }
6412
6413 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
6414 {
6415 int i;
6416
6417 dst = (u32 *)((u8 *)dst + off);
6418 for (i = 0; i < len; i += sizeof(u32))
6419 *dst++ = tr32(off + i);
6420 }
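
/* Note that dst above is advanced by the register offset itself, so a
 * dump buffer filled by repeated calls mirrors the chip's register map:
 * regs[off / sizeof(u32)] always holds the register at offset off.
 */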
6421
6422 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
6423 {
6424 tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
6425 tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
6426 tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
6427 tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
6428 tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
6429 tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
6430 tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
6431 tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
6432 tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
6433 tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
6434 tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
6435 tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
6436 tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
6437 tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
6438 tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
6439 tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
6440 tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
6441 tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
6442 tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
6443
6444 if (tg3_flag(tp, SUPPORT_MSIX))
6445 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
6446
6447 tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
6448 tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
6449 tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
6450 tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
6451 tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
6452 tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
6453 tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
6454 tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
6455
6456 if (!tg3_flag(tp, 5705_PLUS)) {
6457 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
6458 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
6459 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
6460 }
6461
6462 tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
6463 tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
6464 tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
6465 tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
6466 tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
6467
6468 if (tg3_flag(tp, NVRAM))
6469 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
6470 }
6471
6472 static void tg3_dump_state(struct tg3 *tp)
6473 {
6474 int i;
6475 u32 *regs;
6476
6477 /* If it is a PCI error, all registers will be 0xffff;
6478 * we don't dump them out, just report the error and return.
6479 */
6480 if (tp->pdev->error_state != pci_channel_io_normal) {
6481 netdev_err(tp->dev, "PCI channel ERROR!\n");
6482 return;
6483 }
6484
6485 regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
6486 if (!regs)
6487 return;
6488
6489 if (tg3_flag(tp, PCI_EXPRESS)) {
6490 /* Read up to but not including private PCI registers */
6491 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
6492 regs[i / sizeof(u32)] = tr32(i);
6493 } else
6494 tg3_dump_legacy_regs(tp, regs);
6495
6496 for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
6497 if (!regs[i + 0] && !regs[i + 1] &&
6498 !regs[i + 2] && !regs[i + 3])
6499 continue;
6500
6501 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
6502 i * 4,
6503 regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
6504 }
6505
6506 kfree(regs);
6507
6508 for (i = 0; i < tp->irq_cnt; i++) {
6509 struct tg3_napi *tnapi = &tp->napi[i];
6510
6511 /* SW status block */
6512 netdev_err(tp->dev,
6513 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
6514 i,
6515 tnapi->hw_status->status,
6516 tnapi->hw_status->status_tag,
6517 tnapi->hw_status->rx_jumbo_consumer,
6518 tnapi->hw_status->rx_consumer,
6519 tnapi->hw_status->rx_mini_consumer,
6520 tnapi->hw_status->idx[0].rx_producer,
6521 tnapi->hw_status->idx[0].tx_consumer);
6522
6523 netdev_err(tp->dev,
6524 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
6525 i,
6526 tnapi->last_tag, tnapi->last_irq_tag,
6527 tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
6528 tnapi->rx_rcb_ptr,
6529 tnapi->prodring.rx_std_prod_idx,
6530 tnapi->prodring.rx_std_cons_idx,
6531 tnapi->prodring.rx_jmb_prod_idx,
6532 tnapi->prodring.rx_jmb_cons_idx);
6533 }
6534 }
6535
6536 /* This is called whenever we suspect that the system chipset is re-
6537 * ordering the sequence of MMIO to the tx send mailbox. The symptom
6538 * is bogus tx completions. We try to recover by setting the
6539 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
6540 * in the workqueue.
6541 */
6542 static void tg3_tx_recover(struct tg3 *tp)
6543 {
6544 BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
6545 tp->write32_tx_mbox == tg3_write_indirect_mbox);
6546
6547 netdev_warn(tp->dev,
6548 "The system may be re-ordering memory-mapped I/O "
6549 "cycles to the network device, attempting to recover. "
6550 "Please report the problem to the driver maintainer "
6551 "and include system chipset information.\n");
6552
6553 tg3_flag_set(tp, TX_RECOVERY_PENDING);
6554 }
6555
6556 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
6557 {
6558 /* Tell compiler to fetch tx indices from memory. */
6559 barrier();
6560 return tnapi->tx_pending -
6561 ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
6562 }
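
/* Worked example of the wrap-safe arithmetic above, assuming the usual
 * 512-entry TX ring: with tx_pending = 512, tx_prod = 3 and
 * tx_cons = 508 (the producer has wrapped), (3 - 508) & 511 = 7
 * descriptors are in flight, leaving 512 - 7 = 505 available. The
 * unsigned subtraction plus the power-of-two mask handles the wrap
 * without any conditionals.
 */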
6563
6564 /* Tigon3 never reports partial packet sends. So we do not
6565 * need special logic to handle SKBs that have not had all
6566 * of their frags sent yet, like SunGEM does.
6567 */
6568 static void tg3_tx(struct tg3_napi *tnapi)
6569 {
6570 struct tg3 *tp = tnapi->tp;
6571 u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
6572 u32 sw_idx = tnapi->tx_cons;
6573 struct netdev_queue *txq;
6574 int index = tnapi - tp->napi;
6575 unsigned int pkts_compl = 0, bytes_compl = 0;
6576
6577 if (tg3_flag(tp, ENABLE_TSS))
6578 index--;
6579
6580 txq = netdev_get_tx_queue(tp->dev, index);
6581
6582 while (sw_idx != hw_idx) {
6583 struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
6584 bool complete_skb_later = false;
6585 struct sk_buff *skb = ri->skb;
6586 int i, tx_bug = 0;
6587
6588 if (unlikely(skb == NULL)) {
6589 tg3_tx_recover(tp);
6590 return;
6591 }
6592
6593 if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) {
6594 struct skb_shared_hwtstamps timestamp;
6595 u64 hwclock;
6596
6597 tg3_read_tx_tstamp(tp, &hwclock);
6598 if (hwclock != tp->pre_tx_ts) {
6599 tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);
6600 skb_tstamp_tx(skb, &timestamp);
6601 tp->pre_tx_ts = 0;
6602 } else {
6603 tp->tx_tstamp_skb = skb;
6604 complete_skb_later = true;
6605 }
6606 }
6607
6608 dma_unmap_single(&tp->pdev->dev, dma_unmap_addr(ri, mapping),
6609 skb_headlen(skb), DMA_TO_DEVICE);
6610
6611 ri->skb = NULL;
6612
6613 while (ri->fragmented) {
6614 ri->fragmented = false;
6615 sw_idx = NEXT_TX(sw_idx);
6616 ri = &tnapi->tx_buffers[sw_idx];
6617 }
6618
6619 sw_idx = NEXT_TX(sw_idx);
6620
6621 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
6622 ri = &tnapi->tx_buffers[sw_idx];
6623 if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
6624 tx_bug = 1;
6625
6626 dma_unmap_page(&tp->pdev->dev,
6627 dma_unmap_addr(ri, mapping),
6628 skb_frag_size(&skb_shinfo(skb)->frags[i]),
6629 DMA_TO_DEVICE);
6630
6631 while (ri->fragmented) {
6632 ri->fragmented = false;
6633 sw_idx = NEXT_TX(sw_idx);
6634 ri = &tnapi->tx_buffers[sw_idx];
6635 }
6636
6637 sw_idx = NEXT_TX(sw_idx);
6638 }
6639
6640 pkts_compl++;
6641 bytes_compl += skb->len;
6642
6643 if (!complete_skb_later)
6644 dev_consume_skb_any(skb);
6645 else
6646 ptp_schedule_worker(tp->ptp_clock, 0);
6647
6648 if (unlikely(tx_bug)) {
6649 tg3_tx_recover(tp);
6650 return;
6651 }
6652 }
6653
6654 netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
6655
6656 tnapi->tx_cons = sw_idx;
6657
6658 /* Need to make the tx_cons update visible to __tg3_start_xmit()
6659 * before checking for netif_queue_stopped(). Without the
6660 * memory barrier, there is a small possibility that __tg3_start_xmit()
6661 * will miss it and cause the queue to be stopped forever.
6662 */
6663 smp_mb();
6664
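/* The wakeup threshold below adds hysteresis: the queue is restarted
 * only once a comfortable number of descriptors is free, and the state
 * is re-checked under the tx queue lock to close the race with a
 * concurrent xmit path stopping the queue.
 */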
6665 if (unlikely(netif_tx_queue_stopped(txq) &&
6666 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
6667 __netif_tx_lock(txq, smp_processor_id());
6668 if (netif_tx_queue_stopped(txq) &&
6669 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
6670 netif_tx_wake_queue(txq);
6671 __netif_tx_unlock(txq);
6672 }
6673 }
6674
6675 static void tg3_frag_free(bool is_frag, void *data)
6676 {
6677 if (is_frag)
6678 skb_free_frag(data);
6679 else
6680 kfree(data);
6681 }
6682
6683 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
6684 {
6685 unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
6686 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6687
6688 if (!ri->data)
6689 return;
6690
6691 dma_unmap_single(&tp->pdev->dev, dma_unmap_addr(ri, mapping), map_sz,
6692 DMA_FROM_DEVICE);
6693 tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
6694 ri->data = NULL;
6695 }
6696
6697
6698 /* Returns size of skb allocated or < 0 on error.
6699 *
6700 * We only need to fill in the address because the other members
6701 * of the RX descriptor are invariant, see tg3_init_rings.
6702 *
6703 * Note the purposeful asymmetry of cpu vs. chip accesses. For
6704 * posting buffers we only dirty the first cache line of the RX
6705 * descriptor (containing the address). Whereas for the RX status
6706 * buffers the cpu only reads the last cacheline of the RX descriptor
6707 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
6708 */
6709 static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
6710 u32 opaque_key, u32 dest_idx_unmasked,
6711 unsigned int *frag_size)
6712 {
6713 struct tg3_rx_buffer_desc *desc;
6714 struct ring_info *map;
6715 u8 *data;
6716 dma_addr_t mapping;
6717 int skb_size, data_size, dest_idx;
6718
6719 switch (opaque_key) {
6720 case RXD_OPAQUE_RING_STD:
6721 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6722 desc = &tpr->rx_std[dest_idx];
6723 map = &tpr->rx_std_buffers[dest_idx];
6724 data_size = tp->rx_pkt_map_sz;
6725 break;
6726
6727 case RXD_OPAQUE_RING_JUMBO:
6728 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6729 desc = &tpr->rx_jmb[dest_idx].std;
6730 map = &tpr->rx_jmb_buffers[dest_idx];
6731 data_size = TG3_RX_JMB_MAP_SZ;
6732 break;
6733
6734 default:
6735 return -EINVAL;
6736 }
6737
6738 /* Do not overwrite any of the map or rp information
6739 * until we are sure we can commit to a new buffer.
6740 *
6741 * Callers depend upon this behavior and assume that
6742 * we leave everything unchanged if we fail.
6743 */
6744 skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
6745 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
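/* Illustrative sizing (numbers are examples, not taken from tg3.h): a
 * standard ~1536-byte rx mapping plus offset and shared_info stays
 * under a 4K page and is served by napi_alloc_frag(), while a jumbo
 * mapping of several KB can exceed PAGE_SIZE and falls back to
 * kmalloc() below.
 */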
6746 if (skb_size <= PAGE_SIZE) {
6747 data = napi_alloc_frag(skb_size);
6748 *frag_size = skb_size;
6749 } else {
6750 data = kmalloc(skb_size, GFP_ATOMIC);
6751 *frag_size = 0;
6752 }
6753 if (!data)
6754 return -ENOMEM;
6755
6756 mapping = dma_map_single(&tp->pdev->dev, data + TG3_RX_OFFSET(tp),
6757 data_size, DMA_FROM_DEVICE);
6758 if (unlikely(dma_mapping_error(&tp->pdev->dev, mapping))) {
6759 tg3_frag_free(skb_size <= PAGE_SIZE, data);
6760 return -EIO;
6761 }
6762
6763 map->data = data;
6764 dma_unmap_addr_set(map, mapping, mapping);
6765
6766 desc->addr_hi = ((u64)mapping >> 32);
6767 desc->addr_lo = ((u64)mapping & 0xffffffff);
6768
6769 return data_size;
6770 }
6771
6772 /* We only need to copy the address over because the other
6773 * members of the RX descriptor are invariant. See notes above
6774 * tg3_alloc_rx_data for full details.
6775 */
6776 static void tg3_recycle_rx(struct tg3_napi *tnapi,
6777 struct tg3_rx_prodring_set *dpr,
6778 u32 opaque_key, int src_idx,
6779 u32 dest_idx_unmasked)
6780 {
6781 struct tg3 *tp = tnapi->tp;
6782 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
6783 struct ring_info *src_map, *dest_map;
6784 struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
6785 int dest_idx;
6786
6787 switch (opaque_key) {
6788 case RXD_OPAQUE_RING_STD:
6789 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6790 dest_desc = &dpr->rx_std[dest_idx];
6791 dest_map = &dpr->rx_std_buffers[dest_idx];
6792 src_desc = &spr->rx_std[src_idx];
6793 src_map = &spr->rx_std_buffers[src_idx];
6794 break;
6795
6796 case RXD_OPAQUE_RING_JUMBO:
6797 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6798 dest_desc = &dpr->rx_jmb[dest_idx].std;
6799 dest_map = &dpr->rx_jmb_buffers[dest_idx];
6800 src_desc = &spr->rx_jmb[src_idx].std;
6801 src_map = &spr->rx_jmb_buffers[src_idx];
6802 break;
6803
6804 default:
6805 return;
6806 }
6807
6808 dest_map->data = src_map->data;
6809 dma_unmap_addr_set(dest_map, mapping,
6810 dma_unmap_addr(src_map, mapping));
6811 dest_desc->addr_hi = src_desc->addr_hi;
6812 dest_desc->addr_lo = src_desc->addr_lo;
6813
6814 /* Ensure that the update to the skb happens after the physical
6815 * addresses have been transferred to the new BD location.
6816 */
6817 smp_wmb();
6818
6819 src_map->data = NULL;
6820 }
6821
6822 /* The RX ring scheme is composed of multiple rings which post fresh
6823 * buffers to the chip, and one special ring the chip uses to report
6824 * status back to the host.
6825 *
6826 * The special ring reports the status of received packets to the
6827 * host. The chip does not write into the original descriptor the
6828 * RX buffer was obtained from. The chip simply takes the original
6829 * descriptor as provided by the host, updates the status and length
6830 * field, then writes this into the next status ring entry.
6831 *
6832 * Each ring the host uses to post buffers to the chip is described
6833 * by a TG3_BDINFO entry in the chip's SRAM area. When a packet arrives,
6834 * it is first placed into the on-chip ram. When the packet's length
6835 * is known, it walks down the TG3_BDINFO entries to select the ring.
6836 * Each TG3_BDINFO specifies a MAXLEN field, and the first TG3_BDINFO
6837 * whose MAXLEN covers the new packet's length is chosen.
6838 *
6839 * The "separate ring for rx status" scheme may sound odd, but it makes
6840 * sense from a cache coherency perspective. If only the host writes
6841 * to the buffer post rings, and only the chip writes to the rx status
6842 * rings, then cache lines never move beyond shared-modified state.
6843 * If both the host and chip were to write into the same ring, cache line
6844 * eviction could occur since both entities want it in an exclusive state.
6845 */
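
/* Concretely, one packet's path looks like this (a sketch): the host
 * posts buffer N on the standard ring; a frame arrives and is DMA'd
 * into that buffer; the chip then writes a completion -- length, flags
 * and buffer N's opaque cookie -- into the next slot of the return
 * ring, and the NAPI poll below picks it up there without ever
 * re-reading the standard ring entry.
 */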
6846 static int tg3_rx(struct tg3_napi *tnapi, int budget)
6847 {
6848 struct tg3 *tp = tnapi->tp;
6849 u32 work_mask, rx_std_posted = 0;
6850 u32 std_prod_idx, jmb_prod_idx;
6851 u32 sw_idx = tnapi->rx_rcb_ptr;
6852 u16 hw_idx;
6853 int received;
6854 struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
6855
6856 hw_idx = *(tnapi->rx_rcb_prod_idx);
6857 /*
6858 * We need to order the read of hw_idx and the read of
6859 * the opaque cookie.
6860 */
6861 rmb();
6862 work_mask = 0;
6863 received = 0;
6864 std_prod_idx = tpr->rx_std_prod_idx;
6865 jmb_prod_idx = tpr->rx_jmb_prod_idx;
6866 while (sw_idx != hw_idx && budget > 0) {
6867 struct ring_info *ri;
6868 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
6869 unsigned int len;
6870 struct sk_buff *skb;
6871 dma_addr_t dma_addr;
6872 u32 opaque_key, desc_idx, *post_ptr;
6873 u8 *data;
6874 u64 tstamp = 0;
6875
6876 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
6877 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
6878 if (opaque_key == RXD_OPAQUE_RING_STD) {
6879 ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
6880 dma_addr = dma_unmap_addr(ri, mapping);
6881 data = ri->data;
6882 post_ptr = &std_prod_idx;
6883 rx_std_posted++;
6884 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
6885 ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
6886 dma_addr = dma_unmap_addr(ri, mapping);
6887 data = ri->data;
6888 post_ptr = &jmb_prod_idx;
6889 } else
6890 goto next_pkt_nopost;
6891
6892 work_mask |= opaque_key;
6893
6894 if (desc->err_vlan & RXD_ERR_MASK) {
6895 drop_it:
6896 tg3_recycle_rx(tnapi, tpr, opaque_key,
6897 desc_idx, *post_ptr);
6898 drop_it_no_recycle:
6899 /* Other statistics kept track of by card. */
6900 tnapi->rx_dropped++;
6901 goto next_pkt;
6902 }
6903
6904 prefetch(data + TG3_RX_OFFSET(tp));
6905 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
6906 ETH_FCS_LEN;
6907
6908 if ((desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6909 RXD_FLAG_PTPSTAT_PTPV1 ||
6910 (desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6911 RXD_FLAG_PTPSTAT_PTPV2) {
6912 tstamp = tr32(TG3_RX_TSTAMP_LSB);
6913 tstamp |= (u64)tr32(TG3_RX_TSTAMP_MSB) << 32;
6914 }
6915
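/* Two delivery paths: frames above the copy threshold hand their DMA
 * buffer to the stack (build_skb) and a fresh buffer is allocated for
 * the ring, while small frames are copied into a new skb so the
 * original buffer can be recycled in place.
 */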
6916 if (len > TG3_RX_COPY_THRESH(tp)) {
6917 int skb_size;
6918 unsigned int frag_size;
6919
6920 skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
6921 *post_ptr, &frag_size);
6922 if (skb_size < 0)
6923 goto drop_it;
6924
6925 dma_unmap_single(&tp->pdev->dev, dma_addr, skb_size,
6926 DMA_FROM_DEVICE);
6927
6928 /* Ensure that the update to the data happens
6929 * after the usage of the old DMA mapping.
6930 */
6931 smp_wmb();
6932
6933 ri->data = NULL;
6934
6935 if (frag_size)
6936 skb = build_skb(data, frag_size);
6937 else
6938 skb = slab_build_skb(data);
6939 if (!skb) {
6940 tg3_frag_free(frag_size != 0, data);
6941 goto drop_it_no_recycle;
6942 }
6943 skb_reserve(skb, TG3_RX_OFFSET(tp));
6944 } else {
6945 tg3_recycle_rx(tnapi, tpr, opaque_key,
6946 desc_idx, *post_ptr);
6947
6948 skb = netdev_alloc_skb(tp->dev,
6949 len + TG3_RAW_IP_ALIGN);
6950 if (skb == NULL)
6951 goto drop_it_no_recycle;
6952
6953 skb_reserve(skb, TG3_RAW_IP_ALIGN);
6954 dma_sync_single_for_cpu(&tp->pdev->dev, dma_addr, len,
6955 DMA_FROM_DEVICE);
6956 memcpy(skb->data,
6957 data + TG3_RX_OFFSET(tp),
6958 len);
6959 dma_sync_single_for_device(&tp->pdev->dev, dma_addr,
6960 len, DMA_FROM_DEVICE);
6961 }
6962
6963 skb_put(skb, len);
6964 if (tstamp)
6965 tg3_hwclock_to_timestamp(tp, tstamp,
6966 skb_hwtstamps(skb));
6967
6968 if ((tp->dev->features & NETIF_F_RXCSUM) &&
6969 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
6970 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
6971 >> RXD_TCPCSUM_SHIFT) == 0xffff))
6972 skb->ip_summed = CHECKSUM_UNNECESSARY;
6973 else
6974 skb_checksum_none_assert(skb);
6975
6976 skb->protocol = eth_type_trans(skb, tp->dev);
6977
6978 if (len > (tp->dev->mtu + ETH_HLEN) &&
6979 skb->protocol != htons(ETH_P_8021Q) &&
6980 skb->protocol != htons(ETH_P_8021AD)) {
6981 dev_kfree_skb_any(skb);
6982 goto drop_it_no_recycle;
6983 }
6984
6985 if (desc->type_flags & RXD_FLAG_VLAN &&
6986 !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
6987 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
6988 desc->err_vlan & RXD_VLAN_MASK);
6989
6990 napi_gro_receive(&tnapi->napi, skb);
6991
6992 received++;
6993 budget--;
6994
6995 next_pkt:
6996 (*post_ptr)++;
6997
6998 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
6999 tpr->rx_std_prod_idx = std_prod_idx &
7000 tp->rx_std_ring_mask;
7001 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
7002 tpr->rx_std_prod_idx);
7003 work_mask &= ~RXD_OPAQUE_RING_STD;
7004 rx_std_posted = 0;
7005 }
7006 next_pkt_nopost:
7007 sw_idx++;
7008 sw_idx &= tp->rx_ret_ring_mask;
7009
7010 /* Refresh hw_idx to see if there is new work */
7011 if (sw_idx == hw_idx) {
7012 hw_idx = *(tnapi->rx_rcb_prod_idx);
7013 rmb();
7014 }
7015 }
7016
7017 /* ACK the status ring. */
7018 tnapi->rx_rcb_ptr = sw_idx;
7019 tw32_rx_mbox(tnapi->consmbox, sw_idx);
7020
7021 /* Refill RX ring(s). */
7022 if (!tg3_flag(tp, ENABLE_RSS)) {
7023 /* Sync BD data before updating mailbox */
7024 wmb();
7025
7026 if (work_mask & RXD_OPAQUE_RING_STD) {
7027 tpr->rx_std_prod_idx = std_prod_idx &
7028 tp->rx_std_ring_mask;
7029 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
7030 tpr->rx_std_prod_idx);
7031 }
7032 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
7033 tpr->rx_jmb_prod_idx = jmb_prod_idx &
7034 tp->rx_jmb_ring_mask;
7035 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
7036 tpr->rx_jmb_prod_idx);
7037 }
7038 } else if (work_mask) {
7039 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
7040 * updated before the producer indices can be updated.
7041 */
7042 smp_wmb();
7043
7044 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
7045 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
7046
7047 if (tnapi != &tp->napi[1]) {
7048 tp->rx_refill = true;
7049 napi_schedule(&tp->napi[1].napi);
7050 }
7051 }
7052
7053 return received;
7054 }
7055
7056 static void tg3_poll_link(struct tg3 *tp)
7057 {
7058 /* handle link change and other phy events */
7059 if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
7060 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
7061
7062 if (sblk->status & SD_STATUS_LINK_CHG) {
7063 sblk->status = SD_STATUS_UPDATED |
7064 (sblk->status & ~SD_STATUS_LINK_CHG);
7065 spin_lock(&tp->lock);
7066 if (tg3_flag(tp, USE_PHYLIB)) {
7067 tw32_f(MAC_STATUS,
7068 (MAC_STATUS_SYNC_CHANGED |
7069 MAC_STATUS_CFG_CHANGED |
7070 MAC_STATUS_MI_COMPLETION |
7071 MAC_STATUS_LNKSTATE_CHANGED));
7072 udelay(40);
7073 } else
7074 tg3_setup_phy(tp, false);
7075 spin_unlock(&tp->lock);
7076 }
7077 }
7078 }
7079
7080 static int tg3_rx_prodring_xfer(struct tg3 *tp,
7081 struct tg3_rx_prodring_set *dpr,
7082 struct tg3_rx_prodring_set *spr)
7083 {
7084 u32 si, di, cpycnt, src_prod_idx;
7085 int i, err = 0;
7086
7087 while (1) {
7088 src_prod_idx = spr->rx_std_prod_idx;
7089
7090 /* Make sure updates to the rx_std_buffers[] entries and the
7091 * standard producer index are seen in the correct order.
7092 */
7093 smp_rmb();
7094
7095 if (spr->rx_std_cons_idx == src_prod_idx)
7096 break;
7097
7098 if (spr->rx_std_cons_idx < src_prod_idx)
7099 cpycnt = src_prod_idx - spr->rx_std_cons_idx;
7100 else
7101 cpycnt = tp->rx_std_ring_mask + 1 -
7102 spr->rx_std_cons_idx;
7103
7104 cpycnt = min(cpycnt,
7105 tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
7106
7107 si = spr->rx_std_cons_idx;
7108 di = dpr->rx_std_prod_idx;
7109
7110 for (i = di; i < di + cpycnt; i++) {
7111 if (dpr->rx_std_buffers[i].data) {
7112 cpycnt = i - di;
7113 err = -ENOSPC;
7114 break;
7115 }
7116 }
7117
7118 if (!cpycnt)
7119 break;
7120
7121 /* Ensure that updates to the rx_std_buffers ring and the
7122 * shadowed hardware producer ring from tg3_recycle_skb() are
7123 * ordered correctly WRT the skb check above.
7124 */
7125 smp_rmb();
7126
7127 memcpy(&dpr->rx_std_buffers[di],
7128 &spr->rx_std_buffers[si],
7129 cpycnt * sizeof(struct ring_info));
7130
7131 for (i = 0; i < cpycnt; i++, di++, si++) {
7132 struct tg3_rx_buffer_desc *sbd, *dbd;
7133 sbd = &spr->rx_std[si];
7134 dbd = &dpr->rx_std[di];
7135 dbd->addr_hi = sbd->addr_hi;
7136 dbd->addr_lo = sbd->addr_lo;
7137 }
7138
7139 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
7140 tp->rx_std_ring_mask;
7141 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
7142 tp->rx_std_ring_mask;
7143 }
7144
7145 while (1) {
7146 src_prod_idx = spr->rx_jmb_prod_idx;
7147
7148 /* Make sure updates to the rx_jmb_buffers[] entries and
7149 * the jumbo producer index are seen in the correct order.
7150 */
7151 smp_rmb();
7152
7153 if (spr->rx_jmb_cons_idx == src_prod_idx)
7154 break;
7155
7156 if (spr->rx_jmb_cons_idx < src_prod_idx)
7157 cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
7158 else
7159 cpycnt = tp->rx_jmb_ring_mask + 1 -
7160 spr->rx_jmb_cons_idx;
7161
7162 cpycnt = min(cpycnt,
7163 tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
7164
7165 si = spr->rx_jmb_cons_idx;
7166 di = dpr->rx_jmb_prod_idx;
7167
7168 for (i = di; i < di + cpycnt; i++) {
7169 if (dpr->rx_jmb_buffers[i].data) {
7170 cpycnt = i - di;
7171 err = -ENOSPC;
7172 break;
7173 }
7174 }
7175
7176 if (!cpycnt)
7177 break;
7178
7179 /* Ensure that updates to the rx_jmb_buffers ring and the
7180 * shadowed hardware producer ring from tg3_recycle_skb() are
7181 * ordered correctly WRT the skb check above.
7182 */
7183 smp_rmb();
7184
7185 memcpy(&dpr->rx_jmb_buffers[di],
7186 &spr->rx_jmb_buffers[si],
7187 cpycnt * sizeof(struct ring_info));
7188
7189 for (i = 0; i < cpycnt; i++, di++, si++) {
7190 struct tg3_rx_buffer_desc *sbd, *dbd;
7191 sbd = &spr->rx_jmb[si].std;
7192 dbd = &dpr->rx_jmb[di].std;
7193 dbd->addr_hi = sbd->addr_hi;
7194 dbd->addr_lo = sbd->addr_lo;
7195 }
7196
7197 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
7198 tp->rx_jmb_ring_mask;
7199 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
7200 tp->rx_jmb_ring_mask;
7201 }
7202
7203 return err;
7204 }
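
/* Worked example for the copy-count logic above (illustrative): on a
 * 512-entry ring (mask 511), cons_idx = 500 and prod_idx = 10 means the
 * producer has wrapped, so the first pass copies 512 - 500 = 12 entries
 * up to the end of the ring and the next loop iteration restarts at
 * index 0; cpycnt is additionally clamped by free space at the
 * destination and shrunk to stop at the first occupied slot (-ENOSPC).
 */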
7205
7206 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
7207 {
7208 struct tg3 *tp = tnapi->tp;
7209
7210 /* run TX completion thread */
7211 if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
7212 tg3_tx(tnapi);
7213 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7214 return work_done;
7215 }
7216
7217 if (!tnapi->rx_rcb_prod_idx)
7218 return work_done;
7219
7220 /* run RX thread, within the bounds set by NAPI.
7221 * All RX "locking" is done by ensuring outside
7222 * code synchronizes with tg3->napi.poll()
7223 */
7224 if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
7225 work_done += tg3_rx(tnapi, budget - work_done);
7226
7227 if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
7228 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
7229 int i, err = 0;
7230 u32 std_prod_idx = dpr->rx_std_prod_idx;
7231 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
7232
7233 tp->rx_refill = false;
7234 for (i = 1; i <= tp->rxq_cnt; i++)
7235 err |= tg3_rx_prodring_xfer(tp, dpr,
7236 &tp->napi[i].prodring);
7237
7238 wmb();
7239
7240 if (std_prod_idx != dpr->rx_std_prod_idx)
7241 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
7242 dpr->rx_std_prod_idx);
7243
7244 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
7245 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
7246 dpr->rx_jmb_prod_idx);
7247
7248 if (err)
7249 tw32_f(HOSTCC_MODE, tp->coal_now);
7250 }
7251
7252 return work_done;
7253 }
7254
7255 static inline void tg3_reset_task_schedule(struct tg3 *tp)
7256 {
7257 if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
7258 schedule_work(&tp->reset_task);
7259 }
7260
7261 static inline void tg3_reset_task_cancel(struct tg3 *tp)
7262 {
7263 if (test_and_clear_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
7264 cancel_work_sync(&tp->reset_task);
7265 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
7266 }
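
/* TG3_FLAG_RESET_TASK_PENDING is claimed with test_and_set_bit(), so
 * however many paths notice trouble at once, the reset work is queued
 * only a single time; the cancel side clears the bit and drops any
 * stale TX_RECOVERY_PENDING state along with it.
 */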
7267
7268 static int tg3_poll_msix(struct napi_struct *napi, int budget)
7269 {
7270 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7271 struct tg3 *tp = tnapi->tp;
7272 int work_done = 0;
7273 struct tg3_hw_status *sblk = tnapi->hw_status;
7274
7275 while (1) {
7276 work_done = tg3_poll_work(tnapi, work_done, budget);
7277
7278 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7279 goto tx_recovery;
7280
7281 if (unlikely(work_done >= budget))
7282 break;
7283
7284 /* tp->last_tag is used in tg3_int_reenable() below
7285 * to tell the hw how much work has been processed,
7286 * so we must read it before checking for more work.
7287 */
7288 tnapi->last_tag = sblk->status_tag;
7289 tnapi->last_irq_tag = tnapi->last_tag;
7290 rmb();
7291
7292 /* check for RX/TX work to do */
7293 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
7294 *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
7295
7296 /* This test here is not race free, but will reduce
7297 * the number of interrupts by looping again.
7298 */
7299 if (tnapi == &tp->napi[1] && tp->rx_refill)
7300 continue;
7301
7302 napi_complete_done(napi, work_done);
7303 /* Reenable interrupts. */
7304 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
7305
7306 /* This test here is synchronized by napi_schedule()
7307 * and napi_complete() to close the race condition.
7308 */
7309 if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
7310 tw32(HOSTCC_MODE, tp->coalesce_mode |
7311 HOSTCC_MODE_ENABLE |
7312 tnapi->coal_now);
7313 }
7314 break;
7315 }
7316 }
7317
7318 tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL << 1);
7319 return work_done;
7320
7321 tx_recovery:
7322 /* work_done is guaranteed to be less than budget. */
7323 napi_complete(napi);
7324 tg3_reset_task_schedule(tp);
7325 return work_done;
7326 }
7327
7328 static void tg3_process_error(struct tg3 *tp)
7329 {
7330 u32 val;
7331 bool real_error = false;
7332
7333 if (tg3_flag(tp, ERROR_PROCESSED))
7334 return;
7335
7336 /* Check Flow Attention register */
7337 val = tr32(HOSTCC_FLOW_ATTN);
7338 if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
7339 netdev_err(tp->dev, "FLOW Attention error. Resetting chip.\n");
7340 real_error = true;
7341 }
7342
7343 if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
7344 netdev_err(tp->dev, "MSI Status error. Resetting chip.\n");
7345 real_error = true;
7346 }
7347
7348 if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
7349 netdev_err(tp->dev, "DMA Status error. Resetting chip.\n");
7350 real_error = true;
7351 }
7352
7353 if (!real_error)
7354 return;
7355
7356 tg3_dump_state(tp);
7357
7358 tg3_flag_set(tp, ERROR_PROCESSED);
7359 tg3_reset_task_schedule(tp);
7360 }
7361
7362 static int tg3_poll(struct napi_struct *napi, int budget)
7363 {
7364 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7365 struct tg3 *tp = tnapi->tp;
7366 int work_done = 0;
7367 struct tg3_hw_status *sblk = tnapi->hw_status;
7368
7369 while (1) {
7370 if (sblk->status & SD_STATUS_ERROR)
7371 tg3_process_error(tp);
7372
7373 tg3_poll_link(tp);
7374
7375 work_done = tg3_poll_work(tnapi, work_done, budget);
7376
7377 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7378 goto tx_recovery;
7379
7380 if (unlikely(work_done >= budget))
7381 break;
7382
7383 if (tg3_flag(tp, TAGGED_STATUS)) {
7384 /* tp->last_tag is used in tg3_int_reenable() below
7385 * to tell the hw how much work has been processed,
7386 * so we must read it before checking for more work.
7387 */
7388 tnapi->last_tag = sblk->status_tag;
7389 tnapi->last_irq_tag = tnapi->last_tag;
7390 rmb();
7391 } else
7392 sblk->status &= ~SD_STATUS_UPDATED;
7393
7394 if (likely(!tg3_has_work(tnapi))) {
7395 napi_complete_done(napi, work_done);
7396 tg3_int_reenable(tnapi);
7397 break;
7398 }
7399 }
7400
7401 tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL << 1);
7402 return work_done;
7403
7404 tx_recovery:
7405 /* work_done is guaranteed to be less than budget. */
7406 napi_complete(napi);
7407 tg3_reset_task_schedule(tp);
7408 return work_done;
7409 }
7410
7411 static void tg3_napi_disable(struct tg3 *tp)
7412 {
7413 int i;
7414
7415 for (i = tp->irq_cnt - 1; i >= 0; i--)
7416 napi_disable(&tp->napi[i].napi);
7417 }
7418
7419 static void tg3_napi_enable(struct tg3 *tp)
7420 {
7421 int i;
7422
7423 for (i = 0; i < tp->irq_cnt; i++)
7424 napi_enable(&tp->napi[i].napi);
7425 }
7426
7427 static void tg3_napi_init(struct tg3 *tp)
7428 {
7429 int i;
7430
7431 netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll);
7432 for (i = 1; i < tp->irq_cnt; i++)
7433 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix);
7434 }
7435
7436 static void tg3_napi_fini(struct tg3 *tp)
7437 {
7438 int i;
7439
7440 for (i = 0; i < tp->irq_cnt; i++)
7441 netif_napi_del(&tp->napi[i].napi);
7442 }
7443
7444 static inline void tg3_netif_stop(struct tg3 *tp)
7445 {
7446 netif_trans_update(tp->dev); /* prevent tx timeout */
7447 tg3_napi_disable(tp);
7448 netif_carrier_off(tp->dev);
7449 netif_tx_disable(tp->dev);
7450 }
7451
7452 /* tp->lock must be held */
7453 static inline void tg3_netif_start(struct tg3 *tp)
7454 {
7455 tg3_ptp_resume(tp);
7456
7457 /* NOTE: unconditional netif_tx_wake_all_queues is only
7458 * appropriate so long as all callers are assured to
7459 * have free tx slots (such as after tg3_init_hw)
7460 */
7461 netif_tx_wake_all_queues(tp->dev);
7462
7463 if (tp->link_up)
7464 netif_carrier_on(tp->dev);
7465
7466 tg3_napi_enable(tp);
7467 tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
7468 tg3_enable_ints(tp);
7469 }
7470
7471 static void tg3_irq_quiesce(struct tg3 *tp)
7472 __releases(tp->lock)
7473 __acquires(tp->lock)
7474 {
7475 int i;
7476
7477 BUG_ON(tp->irq_sync);
7478
7479 tp->irq_sync = 1;
7480 smp_mb();
7481
7482 spin_unlock_bh(&tp->lock);
7483
7484 for (i = 0; i < tp->irq_cnt; i++)
7485 synchronize_irq(tp->napi[i].irq_vec);
7486
7487 spin_lock_bh(&tp->lock);
7488 }
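
/* Setting irq_sync first makes the hot-path handlers refuse new NAPI
 * work (see tg3_irq_sync()), and tp->lock is dropped around
 * synchronize_irq() because that call can sleep while waiting for
 * in-flight handlers to finish.
 */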
7489
7490 /* Fully shutdown all tg3 driver activity elsewhere in the system.
7491 * If irq_sync is non-zero, then the IRQ handler must be synchronized
7492 * as well. Most of the time, this is not necessary except when
7493 * shutting down the device.
7494 */
7495 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
7496 {
7497 spin_lock_bh(&tp->lock);
7498 if (irq_sync)
7499 tg3_irq_quiesce(tp);
7500 }
7501
7502 static inline void tg3_full_unlock(struct tg3 *tp)
7503 {
7504 spin_unlock_bh(&tp->lock);
7505 }
7506
7507 /* One-shot MSI handler - Chip automatically disables interrupt
7508 * after sending MSI so driver doesn't have to do it.
7509 */
7510 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
7511 {
7512 struct tg3_napi *tnapi = dev_id;
7513 struct tg3 *tp = tnapi->tp;
7514
7515 prefetch(tnapi->hw_status);
7516 if (tnapi->rx_rcb)
7517 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7518
7519 if (likely(!tg3_irq_sync(tp)))
7520 napi_schedule(&tnapi->napi);
7521
7522 return IRQ_HANDLED;
7523 }
7524
7525 /* MSI ISR - No need to check for interrupt sharing and no need to
7526 * flush status block and interrupt mailbox. PCI ordering rules
7527 * guarantee that MSI will arrive after the status block.
7528 */
7529 static irqreturn_t tg3_msi(int irq, void *dev_id)
7530 {
7531 struct tg3_napi *tnapi = dev_id;
7532 struct tg3 *tp = tnapi->tp;
7533
7534 prefetch(tnapi->hw_status);
7535 if (tnapi->rx_rcb)
7536 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7537 /*
7538 * Writing any value to intr-mbox-0 clears PCI INTA# and
7539 * chip-internal interrupt pending events.
7540 * Writing non-zero to intr-mbox-0 additionally tells the
7541 * NIC to stop sending us irqs, engaging "in-intr-handler"
7542 * event coalescing.
7543 */
7544 tw32_mailbox(tnapi->int_mbox, 0x00000001);
7545 if (likely(!tg3_irq_sync(tp)))
7546 napi_schedule(&tnapi->napi);
7547
7548 return IRQ_RETVAL(1);
7549 }
7550
7551 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
7552 {
7553 struct tg3_napi *tnapi = dev_id;
7554 struct tg3 *tp = tnapi->tp;
7555 struct tg3_hw_status *sblk = tnapi->hw_status;
7556 unsigned int handled = 1;
7557
7558 /* In INTx mode, it is possible for the interrupt to arrive at the
7559 * CPU before the status block posted prior to the interrupt has
7560 * landed in host memory. Reading the PCI State register confirms
7561 * whether the interrupt is ours and flushes the status block.
7562 */
7563 if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
7564 if (tg3_flag(tp, CHIP_RESETTING) ||
7565 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7566 handled = 0;
7567 goto out;
7568 }
7569 }
7570
7571 /*
7572 * Writing any value to intr-mbox-0 clears PCI INTA# and
7573 * chip-internal interrupt pending events.
7574 * Writing non-zero to intr-mbox-0 additionally tells the
7575 * NIC to stop sending us irqs, engaging "in-intr-handler"
7576 * event coalescing.
7577 *
7578 * Flush the mailbox to de-assert the IRQ immediately to prevent
7579 * spurious interrupts. The flush impacts performance but
7580 * excessive spurious interrupts can be worse in some cases.
7581 */
7582 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7583 if (tg3_irq_sync(tp))
7584 goto out;
7585 sblk->status &= ~SD_STATUS_UPDATED;
7586 if (likely(tg3_has_work(tnapi))) {
7587 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7588 napi_schedule(&tnapi->napi);
7589 } else {
7590 /* No work, shared interrupt perhaps? re-enable
7591 * interrupts, and flush that PCI write
7592 */
7593 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
7594 0x00000000);
7595 }
7596 out:
7597 return IRQ_RETVAL(handled);
7598 }
7599
7600 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
7601 {
7602 struct tg3_napi *tnapi = dev_id;
7603 struct tg3 *tp = tnapi->tp;
7604 struct tg3_hw_status *sblk = tnapi->hw_status;
7605 unsigned int handled = 1;
7606
7607 /* In INTx mode, it is possible for the interrupt to arrive at
7608 * the CPU before the status block that was posted prior to the interrupt.
7609 * Reading the PCI State register will confirm whether the
7610 * interrupt is ours and will flush the status block.
7611 */
7612 if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
7613 if (tg3_flag(tp, CHIP_RESETTING) ||
7614 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7615 handled = 0;
7616 goto out;
7617 }
7618 }
7619
7620 /*
7621 * Writing any value to intr-mbox-0 clears PCI INTA# and
7622 * chip-internal interrupt pending events.
7623 * Writing non-zero to intr-mbox-0 additionally tells the
7624 * NIC to stop sending us irqs, engaging "in-intr-handler"
7625 * event coalescing.
7626 *
7627 * Flush the mailbox to de-assert the IRQ immediately to prevent
7628 * spurious interrupts. The flush impacts performance but
7629 * excessive spurious interrupts can be worse in some cases.
7630 */
7631 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7632
7633 /*
7634 * In a shared interrupt configuration, sometimes other devices'
7635 * interrupts will scream. We record the current status tag here
7636 * so that the above check can report that the screaming interrupts
7637 * are unhandled. Eventually they will be silenced.
7638 */
7639 tnapi->last_irq_tag = sblk->status_tag;
7640
7641 if (tg3_irq_sync(tp))
7642 goto out;
7643
7644 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7645
7646 napi_schedule(&tnapi->napi);
7647
7648 out:
7649 return IRQ_RETVAL(handled);
7650 }
7651
7652 /* ISR for interrupt test */
7653 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
7654 {
7655 struct tg3_napi *tnapi = dev_id;
7656 struct tg3 *tp = tnapi->tp;
7657 struct tg3_hw_status *sblk = tnapi->hw_status;
7658
7659 if ((sblk->status & SD_STATUS_UPDATED) ||
7660 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7661 tg3_disable_ints(tp);
7662 return IRQ_RETVAL(1);
7663 }
7664 return IRQ_RETVAL(0);
7665 }
7666
7667 #ifdef CONFIG_NET_POLL_CONTROLLER
7668 static void tg3_poll_controller(struct net_device *dev)
7669 {
7670 int i;
7671 struct tg3 *tp = netdev_priv(dev);
7672
7673 if (tg3_irq_sync(tp))
7674 return;
7675
7676 for (i = 0; i < tp->irq_cnt; i++)
7677 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
7678 }
7679 #endif
7680
7681 static void tg3_tx_timeout(struct net_device *dev, unsigned int txqueue)
7682 {
7683 struct tg3 *tp = netdev_priv(dev);
7684
7685 if (netif_msg_tx_err(tp)) {
7686 netdev_err(dev, "transmit timed out, resetting\n");
7687 tg3_dump_state(tp);
7688 }
7689
7690 tg3_reset_task_schedule(tp);
7691 }
7692
7693 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
7694 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
7695 {
7696 u32 base = (u32) mapping & 0xffffffff;
7697
7698 return base + len + 8 < base;
7699 }
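/* Illustrative example: base = 0xfffff000 and len = 0x2000 give
 * base + len + 8 = 0x100001008, which truncates to 0x1008 in 32-bit
 * arithmetic and compares less than base, so the test reports that
 * the buffer crosses a 4GB boundary. The extra 8 bytes of slack
 * presumably cover the short-DMA quirk handled in tg3_tx_frag_set().
 */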
7700
7701 /* Test for TSO DMA buffers that cross into regions which are within MSS bytes
7702 * of any 4GB boundaries: 4G, 8G, etc
7703 */
7704 static inline int tg3_4g_tso_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7705 u32 len, u32 mss)
7706 {
7707 if (tg3_asic_rev(tp) == ASIC_REV_5762 && mss) {
7708 u32 base = (u32) mapping & 0xffffffff;
7709
7710 return ((base + len + (mss & 0x3fff)) < base);
7711 }
7712 return 0;
7713 }
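/* Illustrative example for the 5762 case: base = 0xfffff000,
 * len = 0xc00 and mss = 0x5a8 (1448) give base + len + mss =
 * 0x1000001a8, which wraps below base in 32-bit arithmetic, so the
 * buffer ends within MSS bytes of the 4GB line and takes the
 * workaround path.
 */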
7714
7715 /* Test for DMA addresses > 40-bit */
7716 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7717 int len)
7718 {
7719 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
7720 if (tg3_flag(tp, 40BIT_DMA_BUG))
7721 return ((u64) mapping + len) > DMA_BIT_MASK(40);
7722 return 0;
7723 #else
7724 return 0;
7725 #endif
7726 }
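/* Illustrative example: with 40BIT_DMA_BUG set, mapping =
 * 0xfffffff000 and len = 0x2000 give mapping + len = 0x10000001000,
 * which exceeds DMA_BIT_MASK(40) = 0xffffffffff, so the buffer
 * reaches past the 40-bit address limit and must be worked around.
 */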
7727
7728 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
7729 dma_addr_t mapping, u32 len, u32 flags,
7730 u32 mss, u32 vlan)
7731 {
7732 txbd->addr_hi = ((u64) mapping >> 32);
7733 txbd->addr_lo = ((u64) mapping & 0xffffffff);
7734 txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
7735 txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
7736 }
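/* The resulting descriptor layout, as encoded above: the 64-bit DMA
 * address is split across addr_hi/addr_lo, len_flags packs the
 * length above the low 16 flag bits, and vlan_tag packs the MSS and
 * VLAN tag via their respective shifts.
 */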
7737
7738 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
7739 dma_addr_t map, u32 len, u32 flags,
7740 u32 mss, u32 vlan)
7741 {
7742 struct tg3 *tp = tnapi->tp;
7743 bool hwbug = false;
7744
7745 if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
7746 hwbug = true;
7747
7748 if (tg3_4g_overflow_test(map, len))
7749 hwbug = true;
7750
7751 if (tg3_4g_tso_overflow_test(tp, map, len, mss))
7752 hwbug = true;
7753
7754 if (tg3_40bit_overflow_test(tp, map, len))
7755 hwbug = true;
7756
7757 if (tp->dma_limit) {
7758 u32 prvidx = *entry;
7759 u32 tmp_flag = flags & ~TXD_FLAG_END;
7760 while (len > tp->dma_limit && *budget) {
7761 u32 frag_len = tp->dma_limit;
7762 len -= tp->dma_limit;
7763
7764 /* Avoid the 8-byte DMA problem */
7765 if (len <= 8) {
7766 len += tp->dma_limit / 2;
7767 frag_len = tp->dma_limit / 2;
7768 }
7769
7770 tnapi->tx_buffers[*entry].fragmented = true;
7771
7772 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7773 frag_len, tmp_flag, mss, vlan);
7774 *budget -= 1;
7775 prvidx = *entry;
7776 *entry = NEXT_TX(*entry);
7777
7778 map += frag_len;
7779 }
7780
7781 if (len) {
7782 if (*budget) {
7783 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7784 len, flags, mss, vlan);
7785 *budget -= 1;
7786 *entry = NEXT_TX(*entry);
7787 } else {
7788 hwbug = true;
7789 tnapi->tx_buffers[prvidx].fragmented = false;
7790 }
7791 }
7792 } else {
7793 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7794 len, flags, mss, vlan);
7795 *entry = NEXT_TX(*entry);
7796 }
7797
7798 return hwbug;
7799 }
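/* Worked example of the dma_limit split above (values illustrative):
 * with dma_limit = 4096 and len = 4100, a full-sized chunk would
 * leave a 4-byte remainder, tripping the 8-byte DMA problem. The
 * loop instead emits a 2048-byte BD and carries len = 2052 into the
 * final BD, so no descriptor shorter than 8 bytes is produced.
 */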
7800
7801 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
7802 {
7803 int i;
7804 struct sk_buff *skb;
7805 struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
7806
7807 skb = txb->skb;
7808 txb->skb = NULL;
7809
7810 dma_unmap_single(&tnapi->tp->pdev->dev, dma_unmap_addr(txb, mapping),
7811 skb_headlen(skb), DMA_TO_DEVICE);
7812
7813 while (txb->fragmented) {
7814 txb->fragmented = false;
7815 entry = NEXT_TX(entry);
7816 txb = &tnapi->tx_buffers[entry];
7817 }
7818
7819 for (i = 0; i <= last; i++) {
7820 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7821
7822 entry = NEXT_TX(entry);
7823 txb = &tnapi->tx_buffers[entry];
7824
7825 dma_unmap_page(&tnapi->tp->pdev->dev,
7826 dma_unmap_addr(txb, mapping),
7827 skb_frag_size(frag), DMA_TO_DEVICE);
7828
7829 while (txb->fragmented) {
7830 txb->fragmented = false;
7831 entry = NEXT_TX(entry);
7832 txb = &tnapi->tx_buffers[entry];
7833 }
7834 }
7835 }
7836
7837 /* Workaround 4GB and 40-bit hardware DMA bugs. */
7838 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
7839 struct sk_buff **pskb,
7840 u32 *entry, u32 *budget,
7841 u32 base_flags, u32 mss, u32 vlan)
7842 {
7843 struct tg3 *tp = tnapi->tp;
7844 struct sk_buff *new_skb, *skb = *pskb;
7845 dma_addr_t new_addr = 0;
7846 int ret = 0;
7847
7848 if (tg3_asic_rev(tp) != ASIC_REV_5701)
7849 new_skb = skb_copy(skb, GFP_ATOMIC);
7850 else {
7851 int more_headroom = 4 - ((unsigned long)skb->data & 3);
7852
7853 new_skb = skb_copy_expand(skb,
7854 skb_headroom(skb) + more_headroom,
7855 skb_tailroom(skb), GFP_ATOMIC);
7856 }
7857
7858 if (!new_skb) {
7859 ret = -1;
7860 } else {
7861 /* New SKB is guaranteed to be linear. */
7862 new_addr = dma_map_single(&tp->pdev->dev, new_skb->data,
7863 new_skb->len, DMA_TO_DEVICE);
7864 /* Make sure the mapping succeeded */
7865 if (dma_mapping_error(&tp->pdev->dev, new_addr)) {
7866 dev_kfree_skb_any(new_skb);
7867 ret = -1;
7868 } else {
7869 u32 save_entry = *entry;
7870
7871 base_flags |= TXD_FLAG_END;
7872
7873 tnapi->tx_buffers[*entry].skb = new_skb;
7874 dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
7875 mapping, new_addr);
7876
7877 if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
7878 new_skb->len, base_flags,
7879 mss, vlan)) {
7880 tg3_tx_skb_unmap(tnapi, save_entry, -1);
7881 dev_kfree_skb_any(new_skb);
7882 ret = -1;
7883 }
7884 }
7885 }
7886
7887 dev_consume_skb_any(skb);
7888 *pskb = new_skb;
7889 return ret;
7890 }
7891
7892 static bool tg3_tso_bug_gso_check(struct tg3_napi *tnapi, struct sk_buff *skb)
7893 {
7894 /* Check whether we could ever have enough descriptors,
7895 * as gso_segs can exceed the current ring size.
7896 */
7897 return skb_shinfo(skb)->gso_segs < tnapi->tx_pending / 3;
7898 }
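/* Illustrative example: with tx_pending = 512 this requires
 * gso_segs < 170 (512 / 3), matching the worst-case estimate of
 * three descriptors per segment used by tg3_tso_bug() below.
 */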
7899
7900 static netdev_tx_t __tg3_start_xmit(struct sk_buff *, struct net_device *);
7901
7902 /* Use GSO to workaround all TSO packets that meet HW bug conditions
7903 * indicated in tg3_tx_frag_set()
7904 */
7905 static int tg3_tso_bug(struct tg3 *tp, struct tg3_napi *tnapi,
7906 struct netdev_queue *txq, struct sk_buff *skb)
7907 {
7908 u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
7909 struct sk_buff *segs, *seg, *next;
7910
7911 /* Estimate the number of fragments in the worst case */
7912 if (unlikely(tg3_tx_avail(tnapi) <= frag_cnt_est)) {
7913 netif_tx_stop_queue(txq);
7914
7915 /* netif_tx_stop_queue() must be done before checking
7916 * the tx index in tg3_tx_avail() below, because in
7917 * tg3_tx(), we update tx index before checking for
7918 * netif_tx_queue_stopped().
7919 */
7920 smp_mb();
7921 if (tg3_tx_avail(tnapi) <= frag_cnt_est)
7922 return NETDEV_TX_BUSY;
7923
7924 netif_tx_wake_queue(txq);
7925 }
7926
7927 segs = skb_gso_segment(skb, tp->dev->features &
7928 ~(NETIF_F_TSO | NETIF_F_TSO6));
7929 if (IS_ERR(segs) || !segs) {
7930 tnapi->tx_dropped++;
7931 goto tg3_tso_bug_end;
7932 }
7933
7934 skb_list_walk_safe(segs, seg, next) {
7935 skb_mark_not_on_list(seg);
7936 __tg3_start_xmit(seg, tp->dev);
7937 }
7938
7939 tg3_tso_bug_end:
7940 dev_consume_skb_any(skb);
7941
7942 return NETDEV_TX_OK;
7943 }
7944
7945 /* hard_start_xmit for all devices */
7946 static netdev_tx_t __tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
7947 {
7948 struct tg3 *tp = netdev_priv(dev);
7949 u32 len, entry, base_flags, mss, vlan = 0;
7950 u32 budget;
7951 int i = -1, would_hit_hwbug;
7952 dma_addr_t mapping;
7953 struct tg3_napi *tnapi;
7954 struct netdev_queue *txq;
7955 unsigned int last;
7956 struct iphdr *iph = NULL;
7957 struct tcphdr *tcph = NULL;
7958 __sum16 tcp_csum = 0, ip_csum = 0;
7959 __be16 ip_tot_len = 0;
7960
7961 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
7962 tnapi = &tp->napi[skb_get_queue_mapping(skb)];
7963 if (tg3_flag(tp, ENABLE_TSS))
7964 tnapi++;
7965
7966 budget = tg3_tx_avail(tnapi);
7967
7968 /* We are running in BH disabled context with netif_tx_lock
7969 * and TX reclaim runs via tp->napi.poll inside of a software
7970 * interrupt. Furthermore, IRQ processing runs lockless so we have
7971 * no IRQ context deadlocks to worry about either. Rejoice!
7972 */
7973 if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
7974 if (!netif_tx_queue_stopped(txq)) {
7975 netif_tx_stop_queue(txq);
7976
7977 /* This is a hard error, log it. */
7978 netdev_err(dev,
7979 "BUG! Tx Ring full when queue awake!\n");
7980 }
7981 return NETDEV_TX_BUSY;
7982 }
7983
7984 entry = tnapi->tx_prod;
7985 base_flags = 0;
7986
7987 mss = skb_shinfo(skb)->gso_size;
7988 if (mss) {
7989 u32 tcp_opt_len, hdr_len;
7990
7991 if (skb_cow_head(skb, 0))
7992 goto drop;
7993
7994 iph = ip_hdr(skb);
7995 tcp_opt_len = tcp_optlen(skb);
7996
7997 hdr_len = skb_tcp_all_headers(skb) - ETH_HLEN;
7998
7999 /* HW/FW cannot correctly segment packets that have been
8000 * vlan encapsulated.
8001 */
8002 if (skb->protocol == htons(ETH_P_8021Q) ||
8003 skb->protocol == htons(ETH_P_8021AD)) {
8004 if (tg3_tso_bug_gso_check(tnapi, skb))
8005 return tg3_tso_bug(tp, tnapi, txq, skb);
8006 goto drop;
8007 }
8008
8009 if (!skb_is_gso_v6(skb)) {
8010 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
8011 tg3_flag(tp, TSO_BUG)) {
8012 if (tg3_tso_bug_gso_check(tnapi, skb))
8013 return tg3_tso_bug(tp, tnapi, txq, skb);
8014 goto drop;
8015 }
8016 ip_csum = iph->check;
8017 ip_tot_len = iph->tot_len;
8018 iph->check = 0;
8019 iph->tot_len = htons(mss + hdr_len);
8020 }
8021
8022 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
8023 TXD_FLAG_CPU_POST_DMA);
8024
8025 tcph = tcp_hdr(skb);
8026 tcp_csum = tcph->check;
8027
8028 if (tg3_flag(tp, HW_TSO_1) ||
8029 tg3_flag(tp, HW_TSO_2) ||
8030 tg3_flag(tp, HW_TSO_3)) {
8031 tcph->check = 0;
8032 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
8033 } else {
8034 tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
8035 0, IPPROTO_TCP, 0);
8036 }
8037
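/* HW_TSO_3 has no single field wide enough for the header
 * length, so its bits are scattered below: bits 2-3 into the
 * mss word, bit 4 and bits 5-9 into base_flags.
 */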
8038 if (tg3_flag(tp, HW_TSO_3)) {
8039 mss |= (hdr_len & 0xc) << 12;
8040 if (hdr_len & 0x10)
8041 base_flags |= 0x00000010;
8042 base_flags |= (hdr_len & 0x3e0) << 5;
8043 } else if (tg3_flag(tp, HW_TSO_2))
8044 mss |= hdr_len << 9;
8045 else if (tg3_flag(tp, HW_TSO_1) ||
8046 tg3_asic_rev(tp) == ASIC_REV_5705) {
8047 if (tcp_opt_len || iph->ihl > 5) {
8048 int tsflags;
8049
8050 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
8051 mss |= (tsflags << 11);
8052 }
8053 } else {
8054 if (tcp_opt_len || iph->ihl > 5) {
8055 int tsflags;
8056
8057 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
8058 base_flags |= tsflags << 12;
8059 }
8060 }
8061 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
8062 /* HW/FW cannot correctly checksum packets that have been
8063 * vlan encapsulated.
8064 */
8065 if (skb->protocol == htons(ETH_P_8021Q) ||
8066 skb->protocol == htons(ETH_P_8021AD)) {
8067 if (skb_checksum_help(skb))
8068 goto drop;
8069 } else {
8070 base_flags |= TXD_FLAG_TCPUDP_CSUM;
8071 }
8072 }
8073
8074 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
8075 !mss && skb->len > VLAN_ETH_FRAME_LEN)
8076 base_flags |= TXD_FLAG_JMB_PKT;
8077
8078 if (skb_vlan_tag_present(skb)) {
8079 base_flags |= TXD_FLAG_VLAN;
8080 vlan = skb_vlan_tag_get(skb);
8081 }
8082
8083 if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) &&
8084 tg3_flag(tp, TX_TSTAMP_EN)) {
8085 tg3_full_lock(tp, 0);
8086 if (!tp->pre_tx_ts) {
8087 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
8088 base_flags |= TXD_FLAG_HWTSTAMP;
8089 tg3_read_tx_tstamp(tp, &tp->pre_tx_ts);
8090 }
8091 tg3_full_unlock(tp);
8092 }
8093
8094 len = skb_headlen(skb);
8095
8096 mapping = dma_map_single(&tp->pdev->dev, skb->data, len,
8097 DMA_TO_DEVICE);
8098 if (dma_mapping_error(&tp->pdev->dev, mapping))
8099 goto drop;
8100
8102 tnapi->tx_buffers[entry].skb = skb;
8103 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
8104
8105 would_hit_hwbug = 0;
8106
8107 if (tg3_flag(tp, 5701_DMA_BUG))
8108 would_hit_hwbug = 1;
8109
8110 if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
8111 ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
8112 mss, vlan)) {
8113 would_hit_hwbug = 1;
8114 } else if (skb_shinfo(skb)->nr_frags > 0) {
8115 u32 tmp_mss = mss;
8116
8117 if (!tg3_flag(tp, HW_TSO_1) &&
8118 !tg3_flag(tp, HW_TSO_2) &&
8119 !tg3_flag(tp, HW_TSO_3))
8120 tmp_mss = 0;
8121
8122 /* Now loop through additional data
8123 * fragments, and queue them.
8124 */
8125 last = skb_shinfo(skb)->nr_frags - 1;
8126 for (i = 0; i <= last; i++) {
8127 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
8128
8129 len = skb_frag_size(frag);
8130 mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
8131 len, DMA_TO_DEVICE);
8132
8133 tnapi->tx_buffers[entry].skb = NULL;
8134 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
8135 mapping);
8136 if (dma_mapping_error(&tp->pdev->dev, mapping))
8137 goto dma_error;
8138
8139 if (!budget ||
8140 tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
8141 len, base_flags |
8142 ((i == last) ? TXD_FLAG_END : 0),
8143 tmp_mss, vlan)) {
8144 would_hit_hwbug = 1;
8145 break;
8146 }
8147 }
8148 }
8149
8150 if (would_hit_hwbug) {
8151 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
8152
8153 if (mss && tg3_tso_bug_gso_check(tnapi, skb)) {
8154 /* If it's a TSO packet, do GSO instead of
8155 * allocating and copying to a large linear SKB
8156 */
8157 if (ip_tot_len) {
8158 iph->check = ip_csum;
8159 iph->tot_len = ip_tot_len;
8160 }
8161 tcph->check = tcp_csum;
8162 return tg3_tso_bug(tp, tnapi, txq, skb);
8163 }
8164
8165 /* If the workaround fails due to memory/mapping
8166 * failure, silently drop this packet.
8167 */
8168 entry = tnapi->tx_prod;
8169 budget = tg3_tx_avail(tnapi);
8170 if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
8171 base_flags, mss, vlan))
8172 goto drop_nofree;
8173 }
8174
8175 skb_tx_timestamp(skb);
8176 netdev_tx_sent_queue(txq, skb->len);
8177
8178 /* Sync BD data before updating mailbox */
8179 wmb();
8180
8181 tnapi->tx_prod = entry;
8182 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
8183 netif_tx_stop_queue(txq);
8184
8185 /* netif_tx_stop_queue() must be done before checking
8186 * the tx index in tg3_tx_avail() below, because in
8187 * tg3_tx(), we update tx index before checking for
8188 * netif_tx_queue_stopped().
8189 */
8190 smp_mb();
8191 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
8192 netif_tx_wake_queue(txq);
8193 }
8194
8195 return NETDEV_TX_OK;
8196
8197 dma_error:
8198 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
8199 tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
8200 drop:
8201 dev_kfree_skb_any(skb);
8202 drop_nofree:
8203 tnapi->tx_dropped++;
8204 return NETDEV_TX_OK;
8205 }
8206
8207 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
8208 {
8209 struct netdev_queue *txq;
8210 u16 skb_queue_mapping;
8211 netdev_tx_t ret;
8212
8213 skb_queue_mapping = skb_get_queue_mapping(skb);
8214 txq = netdev_get_tx_queue(dev, skb_queue_mapping);
8215
8216 ret = __tg3_start_xmit(skb, dev);
8217
8218 /* Notify the hardware that packets are ready by updating the TX ring
8219 * tail pointer. We respect netdev_xmit_more() thus avoiding poking
8220 * the hardware for every packet. To guarantee forward progress the TX
8221 * ring must be drained when it is full as indicated by
8222 * netif_xmit_stopped(). This needs to happen even when the current
8223 * skb was dropped or rejected with NETDEV_TX_BUSY. Otherwise packets
8224 * queued by previous __tg3_start_xmit() calls might get stuck in
8225 * the queue forever.
8226 */
8227 if (!netdev_xmit_more() || netif_xmit_stopped(txq)) {
8228 struct tg3_napi *tnapi;
8229 struct tg3 *tp;
8230
8231 tp = netdev_priv(dev);
8232 tnapi = &tp->napi[skb_queue_mapping];
8233
8234 if (tg3_flag(tp, ENABLE_TSS))
8235 tnapi++;
8236
8237 tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
8238 }
8239
8240 return ret;
8241 }
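/* Example of the batching above: when the stack submits a burst of
 * skbs with netdev_xmit_more() set, only the last skb of the burst
 * (or the first one that stops the queue) performs the doorbell
 * write, amortizing a single MMIO access over the whole burst.
 */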
8242
8243 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
8244 {
8245 if (enable) {
8246 tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
8247 MAC_MODE_PORT_MODE_MASK);
8248
8249 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
8250
8251 if (!tg3_flag(tp, 5705_PLUS))
8252 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
8253
8254 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
8255 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
8256 else
8257 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
8258 } else {
8259 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
8260
8261 if (tg3_flag(tp, 5705_PLUS) ||
8262 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
8263 tg3_asic_rev(tp) == ASIC_REV_5700)
8264 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
8265 }
8266
8267 tw32(MAC_MODE, tp->mac_mode);
8268 udelay(40);
8269 }
8270
8271 static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
8272 {
8273 u32 val, bmcr, mac_mode, ptest = 0;
8274
8275 tg3_phy_toggle_apd(tp, false);
8276 tg3_phy_toggle_automdix(tp, false);
8277
8278 if (extlpbk && tg3_phy_set_extloopbk(tp))
8279 return -EIO;
8280
8281 bmcr = BMCR_FULLDPLX;
8282 switch (speed) {
8283 case SPEED_10:
8284 break;
8285 case SPEED_100:
8286 bmcr |= BMCR_SPEED100;
8287 break;
8288 case SPEED_1000:
8289 default:
8290 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
8291 speed = SPEED_100;
8292 bmcr |= BMCR_SPEED100;
8293 } else {
8294 speed = SPEED_1000;
8295 bmcr |= BMCR_SPEED1000;
8296 }
8297 }
8298
8299 if (extlpbk) {
8300 if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
8301 tg3_readphy(tp, MII_CTRL1000, &val);
8302 val |= CTL1000_AS_MASTER |
8303 CTL1000_ENABLE_MASTER;
8304 tg3_writephy(tp, MII_CTRL1000, val);
8305 } else {
8306 ptest = MII_TG3_FET_PTEST_TRIM_SEL |
8307 MII_TG3_FET_PTEST_TRIM_2;
8308 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
8309 }
8310 } else
8311 bmcr |= BMCR_LOOPBACK;
8312
8313 tg3_writephy(tp, MII_BMCR, bmcr);
8314
8315 /* The write needs to be flushed for the FETs */
8316 if (tp->phy_flags & TG3_PHYFLG_IS_FET)
8317 tg3_readphy(tp, MII_BMCR, &bmcr);
8318
8319 udelay(40);
8320
8321 if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
8322 tg3_asic_rev(tp) == ASIC_REV_5785) {
8323 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
8324 MII_TG3_FET_PTEST_FRC_TX_LINK |
8325 MII_TG3_FET_PTEST_FRC_TX_LOCK);
8326
8327 /* The write needs to be flushed for the AC131 */
8328 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
8329 }
8330
8331 /* Reset to prevent losing 1st rx packet intermittently */
8332 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
8333 tg3_flag(tp, 5780_CLASS)) {
8334 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8335 udelay(10);
8336 tw32_f(MAC_RX_MODE, tp->rx_mode);
8337 }
8338
8339 mac_mode = tp->mac_mode &
8340 ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
8341 if (speed == SPEED_1000)
8342 mac_mode |= MAC_MODE_PORT_MODE_GMII;
8343 else
8344 mac_mode |= MAC_MODE_PORT_MODE_MII;
8345
8346 if (tg3_asic_rev(tp) == ASIC_REV_5700) {
8347 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
8348
8349 if (masked_phy_id == TG3_PHY_ID_BCM5401)
8350 mac_mode &= ~MAC_MODE_LINK_POLARITY;
8351 else if (masked_phy_id == TG3_PHY_ID_BCM5411)
8352 mac_mode |= MAC_MODE_LINK_POLARITY;
8353
8354 tg3_writephy(tp, MII_TG3_EXT_CTRL,
8355 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
8356 }
8357
8358 tw32(MAC_MODE, mac_mode);
8359 udelay(40);
8360
8361 return 0;
8362 }
8363
8364 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
8365 {
8366 struct tg3 *tp = netdev_priv(dev);
8367
8368 if (features & NETIF_F_LOOPBACK) {
8369 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
8370 return;
8371
8372 spin_lock_bh(&tp->lock);
8373 tg3_mac_loopback(tp, true);
8374 netif_carrier_on(tp->dev);
8375 spin_unlock_bh(&tp->lock);
8376 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
8377 } else {
8378 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
8379 return;
8380
8381 spin_lock_bh(&tp->lock);
8382 tg3_mac_loopback(tp, false);
8383 /* Force link status check */
8384 tg3_setup_phy(tp, true);
8385 spin_unlock_bh(&tp->lock);
8386 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
8387 }
8388 }
8389
8390 static netdev_features_t tg3_fix_features(struct net_device *dev,
8391 netdev_features_t features)
8392 {
8393 struct tg3 *tp = netdev_priv(dev);
8394
8395 if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
8396 features &= ~NETIF_F_ALL_TSO;
8397
8398 return features;
8399 }
8400
8401 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
8402 {
8403 netdev_features_t changed = dev->features ^ features;
8404
8405 if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
8406 tg3_set_loopback(dev, features);
8407
8408 return 0;
8409 }
8410
8411 static void tg3_rx_prodring_free(struct tg3 *tp,
8412 struct tg3_rx_prodring_set *tpr)
8413 {
8414 int i;
8415
8416 if (tpr != &tp->napi[0].prodring) {
8417 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
8418 i = (i + 1) & tp->rx_std_ring_mask)
8419 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8420 tp->rx_pkt_map_sz);
8421
8422 if (tg3_flag(tp, JUMBO_CAPABLE)) {
8423 for (i = tpr->rx_jmb_cons_idx;
8424 i != tpr->rx_jmb_prod_idx;
8425 i = (i + 1) & tp->rx_jmb_ring_mask) {
8426 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8427 TG3_RX_JMB_MAP_SZ);
8428 }
8429 }
8430
8431 return;
8432 }
8433
8434 for (i = 0; i <= tp->rx_std_ring_mask; i++)
8435 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8436 tp->rx_pkt_map_sz);
8437
8438 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8439 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
8440 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8441 TG3_RX_JMB_MAP_SZ);
8442 }
8443 }
8444
8445 /* Initialize rx rings for packet processing.
8446 *
8447 * The chip has been shut down and the driver detached from
8448 * the networking stack, so no interrupts or new tx packets will
8449 * end up in the driver. tp->{tx,}lock are held and thus
8450 * we may not sleep.
8451 */
8452 static int tg3_rx_prodring_alloc(struct tg3 *tp,
8453 struct tg3_rx_prodring_set *tpr)
8454 {
8455 u32 i, rx_pkt_dma_sz;
8456
8457 tpr->rx_std_cons_idx = 0;
8458 tpr->rx_std_prod_idx = 0;
8459 tpr->rx_jmb_cons_idx = 0;
8460 tpr->rx_jmb_prod_idx = 0;
8461
8462 if (tpr != &tp->napi[0].prodring) {
8463 memset(&tpr->rx_std_buffers[0], 0,
8464 TG3_RX_STD_BUFF_RING_SIZE(tp));
8465 if (tpr->rx_jmb_buffers)
8466 memset(&tpr->rx_jmb_buffers[0], 0,
8467 TG3_RX_JMB_BUFF_RING_SIZE(tp));
8468 goto done;
8469 }
8470
8471 /* Zero out all descriptors. */
8472 memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
8473
8474 rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
8475 if (tg3_flag(tp, 5780_CLASS) &&
8476 tp->dev->mtu > ETH_DATA_LEN)
8477 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
8478 tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
8479
8480 /* Initialize invariants of the rings; we only set this
8481 * stuff once. This works because the card does not
8482 * write into the rx buffer posting rings.
8483 */
8484 for (i = 0; i <= tp->rx_std_ring_mask; i++) {
8485 struct tg3_rx_buffer_desc *rxd;
8486
8487 rxd = &tpr->rx_std[i];
8488 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
8489 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
8490 rxd->opaque = (RXD_OPAQUE_RING_STD |
8491 (i << RXD_OPAQUE_INDEX_SHIFT));
8492 }
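	/* The opaque field round-trips through the chip unmodified; e.g.
	 * standard-ring entry 5 carries RXD_OPAQUE_RING_STD |
	 * (5 << RXD_OPAQUE_INDEX_SHIFT), which lets the rx path recover
	 * both the source ring and the buffer index from a completion.
	 */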
8493
8494 /* Now allocate fresh SKBs for each rx ring. */
8495 for (i = 0; i < tp->rx_pending; i++) {
8496 unsigned int frag_size;
8497
8498 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
8499 &frag_size) < 0) {
8500 netdev_warn(tp->dev,
8501 "Using a smaller RX standard ring. Only "
8502 "%d out of %d buffers were allocated "
8503 "successfully\n", i, tp->rx_pending);
8504 if (i == 0)
8505 goto initfail;
8506 tp->rx_pending = i;
8507 break;
8508 }
8509 }
8510
8511 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8512 goto done;
8513
8514 memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
8515
8516 if (!tg3_flag(tp, JUMBO_RING_ENABLE))
8517 goto done;
8518
8519 for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
8520 struct tg3_rx_buffer_desc *rxd;
8521
8522 rxd = &tpr->rx_jmb[i].std;
8523 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
8524 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
8525 RXD_FLAG_JUMBO;
8526 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
8527 (i << RXD_OPAQUE_INDEX_SHIFT));
8528 }
8529
8530 for (i = 0; i < tp->rx_jumbo_pending; i++) {
8531 unsigned int frag_size;
8532
8533 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
8534 &frag_size) < 0) {
8535 netdev_warn(tp->dev,
8536 "Using a smaller RX jumbo ring. Only %d "
8537 "out of %d buffers were allocated "
8538 "successfully\n", i, tp->rx_jumbo_pending);
8539 if (i == 0)
8540 goto initfail;
8541 tp->rx_jumbo_pending = i;
8542 break;
8543 }
8544 }
8545
8546 done:
8547 return 0;
8548
8549 initfail:
8550 tg3_rx_prodring_free(tp, tpr);
8551 return -ENOMEM;
8552 }
8553
8554 static void tg3_rx_prodring_fini(struct tg3 *tp,
8555 struct tg3_rx_prodring_set *tpr)
8556 {
8557 kfree(tpr->rx_std_buffers);
8558 tpr->rx_std_buffers = NULL;
8559 kfree(tpr->rx_jmb_buffers);
8560 tpr->rx_jmb_buffers = NULL;
8561 if (tpr->rx_std) {
8562 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
8563 tpr->rx_std, tpr->rx_std_mapping);
8564 tpr->rx_std = NULL;
8565 }
8566 if (tpr->rx_jmb) {
8567 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
8568 tpr->rx_jmb, tpr->rx_jmb_mapping);
8569 tpr->rx_jmb = NULL;
8570 }
8571 }
8572
8573 static int tg3_rx_prodring_init(struct tg3 *tp,
8574 struct tg3_rx_prodring_set *tpr)
8575 {
8576 tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
8577 GFP_KERNEL);
8578 if (!tpr->rx_std_buffers)
8579 return -ENOMEM;
8580
8581 tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
8582 TG3_RX_STD_RING_BYTES(tp),
8583 &tpr->rx_std_mapping,
8584 GFP_KERNEL);
8585 if (!tpr->rx_std)
8586 goto err_out;
8587
8588 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8589 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
8590 GFP_KERNEL);
8591 if (!tpr->rx_jmb_buffers)
8592 goto err_out;
8593
8594 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
8595 TG3_RX_JMB_RING_BYTES(tp),
8596 &tpr->rx_jmb_mapping,
8597 GFP_KERNEL);
8598 if (!tpr->rx_jmb)
8599 goto err_out;
8600 }
8601
8602 return 0;
8603
8604 err_out:
8605 tg3_rx_prodring_fini(tp, tpr);
8606 return -ENOMEM;
8607 }
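/* Note the pairing above: the *_buffers arrays are ordinary kernel
 * memory used only for host-side bookkeeping, while the descriptor
 * rings come from dma_alloc_coherent() because the NIC reads them
 * directly.
 */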
8608
8609 /* Free up pending packets in all rx/tx rings.
8610 *
8611 * The chip has been shut down and the driver detached from
8612 * the networking stack, so no interrupts or new tx packets will
8613 * end up in the driver. tp->{tx,}lock is not held and we are not
8614 * in an interrupt context and thus may sleep.
8615 */
8616 static void tg3_free_rings(struct tg3 *tp)
8617 {
8618 int i, j;
8619
8620 for (j = 0; j < tp->irq_cnt; j++) {
8621 struct tg3_napi *tnapi = &tp->napi[j];
8622
8623 tg3_rx_prodring_free(tp, &tnapi->prodring);
8624
8625 if (!tnapi->tx_buffers)
8626 continue;
8627
8628 for (i = 0; i < TG3_TX_RING_SIZE; i++) {
8629 struct sk_buff *skb = tnapi->tx_buffers[i].skb;
8630
8631 if (!skb)
8632 continue;
8633
8634 tg3_tx_skb_unmap(tnapi, i,
8635 skb_shinfo(skb)->nr_frags - 1);
8636
8637 dev_consume_skb_any(skb);
8638 }
8639 netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
8640 }
8641 }
8642
8643 /* Initialize tx/rx rings for packet processing.
8644 *
8645 * The chip has been shut down and the driver detached from
8646 * the networking stack, so no interrupts or new tx packets will
8647 * end up in the driver. tp->{tx,}lock are held and thus
8648 * we may not sleep.
8649 */
8650 static int tg3_init_rings(struct tg3 *tp)
8651 {
8652 int i;
8653
8654 /* Free up all the SKBs. */
8655 tg3_free_rings(tp);
8656
8657 for (i = 0; i < tp->irq_cnt; i++) {
8658 struct tg3_napi *tnapi = &tp->napi[i];
8659
8660 tnapi->last_tag = 0;
8661 tnapi->last_irq_tag = 0;
8662 tnapi->hw_status->status = 0;
8663 tnapi->hw_status->status_tag = 0;
8664 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8665
8666 tnapi->tx_prod = 0;
8667 tnapi->tx_cons = 0;
8668 if (tnapi->tx_ring)
8669 memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
8670
8671 tnapi->rx_rcb_ptr = 0;
8672 if (tnapi->rx_rcb)
8673 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
8674
8675 if (tnapi->prodring.rx_std &&
8676 tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
8677 tg3_free_rings(tp);
8678 return -ENOMEM;
8679 }
8680 }
8681
8682 return 0;
8683 }
8684
8685 static void tg3_mem_tx_release(struct tg3 *tp)
8686 {
8687 int i;
8688
8689 for (i = 0; i < tp->irq_max; i++) {
8690 struct tg3_napi *tnapi = &tp->napi[i];
8691
8692 if (tnapi->tx_ring) {
8693 dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
8694 tnapi->tx_ring, tnapi->tx_desc_mapping);
8695 tnapi->tx_ring = NULL;
8696 }
8697
8698 kfree(tnapi->tx_buffers);
8699 tnapi->tx_buffers = NULL;
8700 }
8701 }
8702
8703 static int tg3_mem_tx_acquire(struct tg3 *tp)
8704 {
8705 int i;
8706 struct tg3_napi *tnapi = &tp->napi[0];
8707
8708 /* If multivector TSS is enabled, vector 0 does not handle
8709 * tx interrupts. Don't allocate any resources for it.
8710 */
8711 if (tg3_flag(tp, ENABLE_TSS))
8712 tnapi++;
8713
8714 for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
8715 tnapi->tx_buffers = kcalloc(TG3_TX_RING_SIZE,
8716 sizeof(struct tg3_tx_ring_info),
8717 GFP_KERNEL);
8718 if (!tnapi->tx_buffers)
8719 goto err_out;
8720
8721 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
8722 TG3_TX_RING_BYTES,
8723 &tnapi->tx_desc_mapping,
8724 GFP_KERNEL);
8725 if (!tnapi->tx_ring)
8726 goto err_out;
8727 }
8728
8729 return 0;
8730
8731 err_out:
8732 tg3_mem_tx_release(tp);
8733 return -ENOMEM;
8734 }
8735
8736 static void tg3_mem_rx_release(struct tg3 *tp)
8737 {
8738 int i;
8739
8740 for (i = 0; i < tp->irq_max; i++) {
8741 struct tg3_napi *tnapi = &tp->napi[i];
8742
8743 tg3_rx_prodring_fini(tp, &tnapi->prodring);
8744
8745 if (!tnapi->rx_rcb)
8746 continue;
8747
8748 dma_free_coherent(&tp->pdev->dev,
8749 TG3_RX_RCB_RING_BYTES(tp),
8750 tnapi->rx_rcb,
8751 tnapi->rx_rcb_mapping);
8752 tnapi->rx_rcb = NULL;
8753 }
8754 }
8755
8756 static int tg3_mem_rx_acquire(struct tg3 *tp)
8757 {
8758 unsigned int i, limit;
8759
8760 limit = tp->rxq_cnt;
8761
8762 /* If RSS is enabled, we need a (dummy) producer ring
8763 * set on vector zero. This is the true hw prodring.
8764 */
8765 if (tg3_flag(tp, ENABLE_RSS))
8766 limit++;
8767
8768 for (i = 0; i < limit; i++) {
8769 struct tg3_napi *tnapi = &tp->napi[i];
8770
8771 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
8772 goto err_out;
8773
8774 /* If multivector RSS is enabled, vector 0
8775 * does not handle rx or tx interrupts.
8776 * Don't allocate any resources for it.
8777 */
8778 if (!i && tg3_flag(tp, ENABLE_RSS))
8779 continue;
8780
8781 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
8782 TG3_RX_RCB_RING_BYTES(tp),
8783 &tnapi->rx_rcb_mapping,
8784 GFP_KERNEL);
8785 if (!tnapi->rx_rcb)
8786 goto err_out;
8787 }
8788
8789 return 0;
8790
8791 err_out:
8792 tg3_mem_rx_release(tp);
8793 return -ENOMEM;
8794 }
8795
8796 /*
8797 * Must not be invoked with interrupt sources disabled and
8798 * the hardware shut down.
8799 */
8800 static void tg3_free_consistent(struct tg3 *tp)
8801 {
8802 int i;
8803
8804 for (i = 0; i < tp->irq_cnt; i++) {
8805 struct tg3_napi *tnapi = &tp->napi[i];
8806
8807 if (tnapi->hw_status) {
8808 dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
8809 tnapi->hw_status,
8810 tnapi->status_mapping);
8811 tnapi->hw_status = NULL;
8812 }
8813 }
8814
8815 tg3_mem_rx_release(tp);
8816 tg3_mem_tx_release(tp);
8817
8818 /* tp->hw_stats can be referenced safely:
8819 * 1. under rtnl_lock
8820 * 2. or under tp->lock if TG3_FLAG_INIT_COMPLETE is set.
8821 */
8822 if (tp->hw_stats) {
8823 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
8824 tp->hw_stats, tp->stats_mapping);
8825 tp->hw_stats = NULL;
8826 }
8827 }
8828
8829 /*
8830 * Must not be invoked with interrupt sources disabled and
8831 * the hardware shut down. Can sleep.
8832 */
8833 static int tg3_alloc_consistent(struct tg3 *tp)
8834 {
8835 int i;
8836
8837 tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
8838 sizeof(struct tg3_hw_stats),
8839 &tp->stats_mapping, GFP_KERNEL);
8840 if (!tp->hw_stats)
8841 goto err_out;
8842
8843 for (i = 0; i < tp->irq_cnt; i++) {
8844 struct tg3_napi *tnapi = &tp->napi[i];
8845 struct tg3_hw_status *sblk;
8846
8847 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
8848 TG3_HW_STATUS_SIZE,
8849 &tnapi->status_mapping,
8850 GFP_KERNEL);
8851 if (!tnapi->hw_status)
8852 goto err_out;
8853
8854 sblk = tnapi->hw_status;
8855
8856 if (tg3_flag(tp, ENABLE_RSS)) {
8857 u16 *prodptr = NULL;
8858
8859 /*
8860 * When RSS is enabled, the status block format changes
8861 * slightly. The "rx_jumbo_consumer", "reserved",
8862 * and "rx_mini_consumer" members get mapped to the
8863 * other three rx return ring producer indexes.
8864 */
8865 switch (i) {
8866 case 1:
8867 prodptr = &sblk->idx[0].rx_producer;
8868 break;
8869 case 2:
8870 prodptr = &sblk->rx_jumbo_consumer;
8871 break;
8872 case 3:
8873 prodptr = &sblk->reserved;
8874 break;
8875 case 4:
8876 prodptr = &sblk->rx_mini_consumer;
8877 break;
8878 }
8879 tnapi->rx_rcb_prod_idx = prodptr;
8880 } else {
8881 tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
8882 }
8883 }
8884
8885 if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
8886 goto err_out;
8887
8888 return 0;
8889
8890 err_out:
8891 tg3_free_consistent(tp);
8892 return -ENOMEM;
8893 }
8894
8895 #define MAX_WAIT_CNT 1000
8896
8897 /* To stop a block, clear the enable bit and poll till it
8898 * clears. tp->lock is held.
8899 */
8900 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, bool silent)
8901 {
8902 unsigned int i;
8903 u32 val;
8904
8905 if (tg3_flag(tp, 5705_PLUS)) {
8906 switch (ofs) {
8907 case RCVLSC_MODE:
8908 case DMAC_MODE:
8909 case MBFREE_MODE:
8910 case BUFMGR_MODE:
8911 case MEMARB_MODE:
8912 /* We can't enable/disable these bits of the
8913 * 5705/5750, just say success.
8914 */
8915 return 0;
8916
8917 default:
8918 break;
8919 }
8920 }
8921
8922 val = tr32(ofs);
8923 val &= ~enable_bit;
8924 tw32_f(ofs, val);
8925
8926 for (i = 0; i < MAX_WAIT_CNT; i++) {
8927 if (pci_channel_offline(tp->pdev)) {
8928 dev_err(&tp->pdev->dev,
8929 "tg3_stop_block device offline, "
8930 "ofs=%lx enable_bit=%x\n",
8931 ofs, enable_bit);
8932 return -ENODEV;
8933 }
8934
8935 udelay(100);
8936 val = tr32(ofs);
8937 if ((val & enable_bit) == 0)
8938 break;
8939 }
8940
8941 if (i == MAX_WAIT_CNT && !silent) {
8942 dev_err(&tp->pdev->dev,
8943 "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
8944 ofs, enable_bit);
8945 return -ENODEV;
8946 }
8947
8948 return 0;
8949 }
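/* The poll above waits up to MAX_WAIT_CNT * 100us = 100ms for the
 * enable bit to clear before declaring the block stuck.
 */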
8950
8951 /* tp->lock is held. */
8952 static int tg3_abort_hw(struct tg3 *tp, bool silent)
8953 {
8954 int i, err;
8955
8956 tg3_disable_ints(tp);
8957
8958 if (pci_channel_offline(tp->pdev)) {
8959 tp->rx_mode &= ~(RX_MODE_ENABLE | TX_MODE_ENABLE);
8960 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8961 err = -ENODEV;
8962 goto err_no_dev;
8963 }
8964
8965 tp->rx_mode &= ~RX_MODE_ENABLE;
8966 tw32_f(MAC_RX_MODE, tp->rx_mode);
8967 udelay(10);
8968
8969 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
8970 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
8971 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
8972 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
8973 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
8974 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
8975
8976 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
8977 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
8978 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
8979 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
8980 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
8981 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
8982 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
8983
8984 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8985 tw32_f(MAC_MODE, tp->mac_mode);
8986 udelay(40);
8987
8988 tp->tx_mode &= ~TX_MODE_ENABLE;
8989 tw32_f(MAC_TX_MODE, tp->tx_mode);
8990
8991 for (i = 0; i < MAX_WAIT_CNT; i++) {
8992 udelay(100);
8993 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
8994 break;
8995 }
8996 if (i >= MAX_WAIT_CNT) {
8997 dev_err(&tp->pdev->dev,
8998 "%s timed out, TX_MODE_ENABLE will not clear "
8999 "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
9000 err |= -ENODEV;
9001 }
9002
9003 err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
9004 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
9005 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
9006
9007 tw32(FTQ_RESET, 0xffffffff);
9008 tw32(FTQ_RESET, 0x00000000);
9009
9010 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
9011 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
9012
9013 err_no_dev:
9014 for (i = 0; i < tp->irq_cnt; i++) {
9015 struct tg3_napi *tnapi = &tp->napi[i];
9016 if (tnapi->hw_status)
9017 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9018 }
9019
9020 return err;
9021 }
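/* The shutdown order above runs roughly upstream to downstream:
 * receive blocks first, then send blocks and the MAC TX engine, and
 * finally host coalescing, DMA, the FTQs and the buffer manager /
 * memory arbiter.
 */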
9022
9023 /* Save PCI command register before chip reset */
9024 static void tg3_save_pci_state(struct tg3 *tp)
9025 {
9026 pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
9027 }
9028
9029 /* Restore PCI state after chip reset */
9030 static void tg3_restore_pci_state(struct tg3 *tp)
9031 {
9032 u32 val;
9033
9034 /* Re-enable indirect register accesses. */
9035 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
9036 tp->misc_host_ctrl);
9037
9038 /* Set MAX PCI retry to zero. */
9039 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
9040 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
9041 tg3_flag(tp, PCIX_MODE))
9042 val |= PCISTATE_RETRY_SAME_DMA;
9043 /* Allow reads and writes to the APE register and memory space. */
9044 if (tg3_flag(tp, ENABLE_APE))
9045 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
9046 PCISTATE_ALLOW_APE_SHMEM_WR |
9047 PCISTATE_ALLOW_APE_PSPACE_WR;
9048 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
9049
9050 pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
9051
9052 if (!tg3_flag(tp, PCI_EXPRESS)) {
9053 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
9054 tp->pci_cacheline_sz);
9055 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
9056 tp->pci_lat_timer);
9057 }
9058
9059 /* Make sure PCI-X relaxed ordering bit is clear. */
9060 if (tg3_flag(tp, PCIX_MODE)) {
9061 u16 pcix_cmd;
9062
9063 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
9064 &pcix_cmd);
9065 pcix_cmd &= ~PCI_X_CMD_ERO;
9066 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
9067 pcix_cmd);
9068 }
9069
9070 if (tg3_flag(tp, 5780_CLASS)) {
9071
9072 /* Chip reset on 5780 will reset MSI enable bit,
9073 * so need to restore it.
9074 */
9075 if (tg3_flag(tp, USING_MSI)) {
9076 u16 ctrl;
9077
9078 pci_read_config_word(tp->pdev,
9079 tp->msi_cap + PCI_MSI_FLAGS,
9080 &ctrl);
9081 pci_write_config_word(tp->pdev,
9082 tp->msi_cap + PCI_MSI_FLAGS,
9083 ctrl | PCI_MSI_FLAGS_ENABLE);
9084 val = tr32(MSGINT_MODE);
9085 tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
9086 }
9087 }
9088 }
9089
9090 static void tg3_override_clk(struct tg3 *tp)
9091 {
9092 u32 val;
9093
9094 switch (tg3_asic_rev(tp)) {
9095 case ASIC_REV_5717:
9096 val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
9097 tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val |
9098 TG3_CPMU_MAC_ORIDE_ENABLE);
9099 break;
9100
9101 case ASIC_REV_5719:
9102 case ASIC_REV_5720:
9103 tw32(TG3_CPMU_CLCK_ORIDE, CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
9104 break;
9105
9106 default:
9107 return;
9108 }
9109 }
9110
9111 static void tg3_restore_clk(struct tg3 *tp)
9112 {
9113 u32 val;
9114
9115 switch (tg3_asic_rev(tp)) {
9116 case ASIC_REV_5717:
9117 val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
9118 tw32(TG3_CPMU_CLCK_ORIDE_ENABLE,
9119 val & ~TG3_CPMU_MAC_ORIDE_ENABLE);
9120 break;
9121
9122 case ASIC_REV_5719:
9123 case ASIC_REV_5720:
9124 val = tr32(TG3_CPMU_CLCK_ORIDE);
9125 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
9126 break;
9127
9128 default:
9129 return;
9130 }
9131 }
9132
9133 /* tp->lock is held. */
9134 static int tg3_chip_reset(struct tg3 *tp)
9135 __releases(tp->lock)
9136 __acquires(tp->lock)
9137 {
9138 u32 val;
9139 void (*write_op)(struct tg3 *, u32, u32);
9140 int i, err;
9141
9142 if (!pci_device_is_present(tp->pdev))
9143 return -ENODEV;
9144
9145 tg3_nvram_lock(tp);
9146
9147 tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
9148
9149 /* No matching tg3_nvram_unlock() after this because
9150 * the chip reset below will undo the nvram lock.
9151 */
9152 tp->nvram_lock_cnt = 0;
9153
9154 /* GRC_MISC_CFG core clock reset will clear the memory
9155 * enable bit in PCI register 4 and the MSI enable bit
9156 * on some chips, so we save relevant registers here.
9157 */
9158 tg3_save_pci_state(tp);
9159
9160 if (tg3_asic_rev(tp) == ASIC_REV_5752 ||
9161 tg3_flag(tp, 5755_PLUS))
9162 tw32(GRC_FASTBOOT_PC, 0);
9163
9164 /*
9165 * We must avoid the readl() that normally takes place.
9166 * It locks up machines, causes machine checks, and other
9167 * fun things. So, temporarily disable the 5701
9168 * hardware workaround, while we do the reset.
9169 */
9170 write_op = tp->write32;
9171 if (write_op == tg3_write_flush_reg32)
9172 tp->write32 = tg3_write32;
9173
9174 /* Prevent the irq handler from reading or writing PCI registers
9175 * during chip reset when the memory enable bit in the PCI command
9176 * register may be cleared. The chip does not generate interrupts
9177 * at this time, but the irq handler may still be called due to irq
9178 * sharing or irqpoll.
9179 */
9180 tg3_flag_set(tp, CHIP_RESETTING);
9181 for (i = 0; i < tp->irq_cnt; i++) {
9182 struct tg3_napi *tnapi = &tp->napi[i];
9183 if (tnapi->hw_status) {
9184 tnapi->hw_status->status = 0;
9185 tnapi->hw_status->status_tag = 0;
9186 }
9187 tnapi->last_tag = 0;
9188 tnapi->last_irq_tag = 0;
9189 }
9190 smp_mb();
9191
9192 tg3_full_unlock(tp);
9193
9194 for (i = 0; i < tp->irq_cnt; i++)
9195 synchronize_irq(tp->napi[i].irq_vec);
9196
9197 tg3_full_lock(tp, 0);
9198
9199 if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9200 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9201 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9202 }
9203
9204 /* do the reset */
9205 val = GRC_MISC_CFG_CORECLK_RESET;
9206
9207 if (tg3_flag(tp, PCI_EXPRESS)) {
9208 /* Force PCIe 1.0a mode */
9209 if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
9210 !tg3_flag(tp, 57765_PLUS) &&
9211 tr32(TG3_PCIE_PHY_TSTCTL) ==
9212 (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
9213 tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
9214
9215 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0) {
9216 tw32(GRC_MISC_CFG, (1 << 29));
9217 val |= (1 << 29);
9218 }
9219 }
9220
9221 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
9222 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
9223 tw32(GRC_VCPU_EXT_CTRL,
9224 tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
9225 }
9226
9227 /* Set the clock to the highest frequency to avoid timeouts. With link
9228 * aware mode, the clock speed could be slow and the bootcode may not
9229 * complete within the expected time. Override the clock to allow the
9230 * bootcode to finish sooner and then restore it.
9231 */
9232 tg3_override_clk(tp);
9233
9234 /* Manage gphy power for all CPMU absent PCIe devices. */
9235 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
9236 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
9237
9238 tw32(GRC_MISC_CFG, val);
9239
9240 /* restore 5701 hardware bug workaround write method */
9241 tp->write32 = write_op;
9242
9243 /* Unfortunately, we have to delay before the PCI read back.
9244 * Some 575X chips will not even respond to a PCI cfg access
9245 * when the reset command is given to the chip.
9246 *
9247 * How do these hardware designers expect things to work
9248 * properly if the PCI write is posted for a long period
9249 * of time? It is always necessary to have some method by
9250 * which a register read back can occur to push out the
9251 * write that does the reset.
9252 *
9253 * For most tg3 variants the trick below was working.
9254 * Ho hum...
9255 */
9256 udelay(120);
9257
9258 /* Flush PCI posted writes. The normal MMIO registers
9259 * are inaccessible at this time so this is the only
9260 * way to do this reliably (actually, this is no longer
9261 * the case, see above). I tried to use indirect
9262 * register read/write but this upset some 5701 variants.
9263 */
9264 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
9265
9266 udelay(120);
9267
9268 if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
9269 u16 val16;
9270
9271 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0) {
9272 int j;
9273 u32 cfg_val;
9274
9275 /* Wait for link training to complete. */
9276 for (j = 0; j < 5000; j++)
9277 udelay(100);
9278
9279 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
9280 pci_write_config_dword(tp->pdev, 0xc4,
9281 cfg_val | (1 << 15));
9282 }
9283
9284 /* Clear the "no snoop" and "relaxed ordering" bits. */
9285 val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
9286 /*
9287 * Older PCIe devices only support the 128-byte
9288 * MPS setting. Enforce the restriction.
9289 */
9290 if (!tg3_flag(tp, CPMU_PRESENT))
9291 val16 |= PCI_EXP_DEVCTL_PAYLOAD;
9292 pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16);
9293
9294 /* Clear error status */
9295 pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA,
9296 PCI_EXP_DEVSTA_CED |
9297 PCI_EXP_DEVSTA_NFED |
9298 PCI_EXP_DEVSTA_FED |
9299 PCI_EXP_DEVSTA_URD);
9300 }
9301
9302 tg3_restore_pci_state(tp);
9303
9304 tg3_flag_clear(tp, CHIP_RESETTING);
9305 tg3_flag_clear(tp, ERROR_PROCESSED);
9306
9307 val = 0;
9308 if (tg3_flag(tp, 5780_CLASS))
9309 val = tr32(MEMARB_MODE);
9310 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
9311
9312 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A3) {
9313 tg3_stop_fw(tp);
9314 tw32(0x5000, 0x400);
9315 }
9316
9317 if (tg3_flag(tp, IS_SSB_CORE)) {
9318 /*
9319 * BCM4785: In order to avoid repercussions from using
9320 * potentially defective internal ROM, stop the Rx RISC CPU,
9321 * which is not required for normal operation.
9322 */
9323 tg3_stop_fw(tp);
9324 tg3_halt_cpu(tp, RX_CPU_BASE);
9325 }
9326
9327 err = tg3_poll_fw(tp);
9328 if (err)
9329 return err;
9330
9331 tw32(GRC_MODE, tp->grc_mode);
9332
9333 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) {
9334 val = tr32(0xc4);
9335
9336 tw32(0xc4, val | (1 << 15));
9337 }
9338
9339 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
9340 tg3_asic_rev(tp) == ASIC_REV_5705) {
9341 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
9342 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0)
9343 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
9344 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9345 }
9346
9347 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9348 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
9349 val = tp->mac_mode;
9350 } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
9351 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
9352 val = tp->mac_mode;
9353 } else
9354 val = 0;
9355
9356 tw32_f(MAC_MODE, val);
9357 udelay(40);
9358
9359 tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
9360
9361 tg3_mdio_start(tp);
9362
9363 if (tg3_flag(tp, PCI_EXPRESS) &&
9364 tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
9365 tg3_asic_rev(tp) != ASIC_REV_5785 &&
9366 !tg3_flag(tp, 57765_PLUS)) {
9367 val = tr32(0x7c00);
9368
9369 tw32(0x7c00, val | (1 << 25));
9370 }
9371
9372 tg3_restore_clk(tp);
9373
9374 /* Increase the core clock speed to fix tx timeout issue for 5762
9375 * with 100Mbps link speed.
9376 */
9377 if (tg3_asic_rev(tp) == ASIC_REV_5762) {
9378 val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
9379 tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val |
9380 TG3_CPMU_MAC_ORIDE_ENABLE);
9381 }
9382
9383 /* Reprobe ASF enable state. */
9384 tg3_flag_clear(tp, ENABLE_ASF);
9385 tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
9386 TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
9387
9388 tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
9389 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
9390 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
9391 u32 nic_cfg;
9392
9393 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
9394 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
9395 tg3_flag_set(tp, ENABLE_ASF);
9396 tp->last_event_jiffies = jiffies;
9397 if (tg3_flag(tp, 5750_PLUS))
9398 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
9399
9400 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &nic_cfg);
9401 if (nic_cfg & NIC_SRAM_1G_ON_VAUX_OK)
9402 tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
9403 if (nic_cfg & NIC_SRAM_LNK_FLAP_AVOID)
9404 tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
9405 }
9406 }
9407
9408 return 0;
9409 }
9410
9411 static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
9412 static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
9413 static void __tg3_set_rx_mode(struct net_device *);
9414
9415 /* tp->lock is held. */
9416 static int tg3_halt(struct tg3 *tp, int kind, bool silent)
9417 {
9418 int err, i;
9419
9420 tg3_stop_fw(tp);
9421
9422 tg3_write_sig_pre_reset(tp, kind);
9423
9424 tg3_abort_hw(tp, silent);
9425 err = tg3_chip_reset(tp);
9426
9427 __tg3_set_mac_addr(tp, false);
9428
9429 tg3_write_sig_legacy(tp, kind);
9430 tg3_write_sig_post_reset(tp, kind);
9431
9432 if (tp->hw_stats) {
9433 /* Save the stats across chip resets... */
9434 tg3_get_nstats(tp, &tp->net_stats_prev);
9435 tg3_get_estats(tp, &tp->estats_prev);
9436
9437 /* And make sure the next sample is new data */
9438 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
9439
9440 for (i = 0; i < TG3_IRQ_MAX_VECS; ++i) {
9441 struct tg3_napi *tnapi = &tp->napi[i];
9442
9443 tnapi->rx_dropped = 0;
9444 tnapi->tx_dropped = 0;
9445 }
9446 }
9447
9448 return err;
9449 }
9450
9451 static int tg3_set_mac_addr(struct net_device *dev, void *p)
9452 {
9453 struct tg3 *tp = netdev_priv(dev);
9454 struct sockaddr *addr = p;
9455 int err = 0;
9456 bool skip_mac_1 = false;
9457
9458 if (!is_valid_ether_addr(addr->sa_data))
9459 return -EADDRNOTAVAIL;
9460
9461 eth_hw_addr_set(dev, addr->sa_data);
9462
9463 if (!netif_running(dev))
9464 return 0;
9465
9466 if (tg3_flag(tp, ENABLE_ASF)) {
9467 u32 addr0_high, addr0_low, addr1_high, addr1_low;
9468
9469 addr0_high = tr32(MAC_ADDR_0_HIGH);
9470 addr0_low = tr32(MAC_ADDR_0_LOW);
9471 addr1_high = tr32(MAC_ADDR_1_HIGH);
9472 addr1_low = tr32(MAC_ADDR_1_LOW);
9473
9474 /* Skip MAC addr 1 if ASF is using it. */
9475 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
9476 !(addr1_high == 0 && addr1_low == 0))
9477 skip_mac_1 = true;
9478 }
9479 spin_lock_bh(&tp->lock);
9480 __tg3_set_mac_addr(tp, skip_mac_1);
9481 __tg3_set_rx_mode(dev);
9482 spin_unlock_bh(&tp->lock);
9483
9484 return err;
9485 }
9486
9487 /* tp->lock is held. */
9488 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
9489 dma_addr_t mapping, u32 maxlen_flags,
9490 u32 nic_addr)
9491 {
9492 tg3_write_mem(tp,
9493 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
9494 ((u64) mapping >> 32));
9495 tg3_write_mem(tp,
9496 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
9497 ((u64) mapping & 0xffffffff));
9498 tg3_write_mem(tp,
9499 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
9500 maxlen_flags);
9501
9502 if (!tg3_flag(tp, 5705_PLUS))
9503 tg3_write_mem(tp,
9504 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
9505 nic_addr);
9506 }
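/* A BDINFO block in NIC SRAM is thus four 32-bit words: the host
 * ring address (high and low halves), the maxlen/flags word, and,
 * on pre-5705 devices only, the NIC-local ring address.
 */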
9507
9508
9509 static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9510 {
9511 int i = 0;
9512
9513 if (!tg3_flag(tp, ENABLE_TSS)) {
9514 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
9515 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
9516 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
9517 } else {
9518 tw32(HOSTCC_TXCOL_TICKS, 0);
9519 tw32(HOSTCC_TXMAX_FRAMES, 0);
9520 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
9521
9522 for (; i < tp->txq_cnt; i++) {
9523 u32 reg;
9524
9525 reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
9526 tw32(reg, ec->tx_coalesce_usecs);
9527 reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
9528 tw32(reg, ec->tx_max_coalesced_frames);
9529 reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
9530 tw32(reg, ec->tx_max_coalesced_frames_irq);
9531 }
9532 }
9533
9534 for (; i < tp->irq_max - 1; i++) {
9535 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
9536 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
9537 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9538 }
9539 }
9540
9541 static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9542 {
9543 int i = 0;
9544 u32 limit = tp->rxq_cnt;
9545
9546 if (!tg3_flag(tp, ENABLE_RSS)) {
9547 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
9548 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
9549 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
9550 limit--;
9551 } else {
9552 tw32(HOSTCC_RXCOL_TICKS, 0);
9553 tw32(HOSTCC_RXMAX_FRAMES, 0);
9554 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
9555 }
9556
9557 for (; i < limit; i++) {
9558 u32 reg;
9559
9560 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
9561 tw32(reg, ec->rx_coalesce_usecs);
9562 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
9563 tw32(reg, ec->rx_max_coalesced_frames);
9564 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
9565 tw32(reg, ec->rx_max_coalesced_frames_irq);
9566 }
9567
9568 for (; i < tp->irq_max - 1; i++) {
9569 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
9570 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
9571 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9572 }
9573 }
9574
9575 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
9576 {
9577 tg3_coal_tx_init(tp, ec);
9578 tg3_coal_rx_init(tp, ec);
9579
9580 if (!tg3_flag(tp, 5705_PLUS)) {
9581 u32 val = ec->stats_block_coalesce_usecs;
9582
9583 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
9584 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
9585
9586 if (!tp->link_up)
9587 val = 0;
9588
9589 tw32(HOSTCC_STAT_COAL_TICKS, val);
9590 }
9591 }
9592
9593 /* tp->lock is held. */
9594 static void tg3_tx_rcbs_disable(struct tg3 *tp)
9595 {
9596 u32 txrcb, limit;
9597
9598 /* Disable all transmit rings but the first. */
9599 if (!tg3_flag(tp, 5705_PLUS))
9600 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
9601 else if (tg3_flag(tp, 5717_PLUS))
9602 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
9603 else if (tg3_flag(tp, 57765_CLASS) ||
9604 tg3_asic_rev(tp) == ASIC_REV_5762)
9605 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
9606 else
9607 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9608
9609 for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9610 txrcb < limit; txrcb += TG3_BDINFO_SIZE)
9611 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
9612 BDINFO_FLAGS_DISABLED);
9613 }
9614
9615 /* tp->lock is held. */
9616 static void tg3_tx_rcbs_init(struct tg3 *tp)
9617 {
9618 int i = 0;
9619 u32 txrcb = NIC_SRAM_SEND_RCB;
9620
9621 if (tg3_flag(tp, ENABLE_TSS))
9622 i++;
9623
9624 for (; i < tp->irq_max; i++, txrcb += TG3_BDINFO_SIZE) {
9625 struct tg3_napi *tnapi = &tp->napi[i];
9626
9627 if (!tnapi->tx_ring)
9628 continue;
9629
9630 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
9631 (TG3_TX_RING_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT),
9632 NIC_SRAM_TX_BUFFER_DESC);
9633 }
9634 }
9635
9636 /* tp->lock is held. */
9637 static void tg3_rx_ret_rcbs_disable(struct tg3 *tp)
9638 {
9639 u32 rxrcb, limit;
9640
9641 /* Disable all receive return rings but the first. */
9642 if (tg3_flag(tp, 5717_PLUS))
9643 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
9644 else if (!tg3_flag(tp, 5705_PLUS))
9645 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
9646 else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9647 tg3_asic_rev(tp) == ASIC_REV_5762 ||
9648 tg3_flag(tp, 57765_CLASS))
9649 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
9650 else
9651 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9652
9653 for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9654 rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
9655 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
9656 BDINFO_FLAGS_DISABLED);
9657 }
9658
9659 /* tp->lock is held. */
9660 static void tg3_rx_ret_rcbs_init(struct tg3 *tp)
9661 {
9662 int i = 0;
9663 u32 rxrcb = NIC_SRAM_RCV_RET_RCB;
9664
9665 if (tg3_flag(tp, ENABLE_RSS))
9666 i++;
9667
9668 for (; i < tp->irq_max; i++, rxrcb += TG3_BDINFO_SIZE) {
9669 struct tg3_napi *tnapi = &tp->napi[i];
9670
9671 if (!tnapi->rx_rcb)
9672 continue;
9673
9674 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
9675 (tp->rx_ret_ring_mask + 1) <<
9676 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
9677 }
9678 }
9679
9680 /* tp->lock is held. */
9681 static void tg3_rings_reset(struct tg3 *tp)
9682 {
9683 int i;
9684 u32 stblk;
9685 struct tg3_napi *tnapi = &tp->napi[0];
9686
9687 tg3_tx_rcbs_disable(tp);
9688
9689 tg3_rx_ret_rcbs_disable(tp);
9690
9691 /* Disable interrupts */
9692 tw32_mailbox_f(tp->napi[0].int_mbox, 1);
9693 tp->napi[0].chk_msi_cnt = 0;
9694 tp->napi[0].last_rx_cons = 0;
9695 tp->napi[0].last_tx_cons = 0;
9696
9697 /* Zero mailbox registers. */
9698 if (tg3_flag(tp, SUPPORT_MSIX)) {
9699 for (i = 1; i < tp->irq_max; i++) {
9700 tp->napi[i].tx_prod = 0;
9701 tp->napi[i].tx_cons = 0;
9702 if (tg3_flag(tp, ENABLE_TSS))
9703 tw32_mailbox(tp->napi[i].prodmbox, 0);
9704 tw32_rx_mbox(tp->napi[i].consmbox, 0);
9705 tw32_mailbox_f(tp->napi[i].int_mbox, 1);
9706 tp->napi[i].chk_msi_cnt = 0;
9707 tp->napi[i].last_rx_cons = 0;
9708 tp->napi[i].last_tx_cons = 0;
9709 }
9710 if (!tg3_flag(tp, ENABLE_TSS))
9711 tw32_mailbox(tp->napi[0].prodmbox, 0);
9712 } else {
9713 tp->napi[0].tx_prod = 0;
9714 tp->napi[0].tx_cons = 0;
9715 tw32_mailbox(tp->napi[0].prodmbox, 0);
9716 tw32_rx_mbox(tp->napi[0].consmbox, 0);
9717 }
9718
9719 /* Make sure the NIC-based send BD rings are disabled. */
9720 if (!tg3_flag(tp, 5705_PLUS)) {
9721 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
9722 for (i = 0; i < 16; i++)
9723 tw32_tx_mbox(mbox + i * 8, 0);
9724 }
9725
9726 /* Clear status block in ram. */
9727 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9728
9729 /* Set status block DMA address */
9730 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
9731 ((u64) tnapi->status_mapping >> 32));
9732 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
9733 ((u64) tnapi->status_mapping & 0xffffffff));
9734
9735 stblk = HOSTCC_STATBLCK_RING1;
9736
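/* Status blocks for the remaining vectors live at consecutive
 * 64-bit register pairs starting at HOSTCC_STATBLCK_RING1.
 */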
9737 for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
9738 u64 mapping = (u64)tnapi->status_mapping;
9739 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
9740 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
9741 stblk += 8;
9742
9743 /* Clear status block in ram. */
9744 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9745 }
9746
9747 tg3_tx_rcbs_init(tp);
9748 tg3_rx_ret_rcbs_init(tp);
9749 }
9750
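/* Program the RX BD replenish thresholds. The standard ring uses
 * the smaller of the NIC-side limit (half the on-chip BD cache,
 * capped at rx_std_max_post) and one eighth of the host ring size.
 */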
9751 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
9752 {
9753 u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
9754
9755 if (!tg3_flag(tp, 5750_PLUS) ||
9756 tg3_flag(tp, 5780_CLASS) ||
9757 tg3_asic_rev(tp) == ASIC_REV_5750 ||
9758 tg3_asic_rev(tp) == ASIC_REV_5752 ||
9759 tg3_flag(tp, 57765_PLUS))
9760 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
9761 else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9762 tg3_asic_rev(tp) == ASIC_REV_5787)
9763 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
9764 else
9765 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
9766
9767 nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
9768 host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
9769
9770 val = min(nic_rep_thresh, host_rep_thresh);
9771 tw32(RCVBDI_STD_THRESH, val);
9772
9773 if (tg3_flag(tp, 57765_PLUS))
9774 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
9775
9776 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
9777 return;
9778
9779 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
9780
9781 host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
9782
9783 val = min(bdcache_maxcnt / 2, host_rep_thresh);
9784 tw32(RCVBDI_JUMBO_THRESH, val);
9785
9786 if (tg3_flag(tp, 57765_PLUS))
9787 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
9788 }
9789
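/* Bitwise, LSB-first CRC-32 over @buf using the little-endian
 * Ethernet polynomial; used below to hash multicast addresses into
 * the 128-bit MAC hash filter. This appears to be equivalent to
 * ~crc32_le(~0, buf, len) from <linux/crc32.h>, kept open-coded here.
 */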
9790 static inline u32 calc_crc(unsigned char *buf, int len)
9791 {
9792 u32 reg;
9793 u32 tmp;
9794 int j, k;
9795
9796 reg = 0xffffffff;
9797
9798 for (j = 0; j < len; j++) {
9799 reg ^= buf[j];
9800
9801 for (k = 0; k < 8; k++) {
9802 tmp = reg & 0x01;
9803
9804 reg >>= 1;
9805
9806 if (tmp)
9807 reg ^= CRC32_POLY_LE;
9808 }
9809 }
9810
9811 return ~reg;
9812 }
9813
9814 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
9815 {
9816 /* accept or reject all multicast frames */
9817 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
9818 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
9819 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
9820 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
9821 }
9822
9823 static void __tg3_set_rx_mode(struct net_device *dev)
9824 {
9825 struct tg3 *tp = netdev_priv(dev);
9826 u32 rx_mode;
9827
9828 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
9829 RX_MODE_KEEP_VLAN_TAG);
9830
9831 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
9832 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
9833 * flag clear.
9834 */
9835 if (!tg3_flag(tp, ENABLE_ASF))
9836 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9837 #endif
9838
9839 if (dev->flags & IFF_PROMISC) {
9840 /* Promiscuous mode. */
9841 rx_mode |= RX_MODE_PROMISC;
9842 } else if (dev->flags & IFF_ALLMULTI) {
9843 /* Accept all multicast. */
9844 tg3_set_multi(tp, 1);
9845 } else if (netdev_mc_empty(dev)) {
9846 /* Reject all multicast. */
9847 tg3_set_multi(tp, 0);
9848 } else {
9849 /* Accept one or more multicast(s). */
9850 struct netdev_hw_addr *ha;
9851 u32 mc_filter[4] = { 0, };
9852 u32 regidx;
9853 u32 bit;
9854 u32 crc;
9855
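/* Hash each address down to 7 bits: the top two bits select one
 * of the four 32-bit MAC_HASH_REG_x registers and the low five
 * bits select the bit within it.
 */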
9856 netdev_for_each_mc_addr(ha, dev) {
9857 crc = calc_crc(ha->addr, ETH_ALEN);
9858 bit = ~crc & 0x7f;
9859 regidx = (bit & 0x60) >> 5;
9860 bit &= 0x1f;
9861 mc_filter[regidx] |= (1 << bit);
9862 }
9863
9864 tw32(MAC_HASH_REG_0, mc_filter[0]);
9865 tw32(MAC_HASH_REG_1, mc_filter[1]);
9866 tw32(MAC_HASH_REG_2, mc_filter[2]);
9867 tw32(MAC_HASH_REG_3, mc_filter[3]);
9868 }
9869
9870 if (netdev_uc_count(dev) > TG3_MAX_UCAST_ADDR(tp)) {
9871 rx_mode |= RX_MODE_PROMISC;
9872 } else if (!(dev->flags & IFF_PROMISC)) {
9873 /* Add all entries to the MAC address filter list */
9874 int i = 0;
9875 struct netdev_hw_addr *ha;
9876
9877 netdev_for_each_uc_addr(ha, dev) {
9878 __tg3_set_one_mac_addr(tp, ha->addr,
9879 i + TG3_UCAST_ADDR_IDX(tp));
9880 i++;
9881 }
9882 }
9883
9884 if (rx_mode != tp->rx_mode) {
9885 tp->rx_mode = rx_mode;
9886 tw32_f(MAC_RX_MODE, rx_mode);
9887 udelay(10);
9888 }
9889 }
9890
9891 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
9892 {
9893 int i;
9894
9895 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
9896 tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
9897 }
9898
9899 static void tg3_rss_check_indir_tbl(struct tg3 *tp)
9900 {
9901 int i;
9902
9903 if (!tg3_flag(tp, SUPPORT_MSIX))
9904 return;
9905
9906 if (tp->rxq_cnt == 1) {
9907 memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
9908 return;
9909 }
9910
9911 /* Validate table against current IRQ count */
9912 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
9913 if (tp->rss_ind_tbl[i] >= tp->rxq_cnt)
9914 break;
9915 }
9916
9917 if (i != TG3_RSS_INDIR_TBL_SIZE)
9918 tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
9919 }
9920
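/* Each 32-bit MAC_RSS_INDIR_TBL register packs eight 4-bit queue
 * indices, most significant nibble first.
 */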
9921 static void tg3_rss_write_indir_tbl(struct tg3 *tp)
9922 {
9923 int i = 0;
9924 u32 reg = MAC_RSS_INDIR_TBL_0;
9925
9926 while (i < TG3_RSS_INDIR_TBL_SIZE) {
9927 u32 val = tp->rss_ind_tbl[i];
9928 i++;
9929 for (; i % 8; i++) {
9930 val <<= 4;
9931 val |= tp->rss_ind_tbl[i];
9932 }
9933 tw32(reg, val);
9934 reg += 4;
9935 }
9936 }
9937
9938 static inline u32 tg3_lso_rd_dma_workaround_bit(struct tg3 *tp)
9939 {
9940 if (tg3_asic_rev(tp) == ASIC_REV_5719)
9941 return TG3_LSO_RD_DMA_TX_LENGTH_WA_5719;
9942 else
9943 return TG3_LSO_RD_DMA_TX_LENGTH_WA_5720;
9944 }
9945
9946 /* tp->lock is held. */
9947 static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
9948 {
9949 u32 val, rdmac_mode;
9950 int i, err, limit;
9951 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
9952
9953 tg3_disable_ints(tp);
9954
9955 tg3_stop_fw(tp);
9956
9957 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
9958
9959 if (tg3_flag(tp, INIT_COMPLETE))
9960 tg3_abort_hw(tp, 1);
9961
9962 if ((tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
9963 !(tp->phy_flags & TG3_PHYFLG_USER_CONFIGURED)) {
9964 tg3_phy_pull_config(tp);
9965 tg3_eee_pull_config(tp, NULL);
9966 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
9967 }
9968
9969 /* Enable MAC control of LPI */
9970 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
9971 tg3_setup_eee(tp);
9972
9973 if (reset_phy)
9974 tg3_phy_reset(tp);
9975
9976 err = tg3_chip_reset(tp);
9977 if (err)
9978 return err;
9979
9980 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
9981
9982 if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
9983 val = tr32(TG3_CPMU_CTRL);
9984 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
9985 tw32(TG3_CPMU_CTRL, val);
9986
9987 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9988 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9989 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9990 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9991
9992 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
9993 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
9994 val |= CPMU_LNK_AWARE_MACCLK_6_25;
9995 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
9996
9997 val = tr32(TG3_CPMU_HST_ACC);
9998 val &= ~CPMU_HST_ACC_MACCLK_MASK;
9999 val |= CPMU_HST_ACC_MACCLK_6_25;
10000 tw32(TG3_CPMU_HST_ACC, val);
10001 }
10002
10003 if (tg3_asic_rev(tp) == ASIC_REV_57780) {
10004 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
10005 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
10006 PCIE_PWR_MGMT_L1_THRESH_4MS;
10007 tw32(PCIE_PWR_MGMT_THRESH, val);
10008
10009 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
10010 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
10011
10012 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
10013
10014 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
10015 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
10016 }
10017
10018 if (tg3_flag(tp, L1PLLPD_EN)) {
10019 u32 grc_mode = tr32(GRC_MODE);
10020
10021 /* Access the lower 1K of PL PCIE block registers. */
10022 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
10023 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
10024
10025 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
10026 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
10027 val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
10028
10029 tw32(GRC_MODE, grc_mode);
10030 }
10031
10032 if (tg3_flag(tp, 57765_CLASS)) {
10033 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
10034 u32 grc_mode = tr32(GRC_MODE);
10035
10036 /* Access the lower 1K of PL PCIE block registers. */
10037 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
10038 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
10039
10040 val = tr32(TG3_PCIE_TLDLPL_PORT +
10041 TG3_PCIE_PL_LO_PHYCTL5);
10042 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
10043 val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
10044
10045 tw32(GRC_MODE, grc_mode);
10046 }
10047
10048 if (tg3_chip_rev(tp) != CHIPREV_57765_AX) {
10049 u32 grc_mode;
10050
10051 /* Fix transmit hangs */
10052 val = tr32(TG3_CPMU_PADRNG_CTL);
10053 val |= TG3_CPMU_PADRNG_CTL_RDIV2;
10054 tw32(TG3_CPMU_PADRNG_CTL, val);
10055
10056 grc_mode = tr32(GRC_MODE);
10057
10058 /* Access the lower 1K of DL PCIE block registers. */
10059 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
10060 tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
10061
10062 val = tr32(TG3_PCIE_TLDLPL_PORT +
10063 TG3_PCIE_DL_LO_FTSMAX);
10064 val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
10065 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
10066 val | TG3_PCIE_DL_LO_FTSMAX_VAL);
10067
10068 tw32(GRC_MODE, grc_mode);
10069 }
10070
10071 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
10072 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
10073 val |= CPMU_LSPD_10MB_MACCLK_6_25;
10074 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
10075 }
10076
10077 /* This works around an issue with Athlon chipsets on
10078 * B3 tigon3 silicon. This bit has no effect on any
10079 * other revision. But do not set this on PCI Express
10080 * chips and don't even touch the clocks if the CPMU is present.
10081 */
10082 if (!tg3_flag(tp, CPMU_PRESENT)) {
10083 if (!tg3_flag(tp, PCI_EXPRESS))
10084 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
10085 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
10086 }
10087
10088 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
10089 tg3_flag(tp, PCIX_MODE)) {
10090 val = tr32(TG3PCI_PCISTATE);
10091 val |= PCISTATE_RETRY_SAME_DMA;
10092 tw32(TG3PCI_PCISTATE, val);
10093 }
10094
10095 if (tg3_flag(tp, ENABLE_APE)) {
10096 /* Allow reads and writes to the
10097 * APE register and memory space.
10098 */
10099 val = tr32(TG3PCI_PCISTATE);
10100 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
10101 PCISTATE_ALLOW_APE_SHMEM_WR |
10102 PCISTATE_ALLOW_APE_PSPACE_WR;
10103 tw32(TG3PCI_PCISTATE, val);
10104 }
10105
10106 if (tg3_chip_rev(tp) == CHIPREV_5704_BX) {
10107 /* Enable some hw fixes. */
10108 val = tr32(TG3PCI_MSI_DATA);
10109 val |= (1 << 26) | (1 << 28) | (1 << 29);
10110 tw32(TG3PCI_MSI_DATA, val);
10111 }
10112
10113 /* Descriptor ring init may make accesses to the
10114 * NIC SRAM area to setup the TX descriptors, so we
10115 * can only do this after the hardware has been
10116 * successfully reset.
10117 */
10118 err = tg3_init_rings(tp);
10119 if (err)
10120 return err;
10121
10122 if (tg3_flag(tp, 57765_PLUS)) {
10123 val = tr32(TG3PCI_DMA_RW_CTRL) &
10124 ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
10125 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
10126 val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
10127 if (!tg3_flag(tp, 57765_CLASS) &&
10128 tg3_asic_rev(tp) != ASIC_REV_5717 &&
10129 tg3_asic_rev(tp) != ASIC_REV_5762)
10130 val |= DMA_RWCTRL_TAGGED_STAT_WA;
10131 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
10132 } else if (tg3_asic_rev(tp) != ASIC_REV_5784 &&
10133 tg3_asic_rev(tp) != ASIC_REV_5761) {
10134 /* This value is determined during the probe time DMA
10135 * engine test, tg3_test_dma.
10136 */
10137 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
10138 }
10139
10140 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
10141 GRC_MODE_4X_NIC_SEND_RINGS |
10142 GRC_MODE_NO_TX_PHDR_CSUM |
10143 GRC_MODE_NO_RX_PHDR_CSUM);
10144 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
10145
10146 /* Pseudo-header checksum is done by hardware logic and not
10147 * the offload processors, so make the chip do the pseudo-
10148 * header checksums on receive. For transmit it is more
10149 * convenient to do the pseudo-header checksum in software
10150 * as Linux does that on transmit for us in all cases.
10151 */
10152 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
10153
10154 val = GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP;
10155 if (tp->rxptpctl)
10156 tw32(TG3_RX_PTP_CTL,
10157 tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
10158
10159 if (tg3_flag(tp, PTP_CAPABLE))
10160 val |= GRC_MODE_TIME_SYNC_ENABLE;
10161
10162 tw32(GRC_MODE, tp->grc_mode | val);
10163
10164 /* On one of the AMD platforms, MRRS is restricted to 4000 because of
10165 * a south bridge limitation. As a workaround, the driver sets MRRS
10166 * to 2048 instead of the default 4096.
10167 */
10168 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
10169 tp->pdev->subsystem_device == TG3PCI_SUBDEVICE_ID_DELL_5762) {
10170 val = tr32(TG3PCI_DEV_STATUS_CTRL) & ~MAX_READ_REQ_MASK;
10171 tw32(TG3PCI_DEV_STATUS_CTRL, val | MAX_READ_REQ_SIZE_2048);
10172 }
10173
10174 /* Set up the timer prescaler register. The clock is always 66 MHz. */
10175 val = tr32(GRC_MISC_CFG);
10176 val &= ~0xff;
10177 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
10178 tw32(GRC_MISC_CFG, val);
10179
10180 /* Initialize MBUF/DESC pool. */
10181 if (tg3_flag(tp, 5750_PLUS)) {
10182 /* Do nothing. */
10183 } else if (tg3_asic_rev(tp) != ASIC_REV_5705) {
10184 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
10185 if (tg3_asic_rev(tp) == ASIC_REV_5704)
10186 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
10187 else
10188 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
10189 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
10190 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
10191 } else if (tg3_flag(tp, TSO_CAPABLE)) {
10192 int fw_len;
10193
10194 fw_len = tp->fw_len;
10195 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
10196 tw32(BUFMGR_MB_POOL_ADDR,
10197 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
10198 tw32(BUFMGR_MB_POOL_SIZE,
10199 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
10200 }
10201
10202 if (tp->dev->mtu <= ETH_DATA_LEN) {
10203 tw32(BUFMGR_MB_RDMA_LOW_WATER,
10204 tp->bufmgr_config.mbuf_read_dma_low_water);
10205 tw32(BUFMGR_MB_MACRX_LOW_WATER,
10206 tp->bufmgr_config.mbuf_mac_rx_low_water);
10207 tw32(BUFMGR_MB_HIGH_WATER,
10208 tp->bufmgr_config.mbuf_high_water);
10209 } else {
10210 tw32(BUFMGR_MB_RDMA_LOW_WATER,
10211 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
10212 tw32(BUFMGR_MB_MACRX_LOW_WATER,
10213 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
10214 tw32(BUFMGR_MB_HIGH_WATER,
10215 tp->bufmgr_config.mbuf_high_water_jumbo);
10216 }
10217 tw32(BUFMGR_DMA_LOW_WATER,
10218 tp->bufmgr_config.dma_low_water);
10219 tw32(BUFMGR_DMA_HIGH_WATER,
10220 tp->bufmgr_config.dma_high_water);
10221
10222 val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
10223 if (tg3_asic_rev(tp) == ASIC_REV_5719)
10224 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
10225 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
10226 tg3_asic_rev(tp) == ASIC_REV_5762 ||
10227 tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10228 tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0)
10229 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
10230 tw32(BUFMGR_MODE, val);
10231 for (i = 0; i < 2000; i++) {
10232 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
10233 break;
10234 udelay(10);
10235 }
10236 if (i >= 2000) {
10237 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
10238 return -ENODEV;
10239 }
10240
10241 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5906_A1)
10242 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
10243
10244 tg3_setup_rxbd_thresholds(tp);
10245
10246 /* Initialize TG3_BDINFO's at:
10247 * RCVDBDI_STD_BD: standard eth size rx ring
10248 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
10249 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
10250 *
10251 * like so:
10252 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
10253 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
10254 * ring attribute flags
10255 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
10256 *
10257 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
10258 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
10259 *
10260 * The size of each ring is fixed in the firmware, but the location is
10261 * configurable.
10262 */
10263 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
10264 ((u64) tpr->rx_std_mapping >> 32));
10265 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
10266 ((u64) tpr->rx_std_mapping & 0xffffffff));
10267 if (!tg3_flag(tp, 5717_PLUS))
10268 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
10269 NIC_SRAM_RX_BUFFER_DESC);
10270
10271 /* Disable the mini ring */
10272 if (!tg3_flag(tp, 5705_PLUS))
10273 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
10274 BDINFO_FLAGS_DISABLED);
10275
10276 /* Program the jumbo buffer descriptor ring control
10277 * blocks on those devices that have them.
10278 */
10279 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10280 (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
10281
10282 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
10283 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
10284 ((u64) tpr->rx_jmb_mapping >> 32));
10285 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
10286 ((u64) tpr->rx_jmb_mapping & 0xffffffff));
10287 val = TG3_RX_JMB_RING_SIZE(tp) <<
10288 BDINFO_FLAGS_MAXLEN_SHIFT;
10289 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
10290 val | BDINFO_FLAGS_USE_EXT_RECV);
10291 if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
10292 tg3_flag(tp, 57765_CLASS) ||
10293 tg3_asic_rev(tp) == ASIC_REV_5762)
10294 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
10295 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
10296 } else {
10297 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
10298 BDINFO_FLAGS_DISABLED);
10299 }
10300
10301 if (tg3_flag(tp, 57765_PLUS)) {
10302 val = TG3_RX_STD_RING_SIZE(tp);
10303 val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
10304 val |= (TG3_RX_STD_DMA_SZ << 2);
10305 } else
10306 val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
10307 } else
10308 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
10309
10310 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
10311
10312 tpr->rx_std_prod_idx = tp->rx_pending;
10313 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
10314
10315 tpr->rx_jmb_prod_idx =
10316 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
10317 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
10318
10319 tg3_rings_reset(tp);
10320
10321 /* Initialize MAC address and backoff seed. */
10322 __tg3_set_mac_addr(tp, false);
10323
10324 /* MTU + ethernet header + FCS + optional VLAN tag */
10325 tw32(MAC_RX_MTU_SIZE,
10326 tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
10327
10328 /* The slot time is changed by tg3_setup_phy if we
10329 * run at gigabit with half duplex.
10330 */
10331 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
10332 (6 << TX_LENGTHS_IPG_SHIFT) |
10333 (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
10334
10335 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10336 tg3_asic_rev(tp) == ASIC_REV_5762)
10337 val |= tr32(MAC_TX_LENGTHS) &
10338 (TX_LENGTHS_JMB_FRM_LEN_MSK |
10339 TX_LENGTHS_CNT_DWN_VAL_MSK);
10340
10341 tw32(MAC_TX_LENGTHS, val);
10342
10343 /* Receive rules. */
10344 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
10345 tw32(RCVLPC_CONFIG, 0x0181);
10346
10347 /* Calculate the RDMAC_MODE setting early; we need it to determine
10348 * the RCVLPC_STATE_ENABLE mask.
10349 */
10350 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
10351 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
10352 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
10353 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
10354 RDMAC_MODE_LNGREAD_ENAB);
10355
10356 if (tg3_asic_rev(tp) == ASIC_REV_5717)
10357 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
10358
10359 if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
10360 tg3_asic_rev(tp) == ASIC_REV_5785 ||
10361 tg3_asic_rev(tp) == ASIC_REV_57780)
10362 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
10363 RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
10364 RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
10365
10366 if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10367 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10368 if (tg3_flag(tp, TSO_CAPABLE)) {
10369 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
10370 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10371 !tg3_flag(tp, IS_5788)) {
10372 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
10373 }
10374 }
10375
10376 if (tg3_flag(tp, PCI_EXPRESS))
10377 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
10378
10379 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10380 tp->dma_limit = 0;
10381 if (tp->dev->mtu <= ETH_DATA_LEN) {
10382 rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR;
10383 tp->dma_limit = TG3_TX_BD_DMA_MAX_2K;
10384 }
10385 }
10386
10387 if (tg3_flag(tp, HW_TSO_1) ||
10388 tg3_flag(tp, HW_TSO_2) ||
10389 tg3_flag(tp, HW_TSO_3))
10390 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
10391
10392 if (tg3_flag(tp, 57765_PLUS) ||
10393 tg3_asic_rev(tp) == ASIC_REV_5785 ||
10394 tg3_asic_rev(tp) == ASIC_REV_57780)
10395 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
10396
10397 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10398 tg3_asic_rev(tp) == ASIC_REV_5762)
10399 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
10400
10401 if (tg3_asic_rev(tp) == ASIC_REV_5761 ||
10402 tg3_asic_rev(tp) == ASIC_REV_5784 ||
10403 tg3_asic_rev(tp) == ASIC_REV_5785 ||
10404 tg3_asic_rev(tp) == ASIC_REV_57780 ||
10405 tg3_flag(tp, 57765_PLUS)) {
10406 u32 tgtreg;
10407
10408 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10409 tgtreg = TG3_RDMA_RSRVCTRL_REG2;
10410 else
10411 tgtreg = TG3_RDMA_RSRVCTRL_REG;
10412
10413 val = tr32(tgtreg);
10414 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10415 tg3_asic_rev(tp) == ASIC_REV_5762) {
10416 val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
10417 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
10418 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
10419 val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
10420 TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
10421 TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
10422 }
10423 tw32(tgtreg, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
10424 }
10425
10426 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10427 tg3_asic_rev(tp) == ASIC_REV_5720 ||
10428 tg3_asic_rev(tp) == ASIC_REV_5762) {
10429 u32 tgtreg;
10430
10431 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10432 tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL2;
10433 else
10434 tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL;
10435
10436 val = tr32(tgtreg);
10437 tw32(tgtreg, val |
10438 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
10439 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
10440 }
10441
10442 /* Receive/send statistics. */
10443 if (tg3_flag(tp, 5750_PLUS)) {
10444 val = tr32(RCVLPC_STATS_ENABLE);
10445 val &= ~RCVLPC_STATSENAB_DACK_FIX;
10446 tw32(RCVLPC_STATS_ENABLE, val);
10447 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
10448 tg3_flag(tp, TSO_CAPABLE)) {
10449 val = tr32(RCVLPC_STATS_ENABLE);
10450 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
10451 tw32(RCVLPC_STATS_ENABLE, val);
10452 } else {
10453 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
10454 }
10455 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
10456 tw32(SNDDATAI_STATSENAB, 0xffffff);
10457 tw32(SNDDATAI_STATSCTRL,
10458 (SNDDATAI_SCTRL_ENABLE |
10459 SNDDATAI_SCTRL_FASTUPD));
10460
10461 /* Setup host coalescing engine. */
10462 tw32(HOSTCC_MODE, 0);
10463 for (i = 0; i < 2000; i++) {
10464 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
10465 break;
10466 udelay(10);
10467 }
10468
10469 __tg3_set_coalesce(tp, &tp->coal);
10470
10471 if (!tg3_flag(tp, 5705_PLUS)) {
10472 /* Status/statistics block address. See tg3_timer,
10473 * the tg3_periodic_fetch_stats call there, and
10474 * tg3_get_stats to see how this works for 5705/5750 chips.
10475 */
10476 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
10477 ((u64) tp->stats_mapping >> 32));
10478 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
10479 ((u64) tp->stats_mapping & 0xffffffff));
10480 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
10481
10482 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
10483
10484 /* Clear statistics and status block memory areas */
10485 for (i = NIC_SRAM_STATS_BLK;
10486 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
10487 i += sizeof(u32)) {
10488 tg3_write_mem(tp, i, 0);
10489 udelay(40);
10490 }
10491 }
10492
10493 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
10494
10495 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
10496 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
10497 if (!tg3_flag(tp, 5705_PLUS))
10498 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
10499
10500 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
10501 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
10502 /* reset to prevent losing 1st rx packet intermittently */
10503 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10504 udelay(10);
10505 }
10506
10507 tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
10508 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
10509 MAC_MODE_FHDE_ENABLE;
10510 if (tg3_flag(tp, ENABLE_APE))
10511 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
10512 if (!tg3_flag(tp, 5705_PLUS) &&
10513 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10514 tg3_asic_rev(tp) != ASIC_REV_5700)
10515 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
10516 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
10517 udelay(40);
10518
10519 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
10520 * If TG3_FLAG_IS_NIC is zero, we should read the
10521 * register to preserve the GPIO settings for LOMs. The GPIOs,
10522 * whether used as inputs or outputs, are set by boot code after
10523 * reset.
10524 */
10525 if (!tg3_flag(tp, IS_NIC)) {
10526 u32 gpio_mask;
10527
10528 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
10529 GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
10530 GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
10531
10532 if (tg3_asic_rev(tp) == ASIC_REV_5752)
10533 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
10534 GRC_LCLCTRL_GPIO_OUTPUT3;
10535
10536 if (tg3_asic_rev(tp) == ASIC_REV_5755)
10537 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
10538
10539 tp->grc_local_ctrl &= ~gpio_mask;
10540 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
10541
10542 /* GPIO1 must be driven high for eeprom write protect */
10543 if (tg3_flag(tp, EEPROM_WRITE_PROT))
10544 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
10545 GRC_LCLCTRL_GPIO_OUTPUT1);
10546 }
10547 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10548 udelay(100);
10549
10550 if (tg3_flag(tp, USING_MSIX)) {
10551 val = tr32(MSGINT_MODE);
10552 val |= MSGINT_MODE_ENABLE;
10553 if (tp->irq_cnt > 1)
10554 val |= MSGINT_MODE_MULTIVEC_EN;
10555 if (!tg3_flag(tp, 1SHOT_MSI))
10556 val |= MSGINT_MODE_ONE_SHOT_DISABLE;
10557 tw32(MSGINT_MODE, val);
10558 }
10559
10560 if (!tg3_flag(tp, 5705_PLUS)) {
10561 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
10562 udelay(40);
10563 }
10564
10565 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
10566 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
10567 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
10568 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
10569 WDMAC_MODE_LNGREAD_ENAB);
10570
10571 if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10572 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10573 if (tg3_flag(tp, TSO_CAPABLE) &&
10574 (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 ||
10575 tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A2)) {
10576 /* nothing */
10577 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10578 !tg3_flag(tp, IS_5788)) {
10579 val |= WDMAC_MODE_RX_ACCEL;
10580 }
10581 }
10582
10583 /* Enable host coalescing bug fix */
10584 if (tg3_flag(tp, 5755_PLUS))
10585 val |= WDMAC_MODE_STATUS_TAG_FIX;
10586
10587 if (tg3_asic_rev(tp) == ASIC_REV_5785)
10588 val |= WDMAC_MODE_BURST_ALL_DATA;
10589
10590 tw32_f(WDMAC_MODE, val);
10591 udelay(40);
10592
10593 if (tg3_flag(tp, PCIX_MODE)) {
10594 u16 pcix_cmd;
10595
10596 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10597 &pcix_cmd);
10598 if (tg3_asic_rev(tp) == ASIC_REV_5703) {
10599 pcix_cmd &= ~PCI_X_CMD_MAX_READ;
10600 pcix_cmd |= PCI_X_CMD_READ_2K;
10601 } else if (tg3_asic_rev(tp) == ASIC_REV_5704) {
10602 pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
10603 pcix_cmd |= PCI_X_CMD_READ_2K;
10604 }
10605 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10606 pcix_cmd);
10607 }
10608
10609 tw32_f(RDMAC_MODE, rdmac_mode);
10610 udelay(40);
10611
10612 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10613 tg3_asic_rev(tp) == ASIC_REV_5720) {
10614 for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
10615 if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
10616 break;
10617 }
10618 if (i < TG3_NUM_RDMA_CHANNELS) {
10619 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10620 val |= tg3_lso_rd_dma_workaround_bit(tp);
10621 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10622 tg3_flag_set(tp, 5719_5720_RDMA_BUG);
10623 }
10624 }
10625
10626 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
10627 if (!tg3_flag(tp, 5705_PLUS))
10628 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
10629
10630 if (tg3_asic_rev(tp) == ASIC_REV_5761)
10631 tw32(SNDDATAC_MODE,
10632 SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
10633 else
10634 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
10635
10636 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
10637 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
10638 val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
10639 if (tg3_flag(tp, LRG_PROD_RING_CAP))
10640 val |= RCVDBDI_MODE_LRG_RING_SZ;
10641 tw32(RCVDBDI_MODE, val);
10642 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
10643 if (tg3_flag(tp, HW_TSO_1) ||
10644 tg3_flag(tp, HW_TSO_2) ||
10645 tg3_flag(tp, HW_TSO_3))
10646 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
10647 val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
10648 if (tg3_flag(tp, ENABLE_TSS))
10649 val |= SNDBDI_MODE_MULTI_TXQ_EN;
10650 tw32(SNDBDI_MODE, val);
10651 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
10652
10653 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
10654 err = tg3_load_5701_a0_firmware_fix(tp);
10655 if (err)
10656 return err;
10657 }
10658
10659 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10660 /* Ignore any errors for the firmware download. If download
10661 * fails, the device will operate with EEE disabled.
10662 */
10663 tg3_load_57766_firmware(tp);
10664 }
10665
10666 if (tg3_flag(tp, TSO_CAPABLE)) {
10667 err = tg3_load_tso_firmware(tp);
10668 if (err)
10669 return err;
10670 }
10671
10672 tp->tx_mode = TX_MODE_ENABLE;
10673
10674 if (tg3_flag(tp, 5755_PLUS) ||
10675 tg3_asic_rev(tp) == ASIC_REV_5906)
10676 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
10677
10678 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10679 tg3_asic_rev(tp) == ASIC_REV_5762) {
10680 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
10681 tp->tx_mode &= ~val;
10682 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
10683 }
10684
10685 tw32_f(MAC_TX_MODE, tp->tx_mode);
10686 udelay(100);
10687
10688 if (tg3_flag(tp, ENABLE_RSS)) {
10689 u32 rss_key[10];
10690
10691 tg3_rss_write_indir_tbl(tp);
10692
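/* Program the 40-byte RSS hash key; netdev_rss_key_fill() copies
 * from the kernel's lazily generated system-wide RSS key.
 */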
10693 netdev_rss_key_fill(rss_key, 10 * sizeof(u32));
10694
10695 for (i = 0; i < 10 ; i++)
10696 tw32(MAC_RSS_HASH_KEY_0 + i*4, rss_key[i]);
10697 }
10698
10699 tp->rx_mode = RX_MODE_ENABLE;
10700 if (tg3_flag(tp, 5755_PLUS))
10701 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
10702
10703 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10704 tp->rx_mode |= RX_MODE_IPV4_FRAG_FIX;
10705
10706 if (tg3_flag(tp, ENABLE_RSS))
10707 tp->rx_mode |= RX_MODE_RSS_ENABLE |
10708 RX_MODE_RSS_ITBL_HASH_BITS_7 |
10709 RX_MODE_RSS_IPV6_HASH_EN |
10710 RX_MODE_RSS_TCP_IPV6_HASH_EN |
10711 RX_MODE_RSS_IPV4_HASH_EN |
10712 RX_MODE_RSS_TCP_IPV4_HASH_EN;
10713
10714 tw32_f(MAC_RX_MODE, tp->rx_mode);
10715 udelay(10);
10716
10717 tw32(MAC_LED_CTRL, tp->led_ctrl);
10718
10719 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
10720 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10721 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10722 udelay(10);
10723 }
10724 tw32_f(MAC_RX_MODE, tp->rx_mode);
10725 udelay(10);
10726
10727 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10728 if ((tg3_asic_rev(tp) == ASIC_REV_5704) &&
10729 !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
10730 /* Set drive transmission level to 1.2V */
10731 /* only if the signal pre-emphasis bit is not set */
10732 val = tr32(MAC_SERDES_CFG);
10733 val &= 0xfffff000;
10734 val |= 0x880;
10735 tw32(MAC_SERDES_CFG, val);
10736 }
10737 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1)
10738 tw32(MAC_SERDES_CFG, 0x616000);
10739 }
10740
10741 /* Prevent chip from dropping frames when flow control
10742 * is enabled.
10743 */
10744 if (tg3_flag(tp, 57765_CLASS))
10745 val = 1;
10746 else
10747 val = 2;
10748 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
10749
10750 if (tg3_asic_rev(tp) == ASIC_REV_5704 &&
10751 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
10752 /* Use hardware link auto-negotiation */
10753 tg3_flag_set(tp, HW_AUTONEG);
10754 }
10755
10756 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
10757 tg3_asic_rev(tp) == ASIC_REV_5714) {
10758 u32 tmp;
10759
10760 tmp = tr32(SERDES_RX_CTRL);
10761 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
10762 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
10763 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
10764 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10765 }
10766
10767 if (!tg3_flag(tp, USE_PHYLIB)) {
10768 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10769 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
10770
10771 err = tg3_setup_phy(tp, false);
10772 if (err)
10773 return err;
10774
10775 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10776 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
10777 u32 tmp;
10778
10779 /* Clear CRC stats. */
10780 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
10781 tg3_writephy(tp, MII_TG3_TEST1,
10782 tmp | MII_TG3_TEST1_CRC_EN);
10783 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
10784 }
10785 }
10786 }
10787
10788 __tg3_set_rx_mode(tp->dev);
10789
10790 /* Initialize receive rules. */
10791 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
10792 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
10793 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
10794 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
10795
10796 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
10797 limit = 8;
10798 else
10799 limit = 16;
10800 if (tg3_flag(tp, ENABLE_ASF))
10801 limit -= 4;
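/* Clear the unused receive rules from the highest rule this chip
 * implements down to rule 4 (rules 0 and 1 were set above, 2 and 3
 * are left untouched). With ASF active the top four rules belong
 * to the firmware, hence the smaller limit.
 */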
10802 switch (limit) {
10803 case 16:
10804 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
10805 fallthrough;
10806 case 15:
10807 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
10808 fallthrough;
10809 case 14:
10810 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
10811 fallthrough;
10812 case 13:
10813 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
10814 fallthrough;
10815 case 12:
10816 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
10817 fallthrough;
10818 case 11:
10819 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
10820 fallthrough;
10821 case 10:
10822 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
10823 fallthrough;
10824 case 9:
10825 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
10826 fallthrough;
10827 case 8:
10828 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
10829 fallthrough;
10830 case 7:
10831 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
10832 fallthrough;
10833 case 6:
10834 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
10835 fallthrough;
10836 case 5:
10837 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
10838 fallthrough;
10839 case 4:
10840 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
10841 case 3:
10842 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
10843 case 2:
10844 case 1:
10845
10846 default:
10847 break;
10848 }
10849
10850 if (tg3_flag(tp, ENABLE_APE))
10851 /* Write our heartbeat update interval to APE. */
10852 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
10853 APE_HOST_HEARTBEAT_INT_5SEC);
10854
10855 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
10856
10857 return 0;
10858 }
10859
10860 /* Called at device open time to get the chip ready for
10861 * packet processing. Invoked with tp->lock held.
10862 */
10863 static int tg3_init_hw(struct tg3 *tp, bool reset_phy)
10864 {
10865 /* Chip may have been just powered on. If so, the boot code may still
10866 * be running initialization. Wait for it to finish to avoid races in
10867 * accessing the hardware.
10868 */
10869 tg3_enable_register_access(tp);
10870 tg3_poll_fw(tp);
10871
10872 tg3_switch_clocks(tp);
10873
10874 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
10875
10876 return tg3_reset_hw(tp, reset_phy);
10877 }
10878
10879 #ifdef CONFIG_TIGON3_HWMON
10880 static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
10881 {
10882 u32 off, len = TG3_OCIR_LEN;
10883 int i;
10884
10885 for (i = 0, off = 0; i < TG3_SD_NUM_RECS; i++, ocir++, off += len) {
10886 tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);
10887
10888 if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
10889 !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
10890 memset(ocir, 0, len);
10891 }
10892 }
10893
10894 /* sysfs attributes for hwmon */
10895 static ssize_t tg3_show_temp(struct device *dev,
10896 struct device_attribute *devattr, char *buf)
10897 {
10898 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
10899 struct tg3 *tp = dev_get_drvdata(dev);
10900 u32 temperature;
10901
10902 spin_lock_bh(&tp->lock);
10903 tg3_ape_scratchpad_read(tp, &temperature, attr->index,
10904 sizeof(temperature));
10905 spin_unlock_bh(&tp->lock);
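/* hwmon sysfs reports temperatures in millidegrees Celsius, hence
 * the scaling of the APE's degree-Celsius reading by 1000.
 */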
10906 return sprintf(buf, "%u\n", temperature * 1000);
10907 }
10908
10909
10910 static SENSOR_DEVICE_ATTR(temp1_input, 0444, tg3_show_temp, NULL,
10911 TG3_TEMP_SENSOR_OFFSET);
10912 static SENSOR_DEVICE_ATTR(temp1_crit, 0444, tg3_show_temp, NULL,
10913 TG3_TEMP_CAUTION_OFFSET);
10914 static SENSOR_DEVICE_ATTR(temp1_max, 0444, tg3_show_temp, NULL,
10915 TG3_TEMP_MAX_OFFSET);
10916
10917 static struct attribute *tg3_attrs[] = {
10918 &sensor_dev_attr_temp1_input.dev_attr.attr,
10919 &sensor_dev_attr_temp1_crit.dev_attr.attr,
10920 &sensor_dev_attr_temp1_max.dev_attr.attr,
10921 NULL
10922 };
10923 ATTRIBUTE_GROUPS(tg3);
10924
10925 static void tg3_hwmon_close(struct tg3 *tp)
10926 {
10927 if (tp->hwmon_dev) {
10928 hwmon_device_unregister(tp->hwmon_dev);
10929 tp->hwmon_dev = NULL;
10930 }
10931 }
10932
10933 static void tg3_hwmon_open(struct tg3 *tp)
10934 {
10935 int i;
10936 u32 size = 0;
10937 struct pci_dev *pdev = tp->pdev;
10938 struct tg3_ocir ocirs[TG3_SD_NUM_RECS];
10939
10940 tg3_sd_scan_scratchpad(tp, ocirs);
10941
10942 for (i = 0; i < TG3_SD_NUM_RECS; i++) {
10943 if (!ocirs[i].src_data_length)
10944 continue;
10945
10946 size += ocirs[i].src_hdr_length;
10947 size += ocirs[i].src_data_length;
10948 }
10949
10950 if (!size)
10951 return;
10952
10953 tp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev, "tg3",
10954 tp, tg3_groups);
10955 if (IS_ERR(tp->hwmon_dev)) {
10956 tp->hwmon_dev = NULL;
10957 dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
10958 }
10959 }
10960 #else
10961 static inline void tg3_hwmon_close(struct tg3 *tp) { }
10962 static inline void tg3_hwmon_open(struct tg3 *tp) { }
10963 #endif /* CONFIG_TIGON3_HWMON */
10964
10965
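/* Accumulate a 32-bit hardware counter into a 64-bit high/low
 * software counter; the comparison detects carry out of the
 * 32-bit addition.
 */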
10966 #define TG3_STAT_ADD32(PSTAT, REG) \
10967 do { u32 __val = tr32(REG); \
10968 (PSTAT)->low += __val; \
10969 if ((PSTAT)->low < __val) \
10970 (PSTAT)->high += 1; \
10971 } while (0)
10972
10973 static void tg3_periodic_fetch_stats(struct tg3 *tp)
10974 {
10975 struct tg3_hw_stats *sp = tp->hw_stats;
10976
10977 if (!tp->link_up)
10978 return;
10979
10980 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
10981 TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
10982 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
10983 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
10984 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
10985 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
10986 TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
10987 TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
10988 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
10989 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
10990 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
10991 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
10992 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
10993 if (unlikely(tg3_flag(tp, 5719_5720_RDMA_BUG) &&
10994 (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
10995 sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
10996 u32 val;
10997
10998 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10999 val &= ~tg3_lso_rd_dma_workaround_bit(tp);
11000 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
11001 tg3_flag_clear(tp, 5719_5720_RDMA_BUG);
11002 }
11003
11004 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
11005 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
11006 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
11007 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
11008 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
11009 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
11010 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
11011 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
11012 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
11013 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
11014 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
11015 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
11016 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
11017 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
11018
11019 TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
11020 if (tg3_asic_rev(tp) != ASIC_REV_5717 &&
11021 tg3_asic_rev(tp) != ASIC_REV_5762 &&
11022 tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0 &&
11023 tg3_chip_rev_id(tp) != CHIPREV_ID_5720_A0) {
11024 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
11025 } else {
11026 u32 val = tr32(HOSTCC_FLOW_ATTN);
11027 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
11028 if (val) {
11029 tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
11030 sp->rx_discards.low += val;
11031 if (sp->rx_discards.low < val)
11032 sp->rx_discards.high += 1;
11033 }
11034 sp->mbuf_lwm_thresh_hit = sp->rx_discards;
11035 }
11036 TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
11037 }
11038
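/* Detect a missed MSI: if a vector has work pending but its
 * consumer indices have not advanced for two consecutive timer
 * ticks, invoke the MSI handler by hand.
 */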
11039 static void tg3_chk_missed_msi(struct tg3 *tp)
11040 {
11041 u32 i;
11042
11043 for (i = 0; i < tp->irq_cnt; i++) {
11044 struct tg3_napi *tnapi = &tp->napi[i];
11045
11046 if (tg3_has_work(tnapi)) {
11047 if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
11048 tnapi->last_tx_cons == tnapi->tx_cons) {
11049 if (tnapi->chk_msi_cnt < 1) {
11050 tnapi->chk_msi_cnt++;
11051 return;
11052 }
11053 tg3_msi(0, tnapi);
11054 }
11055 }
11056 tnapi->chk_msi_cnt = 0;
11057 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
11058 tnapi->last_tx_cons = tnapi->tx_cons;
11059 }
11060 }
11061
11062 static void tg3_timer(struct timer_list *t)
11063 {
11064 struct tg3 *tp = from_timer(tp, t, timer);
11065
11066 spin_lock(&tp->lock);
11067
11068 if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING)) {
11069 spin_unlock(&tp->lock);
11070 goto restart_timer;
11071 }
11072
11073 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
11074 tg3_flag(tp, 57765_CLASS))
11075 tg3_chk_missed_msi(tp);
11076
11077 if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
11078 /* BCM4785: Flush posted writes from GbE to host memory. */
11079 tr32(HOSTCC_MODE);
11080 }
11081
11082 if (!tg3_flag(tp, TAGGED_STATUS)) {
11083 /* All of this garbage is needed because, when using non-tagged
11084 * IRQ status, the mailbox/status_block protocol the chip
11085 * uses with the CPU is race prone.
11086 */
11087 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
11088 tw32(GRC_LOCAL_CTRL,
11089 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
11090 } else {
11091 tw32(HOSTCC_MODE, tp->coalesce_mode |
11092 HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
11093 }
11094
11095 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
11096 spin_unlock(&tp->lock);
11097 tg3_reset_task_schedule(tp);
11098 goto restart_timer;
11099 }
11100 }
11101
11102 /* This part only runs once per second. */
11103 if (!--tp->timer_counter) {
11104 if (tg3_flag(tp, 5705_PLUS))
11105 tg3_periodic_fetch_stats(tp);
11106
11107 if (tp->setlpicnt && !--tp->setlpicnt)
11108 tg3_phy_eee_enable(tp);
11109
11110 if (tg3_flag(tp, USE_LINKCHG_REG)) {
11111 u32 mac_stat;
11112 int phy_event;
11113
11114 mac_stat = tr32(MAC_STATUS);
11115
11116 phy_event = 0;
11117 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
11118 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
11119 phy_event = 1;
11120 } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
11121 phy_event = 1;
11122
11123 if (phy_event)
11124 tg3_setup_phy(tp, false);
11125 } else if (tg3_flag(tp, POLL_SERDES)) {
11126 u32 mac_stat = tr32(MAC_STATUS);
11127 int need_setup = 0;
11128
11129 if (tp->link_up &&
11130 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
11131 need_setup = 1;
11132 }
11133 if (!tp->link_up &&
11134 (mac_stat & (MAC_STATUS_PCS_SYNCED |
11135 MAC_STATUS_SIGNAL_DET))) {
11136 need_setup = 1;
11137 }
11138 if (need_setup) {
11139 if (!tp->serdes_counter) {
11140 tw32_f(MAC_MODE,
11141 (tp->mac_mode &
11142 ~MAC_MODE_PORT_MODE_MASK));
11143 udelay(40);
11144 tw32_f(MAC_MODE, tp->mac_mode);
11145 udelay(40);
11146 }
11147 tg3_setup_phy(tp, false);
11148 }
11149 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
11150 tg3_flag(tp, 5780_CLASS)) {
11151 tg3_serdes_parallel_detect(tp);
11152 } else if (tg3_flag(tp, POLL_CPMU_LINK)) {
11153 u32 cpmu = tr32(TG3_CPMU_STATUS);
11154 bool link_up = !((cpmu & TG3_CPMU_STATUS_LINK_MASK) ==
11155 TG3_CPMU_STATUS_LINK_MASK);
11156
11157 if (link_up != tp->link_up)
11158 tg3_setup_phy(tp, false);
11159 }
11160
11161 tp->timer_counter = tp->timer_multiplier;
11162 }
11163
11164 /* Heartbeat is only sent once every 2 seconds.
11165 *
11166 * The heartbeat is to tell the ASF firmware that the host
11167 * driver is still alive. In the event that the OS crashes,
11168 * ASF needs to reset the hardware to free up the FIFO space
11169 * that may be filled with rx packets destined for the host.
11170 * If the FIFO is full, ASF will no longer function properly.
11171 *
11172 * Unintended resets have been reported on real time kernels
11173 * where the timer doesn't run on time. Netpoll will also have
11174 * the same problem.
11175 *
11176 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
11177 * to check the ring condition when the heartbeat is expiring
11178 * before doing the reset. This will prevent most unintended
11179 * resets.
11180 */
11181 if (!--tp->asf_counter) {
11182 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
11183 tg3_wait_for_event_ack(tp);
11184
11185 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
11186 FWCMD_NICDRV_ALIVE3);
11187 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
11188 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
11189 TG3_FW_UPDATE_TIMEOUT_SEC);
11190
11191 tg3_generate_fw_event(tp);
11192 }
11193 tp->asf_counter = tp->asf_multiplier;
11194 }
11195
11196 /* Update the APE heartbeat every 5 seconds. */
11197 tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL);
11198
11199 spin_unlock(&tp->lock);
11200
11201 restart_timer:
11202 tp->timer.expires = jiffies + tp->timer_offset;
11203 add_timer(&tp->timer);
11204 }
11205
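/* The timer ticks once per second on tagged-status chips (except
 * the 5717 and the 57765 class) and ten times per second otherwise;
 * timer_multiplier converts ticks to the once-per-second work and
 * asf_multiplier to the ASF heartbeat period.
 */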
11206 static void tg3_timer_init(struct tg3 *tp)
11207 {
11208 if (tg3_flag(tp, TAGGED_STATUS) &&
11209 tg3_asic_rev(tp) != ASIC_REV_5717 &&
11210 !tg3_flag(tp, 57765_CLASS))
11211 tp->timer_offset = HZ;
11212 else
11213 tp->timer_offset = HZ / 10;
11214
11215 BUG_ON(tp->timer_offset > HZ);
11216
11217 tp->timer_multiplier = (HZ / tp->timer_offset);
11218 tp->asf_multiplier = (HZ / tp->timer_offset) *
11219 TG3_FW_UPDATE_FREQ_SEC;
11220
11221 timer_setup(&tp->timer, tg3_timer, 0);
11222 }
11223
11224 static void tg3_timer_start(struct tg3 *tp)
11225 {
11226 tp->asf_counter = tp->asf_multiplier;
11227 tp->timer_counter = tp->timer_multiplier;
11228
11229 tp->timer.expires = jiffies + tp->timer_offset;
11230 add_timer(&tp->timer);
11231 }
11232
11233 static void tg3_timer_stop(struct tg3 *tp)
11234 {
11235 del_timer_sync(&tp->timer);
11236 }
11237
11238 /* Restart hardware after configuration changes, self-test, etc.
11239 * Invoked with tp->lock held.
11240 */
11241 static int tg3_restart_hw(struct tg3 *tp, bool reset_phy)
11242 __releases(tp->lock)
11243 __acquires(tp->lock)
11244 {
11245 int err;
11246
11247 err = tg3_init_hw(tp, reset_phy);
11248 if (err) {
11249 netdev_err(tp->dev,
11250 "Failed to re-initialize device, aborting\n");
11251 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11252 tg3_full_unlock(tp);
11253 tg3_timer_stop(tp);
11254 tp->irq_sync = 0;
11255 tg3_napi_enable(tp);
11256 dev_close(tp->dev);
11257 tg3_full_lock(tp, 0);
11258 }
11259 return err;
11260 }
11261
11262 static void tg3_reset_task(struct work_struct *work)
11263 {
11264 struct tg3 *tp = container_of(work, struct tg3, reset_task);
11265 int err;
11266
11267 rtnl_lock();
11268 tg3_full_lock(tp, 0);
11269
11270 if (tp->pcierr_recovery || !netif_running(tp->dev) ||
11271 tp->pdev->error_state != pci_channel_io_normal) {
11272 tg3_flag_clear(tp, RESET_TASK_PENDING);
11273 tg3_full_unlock(tp);
11274 rtnl_unlock();
11275 return;
11276 }
11277
11278 tg3_full_unlock(tp);
11279
11280 tg3_phy_stop(tp);
11281
11282 tg3_netif_stop(tp);
11283
11284 tg3_full_lock(tp, 1);
11285
11286 if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
11287 tp->write32_tx_mbox = tg3_write32_tx_mbox;
11288 tp->write32_rx_mbox = tg3_write_flush_reg32;
11289 tg3_flag_set(tp, MBOX_WRITE_REORDER);
11290 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
11291 }
11292
11293 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
11294 err = tg3_init_hw(tp, true);
11295 if (err) {
11296 tg3_full_unlock(tp);
11297 tp->irq_sync = 0;
11298 tg3_napi_enable(tp);
11299 /* Clear this flag so that tg3_reset_task_cancel() will not
11300 * call cancel_work_sync() and wait forever.
11301 */
11302 tg3_flag_clear(tp, RESET_TASK_PENDING);
11303 dev_close(tp->dev);
11304 goto out;
11305 }
11306
11307 tg3_netif_start(tp);
11308 tg3_full_unlock(tp);
11309 tg3_phy_start(tp);
11310 tg3_flag_clear(tp, RESET_TASK_PENDING);
11311 out:
11312 rtnl_unlock();
11313 }
11314
11315 static int tg3_request_irq(struct tg3 *tp, int irq_num)
11316 {
11317 irq_handler_t fn;
11318 unsigned long flags;
11319 char *name;
11320 struct tg3_napi *tnapi = &tp->napi[irq_num];
11321
11322 if (tp->irq_cnt == 1)
11323 name = tp->dev->name;
11324 else {
11325 name = &tnapi->irq_lbl[0];
11326 if (tnapi->tx_buffers && tnapi->rx_rcb)
11327 snprintf(name, IFNAMSIZ,
11328 "%s-txrx-%d", tp->dev->name, irq_num);
11329 else if (tnapi->tx_buffers)
11330 snprintf(name, IFNAMSIZ,
11331 "%s-tx-%d", tp->dev->name, irq_num);
11332 else if (tnapi->rx_rcb)
11333 snprintf(name, IFNAMSIZ,
11334 "%s-rx-%d", tp->dev->name, irq_num);
11335 else
11336 snprintf(name, IFNAMSIZ,
11337 "%s-%d", tp->dev->name, irq_num);
11338 name[IFNAMSIZ-1] = 0;
11339 }
11340
11341 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
11342 fn = tg3_msi;
11343 if (tg3_flag(tp, 1SHOT_MSI))
11344 fn = tg3_msi_1shot;
11345 flags = 0;
11346 } else {
11347 fn = tg3_interrupt;
11348 if (tg3_flag(tp, TAGGED_STATUS))
11349 fn = tg3_interrupt_tagged;
11350 flags = IRQF_SHARED;
11351 }
11352
11353 return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
11354 }
11355
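/* Interrupt delivery self-test: temporarily install tg3_test_isr,
 * kick the host coalescing engine (coal_now) to force an interrupt,
 * and poll the interrupt mailbox for up to ~50ms (5 x 10ms). The
 * regular handler is re-installed before returning.
 */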
11356 static int tg3_test_interrupt(struct tg3 *tp)
11357 {
11358 struct tg3_napi *tnapi = &tp->napi[0];
11359 struct net_device *dev = tp->dev;
11360 int err, i, intr_ok = 0;
11361 u32 val;
11362
11363 if (!netif_running(dev))
11364 return -ENODEV;
11365
11366 tg3_disable_ints(tp);
11367
11368 free_irq(tnapi->irq_vec, tnapi);
11369
11370 /*
11371 * Turn off MSI one shot mode. Otherwise this test has no
11372 * observable way to know whether the interrupt was delivered.
11373 */
11374 if (tg3_flag(tp, 57765_PLUS)) {
11375 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
11376 tw32(MSGINT_MODE, val);
11377 }
11378
11379 err = request_irq(tnapi->irq_vec, tg3_test_isr,
11380 IRQF_SHARED, dev->name, tnapi);
11381 if (err)
11382 return err;
11383
11384 tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
11385 tg3_enable_ints(tp);
11386
11387 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11388 tnapi->coal_now);
11389
11390 for (i = 0; i < 5; i++) {
11391 u32 int_mbox, misc_host_ctrl;
11392
11393 int_mbox = tr32_mailbox(tnapi->int_mbox);
11394 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
11395
11396 if ((int_mbox != 0) ||
11397 (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
11398 intr_ok = 1;
11399 break;
11400 }
11401
11402 if (tg3_flag(tp, 57765_PLUS) &&
11403 tnapi->hw_status->status_tag != tnapi->last_tag)
11404 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
11405
11406 msleep(10);
11407 }
11408
11409 tg3_disable_ints(tp);
11410
11411 free_irq(tnapi->irq_vec, tnapi);
11412
11413 err = tg3_request_irq(tp, 0);
11414
11415 if (err)
11416 return err;
11417
11418 if (intr_ok) {
11419 /* Reenable MSI one shot mode. */
11420 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
11421 val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
11422 tw32(MSGINT_MODE, val);
11423 }
11424 return 0;
11425 }
11426
11427 return -EIO;
11428 }
11429
11430 /* Returns 0 if the MSI test succeeds, or if the test fails but INTx
11431 * mode is successfully restored.
11432 */
11433 static int tg3_test_msi(struct tg3 *tp)
11434 {
11435 int err;
11436 u16 pci_cmd;
11437
11438 if (!tg3_flag(tp, USING_MSI))
11439 return 0;
11440
11441 /* Turn off SERR reporting in case MSI terminates with Master
11442 * Abort.
11443 */
11444 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11445 pci_write_config_word(tp->pdev, PCI_COMMAND,
11446 pci_cmd & ~PCI_COMMAND_SERR);
11447
11448 err = tg3_test_interrupt(tp);
11449
11450 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11451
11452 if (!err)
11453 return 0;
11454
11455 /* other failures */
11456 if (err != -EIO)
11457 return err;
11458
11459 /* MSI test failed, go back to INTx mode */
11460 netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
11461 "to INTx mode. Please report this failure to the PCI "
11462 "maintainer and include system chipset information\n");
11463
11464 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
11465
11466 pci_disable_msi(tp->pdev);
11467
11468 tg3_flag_clear(tp, USING_MSI);
11469 tp->napi[0].irq_vec = tp->pdev->irq;
11470
11471 err = tg3_request_irq(tp, 0);
11472 if (err)
11473 return err;
11474
11475 /* Need to reset the chip because the MSI cycle may have terminated
11476 * with Master Abort.
11477 */
11478 tg3_full_lock(tp, 1);
11479
11480 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11481 err = tg3_init_hw(tp, true);
11482
11483 tg3_full_unlock(tp);
11484
11485 if (err)
11486 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
11487
11488 return err;
11489 }
11490
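/* Fetch the firmware image via the standard firmware loader and
 * sanity-check its header. A sketch of the layout implied by the
 * fields used below (see struct tg3_firmware_hdr in tg3.h):
 *
 *	version   (__be32)  firmware version numbers
 *	base_addr (__be32)  load address on the chip
 *	len       (__be32)  full image length, BSS included
 */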
11491 static int tg3_request_firmware(struct tg3 *tp)
11492 {
11493 const struct tg3_firmware_hdr *fw_hdr;
11494
11495 if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
11496 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
11497 tp->fw_needed);
11498 return -ENOENT;
11499 }
11500
11501 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
11502
11503 /* Firmware blob starts with version numbers, followed by
11504 * start address and _full_ length including BSS sections
11505 	 * (which must be longer than the actual data, of course).
11506 */
11507
11508 tp->fw_len = be32_to_cpu(fw_hdr->len); /* includes bss */
11509 if (tp->fw_len < (tp->fw->size - TG3_FW_HDR_LEN)) {
11510 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
11511 tp->fw_len, tp->fw_needed);
11512 release_firmware(tp->fw);
11513 tp->fw = NULL;
11514 return -EINVAL;
11515 }
11516
11517 /* We no longer need firmware; we have it. */
11518 tp->fw_needed = NULL;
11519 return 0;
11520 }
11521
11522 static u32 tg3_irq_count(struct tg3 *tp)
11523 {
11524 u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);
11525
11526 if (irq_cnt > 1) {
11527 /* We want as many rx rings enabled as there are cpus.
11528 * In multiqueue MSI-X mode, the first MSI-X vector
11529 * only deals with link interrupts, etc, so we add
11530 * one to the number of vectors we are requesting.
11531 */
11532 irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
11533 }
11534
11535 return irq_cnt;
11536 }
11537
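/* An illustrative vector budget: on a 4-CPU system with no explicit
 * queue requests, rxq_cnt = 4 (default RSS queues, capped at rxq_max)
 * and txq_cnt = 1, so tg3_irq_count() requests
 * min(4 + 1, irq_max) = 5 MSI-X vectors: one for link/misc events
 * plus one per rx ring. If the PCI core grants fewer, the rx ring
 * count is shrunk to (vectors - 1) to match.
 */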
11538 static bool tg3_enable_msix(struct tg3 *tp)
11539 {
11540 int i, rc;
11541 struct msix_entry msix_ent[TG3_IRQ_MAX_VECS];
11542
11543 tp->txq_cnt = tp->txq_req;
11544 tp->rxq_cnt = tp->rxq_req;
11545 if (!tp->rxq_cnt)
11546 tp->rxq_cnt = netif_get_num_default_rss_queues();
11547 if (tp->rxq_cnt > tp->rxq_max)
11548 tp->rxq_cnt = tp->rxq_max;
11549
11550 /* Disable multiple TX rings by default. Simple round-robin hardware
11551 * scheduling of the TX rings can cause starvation of rings with
11552 * small packets when other rings have TSO or jumbo packets.
11553 */
11554 if (!tp->txq_req)
11555 tp->txq_cnt = 1;
11556
11557 tp->irq_cnt = tg3_irq_count(tp);
11558
11559 for (i = 0; i < tp->irq_max; i++) {
11560 msix_ent[i].entry = i;
11561 msix_ent[i].vector = 0;
11562 }
11563
11564 rc = pci_enable_msix_range(tp->pdev, msix_ent, 1, tp->irq_cnt);
11565 if (rc < 0) {
11566 return false;
11567 } else if (rc < tp->irq_cnt) {
11568 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
11569 tp->irq_cnt, rc);
11570 tp->irq_cnt = rc;
11571 tp->rxq_cnt = max(rc - 1, 1);
11572 if (tp->txq_cnt)
11573 tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
11574 }
11575
11576 for (i = 0; i < tp->irq_max; i++)
11577 tp->napi[i].irq_vec = msix_ent[i].vector;
11578
11579 if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
11580 pci_disable_msix(tp->pdev);
11581 return false;
11582 }
11583
11584 if (tp->irq_cnt == 1)
11585 return true;
11586
11587 tg3_flag_set(tp, ENABLE_RSS);
11588
11589 if (tp->txq_cnt > 1)
11590 tg3_flag_set(tp, ENABLE_TSS);
11591
11592 netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);
11593
11594 return true;
11595 }
11596
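/* Interrupt mode selection falls back in order: MSI-X (multi-vector)
 * -> MSI -> legacy INTx. A chip advertising MSI without tagged status
 * is treated as misconfigured and dropped back to INTx.
 */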
11597 static void tg3_ints_init(struct tg3 *tp)
11598 {
11599 if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
11600 !tg3_flag(tp, TAGGED_STATUS)) {
11601 /* All MSI supporting chips should support tagged
11602 * status. Assert that this is the case.
11603 */
11604 netdev_warn(tp->dev,
11605 "MSI without TAGGED_STATUS? Not using MSI\n");
11606 goto defcfg;
11607 }
11608
11609 if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
11610 tg3_flag_set(tp, USING_MSIX);
11611 else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
11612 tg3_flag_set(tp, USING_MSI);
11613
11614 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
11615 u32 msi_mode = tr32(MSGINT_MODE);
11616 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
11617 msi_mode |= MSGINT_MODE_MULTIVEC_EN;
11618 if (!tg3_flag(tp, 1SHOT_MSI))
11619 msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
11620 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
11621 }
11622 defcfg:
11623 if (!tg3_flag(tp, USING_MSIX)) {
11624 tp->irq_cnt = 1;
11625 tp->napi[0].irq_vec = tp->pdev->irq;
11626 }
11627
11628 if (tp->irq_cnt == 1) {
11629 tp->txq_cnt = 1;
11630 tp->rxq_cnt = 1;
11631 netif_set_real_num_tx_queues(tp->dev, 1);
11632 netif_set_real_num_rx_queues(tp->dev, 1);
11633 }
11634 }
11635
11636 static void tg3_ints_fini(struct tg3 *tp)
11637 {
11638 if (tg3_flag(tp, USING_MSIX))
11639 pci_disable_msix(tp->pdev);
11640 else if (tg3_flag(tp, USING_MSI))
11641 pci_disable_msi(tp->pdev);
11642 tg3_flag_clear(tp, USING_MSI);
11643 tg3_flag_clear(tp, USING_MSIX);
11644 tg3_flag_clear(tp, ENABLE_RSS);
11645 tg3_flag_clear(tp, ENABLE_TSS);
11646 }
11647
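/* Bring-up order matters: interrupts are configured first so we know
 * how many NAPI contexts and rings to allocate, then descriptor memory
 * is allocated and IRQs are requested, and only then is the hardware
 * initialized. The optional MSI self-test runs after init so a
 * failure can still unwind cleanly.
 */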
11648 static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
11649 bool init)
11650 {
11651 struct net_device *dev = tp->dev;
11652 int i, err;
11653
11654 /*
11655 	 * Set up interrupts first so we know how
11656 * many NAPI resources to allocate
11657 */
11658 tg3_ints_init(tp);
11659
11660 tg3_rss_check_indir_tbl(tp);
11661
11662 /* The placement of this call is tied
11663 * to the setup and use of Host TX descriptors.
11664 */
11665 err = tg3_alloc_consistent(tp);
11666 if (err)
11667 goto out_ints_fini;
11668
11669 tg3_napi_init(tp);
11670
11671 tg3_napi_enable(tp);
11672
11673 for (i = 0; i < tp->irq_cnt; i++) {
11674 err = tg3_request_irq(tp, i);
11675 if (err) {
11676 for (i--; i >= 0; i--) {
11677 struct tg3_napi *tnapi = &tp->napi[i];
11678
11679 free_irq(tnapi->irq_vec, tnapi);
11680 }
11681 goto out_napi_fini;
11682 }
11683 }
11684
11685 tg3_full_lock(tp, 0);
11686
11687 if (init)
11688 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
11689
11690 err = tg3_init_hw(tp, reset_phy);
11691 if (err) {
11692 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11693 tg3_free_rings(tp);
11694 }
11695
11696 tg3_full_unlock(tp);
11697
11698 if (err)
11699 goto out_free_irq;
11700
11701 if (test_irq && tg3_flag(tp, USING_MSI)) {
11702 err = tg3_test_msi(tp);
11703
11704 if (err) {
11705 tg3_full_lock(tp, 0);
11706 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11707 tg3_free_rings(tp);
11708 tg3_full_unlock(tp);
11709
11710 goto out_napi_fini;
11711 }
11712
11713 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
11714 u32 val = tr32(PCIE_TRANSACTION_CFG);
11715
11716 tw32(PCIE_TRANSACTION_CFG,
11717 val | PCIE_TRANS_CFG_1SHOT_MSI);
11718 }
11719 }
11720
11721 tg3_phy_start(tp);
11722
11723 tg3_hwmon_open(tp);
11724
11725 tg3_full_lock(tp, 0);
11726
11727 tg3_timer_start(tp);
11728 tg3_flag_set(tp, INIT_COMPLETE);
11729 tg3_enable_ints(tp);
11730
11731 tg3_ptp_resume(tp);
11732
11733 tg3_full_unlock(tp);
11734
11735 netif_tx_start_all_queues(dev);
11736
11737 /*
11738 	 * Reset the loopback feature if it was turned on while the device
11739 	 * was down; make sure that it is restored properly now.
11740 */
11741 if (dev->features & NETIF_F_LOOPBACK)
11742 tg3_set_loopback(dev, dev->features);
11743
11744 return 0;
11745
11746 out_free_irq:
11747 for (i = tp->irq_cnt - 1; i >= 0; i--) {
11748 struct tg3_napi *tnapi = &tp->napi[i];
11749 free_irq(tnapi->irq_vec, tnapi);
11750 }
11751
11752 out_napi_fini:
11753 tg3_napi_disable(tp);
11754 tg3_napi_fini(tp);
11755 tg3_free_consistent(tp);
11756
11757 out_ints_fini:
11758 tg3_ints_fini(tp);
11759
11760 return err;
11761 }
11762
11763 static void tg3_stop(struct tg3 *tp)
11764 {
11765 int i;
11766
11767 tg3_reset_task_cancel(tp);
11768 tg3_netif_stop(tp);
11769
11770 tg3_timer_stop(tp);
11771
11772 tg3_hwmon_close(tp);
11773
11774 tg3_phy_stop(tp);
11775
11776 tg3_full_lock(tp, 1);
11777
11778 tg3_disable_ints(tp);
11779
11780 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11781 tg3_free_rings(tp);
11782 tg3_flag_clear(tp, INIT_COMPLETE);
11783
11784 tg3_full_unlock(tp);
11785
11786 for (i = tp->irq_cnt - 1; i >= 0; i--) {
11787 struct tg3_napi *tnapi = &tp->napi[i];
11788 free_irq(tnapi->irq_vec, tnapi);
11789 }
11790
11791 tg3_ints_fini(tp);
11792
11793 tg3_napi_fini(tp);
11794
11795 tg3_free_consistent(tp);
11796 }
11797
11798 static int tg3_open(struct net_device *dev)
11799 {
11800 struct tg3 *tp = netdev_priv(dev);
11801 int err;
11802
11803 if (tp->pcierr_recovery) {
11804 netdev_err(dev, "Failed to open device. PCI error recovery "
11805 "in progress\n");
11806 return -EAGAIN;
11807 }
11808
11809 if (tp->fw_needed) {
11810 err = tg3_request_firmware(tp);
11811 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
11812 if (err) {
11813 netdev_warn(tp->dev, "EEE capability disabled\n");
11814 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11815 } else if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
11816 netdev_warn(tp->dev, "EEE capability restored\n");
11817 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
11818 }
11819 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
11820 if (err)
11821 return err;
11822 } else if (err) {
11823 netdev_warn(tp->dev, "TSO capability disabled\n");
11824 tg3_flag_clear(tp, TSO_CAPABLE);
11825 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
11826 netdev_notice(tp->dev, "TSO capability restored\n");
11827 tg3_flag_set(tp, TSO_CAPABLE);
11828 }
11829 }
11830
11831 tg3_carrier_off(tp);
11832
11833 err = tg3_power_up(tp);
11834 if (err)
11835 return err;
11836
11837 tg3_full_lock(tp, 0);
11838
11839 tg3_disable_ints(tp);
11840 tg3_flag_clear(tp, INIT_COMPLETE);
11841
11842 tg3_full_unlock(tp);
11843
11844 err = tg3_start(tp,
11845 !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN),
11846 true, true);
11847 if (err) {
11848 tg3_frob_aux_power(tp, false);
11849 pci_set_power_state(tp->pdev, PCI_D3hot);
11850 }
11851
11852 return err;
11853 }
11854
11855 static int tg3_close(struct net_device *dev)
11856 {
11857 struct tg3 *tp = netdev_priv(dev);
11858
11859 if (tp->pcierr_recovery) {
11860 netdev_err(dev, "Failed to close device. PCI error recovery "
11861 "in progress\n");
11862 return -EAGAIN;
11863 }
11864
11865 tg3_stop(tp);
11866
11867 if (pci_device_is_present(tp->pdev)) {
11868 tg3_power_down_prepare(tp);
11869
11870 tg3_carrier_off(tp);
11871 }
11872 return 0;
11873 }
11874
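/* Hardware statistics are kept as two 32-bit halves; reassemble them
 * into a u64, e.g. high = 0x1, low = 0x2 yields 0x100000002.
 */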
11875 static inline u64 get_stat64(tg3_stat64_t *val)
11876 {
11877 return ((u64)val->high << 32) | ((u64)val->low);
11878 }
11879
11880 static u64 tg3_calc_crc_errors(struct tg3 *tp)
11881 {
11882 struct tg3_hw_stats *hw_stats = tp->hw_stats;
11883
11884 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11885 (tg3_asic_rev(tp) == ASIC_REV_5700 ||
11886 tg3_asic_rev(tp) == ASIC_REV_5701)) {
11887 u32 val;
11888
11889 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
11890 tg3_writephy(tp, MII_TG3_TEST1,
11891 val | MII_TG3_TEST1_CRC_EN);
11892 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
11893 } else
11894 val = 0;
11895
11896 tp->phy_crc_errors += val;
11897
11898 return tp->phy_crc_errors;
11899 }
11900
11901 return get_stat64(&hw_stats->rx_fcs_errors);
11902 }
11903
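/* The chip's counters are zeroed by a chip reset, so each statistic is
 * reported as the pre-reset snapshot plus the live hardware counter.
 * ESTAT_ADD() expands to exactly that sum for one member, e.g.
 * ESTAT_ADD(rx_octets) becomes:
 *
 *	estats->rx_octets = old_estats->rx_octets +
 *			    get_stat64(&hw_stats->rx_octets);
 */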
11904 #define ESTAT_ADD(member) \
11905 estats->member = old_estats->member + \
11906 get_stat64(&hw_stats->member)
11907
11908 static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
11909 {
11910 struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
11911 struct tg3_hw_stats *hw_stats = tp->hw_stats;
11912
11913 ESTAT_ADD(rx_octets);
11914 ESTAT_ADD(rx_fragments);
11915 ESTAT_ADD(rx_ucast_packets);
11916 ESTAT_ADD(rx_mcast_packets);
11917 ESTAT_ADD(rx_bcast_packets);
11918 ESTAT_ADD(rx_fcs_errors);
11919 ESTAT_ADD(rx_align_errors);
11920 ESTAT_ADD(rx_xon_pause_rcvd);
11921 ESTAT_ADD(rx_xoff_pause_rcvd);
11922 ESTAT_ADD(rx_mac_ctrl_rcvd);
11923 ESTAT_ADD(rx_xoff_entered);
11924 ESTAT_ADD(rx_frame_too_long_errors);
11925 ESTAT_ADD(rx_jabbers);
11926 ESTAT_ADD(rx_undersize_packets);
11927 ESTAT_ADD(rx_in_length_errors);
11928 ESTAT_ADD(rx_out_length_errors);
11929 ESTAT_ADD(rx_64_or_less_octet_packets);
11930 ESTAT_ADD(rx_65_to_127_octet_packets);
11931 ESTAT_ADD(rx_128_to_255_octet_packets);
11932 ESTAT_ADD(rx_256_to_511_octet_packets);
11933 ESTAT_ADD(rx_512_to_1023_octet_packets);
11934 ESTAT_ADD(rx_1024_to_1522_octet_packets);
11935 ESTAT_ADD(rx_1523_to_2047_octet_packets);
11936 ESTAT_ADD(rx_2048_to_4095_octet_packets);
11937 ESTAT_ADD(rx_4096_to_8191_octet_packets);
11938 ESTAT_ADD(rx_8192_to_9022_octet_packets);
11939
11940 ESTAT_ADD(tx_octets);
11941 ESTAT_ADD(tx_collisions);
11942 ESTAT_ADD(tx_xon_sent);
11943 ESTAT_ADD(tx_xoff_sent);
11944 ESTAT_ADD(tx_flow_control);
11945 ESTAT_ADD(tx_mac_errors);
11946 ESTAT_ADD(tx_single_collisions);
11947 ESTAT_ADD(tx_mult_collisions);
11948 ESTAT_ADD(tx_deferred);
11949 ESTAT_ADD(tx_excessive_collisions);
11950 ESTAT_ADD(tx_late_collisions);
11951 ESTAT_ADD(tx_collide_2times);
11952 ESTAT_ADD(tx_collide_3times);
11953 ESTAT_ADD(tx_collide_4times);
11954 ESTAT_ADD(tx_collide_5times);
11955 ESTAT_ADD(tx_collide_6times);
11956 ESTAT_ADD(tx_collide_7times);
11957 ESTAT_ADD(tx_collide_8times);
11958 ESTAT_ADD(tx_collide_9times);
11959 ESTAT_ADD(tx_collide_10times);
11960 ESTAT_ADD(tx_collide_11times);
11961 ESTAT_ADD(tx_collide_12times);
11962 ESTAT_ADD(tx_collide_13times);
11963 ESTAT_ADD(tx_collide_14times);
11964 ESTAT_ADD(tx_collide_15times);
11965 ESTAT_ADD(tx_ucast_packets);
11966 ESTAT_ADD(tx_mcast_packets);
11967 ESTAT_ADD(tx_bcast_packets);
11968 ESTAT_ADD(tx_carrier_sense_errors);
11969 ESTAT_ADD(tx_discards);
11970 ESTAT_ADD(tx_errors);
11971
11972 ESTAT_ADD(dma_writeq_full);
11973 ESTAT_ADD(dma_write_prioq_full);
11974 ESTAT_ADD(rxbds_empty);
11975 ESTAT_ADD(rx_discards);
11976 ESTAT_ADD(rx_errors);
11977 ESTAT_ADD(rx_threshold_hit);
11978
11979 ESTAT_ADD(dma_readq_full);
11980 ESTAT_ADD(dma_read_prioq_full);
11981 ESTAT_ADD(tx_comp_queue_full);
11982
11983 ESTAT_ADD(ring_set_send_prod_index);
11984 ESTAT_ADD(ring_status_update);
11985 ESTAT_ADD(nic_irqs);
11986 ESTAT_ADD(nic_avoided_irqs);
11987 ESTAT_ADD(nic_tx_threshold_hit);
11988
11989 ESTAT_ADD(mbuf_lwm_thresh_hit);
11990 }
11991
11992 static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
11993 {
11994 struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
11995 struct tg3_hw_stats *hw_stats = tp->hw_stats;
11996 unsigned long rx_dropped;
11997 unsigned long tx_dropped;
11998 int i;
11999
12000 stats->rx_packets = old_stats->rx_packets +
12001 get_stat64(&hw_stats->rx_ucast_packets) +
12002 get_stat64(&hw_stats->rx_mcast_packets) +
12003 get_stat64(&hw_stats->rx_bcast_packets);
12004
12005 stats->tx_packets = old_stats->tx_packets +
12006 get_stat64(&hw_stats->tx_ucast_packets) +
12007 get_stat64(&hw_stats->tx_mcast_packets) +
12008 get_stat64(&hw_stats->tx_bcast_packets);
12009
12010 stats->rx_bytes = old_stats->rx_bytes +
12011 get_stat64(&hw_stats->rx_octets);
12012 stats->tx_bytes = old_stats->tx_bytes +
12013 get_stat64(&hw_stats->tx_octets);
12014
12015 stats->rx_errors = old_stats->rx_errors +
12016 get_stat64(&hw_stats->rx_errors);
12017 stats->tx_errors = old_stats->tx_errors +
12018 get_stat64(&hw_stats->tx_errors) +
12019 get_stat64(&hw_stats->tx_mac_errors) +
12020 get_stat64(&hw_stats->tx_carrier_sense_errors) +
12021 get_stat64(&hw_stats->tx_discards);
12022
12023 stats->multicast = old_stats->multicast +
12024 get_stat64(&hw_stats->rx_mcast_packets);
12025 stats->collisions = old_stats->collisions +
12026 get_stat64(&hw_stats->tx_collisions);
12027
12028 stats->rx_length_errors = old_stats->rx_length_errors +
12029 get_stat64(&hw_stats->rx_frame_too_long_errors) +
12030 get_stat64(&hw_stats->rx_undersize_packets);
12031
12032 stats->rx_frame_errors = old_stats->rx_frame_errors +
12033 get_stat64(&hw_stats->rx_align_errors);
12034 stats->tx_aborted_errors = old_stats->tx_aborted_errors +
12035 get_stat64(&hw_stats->tx_discards);
12036 stats->tx_carrier_errors = old_stats->tx_carrier_errors +
12037 get_stat64(&hw_stats->tx_carrier_sense_errors);
12038
12039 stats->rx_crc_errors = old_stats->rx_crc_errors +
12040 tg3_calc_crc_errors(tp);
12041
12042 stats->rx_missed_errors = old_stats->rx_missed_errors +
12043 get_stat64(&hw_stats->rx_discards);
12044
12045 /* Aggregate per-queue counters. The per-queue counters are updated
12046 * by a single writer, race-free. The result computed by this loop
12047 * might not be 100% accurate (counters can be updated in the middle of
12048 * the loop) but the next tg3_get_nstats() will recompute the current
12049 * value so it is acceptable.
12050 *
12051 * Note that these counters wrap around at 4G on 32bit machines.
12052 */
12053 rx_dropped = (unsigned long)(old_stats->rx_dropped);
12054 tx_dropped = (unsigned long)(old_stats->tx_dropped);
12055
12056 for (i = 0; i < tp->irq_cnt; i++) {
12057 struct tg3_napi *tnapi = &tp->napi[i];
12058
12059 rx_dropped += tnapi->rx_dropped;
12060 tx_dropped += tnapi->tx_dropped;
12061 }
12062
12063 stats->rx_dropped = rx_dropped;
12064 stats->tx_dropped = tx_dropped;
12065 }
12066
12067 static int tg3_get_regs_len(struct net_device *dev)
12068 {
12069 return TG3_REG_BLK_SIZE;
12070 }
12071
12072 static void tg3_get_regs(struct net_device *dev,
12073 struct ethtool_regs *regs, void *_p)
12074 {
12075 struct tg3 *tp = netdev_priv(dev);
12076
12077 regs->version = 0;
12078
12079 memset(_p, 0, TG3_REG_BLK_SIZE);
12080
12081 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
12082 return;
12083
12084 tg3_full_lock(tp, 0);
12085
12086 tg3_dump_legacy_regs(tp, (u32 *)_p);
12087
12088 tg3_full_unlock(tp);
12089 }
12090
12091 static int tg3_get_eeprom_len(struct net_device *dev)
12092 {
12093 struct tg3 *tp = netdev_priv(dev);
12094
12095 return tp->nvram_size;
12096 }
12097
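/* NVRAM is read in 4-byte words, so an arbitrary (offset, len) request
 * is split into up to three pieces: an unaligned head (read the whole
 * containing word, copy the needed bytes), a run of full words, and an
 * unaligned tail. E.g. offset=1 len=2 touches only bytes 1-2 of word 0,
 * the head-only case noted below.
 */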
12098 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
12099 {
12100 struct tg3 *tp = netdev_priv(dev);
12101 int ret, cpmu_restore = 0;
12102 u8 *pd;
12103 u32 i, offset, len, b_offset, b_count, cpmu_val = 0;
12104 __be32 val;
12105
12106 if (tg3_flag(tp, NO_NVRAM))
12107 return -EINVAL;
12108
12109 offset = eeprom->offset;
12110 len = eeprom->len;
12111 eeprom->len = 0;
12112
12113 eeprom->magic = TG3_EEPROM_MAGIC;
12114
12115 /* Override clock, link aware and link idle modes */
12116 if (tg3_flag(tp, CPMU_PRESENT)) {
12117 cpmu_val = tr32(TG3_CPMU_CTRL);
12118 if (cpmu_val & (CPMU_CTRL_LINK_AWARE_MODE |
12119 CPMU_CTRL_LINK_IDLE_MODE)) {
12120 tw32(TG3_CPMU_CTRL, cpmu_val &
12121 ~(CPMU_CTRL_LINK_AWARE_MODE |
12122 CPMU_CTRL_LINK_IDLE_MODE));
12123 cpmu_restore = 1;
12124 }
12125 }
12126 tg3_override_clk(tp);
12127
12128 if (offset & 3) {
12129 /* adjustments to start on required 4 byte boundary */
12130 b_offset = offset & 3;
12131 b_count = 4 - b_offset;
12132 if (b_count > len) {
12133 /* i.e. offset=1 len=2 */
12134 b_count = len;
12135 }
12136 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
12137 if (ret)
12138 goto eeprom_done;
12139 memcpy(data, ((char *)&val) + b_offset, b_count);
12140 len -= b_count;
12141 offset += b_count;
12142 eeprom->len += b_count;
12143 }
12144
12145 /* read bytes up to the last 4 byte boundary */
12146 pd = &data[eeprom->len];
12147 for (i = 0; i < (len - (len & 3)); i += 4) {
12148 ret = tg3_nvram_read_be32(tp, offset + i, &val);
12149 if (ret) {
12150 if (i)
12151 i -= 4;
12152 eeprom->len += i;
12153 goto eeprom_done;
12154 }
12155 memcpy(pd + i, &val, 4);
12156 if (need_resched()) {
12157 if (signal_pending(current)) {
12158 eeprom->len += i;
12159 ret = -EINTR;
12160 goto eeprom_done;
12161 }
12162 cond_resched();
12163 }
12164 }
12165 eeprom->len += i;
12166
12167 if (len & 3) {
12168 /* read last bytes not ending on 4 byte boundary */
12169 pd = &data[eeprom->len];
12170 b_count = len & 3;
12171 b_offset = offset + len - b_count;
12172 ret = tg3_nvram_read_be32(tp, b_offset, &val);
12173 if (ret)
12174 goto eeprom_done;
12175 memcpy(pd, &val, b_count);
12176 eeprom->len += b_count;
12177 }
12178 ret = 0;
12179
12180 eeprom_done:
12181 /* Restore clock, link aware and link idle modes */
12182 tg3_restore_clk(tp);
12183 if (cpmu_restore)
12184 tw32(TG3_CPMU_CTRL, cpmu_val);
12185
12186 return ret;
12187 }
12188
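/* Writes obey the same word granularity as reads, implemented as a
 * read-modify-write: the words straddling an unaligned start or end
 * are read first ('start'/'end' below), merged with the caller's bytes
 * in a scratch buffer, and the padded range is written back with a
 * single tg3_nvram_write_block() call.
 */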
12189 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
12190 {
12191 struct tg3 *tp = netdev_priv(dev);
12192 int ret;
12193 u32 offset, len, b_offset, odd_len;
12194 u8 *buf;
12195 __be32 start = 0, end;
12196
12197 if (tg3_flag(tp, NO_NVRAM) ||
12198 eeprom->magic != TG3_EEPROM_MAGIC)
12199 return -EINVAL;
12200
12201 offset = eeprom->offset;
12202 len = eeprom->len;
12203
12204 if ((b_offset = (offset & 3))) {
12205 /* adjustments to start on required 4 byte boundary */
12206 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
12207 if (ret)
12208 return ret;
12209 len += b_offset;
12210 offset &= ~3;
12211 if (len < 4)
12212 len = 4;
12213 }
12214
12215 odd_len = 0;
12216 if (len & 3) {
12217 /* adjustments to end on required 4 byte boundary */
12218 odd_len = 1;
12219 len = (len + 3) & ~3;
12220 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
12221 if (ret)
12222 return ret;
12223 }
12224
12225 buf = data;
12226 if (b_offset || odd_len) {
12227 buf = kmalloc(len, GFP_KERNEL);
12228 if (!buf)
12229 return -ENOMEM;
12230 if (b_offset)
12231 memcpy(buf, &start, 4);
12232 if (odd_len)
12233 memcpy(buf+len-4, &end, 4);
12234 memcpy(buf + b_offset, data, eeprom->len);
12235 }
12236
12237 ret = tg3_nvram_write_block(tp, offset, len, buf);
12238
12239 if (buf != data)
12240 kfree(buf);
12241
12242 return ret;
12243 }
12244
12245 static int tg3_get_link_ksettings(struct net_device *dev,
12246 struct ethtool_link_ksettings *cmd)
12247 {
12248 struct tg3 *tp = netdev_priv(dev);
12249 u32 supported, advertising;
12250
12251 if (tg3_flag(tp, USE_PHYLIB)) {
12252 struct phy_device *phydev;
12253 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12254 return -EAGAIN;
12255 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12256 phy_ethtool_ksettings_get(phydev, cmd);
12257
12258 return 0;
12259 }
12260
12261 supported = (SUPPORTED_Autoneg);
12262
12263 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12264 supported |= (SUPPORTED_1000baseT_Half |
12265 SUPPORTED_1000baseT_Full);
12266
12267 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
12268 supported |= (SUPPORTED_100baseT_Half |
12269 SUPPORTED_100baseT_Full |
12270 SUPPORTED_10baseT_Half |
12271 SUPPORTED_10baseT_Full |
12272 SUPPORTED_TP);
12273 cmd->base.port = PORT_TP;
12274 } else {
12275 supported |= SUPPORTED_FIBRE;
12276 cmd->base.port = PORT_FIBRE;
12277 }
12278 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
12279 supported);
12280
12281 advertising = tp->link_config.advertising;
12282 if (tg3_flag(tp, PAUSE_AUTONEG)) {
12283 if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
12284 if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
12285 advertising |= ADVERTISED_Pause;
12286 } else {
12287 advertising |= ADVERTISED_Pause |
12288 ADVERTISED_Asym_Pause;
12289 }
12290 } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
12291 advertising |= ADVERTISED_Asym_Pause;
12292 }
12293 }
12294 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
12295 advertising);
12296
12297 if (netif_running(dev) && tp->link_up) {
12298 cmd->base.speed = tp->link_config.active_speed;
12299 cmd->base.duplex = tp->link_config.active_duplex;
12300 ethtool_convert_legacy_u32_to_link_mode(
12301 cmd->link_modes.lp_advertising,
12302 tp->link_config.rmt_adv);
12303
12304 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
12305 if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
12306 cmd->base.eth_tp_mdix = ETH_TP_MDI_X;
12307 else
12308 cmd->base.eth_tp_mdix = ETH_TP_MDI;
12309 }
12310 } else {
12311 cmd->base.speed = SPEED_UNKNOWN;
12312 cmd->base.duplex = DUPLEX_UNKNOWN;
12313 cmd->base.eth_tp_mdix = ETH_TP_MDI_INVALID;
12314 }
12315 cmd->base.phy_address = tp->phy_addr;
12316 cmd->base.autoneg = tp->link_config.autoneg;
12317 return 0;
12318 }
12319
12320 static int tg3_set_link_ksettings(struct net_device *dev,
12321 const struct ethtool_link_ksettings *cmd)
12322 {
12323 struct tg3 *tp = netdev_priv(dev);
12324 u32 speed = cmd->base.speed;
12325 u32 advertising;
12326
12327 if (tg3_flag(tp, USE_PHYLIB)) {
12328 struct phy_device *phydev;
12329 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12330 return -EAGAIN;
12331 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12332 return phy_ethtool_ksettings_set(phydev, cmd);
12333 }
12334
12335 if (cmd->base.autoneg != AUTONEG_ENABLE &&
12336 cmd->base.autoneg != AUTONEG_DISABLE)
12337 return -EINVAL;
12338
12339 if (cmd->base.autoneg == AUTONEG_DISABLE &&
12340 cmd->base.duplex != DUPLEX_FULL &&
12341 cmd->base.duplex != DUPLEX_HALF)
12342 return -EINVAL;
12343
12344 ethtool_convert_link_mode_to_legacy_u32(&advertising,
12345 cmd->link_modes.advertising);
12346
12347 if (cmd->base.autoneg == AUTONEG_ENABLE) {
12348 u32 mask = ADVERTISED_Autoneg |
12349 ADVERTISED_Pause |
12350 ADVERTISED_Asym_Pause;
12351
12352 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12353 mask |= ADVERTISED_1000baseT_Half |
12354 ADVERTISED_1000baseT_Full;
12355
12356 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
12357 mask |= ADVERTISED_100baseT_Half |
12358 ADVERTISED_100baseT_Full |
12359 ADVERTISED_10baseT_Half |
12360 ADVERTISED_10baseT_Full |
12361 ADVERTISED_TP;
12362 else
12363 mask |= ADVERTISED_FIBRE;
12364
12365 if (advertising & ~mask)
12366 return -EINVAL;
12367
12368 mask &= (ADVERTISED_1000baseT_Half |
12369 ADVERTISED_1000baseT_Full |
12370 ADVERTISED_100baseT_Half |
12371 ADVERTISED_100baseT_Full |
12372 ADVERTISED_10baseT_Half |
12373 ADVERTISED_10baseT_Full);
12374
12375 advertising &= mask;
12376 } else {
12377 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
12378 if (speed != SPEED_1000)
12379 return -EINVAL;
12380
12381 if (cmd->base.duplex != DUPLEX_FULL)
12382 return -EINVAL;
12383 } else {
12384 if (speed != SPEED_100 &&
12385 speed != SPEED_10)
12386 return -EINVAL;
12387 }
12388 }
12389
12390 tg3_full_lock(tp, 0);
12391
12392 tp->link_config.autoneg = cmd->base.autoneg;
12393 if (cmd->base.autoneg == AUTONEG_ENABLE) {
12394 tp->link_config.advertising = (advertising |
12395 ADVERTISED_Autoneg);
12396 tp->link_config.speed = SPEED_UNKNOWN;
12397 tp->link_config.duplex = DUPLEX_UNKNOWN;
12398 } else {
12399 tp->link_config.advertising = 0;
12400 tp->link_config.speed = speed;
12401 tp->link_config.duplex = cmd->base.duplex;
12402 }
12403
12404 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
12405
12406 tg3_warn_mgmt_link_flap(tp);
12407
12408 if (netif_running(dev))
12409 tg3_setup_phy(tp, true);
12410
12411 tg3_full_unlock(tp);
12412
12413 return 0;
12414 }
12415
12416 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
12417 {
12418 struct tg3 *tp = netdev_priv(dev);
12419
12420 strscpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
12421 strscpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
12422 strscpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
12423 }
12424
12425 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
12426 {
12427 struct tg3 *tp = netdev_priv(dev);
12428
12429 if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
12430 wol->supported = WAKE_MAGIC;
12431 else
12432 wol->supported = 0;
12433 wol->wolopts = 0;
12434 if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
12435 wol->wolopts = WAKE_MAGIC;
12436 memset(&wol->sopass, 0, sizeof(wol->sopass));
12437 }
12438
12439 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
12440 {
12441 struct tg3 *tp = netdev_priv(dev);
12442 struct device *dp = &tp->pdev->dev;
12443
12444 if (wol->wolopts & ~WAKE_MAGIC)
12445 return -EINVAL;
12446 if ((wol->wolopts & WAKE_MAGIC) &&
12447 !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
12448 return -EINVAL;
12449
12450 device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
12451
12452 if (device_may_wakeup(dp))
12453 tg3_flag_set(tp, WOL_ENABLE);
12454 else
12455 tg3_flag_clear(tp, WOL_ENABLE);
12456
12457 return 0;
12458 }
12459
12460 static u32 tg3_get_msglevel(struct net_device *dev)
12461 {
12462 struct tg3 *tp = netdev_priv(dev);
12463 return tp->msg_enable;
12464 }
12465
12466 static void tg3_set_msglevel(struct net_device *dev, u32 value)
12467 {
12468 struct tg3 *tp = netdev_priv(dev);
12469 tp->msg_enable = value;
12470 }
12471
12472 static int tg3_nway_reset(struct net_device *dev)
12473 {
12474 struct tg3 *tp = netdev_priv(dev);
12475 int r;
12476
12477 if (!netif_running(dev))
12478 return -EAGAIN;
12479
12480 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12481 return -EINVAL;
12482
12483 tg3_warn_mgmt_link_flap(tp);
12484
12485 if (tg3_flag(tp, USE_PHYLIB)) {
12486 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12487 return -EAGAIN;
12488 r = phy_start_aneg(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
12489 } else {
12490 u32 bmcr;
12491
12492 spin_lock_bh(&tp->lock);
12493 r = -EINVAL;
12494 tg3_readphy(tp, MII_BMCR, &bmcr);
12495 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
12496 ((bmcr & BMCR_ANENABLE) ||
12497 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
12498 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
12499 BMCR_ANENABLE);
12500 r = 0;
12501 }
12502 spin_unlock_bh(&tp->lock);
12503 }
12504
12505 return r;
12506 }
12507
12508 static void tg3_get_ringparam(struct net_device *dev,
12509 struct ethtool_ringparam *ering,
12510 struct kernel_ethtool_ringparam *kernel_ering,
12511 struct netlink_ext_ack *extack)
12512 {
12513 struct tg3 *tp = netdev_priv(dev);
12514
12515 ering->rx_max_pending = tp->rx_std_ring_mask;
12516 if (tg3_flag(tp, JUMBO_RING_ENABLE))
12517 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
12518 else
12519 ering->rx_jumbo_max_pending = 0;
12520
12521 ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
12522
12523 ering->rx_pending = tp->rx_pending;
12524 if (tg3_flag(tp, JUMBO_RING_ENABLE))
12525 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
12526 else
12527 ering->rx_jumbo_pending = 0;
12528
12529 ering->tx_pending = tp->napi[0].tx_pending;
12530 }
12531
12532 static int tg3_set_ringparam(struct net_device *dev,
12533 struct ethtool_ringparam *ering,
12534 struct kernel_ethtool_ringparam *kernel_ering,
12535 struct netlink_ext_ack *extack)
12536 {
12537 struct tg3 *tp = netdev_priv(dev);
12538 int i, irq_sync = 0, err = 0;
12539 bool reset_phy = false;
12540
12541 if ((ering->rx_pending > tp->rx_std_ring_mask) ||
12542 (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
12543 (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
12544 (ering->tx_pending <= MAX_SKB_FRAGS) ||
12545 (tg3_flag(tp, TSO_BUG) &&
12546 (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
12547 return -EINVAL;
12548
12549 if (netif_running(dev)) {
12550 tg3_phy_stop(tp);
12551 tg3_netif_stop(tp);
12552 irq_sync = 1;
12553 }
12554
12555 tg3_full_lock(tp, irq_sync);
12556
12557 tp->rx_pending = ering->rx_pending;
12558
12559 if (tg3_flag(tp, MAX_RXPEND_64) &&
12560 tp->rx_pending > 63)
12561 tp->rx_pending = 63;
12562
12563 if (tg3_flag(tp, JUMBO_RING_ENABLE))
12564 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
12565
12566 for (i = 0; i < tp->irq_max; i++)
12567 tp->napi[i].tx_pending = ering->tx_pending;
12568
12569 if (netif_running(dev)) {
12570 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12571 /* Reset PHY to avoid PHY lock up */
12572 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
12573 tg3_asic_rev(tp) == ASIC_REV_5719 ||
12574 tg3_asic_rev(tp) == ASIC_REV_5720)
12575 reset_phy = true;
12576
12577 err = tg3_restart_hw(tp, reset_phy);
12578 if (!err)
12579 tg3_netif_start(tp);
12580 }
12581
12582 tg3_full_unlock(tp);
12583
12584 if (irq_sync && !err)
12585 tg3_phy_start(tp);
12586
12587 return err;
12588 }
12589
12590 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12591 {
12592 struct tg3 *tp = netdev_priv(dev);
12593
12594 epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
12595
12596 if (tp->link_config.flowctrl & FLOW_CTRL_RX)
12597 epause->rx_pause = 1;
12598 else
12599 epause->rx_pause = 0;
12600
12601 if (tp->link_config.flowctrl & FLOW_CTRL_TX)
12602 epause->tx_pause = 1;
12603 else
12604 epause->tx_pause = 0;
12605 }
12606
12607 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12608 {
12609 struct tg3 *tp = netdev_priv(dev);
12610 int err = 0;
12611 bool reset_phy = false;
12612
12613 if (tp->link_config.autoneg == AUTONEG_ENABLE)
12614 tg3_warn_mgmt_link_flap(tp);
12615
12616 if (tg3_flag(tp, USE_PHYLIB)) {
12617 struct phy_device *phydev;
12618
12619 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12620
12621 if (!phy_validate_pause(phydev, epause))
12622 return -EINVAL;
12623
12624 tp->link_config.flowctrl = 0;
12625 phy_set_asym_pause(phydev, epause->rx_pause, epause->tx_pause);
12626 if (epause->rx_pause) {
12627 tp->link_config.flowctrl |= FLOW_CTRL_RX;
12628
12629 if (epause->tx_pause) {
12630 tp->link_config.flowctrl |= FLOW_CTRL_TX;
12631 }
12632 } else if (epause->tx_pause) {
12633 tp->link_config.flowctrl |= FLOW_CTRL_TX;
12634 }
12635
12636 if (epause->autoneg)
12637 tg3_flag_set(tp, PAUSE_AUTONEG);
12638 else
12639 tg3_flag_clear(tp, PAUSE_AUTONEG);
12640
12641 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
12642 if (phydev->autoneg) {
12643 /* phy_set_asym_pause() will
12644 * renegotiate the link to inform our
12645 * link partner of our flow control
12646 * settings, even if the flow control
12647 * is forced. Let tg3_adjust_link()
12648 * do the final flow control setup.
12649 */
12650 return 0;
12651 }
12652
12653 if (!epause->autoneg)
12654 tg3_setup_flow_control(tp, 0, 0);
12655 }
12656 } else {
12657 int irq_sync = 0;
12658
12659 if (netif_running(dev)) {
12660 tg3_netif_stop(tp);
12661 irq_sync = 1;
12662 }
12663
12664 tg3_full_lock(tp, irq_sync);
12665
12666 if (epause->autoneg)
12667 tg3_flag_set(tp, PAUSE_AUTONEG);
12668 else
12669 tg3_flag_clear(tp, PAUSE_AUTONEG);
12670 if (epause->rx_pause)
12671 tp->link_config.flowctrl |= FLOW_CTRL_RX;
12672 else
12673 tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
12674 if (epause->tx_pause)
12675 tp->link_config.flowctrl |= FLOW_CTRL_TX;
12676 else
12677 tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
12678
12679 if (netif_running(dev)) {
12680 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12681 /* Reset PHY to avoid PHY lock up */
12682 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
12683 tg3_asic_rev(tp) == ASIC_REV_5719 ||
12684 tg3_asic_rev(tp) == ASIC_REV_5720)
12685 reset_phy = true;
12686
12687 err = tg3_restart_hw(tp, reset_phy);
12688 if (!err)
12689 tg3_netif_start(tp);
12690 }
12691
12692 tg3_full_unlock(tp);
12693 }
12694
12695 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
12696
12697 return err;
12698 }
12699
12700 static int tg3_get_sset_count(struct net_device *dev, int sset)
12701 {
12702 switch (sset) {
12703 case ETH_SS_TEST:
12704 return TG3_NUM_TEST;
12705 case ETH_SS_STATS:
12706 return TG3_NUM_STATS;
12707 default:
12708 return -EOPNOTSUPP;
12709 }
12710 }
12711
12712 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
12713 u32 *rules __always_unused)
12714 {
12715 struct tg3 *tp = netdev_priv(dev);
12716
12717 if (!tg3_flag(tp, SUPPORT_MSIX))
12718 return -EOPNOTSUPP;
12719
12720 switch (info->cmd) {
12721 case ETHTOOL_GRXRINGS:
12722 if (netif_running(tp->dev))
12723 info->data = tp->rxq_cnt;
12724 else {
12725 info->data = num_online_cpus();
12726 if (info->data > TG3_RSS_MAX_NUM_QS)
12727 info->data = TG3_RSS_MAX_NUM_QS;
12728 }
12729
12730 return 0;
12731
12732 default:
12733 return -EOPNOTSUPP;
12734 }
12735 }
12736
12737 static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
12738 {
12739 u32 size = 0;
12740 struct tg3 *tp = netdev_priv(dev);
12741
12742 if (tg3_flag(tp, SUPPORT_MSIX))
12743 size = TG3_RSS_INDIR_TBL_SIZE;
12744
12745 return size;
12746 }
12747
12748 static int tg3_get_rxfh(struct net_device *dev, struct ethtool_rxfh_param *rxfh)
12749 {
12750 struct tg3 *tp = netdev_priv(dev);
12751 int i;
12752
12753 rxfh->hfunc = ETH_RSS_HASH_TOP;
12754 if (!rxfh->indir)
12755 return 0;
12756
12757 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12758 rxfh->indir[i] = tp->rss_ind_tbl[i];
12759
12760 return 0;
12761 }
12762
12763 static int tg3_set_rxfh(struct net_device *dev, struct ethtool_rxfh_param *rxfh,
12764 struct netlink_ext_ack *extack)
12765 {
12766 struct tg3 *tp = netdev_priv(dev);
12767 size_t i;
12768
12769 /* We require at least one supported parameter to be changed and no
12770 * change in any of the unsupported parameters
12771 */
12772 if (rxfh->key ||
12773 (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
12774 rxfh->hfunc != ETH_RSS_HASH_TOP))
12775 return -EOPNOTSUPP;
12776
12777 if (!rxfh->indir)
12778 return 0;
12779
12780 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12781 tp->rss_ind_tbl[i] = rxfh->indir[i];
12782
12783 if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
12784 return 0;
12785
12786 /* It is legal to write the indirection
12787 * table while the device is running.
12788 */
12789 tg3_full_lock(tp, 0);
12790 tg3_rss_write_indir_tbl(tp);
12791 tg3_full_unlock(tp);
12792
12793 return 0;
12794 }
12795
12796 static void tg3_get_channels(struct net_device *dev,
12797 struct ethtool_channels *channel)
12798 {
12799 struct tg3 *tp = netdev_priv(dev);
12800 u32 deflt_qs = netif_get_num_default_rss_queues();
12801
12802 channel->max_rx = tp->rxq_max;
12803 channel->max_tx = tp->txq_max;
12804
12805 if (netif_running(dev)) {
12806 channel->rx_count = tp->rxq_cnt;
12807 channel->tx_count = tp->txq_cnt;
12808 } else {
12809 if (tp->rxq_req)
12810 channel->rx_count = tp->rxq_req;
12811 else
12812 channel->rx_count = min(deflt_qs, tp->rxq_max);
12813
12814 if (tp->txq_req)
12815 channel->tx_count = tp->txq_req;
12816 else
12817 channel->tx_count = min(deflt_qs, tp->txq_max);
12818 }
12819 }
12820
12821 static int tg3_set_channels(struct net_device *dev,
12822 struct ethtool_channels *channel)
12823 {
12824 struct tg3 *tp = netdev_priv(dev);
12825
12826 if (!tg3_flag(tp, SUPPORT_MSIX))
12827 return -EOPNOTSUPP;
12828
12829 if (channel->rx_count > tp->rxq_max ||
12830 channel->tx_count > tp->txq_max)
12831 return -EINVAL;
12832
12833 tp->rxq_req = channel->rx_count;
12834 tp->txq_req = channel->tx_count;
12835
12836 if (!netif_running(dev))
12837 return 0;
12838
12839 tg3_stop(tp);
12840
12841 tg3_carrier_off(tp);
12842
12843 tg3_start(tp, true, false, false);
12844
12845 return 0;
12846 }
12847
12848 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
12849 {
12850 switch (stringset) {
12851 case ETH_SS_STATS:
12852 		memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
12853 break;
12854 case ETH_SS_TEST:
12855 		memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
12856 break;
12857 default:
12858 WARN_ON(1); /* we need a WARN() */
12859 break;
12860 }
12861 }
12862
12863 static int tg3_set_phys_id(struct net_device *dev,
12864 enum ethtool_phys_id_state state)
12865 {
12866 struct tg3 *tp = netdev_priv(dev);
12867
12868 switch (state) {
12869 case ETHTOOL_ID_ACTIVE:
12870 return 1; /* cycle on/off once per second */
12871
12872 case ETHTOOL_ID_ON:
12873 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12874 LED_CTRL_1000MBPS_ON |
12875 LED_CTRL_100MBPS_ON |
12876 LED_CTRL_10MBPS_ON |
12877 LED_CTRL_TRAFFIC_OVERRIDE |
12878 LED_CTRL_TRAFFIC_BLINK |
12879 LED_CTRL_TRAFFIC_LED);
12880 break;
12881
12882 case ETHTOOL_ID_OFF:
12883 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12884 LED_CTRL_TRAFFIC_OVERRIDE);
12885 break;
12886
12887 case ETHTOOL_ID_INACTIVE:
12888 tw32(MAC_LED_CTRL, tp->led_ctrl);
12889 break;
12890 }
12891
12892 return 0;
12893 }
12894
12895 static void tg3_get_ethtool_stats(struct net_device *dev,
12896 struct ethtool_stats *estats, u64 *tmp_stats)
12897 {
12898 struct tg3 *tp = netdev_priv(dev);
12899
12900 if (tp->hw_stats)
12901 tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
12902 else
12903 memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
12904 }
12905
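/* Locate the VPD block: for images with the standard EEPROM magic,
 * walk the NVRAM directory for an extended-VPD entry, falling back to
 * the fixed legacy window (TG3_NVM_VPD_OFF/TG3_NVM_VPD_LEN); for
 * anything else, defer to the PCI core's VPD accessor.
 */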
12906 static __be32 *tg3_vpd_readblock(struct tg3 *tp, unsigned int *vpdlen)
12907 {
12908 int i;
12909 __be32 *buf;
12910 u32 offset = 0, len = 0;
12911 u32 magic, val;
12912
12913 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
12914 return NULL;
12915
12916 if (magic == TG3_EEPROM_MAGIC) {
12917 for (offset = TG3_NVM_DIR_START;
12918 offset < TG3_NVM_DIR_END;
12919 offset += TG3_NVM_DIRENT_SIZE) {
12920 if (tg3_nvram_read(tp, offset, &val))
12921 return NULL;
12922
12923 if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
12924 TG3_NVM_DIRTYPE_EXTVPD)
12925 break;
12926 }
12927
12928 if (offset != TG3_NVM_DIR_END) {
12929 len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
12930 if (tg3_nvram_read(tp, offset + 4, &offset))
12931 return NULL;
12932
12933 offset = tg3_nvram_logical_addr(tp, offset);
12934 }
12935
12936 if (!offset || !len) {
12937 offset = TG3_NVM_VPD_OFF;
12938 len = TG3_NVM_VPD_LEN;
12939 }
12940
12941 buf = kmalloc(len, GFP_KERNEL);
12942 if (!buf)
12943 return NULL;
12944
12945 for (i = 0; i < len; i += 4) {
12946 /* The data is in little-endian format in NVRAM.
12947 * Use the big-endian read routines to preserve
12948 * the byte order as it exists in NVRAM.
12949 */
12950 if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
12951 goto error;
12952 }
12953 *vpdlen = len;
12954 } else {
12955 buf = pci_vpd_alloc(tp->pdev, vpdlen);
12956 if (IS_ERR(buf))
12957 return NULL;
12958 }
12959
12960 return buf;
12961
12962 error:
12963 kfree(buf);
12964 return NULL;
12965 }
12966
12967 #define NVRAM_TEST_SIZE 0x100
12968 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14
12969 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18
12970 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c
12971 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE 0x20
12972 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE 0x24
12973 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE 0x50
12974 #define NVRAM_SELFBOOT_HW_SIZE 0x20
12975 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
12976
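/* The NVRAM self-test understands three image flavors, each with its
 * own integrity scheme: legacy images carry CRC32 checksums (verified
 * at offsets 0x10 and 0xfc below), selfboot firmware images must sum
 * to zero under an 8-bit checksum, and selfboot hardware images store
 * odd parity for each data byte.
 */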
12977 static int tg3_test_nvram(struct tg3 *tp)
12978 {
12979 u32 csum, magic;
12980 __be32 *buf;
12981 int i, j, k, err = 0, size;
12982 unsigned int len;
12983
12984 if (tg3_flag(tp, NO_NVRAM))
12985 return 0;
12986
12987 if (tg3_nvram_read(tp, 0, &magic) != 0)
12988 return -EIO;
12989
12990 if (magic == TG3_EEPROM_MAGIC)
12991 size = NVRAM_TEST_SIZE;
12992 else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
12993 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
12994 TG3_EEPROM_SB_FORMAT_1) {
12995 switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
12996 case TG3_EEPROM_SB_REVISION_0:
12997 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
12998 break;
12999 case TG3_EEPROM_SB_REVISION_2:
13000 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
13001 break;
13002 case TG3_EEPROM_SB_REVISION_3:
13003 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
13004 break;
13005 case TG3_EEPROM_SB_REVISION_4:
13006 size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
13007 break;
13008 case TG3_EEPROM_SB_REVISION_5:
13009 size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
13010 break;
13011 case TG3_EEPROM_SB_REVISION_6:
13012 size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
13013 break;
13014 default:
13015 return -EIO;
13016 }
13017 } else
13018 return 0;
13019 } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
13020 size = NVRAM_SELFBOOT_HW_SIZE;
13021 else
13022 return -EIO;
13023
13024 buf = kmalloc(size, GFP_KERNEL);
13025 if (buf == NULL)
13026 return -ENOMEM;
13027
13028 err = -EIO;
13029 for (i = 0, j = 0; i < size; i += 4, j++) {
13030 err = tg3_nvram_read_be32(tp, i, &buf[j]);
13031 if (err)
13032 break;
13033 }
13034 if (i < size)
13035 goto out;
13036
13037 /* Selfboot format */
13038 magic = be32_to_cpu(buf[0]);
13039 if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
13040 TG3_EEPROM_MAGIC_FW) {
13041 u8 *buf8 = (u8 *) buf, csum8 = 0;
13042
13043 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
13044 TG3_EEPROM_SB_REVISION_2) {
13045 /* For rev 2, the csum doesn't include the MBA. */
13046 for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
13047 csum8 += buf8[i];
13048 for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
13049 csum8 += buf8[i];
13050 } else {
13051 for (i = 0; i < size; i++)
13052 csum8 += buf8[i];
13053 }
13054
13055 if (csum8 == 0) {
13056 err = 0;
13057 goto out;
13058 }
13059
13060 err = -EIO;
13061 goto out;
13062 }
13063
13064 if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
13065 TG3_EEPROM_MAGIC_HW) {
13066 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
13067 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
13068 u8 *buf8 = (u8 *) buf;
13069
13070 /* Separate the parity bits and the data bytes. */
13071 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
13072 if ((i == 0) || (i == 8)) {
13073 int l;
13074 u8 msk;
13075
13076 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
13077 parity[k++] = buf8[i] & msk;
13078 i++;
13079 } else if (i == 16) {
13080 int l;
13081 u8 msk;
13082
13083 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
13084 parity[k++] = buf8[i] & msk;
13085 i++;
13086
13087 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
13088 parity[k++] = buf8[i] & msk;
13089 i++;
13090 }
13091 data[j++] = buf8[i];
13092 }
13093
13094 err = -EIO;
13095 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
13096 u8 hw8 = hweight8(data[i]);
13097
13098 if ((hw8 & 0x1) && parity[i])
13099 goto out;
13100 else if (!(hw8 & 0x1) && !parity[i])
13101 goto out;
13102 }
13103 err = 0;
13104 goto out;
13105 }
13106
13107 err = -EIO;
13108
13109 /* Bootstrap checksum at offset 0x10 */
13110 csum = calc_crc((unsigned char *) buf, 0x10);
13111 if (csum != le32_to_cpu(buf[0x10/4]))
13112 goto out;
13113
13114 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
13115 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
13116 if (csum != le32_to_cpu(buf[0xfc/4]))
13117 goto out;
13118
13119 kfree(buf);
13120
13121 buf = tg3_vpd_readblock(tp, &len);
13122 if (!buf)
13123 return -ENOMEM;
13124
13125 err = pci_vpd_check_csum(buf, len);
13126 /* go on if no checksum found */
13127 if (err == 1)
13128 err = 0;
13129 out:
13130 kfree(buf);
13131 return err;
13132 }
13133
13134 #define TG3_SERDES_TIMEOUT_SEC 2
13135 #define TG3_COPPER_TIMEOUT_SEC 6
13136
13137 static int tg3_test_link(struct tg3 *tp)
13138 {
13139 int i, max;
13140
13141 if (!netif_running(tp->dev))
13142 return -ENODEV;
13143
13144 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
13145 max = TG3_SERDES_TIMEOUT_SEC;
13146 else
13147 max = TG3_COPPER_TIMEOUT_SEC;
13148
13149 for (i = 0; i < max; i++) {
13150 if (tp->link_up)
13151 return 0;
13152
13153 if (msleep_interruptible(1000))
13154 break;
13155 }
13156
13157 return -EIO;
13158 }
13159
13160 /* Only test the commonly used registers */
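/* Roughly, each reg_tbl entry carries two masks: read_mask marks bits
 * with a fixed, known value that a plain read must return, while
 * write_mask marks bits that must accept a write and read back what
 * was written. The flags gate entries to the chip families where that
 * layout applies.
 */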
13161 static int tg3_test_registers(struct tg3 *tp)
13162 {
13163 int i, is_5705, is_5750;
13164 u32 offset, read_mask, write_mask, val, save_val, read_val;
13165 static struct {
13166 u16 offset;
13167 u16 flags;
13168 #define TG3_FL_5705 0x1
13169 #define TG3_FL_NOT_5705 0x2
13170 #define TG3_FL_NOT_5788 0x4
13171 #define TG3_FL_NOT_5750 0x8
13172 u32 read_mask;
13173 u32 write_mask;
13174 } reg_tbl[] = {
13175 /* MAC Control Registers */
13176 { MAC_MODE, TG3_FL_NOT_5705,
13177 0x00000000, 0x00ef6f8c },
13178 { MAC_MODE, TG3_FL_5705,
13179 0x00000000, 0x01ef6b8c },
13180 { MAC_STATUS, TG3_FL_NOT_5705,
13181 0x03800107, 0x00000000 },
13182 { MAC_STATUS, TG3_FL_5705,
13183 0x03800100, 0x00000000 },
13184 { MAC_ADDR_0_HIGH, 0x0000,
13185 0x00000000, 0x0000ffff },
13186 { MAC_ADDR_0_LOW, 0x0000,
13187 0x00000000, 0xffffffff },
13188 { MAC_RX_MTU_SIZE, 0x0000,
13189 0x00000000, 0x0000ffff },
13190 { MAC_TX_MODE, 0x0000,
13191 0x00000000, 0x00000070 },
13192 { MAC_TX_LENGTHS, 0x0000,
13193 0x00000000, 0x00003fff },
13194 { MAC_RX_MODE, TG3_FL_NOT_5705,
13195 0x00000000, 0x000007fc },
13196 { MAC_RX_MODE, TG3_FL_5705,
13197 0x00000000, 0x000007dc },
13198 { MAC_HASH_REG_0, 0x0000,
13199 0x00000000, 0xffffffff },
13200 { MAC_HASH_REG_1, 0x0000,
13201 0x00000000, 0xffffffff },
13202 { MAC_HASH_REG_2, 0x0000,
13203 0x00000000, 0xffffffff },
13204 { MAC_HASH_REG_3, 0x0000,
13205 0x00000000, 0xffffffff },
13206
13207 /* Receive Data and Receive BD Initiator Control Registers. */
13208 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
13209 0x00000000, 0xffffffff },
13210 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
13211 0x00000000, 0xffffffff },
13212 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
13213 0x00000000, 0x00000003 },
13214 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
13215 0x00000000, 0xffffffff },
13216 { RCVDBDI_STD_BD+0, 0x0000,
13217 0x00000000, 0xffffffff },
13218 { RCVDBDI_STD_BD+4, 0x0000,
13219 0x00000000, 0xffffffff },
13220 { RCVDBDI_STD_BD+8, 0x0000,
13221 0x00000000, 0xffff0002 },
13222 { RCVDBDI_STD_BD+0xc, 0x0000,
13223 0x00000000, 0xffffffff },
13224
13225 /* Receive BD Initiator Control Registers. */
13226 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
13227 0x00000000, 0xffffffff },
13228 { RCVBDI_STD_THRESH, TG3_FL_5705,
13229 0x00000000, 0x000003ff },
13230 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
13231 0x00000000, 0xffffffff },
13232
13233 /* Host Coalescing Control Registers. */
13234 { HOSTCC_MODE, TG3_FL_NOT_5705,
13235 0x00000000, 0x00000004 },
13236 { HOSTCC_MODE, TG3_FL_5705,
13237 0x00000000, 0x000000f6 },
13238 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
13239 0x00000000, 0xffffffff },
13240 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
13241 0x00000000, 0x000003ff },
13242 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
13243 0x00000000, 0xffffffff },
13244 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
13245 0x00000000, 0x000003ff },
13246 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
13247 0x00000000, 0xffffffff },
13248 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
13249 0x00000000, 0x000000ff },
13250 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
13251 0x00000000, 0xffffffff },
13252 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
13253 0x00000000, 0x000000ff },
13254 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
13255 0x00000000, 0xffffffff },
13256 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
13257 0x00000000, 0xffffffff },
13258 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
13259 0x00000000, 0xffffffff },
13260 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
13261 0x00000000, 0x000000ff },
13262 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
13263 0x00000000, 0xffffffff },
13264 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
13265 0x00000000, 0x000000ff },
13266 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
13267 0x00000000, 0xffffffff },
13268 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
13269 0x00000000, 0xffffffff },
13270 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
13271 0x00000000, 0xffffffff },
13272 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
13273 0x00000000, 0xffffffff },
13274 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
13275 0x00000000, 0xffffffff },
13276 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
13277 0xffffffff, 0x00000000 },
13278 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
13279 0xffffffff, 0x00000000 },
13280
13281 /* Buffer Manager Control Registers. */
13282 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
13283 0x00000000, 0x007fff80 },
13284 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
13285 0x00000000, 0x007fffff },
13286 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
13287 0x00000000, 0x0000003f },
13288 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
13289 0x00000000, 0x000001ff },
13290 { BUFMGR_MB_HIGH_WATER, 0x0000,
13291 0x00000000, 0x000001ff },
13292 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
13293 0xffffffff, 0x00000000 },
13294 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
13295 0xffffffff, 0x00000000 },
13296
13297 /* Mailbox Registers */
13298 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
13299 0x00000000, 0x000001ff },
13300 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
13301 0x00000000, 0x000001ff },
13302 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
13303 0x00000000, 0x000007ff },
13304 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
13305 0x00000000, 0x000001ff },
13306
13307 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
13308 };
13309
13310 is_5705 = is_5750 = 0;
13311 if (tg3_flag(tp, 5705_PLUS)) {
13312 is_5705 = 1;
13313 if (tg3_flag(tp, 5750_PLUS))
13314 is_5750 = 1;
13315 }
13316
13317 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
13318 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
13319 continue;
13320
13321 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
13322 continue;
13323
13324 if (tg3_flag(tp, IS_5788) &&
13325 (reg_tbl[i].flags & TG3_FL_NOT_5788))
13326 continue;
13327
13328 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
13329 continue;
13330
13331 offset = (u32) reg_tbl[i].offset;
13332 read_mask = reg_tbl[i].read_mask;
13333 write_mask = reg_tbl[i].write_mask;
13334
13335 /* Save the original register content */
13336 save_val = tr32(offset);
13337
13338 /* Determine the read-only value. */
13339 read_val = save_val & read_mask;
13340
13341 /* Write zero to the register, then make sure the read-only bits
13342 * are not changed and the read/write bits are all zeros.
13343 */
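/* Worked example from the table above: MAC_MODE on a 5705-class part
 * has read_mask 0x00000000 and write_mask 0x01ef6b8c, so writing zero
 * must read back zero in all write_mask bits, and writing 0x01ef6b8c
 * must read back exactly those bits set.
 */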
13344 tw32(offset, 0);
13345
13346 val = tr32(offset);
13347
13348 /* Test the read-only and read/write bits. */
13349 if (((val & read_mask) != read_val) || (val & write_mask))
13350 goto out;
13351
13352 /* Write ones to all the bits defined by RdMask and WrMask, then
13353 * make sure the read-only bits are not changed and the
13354 * read/write bits are all ones.
13355 */
13356 tw32(offset, read_mask | write_mask);
13357
13358 val = tr32(offset);
13359
13360 /* Test the read-only bits. */
13361 if ((val & read_mask) != read_val)
13362 goto out;
13363
13364 /* Test the read/write bits. */
13365 if ((val & write_mask) != write_mask)
13366 goto out;
13367
13368 tw32(offset, save_val);
13369 }
13370
13371 return 0;
13372
13373 out:
13374 if (netif_msg_hw(tp))
13375 netdev_err(tp->dev,
13376 "Register test failed at offset %x\n", offset);
13377 tw32(offset, save_val);
13378 return -EIO;
13379 }
13380
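/* Scratch memory test helper: write 0x00000000, 0xffffffff and
 * 0xaa55a55a in turn to every 32-bit word in [offset, offset + len) and
 * verify each read-back.  A typical call, taking an entry from the 5705
 * table below, would be tg3_do_mem_test(tp, 0x00004000, 0x00800).
 */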
13381 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
13382 {
13383 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
13384 int i;
13385 u32 j;
13386
13387 for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
13388 for (j = 0; j < len; j += 4) {
13389 u32 val;
13390
13391 tg3_write_mem(tp, offset + j, test_pattern[i]);
13392 tg3_read_mem(tp, offset + j, &val);
13393 if (val != test_pattern[i])
13394 return -EIO;
13395 }
13396 }
13397 return 0;
13398 }
13399
13400 static int tg3_test_memory(struct tg3 *tp)
13401 {
13402 static struct mem_entry {
13403 u32 offset;
13404 u32 len;
13405 } mem_tbl_570x[] = {
13406 { 0x00000000, 0x00b50},
13407 { 0x00002000, 0x1c000},
13408 { 0xffffffff, 0x00000}
13409 }, mem_tbl_5705[] = {
13410 { 0x00000100, 0x0000c},
13411 { 0x00000200, 0x00008},
13412 { 0x00004000, 0x00800},
13413 { 0x00006000, 0x01000},
13414 { 0x00008000, 0x02000},
13415 { 0x00010000, 0x0e000},
13416 { 0xffffffff, 0x00000}
13417 }, mem_tbl_5755[] = {
13418 { 0x00000200, 0x00008},
13419 { 0x00004000, 0x00800},
13420 { 0x00006000, 0x00800},
13421 { 0x00008000, 0x02000},
13422 { 0x00010000, 0x0c000},
13423 { 0xffffffff, 0x00000}
13424 }, mem_tbl_5906[] = {
13425 { 0x00000200, 0x00008},
13426 { 0x00004000, 0x00400},
13427 { 0x00006000, 0x00400},
13428 { 0x00008000, 0x01000},
13429 { 0x00010000, 0x01000},
13430 { 0xffffffff, 0x00000}
13431 }, mem_tbl_5717[] = {
13432 { 0x00000200, 0x00008},
13433 { 0x00010000, 0x0a000},
13434 { 0x00020000, 0x13c00},
13435 { 0xffffffff, 0x00000}
13436 }, mem_tbl_57765[] = {
13437 { 0x00000200, 0x00008},
13438 { 0x00004000, 0x00800},
13439 { 0x00006000, 0x09800},
13440 { 0x00010000, 0x0a000},
13441 { 0xffffffff, 0x00000}
13442 };
13443 struct mem_entry *mem_tbl;
13444 int err = 0;
13445 int i;
13446
13447 if (tg3_flag(tp, 5717_PLUS))
13448 mem_tbl = mem_tbl_5717;
13449 else if (tg3_flag(tp, 57765_CLASS) ||
13450 tg3_asic_rev(tp) == ASIC_REV_5762)
13451 mem_tbl = mem_tbl_57765;
13452 else if (tg3_flag(tp, 5755_PLUS))
13453 mem_tbl = mem_tbl_5755;
13454 else if (tg3_asic_rev(tp) == ASIC_REV_5906)
13455 mem_tbl = mem_tbl_5906;
13456 else if (tg3_flag(tp, 5705_PLUS))
13457 mem_tbl = mem_tbl_5705;
13458 else
13459 mem_tbl = mem_tbl_570x;
13460
13461 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
13462 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
13463 if (err)
13464 break;
13465 }
13466
13467 return err;
13468 }
13469
13470 #define TG3_TSO_MSS 500
13471
13472 #define TG3_TSO_IP_HDR_LEN 20
13473 #define TG3_TSO_TCP_HDR_LEN 20
13474 #define TG3_TSO_TCP_OPT_LEN 12
13475
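/* Canned packet used by the TSO loopback test: an ethertype of 0x0800
 * (IPv4), a 20-byte IP header (protocol 6 = TCP, 10.0.0.1 -> 10.0.0.2),
 * and a 32-byte TCP header whose data offset of 8 covers the 12 bytes
 * of options (two NOPs plus a timestamp).  The MAC addresses and the IP
 * total length are filled in separately by tg3_run_loopback().
 */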
13476 static const u8 tg3_tso_header[] = {
13477 0x08, 0x00,
13478 0x45, 0x00, 0x00, 0x00,
13479 0x00, 0x00, 0x40, 0x00,
13480 0x40, 0x06, 0x00, 0x00,
13481 0x0a, 0x00, 0x00, 0x01,
13482 0x0a, 0x00, 0x00, 0x02,
13483 0x0d, 0x00, 0xe0, 0x00,
13484 0x00, 0x00, 0x01, 0x00,
13485 0x00, 0x00, 0x02, 0x00,
13486 0x80, 0x10, 0x10, 0x00,
13487 0x14, 0x09, 0x00, 0x00,
13488 0x01, 0x01, 0x08, 0x0a,
13489 0x11, 0x11, 0x11, 0x11,
13490 0x11, 0x11, 0x11, 0x11,
13491 };
13492
13493 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
13494 {
13495 u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
13496 u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
13497 u32 budget;
13498 struct sk_buff *skb;
13499 u8 *tx_data, *rx_data;
13500 dma_addr_t map;
13501 int num_pkts, tx_len, rx_len, i, err;
13502 struct tg3_rx_buffer_desc *desc;
13503 struct tg3_napi *tnapi, *rnapi;
13504 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
13505
13506 tnapi = &tp->napi[0];
13507 rnapi = &tp->napi[0];
13508 if (tp->irq_cnt > 1) {
13509 if (tg3_flag(tp, ENABLE_RSS))
13510 rnapi = &tp->napi[1];
13511 if (tg3_flag(tp, ENABLE_TSS))
13512 tnapi = &tp->napi[1];
13513 }
13514 coal_now = tnapi->coal_now | rnapi->coal_now;
13515
13516 err = -EIO;
13517
13518 tx_len = pktsz;
13519 skb = netdev_alloc_skb(tp->dev, tx_len);
13520 if (!skb)
13521 return -ENOMEM;
13522
13523 tx_data = skb_put(skb, tx_len);
13524 memcpy(tx_data, tp->dev->dev_addr, ETH_ALEN);
13525 memset(tx_data + ETH_ALEN, 0x0, 8);
13526
13527 tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
13528
13529 if (tso_loopback) {
13530 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
13531
13532 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
13533 TG3_TSO_TCP_OPT_LEN;
13534
13535 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
13536 sizeof(tg3_tso_header));
13537 mss = TG3_TSO_MSS;
13538
13539 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
13540 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
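/* For example, with pktsz == ETH_FRAME_LEN (1514), the TSO payload is
 * 1514 - 12 (two MAC addresses) - 54 (canned header) = 1448 bytes, so
 * DIV_ROUND_UP(1448, 500) yields num_pkts == 3 segments on the wire.
 */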
13541
13542 /* Set the total length field in the IP header */
13543 iph->tot_len = htons((u16)(mss + hdr_len));
13544
13545 base_flags = (TXD_FLAG_CPU_PRE_DMA |
13546 TXD_FLAG_CPU_POST_DMA);
13547
13548 if (tg3_flag(tp, HW_TSO_1) ||
13549 tg3_flag(tp, HW_TSO_2) ||
13550 tg3_flag(tp, HW_TSO_3)) {
13551 struct tcphdr *th;
13552 val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
13553 th = (struct tcphdr *)&tx_data[val];
13554 th->check = 0;
13555 } else
13556 base_flags |= TXD_FLAG_TCPUDP_CSUM;
13557
13558 if (tg3_flag(tp, HW_TSO_3)) {
13559 mss |= (hdr_len & 0xc) << 12;
13560 if (hdr_len & 0x10)
13561 base_flags |= 0x00000010;
13562 base_flags |= (hdr_len & 0x3e0) << 5;
13563 } else if (tg3_flag(tp, HW_TSO_2))
13564 mss |= hdr_len << 9;
13565 else if (tg3_flag(tp, HW_TSO_1) ||
13566 tg3_asic_rev(tp) == ASIC_REV_5705) {
13567 mss |= (TG3_TSO_TCP_OPT_LEN << 9);
13568 } else {
13569 base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
13570 }
13571
13572 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
13573 } else {
13574 num_pkts = 1;
13575 data_off = ETH_HLEN;
13576
13577 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
13578 tx_len > VLAN_ETH_FRAME_LEN)
13579 base_flags |= TXD_FLAG_JMB_PKT;
13580 }
13581
13582 for (i = data_off; i < tx_len; i++)
13583 tx_data[i] = (u8) (i & 0xff);
13584
13585 map = dma_map_single(&tp->pdev->dev, skb->data, tx_len, DMA_TO_DEVICE);
13586 if (dma_mapping_error(&tp->pdev->dev, map)) {
13587 dev_kfree_skb(skb);
13588 return -EIO;
13589 }
13590
13591 val = tnapi->tx_prod;
13592 tnapi->tx_buffers[val].skb = skb;
13593 dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
13594
13595 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13596 rnapi->coal_now);
13597
13598 udelay(10);
13599
13600 rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
13601
13602 budget = tg3_tx_avail(tnapi);
13603 if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
13604 base_flags | TXD_FLAG_END, mss, 0)) {
13605 tnapi->tx_buffers[val].skb = NULL;
13606 dev_kfree_skb(skb);
13607 return -EIO;
13608 }
13609
13610 tnapi->tx_prod++;
13611
13612 /* Sync BD data before updating mailbox */
13613 wmb();
13614
13615 tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
13616 tr32_mailbox(tnapi->prodmbox);
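/* The mailbox read-back above likely serves to flush the posted write
 * so the NIC sees the new producer index before the polling below.
 */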
13617
13618 udelay(10);
13619
13620 /* Allow up to 350 usec (35 polls of 10 usec each) for the TX and RX indices to settle on some 10/100 Mbps devices. */
13621 for (i = 0; i < 35; i++) {
13622 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13623 coal_now);
13624
13625 udelay(10);
13626
13627 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
13628 rx_idx = rnapi->hw_status->idx[0].rx_producer;
13629 if ((tx_idx == tnapi->tx_prod) &&
13630 (rx_idx == (rx_start_idx + num_pkts)))
13631 break;
13632 }
13633
13634 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
13635 dev_kfree_skb(skb);
13636
13637 if (tx_idx != tnapi->tx_prod)
13638 goto out;
13639
13640 if (rx_idx != rx_start_idx + num_pkts)
13641 goto out;
13642
13643 val = data_off;
13644 while (rx_idx != rx_start_idx) {
13645 desc = &rnapi->rx_rcb[rx_start_idx++];
13646 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
13647 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
13648
13649 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
13650 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
13651 goto out;
13652
13653 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
13654 - ETH_FCS_LEN;
13655
13656 if (!tso_loopback) {
13657 if (rx_len != tx_len)
13658 goto out;
13659
13660 if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
13661 if (opaque_key != RXD_OPAQUE_RING_STD)
13662 goto out;
13663 } else {
13664 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
13665 goto out;
13666 }
13667 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
13668 (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
13669 >> RXD_TCPCSUM_SHIFT != 0xffff) {
13670 goto out;
13671 }
13672
13673 if (opaque_key == RXD_OPAQUE_RING_STD) {
13674 rx_data = tpr->rx_std_buffers[desc_idx].data;
13675 map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
13676 mapping);
13677 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
13678 rx_data = tpr->rx_jmb_buffers[desc_idx].data;
13679 map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
13680 mapping);
13681 } else
13682 goto out;
13683
13684 dma_sync_single_for_cpu(&tp->pdev->dev, map, rx_len,
13685 DMA_FROM_DEVICE);
13686
13687 rx_data += TG3_RX_OFFSET(tp);
13688 for (i = data_off; i < rx_len; i++, val++) {
13689 if (*(rx_data + i) != (u8) (val & 0xff))
13690 goto out;
13691 }
13692 }
13693
13694 err = 0;
13695
13696 /* tg3_free_rings will unmap and free the rx_data */
13697 out:
13698 return err;
13699 }
13700
13701 #define TG3_STD_LOOPBACK_FAILED 1
13702 #define TG3_JMB_LOOPBACK_FAILED 2
13703 #define TG3_TSO_LOOPBACK_FAILED 4
13704 #define TG3_LOOPBACK_FAILED \
13705 (TG3_STD_LOOPBACK_FAILED | \
13706 TG3_JMB_LOOPBACK_FAILED | \
13707 TG3_TSO_LOOPBACK_FAILED)
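/* Each test result below is a bitmask of these failures; e.g. a value
 * of 5 (STD | TSO) in data[TG3_PHY_LOOPB_TEST] means the standard and
 * TSO loopback packets failed while the jumbo packet passed or did not
 * run.
 */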
13708
13709 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
13710 {
13711 int err = -EIO;
13712 u32 eee_cap;
13713 u32 jmb_pkt_sz = 9000;
13714
13715 if (tp->dma_limit)
13716 jmb_pkt_sz = tp->dma_limit - ETH_HLEN;
13717
13718 eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
13719 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
13720
13721 if (!netif_running(tp->dev)) {
13722 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13723 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13724 if (do_extlpbk)
13725 data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13726 goto done;
13727 }
13728
13729 err = tg3_reset_hw(tp, true);
13730 if (err) {
13731 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13732 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13733 if (do_extlpbk)
13734 data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13735 goto done;
13736 }
13737
13738 if (tg3_flag(tp, ENABLE_RSS)) {
13739 int i;
13740
13741 /* Reroute all rx packets to the 1st queue */
13742 for (i = MAC_RSS_INDIR_TBL_0;
13743 i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
13744 tw32(i, 0x0);
13745 }
13746
13747 /* HW errata - mac loopback fails in some cases on 5780.
13748 * Normal traffic and PHY loopback are not affected by
13749 * errata. Also, the MAC loopback test is deprecated for
13750 * all newer ASIC revisions.
13751 */
13752 if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
13753 !tg3_flag(tp, CPMU_PRESENT)) {
13754 tg3_mac_loopback(tp, true);
13755
13756 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13757 data[TG3_MAC_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13758
13759 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13760 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13761 data[TG3_MAC_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13762
13763 tg3_mac_loopback(tp, false);
13764 }
13765
13766 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
13767 !tg3_flag(tp, USE_PHYLIB)) {
13768 int i;
13769
13770 tg3_phy_lpbk_set(tp, 0, false);
13771
13772 /* Wait for link */
13773 for (i = 0; i < 100; i++) {
13774 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
13775 break;
13776 mdelay(1);
13777 }
13778
13779 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13780 data[TG3_PHY_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13781 if (tg3_flag(tp, TSO_CAPABLE) &&
13782 tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13783 data[TG3_PHY_LOOPB_TEST] |= TG3_TSO_LOOPBACK_FAILED;
13784 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13785 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13786 data[TG3_PHY_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13787
13788 if (do_extlpbk) {
13789 tg3_phy_lpbk_set(tp, 0, true);
13790
13791 /* All link indications report up, but the hardware
13792 * isn't really ready for about 20 msec. Double it
13793 * to be sure.
13794 */
13795 mdelay(40);
13796
13797 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13798 data[TG3_EXT_LOOPB_TEST] |=
13799 TG3_STD_LOOPBACK_FAILED;
13800 if (tg3_flag(tp, TSO_CAPABLE) &&
13801 tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13802 data[TG3_EXT_LOOPB_TEST] |=
13803 TG3_TSO_LOOPBACK_FAILED;
13804 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13805 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13806 data[TG3_EXT_LOOPB_TEST] |=
13807 TG3_JMB_LOOPBACK_FAILED;
13808 }
13809
13810 /* Re-enable gphy autopowerdown. */
13811 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
13812 tg3_phy_toggle_apd(tp, true);
13813 }
13814
13815 err = (data[TG3_MAC_LOOPB_TEST] | data[TG3_PHY_LOOPB_TEST] |
13816 data[TG3_EXT_LOOPB_TEST]) ? -EIO : 0;
13817
13818 done:
13819 tp->phy_flags |= eee_cap;
13820
13821 return err;
13822 }
13823
13824 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
13825 u64 *data)
13826 {
13827 struct tg3 *tp = netdev_priv(dev);
13828 bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
13829
13830 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
13831 if (tg3_power_up(tp)) {
13832 etest->flags |= ETH_TEST_FL_FAILED;
13833 memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
13834 return;
13835 }
13836 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
13837 }
13838
13839 memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
13840
13841 if (tg3_test_nvram(tp) != 0) {
13842 etest->flags |= ETH_TEST_FL_FAILED;
13843 data[TG3_NVRAM_TEST] = 1;
13844 }
13845 if (!doextlpbk && tg3_test_link(tp)) {
13846 etest->flags |= ETH_TEST_FL_FAILED;
13847 data[TG3_LINK_TEST] = 1;
13848 }
13849 if (etest->flags & ETH_TEST_FL_OFFLINE) {
13850 int err, err2 = 0, irq_sync = 0;
13851
13852 if (netif_running(dev)) {
13853 tg3_phy_stop(tp);
13854 tg3_netif_stop(tp);
13855 irq_sync = 1;
13856 }
13857
13858 tg3_full_lock(tp, irq_sync);
13859 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
13860 err = tg3_nvram_lock(tp);
13861 tg3_halt_cpu(tp, RX_CPU_BASE);
13862 if (!tg3_flag(tp, 5705_PLUS))
13863 tg3_halt_cpu(tp, TX_CPU_BASE);
13864 if (!err)
13865 tg3_nvram_unlock(tp);
13866
13867 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
13868 tg3_phy_reset(tp);
13869
13870 if (tg3_test_registers(tp) != 0) {
13871 etest->flags |= ETH_TEST_FL_FAILED;
13872 data[TG3_REGISTER_TEST] = 1;
13873 }
13874
13875 if (tg3_test_memory(tp) != 0) {
13876 etest->flags |= ETH_TEST_FL_FAILED;
13877 data[TG3_MEMORY_TEST] = 1;
13878 }
13879
13880 if (doextlpbk)
13881 etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
13882
13883 if (tg3_test_loopback(tp, data, doextlpbk))
13884 etest->flags |= ETH_TEST_FL_FAILED;
13885
13886 tg3_full_unlock(tp);
13887
13888 if (tg3_test_interrupt(tp) != 0) {
13889 etest->flags |= ETH_TEST_FL_FAILED;
13890 data[TG3_INTERRUPT_TEST] = 1;
13891 }
13892
13893 tg3_full_lock(tp, 0);
13894
13895 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13896 if (netif_running(dev)) {
13897 tg3_flag_set(tp, INIT_COMPLETE);
13898 err2 = tg3_restart_hw(tp, true);
13899 if (!err2)
13900 tg3_netif_start(tp);
13901 }
13902
13903 tg3_full_unlock(tp);
13904
13905 if (irq_sync && !err2)
13906 tg3_phy_start(tp);
13907 }
13908 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
13909 tg3_power_down_prepare(tp);
13910
13911 }
13912
13913 static int tg3_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
13914 {
13915 struct tg3 *tp = netdev_priv(dev);
13916 struct hwtstamp_config stmpconf;
13917
13918 if (!tg3_flag(tp, PTP_CAPABLE))
13919 return -EOPNOTSUPP;
13920
13921 if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
13922 return -EFAULT;
13923
13924 if (stmpconf.tx_type != HWTSTAMP_TX_ON &&
13925 stmpconf.tx_type != HWTSTAMP_TX_OFF)
13926 return -ERANGE;
13927
13928 switch (stmpconf.rx_filter) {
13929 case HWTSTAMP_FILTER_NONE:
13930 tp->rxptpctl = 0;
13931 break;
13932 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
13933 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13934 TG3_RX_PTP_CTL_ALL_V1_EVENTS;
13935 break;
13936 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
13937 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13938 TG3_RX_PTP_CTL_SYNC_EVNT;
13939 break;
13940 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
13941 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13942 TG3_RX_PTP_CTL_DELAY_REQ;
13943 break;
13944 case HWTSTAMP_FILTER_PTP_V2_EVENT:
13945 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13946 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13947 break;
13948 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
13949 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13950 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13951 break;
13952 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
13953 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13954 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13955 break;
13956 case HWTSTAMP_FILTER_PTP_V2_SYNC:
13957 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13958 TG3_RX_PTP_CTL_SYNC_EVNT;
13959 break;
13960 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
13961 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13962 TG3_RX_PTP_CTL_SYNC_EVNT;
13963 break;
13964 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
13965 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13966 TG3_RX_PTP_CTL_SYNC_EVNT;
13967 break;
13968 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
13969 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13970 TG3_RX_PTP_CTL_DELAY_REQ;
13971 break;
13972 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
13973 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13974 TG3_RX_PTP_CTL_DELAY_REQ;
13975 break;
13976 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
13977 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13978 TG3_RX_PTP_CTL_DELAY_REQ;
13979 break;
13980 default:
13981 return -ERANGE;
13982 }
13983
13984 if (netif_running(dev) && tp->rxptpctl)
13985 tw32(TG3_RX_PTP_CTL,
13986 tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
13987
13988 if (stmpconf.tx_type == HWTSTAMP_TX_ON)
13989 tg3_flag_set(tp, TX_TSTAMP_EN);
13990 else
13991 tg3_flag_clear(tp, TX_TSTAMP_EN);
13992
13993 return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
13994 -EFAULT : 0;
13995 }
13996
13997 static int tg3_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
13998 {
13999 struct tg3 *tp = netdev_priv(dev);
14000 struct hwtstamp_config stmpconf;
14001
14002 if (!tg3_flag(tp, PTP_CAPABLE))
14003 return -EOPNOTSUPP;
14004
14005 stmpconf.flags = 0;
14006 stmpconf.tx_type = (tg3_flag(tp, TX_TSTAMP_EN) ?
14007 HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF);
14008
14009 switch (tp->rxptpctl) {
14010 case 0:
14011 stmpconf.rx_filter = HWTSTAMP_FILTER_NONE;
14012 break;
14013 case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_ALL_V1_EVENTS:
14014 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
14015 break;
14016 case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
14017 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
14018 break;
14019 case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_DELAY_REQ:
14020 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
14021 break;
14022 case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
14023 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
14024 break;
14025 case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
14026 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
14027 break;
14028 case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
14029 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
14030 break;
14031 case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
14032 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
14033 break;
14034 case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
14035 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_SYNC;
14036 break;
14037 case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
14038 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
14039 break;
14040 case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
14041 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
14042 break;
14043 case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
14044 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ;
14045 break;
14046 case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_DELAY_REQ:
14047 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
14048 break;
14049 default:
14050 WARN_ON_ONCE(1);
14051 return -ERANGE;
14052 }
14053
14054 return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
14055 -EFAULT : 0;
14056 }
14057
14058 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
14059 {
14060 struct mii_ioctl_data *data = if_mii(ifr);
14061 struct tg3 *tp = netdev_priv(dev);
14062 int err;
14063
14064 if (tg3_flag(tp, USE_PHYLIB)) {
14065 struct phy_device *phydev;
14066 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
14067 return -EAGAIN;
14068 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
14069 return phy_mii_ioctl(phydev, ifr, cmd);
14070 }
14071
14072 switch (cmd) {
14073 case SIOCGMIIPHY:
14074 data->phy_id = tp->phy_addr;
14075
14076 fallthrough;
14077 case SIOCGMIIREG: {
14078 u32 mii_regval;
14079
14080 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
14081 break; /* We have no PHY */
14082
14083 if (!netif_running(dev))
14084 return -EAGAIN;
14085
14086 spin_lock_bh(&tp->lock);
14087 err = __tg3_readphy(tp, data->phy_id & 0x1f,
14088 data->reg_num & 0x1f, &mii_regval);
14089 spin_unlock_bh(&tp->lock);
14090
14091 data->val_out = mii_regval;
14092
14093 return err;
14094 }
14095
14096 case SIOCSMIIREG:
14097 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
14098 break; /* We have no PHY */
14099
14100 if (!netif_running(dev))
14101 return -EAGAIN;
14102
14103 spin_lock_bh(&tp->lock);
14104 err = __tg3_writephy(tp, data->phy_id & 0x1f,
14105 data->reg_num & 0x1f, data->val_in);
14106 spin_unlock_bh(&tp->lock);
14107
14108 return err;
14109
14110 case SIOCSHWTSTAMP:
14111 return tg3_hwtstamp_set(dev, ifr);
14112
14113 case SIOCGHWTSTAMP:
14114 return tg3_hwtstamp_get(dev, ifr);
14115
14116 default:
14117 /* do nothing */
14118 break;
14119 }
14120 return -EOPNOTSUPP;
14121 }
14122
14123 static int tg3_get_coalesce(struct net_device *dev,
14124 struct ethtool_coalesce *ec,
14125 struct kernel_ethtool_coalesce *kernel_coal,
14126 struct netlink_ext_ack *extack)
14127 {
14128 struct tg3 *tp = netdev_priv(dev);
14129
14130 memcpy(ec, &tp->coal, sizeof(*ec));
14131 return 0;
14132 }
14133
14134 static int tg3_set_coalesce(struct net_device *dev,
14135 struct ethtool_coalesce *ec,
14136 struct kernel_ethtool_coalesce *kernel_coal,
14137 struct netlink_ext_ack *extack)
14138 {
14139 struct tg3 *tp = netdev_priv(dev);
14140 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
14141 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
14142
14143 if (!tg3_flag(tp, 5705_PLUS)) {
14144 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
14145 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
14146 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
14147 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
14148 }
14149
14150 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
14151 (!ec->rx_coalesce_usecs) ||
14152 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
14153 (!ec->tx_coalesce_usecs) ||
14154 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
14155 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
14156 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
14157 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
14158 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
14159 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
14160 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
14161 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
14162 return -EINVAL;
14163
14164 /* Only copy relevant parameters, ignore all others. */
14165 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
14166 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
14167 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
14168 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
14169 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
14170 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
14171 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
14172 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
14173 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
14174
14175 if (netif_running(dev)) {
14176 tg3_full_lock(tp, 0);
14177 __tg3_set_coalesce(tp, &tp->coal);
14178 tg3_full_unlock(tp);
14179 }
14180 return 0;
14181 }
14182
14183 static int tg3_set_eee(struct net_device *dev, struct ethtool_eee *edata)
14184 {
14185 struct tg3 *tp = netdev_priv(dev);
14186
14187 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
14188 netdev_warn(tp->dev, "Board does not support EEE!\n");
14189 return -EOPNOTSUPP;
14190 }
14191
14192 if (edata->advertised != tp->eee.advertised) {
14193 netdev_warn(tp->dev,
14194 "Direct manipulation of EEE advertisement is not supported\n");
14195 return -EINVAL;
14196 }
14197
14198 if (edata->tx_lpi_timer > TG3_CPMU_DBTMR1_LNKIDLE_MAX) {
14199 netdev_warn(tp->dev,
14200 "Maximal Tx Lpi timer supported is %#x(u)\n",
14201 TG3_CPMU_DBTMR1_LNKIDLE_MAX);
14202 return -EINVAL;
14203 }
14204
14205 tp->eee = *edata;
14206
14207 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
14208 tg3_warn_mgmt_link_flap(tp);
14209
14210 if (netif_running(tp->dev)) {
14211 tg3_full_lock(tp, 0);
14212 tg3_setup_eee(tp);
14213 tg3_phy_reset(tp);
14214 tg3_full_unlock(tp);
14215 }
14216
14217 return 0;
14218 }
14219
14220 static int tg3_get_eee(struct net_device *dev, struct ethtool_eee *edata)
14221 {
14222 struct tg3 *tp = netdev_priv(dev);
14223
14224 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
14225 netdev_warn(tp->dev,
14226 "Board does not support EEE!\n");
14227 return -EOPNOTSUPP;
14228 }
14229
14230 *edata = tp->eee;
14231 return 0;
14232 }
14233
14234 static const struct ethtool_ops tg3_ethtool_ops = {
14235 .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
14236 ETHTOOL_COALESCE_MAX_FRAMES |
14237 ETHTOOL_COALESCE_USECS_IRQ |
14238 ETHTOOL_COALESCE_MAX_FRAMES_IRQ |
14239 ETHTOOL_COALESCE_STATS_BLOCK_USECS,
14240 .get_drvinfo = tg3_get_drvinfo,
14241 .get_regs_len = tg3_get_regs_len,
14242 .get_regs = tg3_get_regs,
14243 .get_wol = tg3_get_wol,
14244 .set_wol = tg3_set_wol,
14245 .get_msglevel = tg3_get_msglevel,
14246 .set_msglevel = tg3_set_msglevel,
14247 .nway_reset = tg3_nway_reset,
14248 .get_link = ethtool_op_get_link,
14249 .get_eeprom_len = tg3_get_eeprom_len,
14250 .get_eeprom = tg3_get_eeprom,
14251 .set_eeprom = tg3_set_eeprom,
14252 .get_ringparam = tg3_get_ringparam,
14253 .set_ringparam = tg3_set_ringparam,
14254 .get_pauseparam = tg3_get_pauseparam,
14255 .set_pauseparam = tg3_set_pauseparam,
14256 .self_test = tg3_self_test,
14257 .get_strings = tg3_get_strings,
14258 .set_phys_id = tg3_set_phys_id,
14259 .get_ethtool_stats = tg3_get_ethtool_stats,
14260 .get_coalesce = tg3_get_coalesce,
14261 .set_coalesce = tg3_set_coalesce,
14262 .get_sset_count = tg3_get_sset_count,
14263 .get_rxnfc = tg3_get_rxnfc,
14264 .get_rxfh_indir_size = tg3_get_rxfh_indir_size,
14265 .get_rxfh = tg3_get_rxfh,
14266 .set_rxfh = tg3_set_rxfh,
14267 .get_channels = tg3_get_channels,
14268 .set_channels = tg3_set_channels,
14269 .get_ts_info = tg3_get_ts_info,
14270 .get_eee = tg3_get_eee,
14271 .set_eee = tg3_set_eee,
14272 .get_link_ksettings = tg3_get_link_ksettings,
14273 .set_link_ksettings = tg3_set_link_ksettings,
14274 };
14275
14276 static void tg3_get_stats64(struct net_device *dev,
14277 struct rtnl_link_stats64 *stats)
14278 {
14279 struct tg3 *tp = netdev_priv(dev);
14280
14281 spin_lock_bh(&tp->lock);
14282 if (!tp->hw_stats || !tg3_flag(tp, INIT_COMPLETE)) {
14283 *stats = tp->net_stats_prev;
14284 spin_unlock_bh(&tp->lock);
14285 return;
14286 }
14287
14288 tg3_get_nstats(tp, stats);
14289 spin_unlock_bh(&tp->lock);
14290 }
14291
14292 static void tg3_set_rx_mode(struct net_device *dev)
14293 {
14294 struct tg3 *tp = netdev_priv(dev);
14295
14296 if (!netif_running(dev))
14297 return;
14298
14299 tg3_full_lock(tp, 0);
14300 __tg3_set_rx_mode(dev);
14301 tg3_full_unlock(tp);
14302 }
14303
14304 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
14305 int new_mtu)
14306 {
14307 dev->mtu = new_mtu;
14308
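/* 5780-class parts apparently cannot do TSO and jumbo frames at the
 * same time, so the flags below trade TSO_CAPABLE off against the
 * jumbo ring rather than enabling both at once.
 */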
14309 if (new_mtu > ETH_DATA_LEN) {
14310 if (tg3_flag(tp, 5780_CLASS)) {
14311 netdev_update_features(dev);
14312 tg3_flag_clear(tp, TSO_CAPABLE);
14313 } else {
14314 tg3_flag_set(tp, JUMBO_RING_ENABLE);
14315 }
14316 } else {
14317 if (tg3_flag(tp, 5780_CLASS)) {
14318 tg3_flag_set(tp, TSO_CAPABLE);
14319 netdev_update_features(dev);
14320 }
14321 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
14322 }
14323 }
14324
14325 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
14326 {
14327 struct tg3 *tp = netdev_priv(dev);
14328 int err;
14329 bool reset_phy = false;
14330
14331 if (!netif_running(dev)) {
14332 /* We'll just catch it later when the
14333 * device is brought up.
14334 */
14335 tg3_set_mtu(dev, tp, new_mtu);
14336 return 0;
14337 }
14338
14339 tg3_phy_stop(tp);
14340
14341 tg3_netif_stop(tp);
14342
14343 tg3_set_mtu(dev, tp, new_mtu);
14344
14345 tg3_full_lock(tp, 1);
14346
14347 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
14348
14349 /* Reset PHY, otherwise the read DMA engine will be in a mode that
14350 * breaks all requests to 256 bytes.
14351 */
14352 if (tg3_asic_rev(tp) == ASIC_REV_57766 ||
14353 tg3_asic_rev(tp) == ASIC_REV_5717 ||
14354 tg3_asic_rev(tp) == ASIC_REV_5719 ||
14355 tg3_asic_rev(tp) == ASIC_REV_5720)
14356 reset_phy = true;
14357
14358 err = tg3_restart_hw(tp, reset_phy);
14359
14360 if (!err)
14361 tg3_netif_start(tp);
14362
14363 tg3_full_unlock(tp);
14364
14365 if (!err)
14366 tg3_phy_start(tp);
14367
14368 return err;
14369 }
14370
14371 static const struct net_device_ops tg3_netdev_ops = {
14372 .ndo_open = tg3_open,
14373 .ndo_stop = tg3_close,
14374 .ndo_start_xmit = tg3_start_xmit,
14375 .ndo_get_stats64 = tg3_get_stats64,
14376 .ndo_validate_addr = eth_validate_addr,
14377 .ndo_set_rx_mode = tg3_set_rx_mode,
14378 .ndo_set_mac_address = tg3_set_mac_addr,
14379 .ndo_eth_ioctl = tg3_ioctl,
14380 .ndo_tx_timeout = tg3_tx_timeout,
14381 .ndo_change_mtu = tg3_change_mtu,
14382 .ndo_fix_features = tg3_fix_features,
14383 .ndo_set_features = tg3_set_features,
14384 #ifdef CONFIG_NET_POLL_CONTROLLER
14385 .ndo_poll_controller = tg3_poll_controller,
14386 #endif
14387 };
14388
14389 static void tg3_get_eeprom_size(struct tg3 *tp)
14390 {
14391 u32 cursize, val, magic;
14392
14393 tp->nvram_size = EEPROM_CHIP_SIZE;
14394
14395 if (tg3_nvram_read(tp, 0, &magic) != 0)
14396 return;
14397
14398 if ((magic != TG3_EEPROM_MAGIC) &&
14399 ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
14400 ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
14401 return;
14402
14403 /*
14404 * Size the chip by reading offsets at increasing powers of two.
14405 * When we encounter our validation signature, we know the addressing
14406 * has wrapped around, and thus have our chip size.
14407 */
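/* For example, on a hypothetical 1KB part, reads at 0x10, 0x20, ...,
 * 0x200 return ordinary data; the read at 0x400 wraps back to offset 0
 * and returns the magic value, leaving tp->nvram_size == 0x400.
 */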
14408 cursize = 0x10;
14409
14410 while (cursize < tp->nvram_size) {
14411 if (tg3_nvram_read(tp, cursize, &val) != 0)
14412 return;
14413
14414 if (val == magic)
14415 break;
14416
14417 cursize <<= 1;
14418 }
14419
14420 tp->nvram_size = cursize;
14421 }
14422
14423 static void tg3_get_nvram_size(struct tg3 *tp)
14424 {
14425 u32 val;
14426
14427 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
14428 return;
14429
14430 /* Selfboot format */
14431 if (val != TG3_EEPROM_MAGIC) {
14432 tg3_get_eeprom_size(tp);
14433 return;
14434 }
14435
14436 if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
14437 if (val != 0) {
14438 /* This is confusing. We want to operate on the
14439 * 16-bit value at offset 0xf2. The tg3_nvram_read()
14440 * call will read from NVRAM and byteswap the data
14441 * according to the byteswapping settings for all
14442 * other register accesses. This ensures the data we
14443 * want will always reside in the lower 16-bits.
14444 * However, the data in NVRAM is in LE format, which
14445 * means the data from the NVRAM read will always be
14446 * opposite the endianness of the CPU. The 16-bit
14447 * byteswap then brings the data to CPU endianness.
14448 */
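/* Worked example, assuming the field at 0xf2 stores the size in KB:
 * a 512KB image stores 0x0200 there in LE order, the read leaves the
 * low 16 bits byte-reversed as 0x0002, and swab16() restores 0x0200,
 * giving 0x0200 * 1024 = 512KB.
 */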
14449 tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
14450 return;
14451 }
14452 }
14453 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14454 }
14455
14456 static void tg3_get_nvram_info(struct tg3 *tp)
14457 {
14458 u32 nvcfg1;
14459
14460 nvcfg1 = tr32(NVRAM_CFG1);
14461 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
14462 tg3_flag_set(tp, FLASH);
14463 } else {
14464 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14465 tw32(NVRAM_CFG1, nvcfg1);
14466 }
14467
14468 if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
14469 tg3_flag(tp, 5780_CLASS)) {
14470 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
14471 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
14472 tp->nvram_jedecnum = JEDEC_ATMEL;
14473 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
14474 tg3_flag_set(tp, NVRAM_BUFFERED);
14475 break;
14476 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
14477 tp->nvram_jedecnum = JEDEC_ATMEL;
14478 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
14479 break;
14480 case FLASH_VENDOR_ATMEL_EEPROM:
14481 tp->nvram_jedecnum = JEDEC_ATMEL;
14482 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14483 tg3_flag_set(tp, NVRAM_BUFFERED);
14484 break;
14485 case FLASH_VENDOR_ST:
14486 tp->nvram_jedecnum = JEDEC_ST;
14487 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
14488 tg3_flag_set(tp, NVRAM_BUFFERED);
14489 break;
14490 case FLASH_VENDOR_SAIFUN:
14491 tp->nvram_jedecnum = JEDEC_SAIFUN;
14492 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
14493 break;
14494 case FLASH_VENDOR_SST_SMALL:
14495 case FLASH_VENDOR_SST_LARGE:
14496 tp->nvram_jedecnum = JEDEC_SST;
14497 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
14498 break;
14499 }
14500 } else {
14501 tp->nvram_jedecnum = JEDEC_ATMEL;
14502 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
14503 tg3_flag_set(tp, NVRAM_BUFFERED);
14504 }
14505 }
14506
14507 static void tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
14508 {
14509 switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
14510 case FLASH_5752PAGE_SIZE_256:
14511 tp->nvram_pagesize = 256;
14512 break;
14513 case FLASH_5752PAGE_SIZE_512:
14514 tp->nvram_pagesize = 512;
14515 break;
14516 case FLASH_5752PAGE_SIZE_1K:
14517 tp->nvram_pagesize = 1024;
14518 break;
14519 case FLASH_5752PAGE_SIZE_2K:
14520 tp->nvram_pagesize = 2048;
14521 break;
14522 case FLASH_5752PAGE_SIZE_4K:
14523 tp->nvram_pagesize = 4096;
14524 break;
14525 case FLASH_5752PAGE_SIZE_264:
14526 tp->nvram_pagesize = 264;
14527 break;
14528 case FLASH_5752PAGE_SIZE_528:
14529 tp->nvram_pagesize = 528;
14530 break;
14531 }
14532 }
14533
14534 static void tg3_get_5752_nvram_info(struct tg3 *tp)
14535 {
14536 u32 nvcfg1;
14537
14538 nvcfg1 = tr32(NVRAM_CFG1);
14539
14540 /* NVRAM protection for TPM */
14541 if (nvcfg1 & (1 << 27))
14542 tg3_flag_set(tp, PROTECTED_NVRAM);
14543
14544 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14545 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
14546 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
14547 tp->nvram_jedecnum = JEDEC_ATMEL;
14548 tg3_flag_set(tp, NVRAM_BUFFERED);
14549 break;
14550 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14551 tp->nvram_jedecnum = JEDEC_ATMEL;
14552 tg3_flag_set(tp, NVRAM_BUFFERED);
14553 tg3_flag_set(tp, FLASH);
14554 break;
14555 case FLASH_5752VENDOR_ST_M45PE10:
14556 case FLASH_5752VENDOR_ST_M45PE20:
14557 case FLASH_5752VENDOR_ST_M45PE40:
14558 tp->nvram_jedecnum = JEDEC_ST;
14559 tg3_flag_set(tp, NVRAM_BUFFERED);
14560 tg3_flag_set(tp, FLASH);
14561 break;
14562 }
14563
14564 if (tg3_flag(tp, FLASH)) {
14565 tg3_nvram_get_pagesize(tp, nvcfg1);
14566 } else {
14567 /* For eeprom, set pagesize to maximum eeprom size */
14568 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14569
14570 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14571 tw32(NVRAM_CFG1, nvcfg1);
14572 }
14573 }
14574
14575 static void tg3_get_5755_nvram_info(struct tg3 *tp)
14576 {
14577 u32 nvcfg1, protect = 0;
14578
14579 nvcfg1 = tr32(NVRAM_CFG1);
14580
14581 /* NVRAM protection for TPM */
14582 if (nvcfg1 & (1 << 27)) {
14583 tg3_flag_set(tp, PROTECTED_NVRAM);
14584 protect = 1;
14585 }
14586
14587 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
14588 switch (nvcfg1) {
14589 case FLASH_5755VENDOR_ATMEL_FLASH_1:
14590 case FLASH_5755VENDOR_ATMEL_FLASH_2:
14591 case FLASH_5755VENDOR_ATMEL_FLASH_3:
14592 case FLASH_5755VENDOR_ATMEL_FLASH_5:
14593 tp->nvram_jedecnum = JEDEC_ATMEL;
14594 tg3_flag_set(tp, NVRAM_BUFFERED);
14595 tg3_flag_set(tp, FLASH);
14596 tp->nvram_pagesize = 264;
14597 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
14598 nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
14599 tp->nvram_size = (protect ? 0x3e200 :
14600 TG3_NVRAM_SIZE_512KB);
14601 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
14602 tp->nvram_size = (protect ? 0x1f200 :
14603 TG3_NVRAM_SIZE_256KB);
14604 else
14605 tp->nvram_size = (protect ? 0x1f200 :
14606 TG3_NVRAM_SIZE_128KB);
14607 break;
14608 case FLASH_5752VENDOR_ST_M45PE10:
14609 case FLASH_5752VENDOR_ST_M45PE20:
14610 case FLASH_5752VENDOR_ST_M45PE40:
14611 tp->nvram_jedecnum = JEDEC_ST;
14612 tg3_flag_set(tp, NVRAM_BUFFERED);
14613 tg3_flag_set(tp, FLASH);
14614 tp->nvram_pagesize = 256;
14615 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
14616 tp->nvram_size = (protect ?
14617 TG3_NVRAM_SIZE_64KB :
14618 TG3_NVRAM_SIZE_128KB);
14619 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
14620 tp->nvram_size = (protect ?
14621 TG3_NVRAM_SIZE_64KB :
14622 TG3_NVRAM_SIZE_256KB);
14623 else
14624 tp->nvram_size = (protect ?
14625 TG3_NVRAM_SIZE_128KB :
14626 TG3_NVRAM_SIZE_512KB);
14627 break;
14628 }
14629 }
14630
14631 static void tg3_get_5787_nvram_info(struct tg3 *tp)
14632 {
14633 u32 nvcfg1;
14634
14635 nvcfg1 = tr32(NVRAM_CFG1);
14636
14637 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14638 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
14639 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
14640 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
14641 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
14642 tp->nvram_jedecnum = JEDEC_ATMEL;
14643 tg3_flag_set(tp, NVRAM_BUFFERED);
14644 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14645
14646 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14647 tw32(NVRAM_CFG1, nvcfg1);
14648 break;
14649 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14650 case FLASH_5755VENDOR_ATMEL_FLASH_1:
14651 case FLASH_5755VENDOR_ATMEL_FLASH_2:
14652 case FLASH_5755VENDOR_ATMEL_FLASH_3:
14653 tp->nvram_jedecnum = JEDEC_ATMEL;
14654 tg3_flag_set(tp, NVRAM_BUFFERED);
14655 tg3_flag_set(tp, FLASH);
14656 tp->nvram_pagesize = 264;
14657 break;
14658 case FLASH_5752VENDOR_ST_M45PE10:
14659 case FLASH_5752VENDOR_ST_M45PE20:
14660 case FLASH_5752VENDOR_ST_M45PE40:
14661 tp->nvram_jedecnum = JEDEC_ST;
14662 tg3_flag_set(tp, NVRAM_BUFFERED);
14663 tg3_flag_set(tp, FLASH);
14664 tp->nvram_pagesize = 256;
14665 break;
14666 }
14667 }
14668
14669 static void tg3_get_5761_nvram_info(struct tg3 *tp)
14670 {
14671 u32 nvcfg1, protect = 0;
14672
14673 nvcfg1 = tr32(NVRAM_CFG1);
14674
14675 /* NVRAM protection for TPM */
14676 if (nvcfg1 & (1 << 27)) {
14677 tg3_flag_set(tp, PROTECTED_NVRAM);
14678 protect = 1;
14679 }
14680
14681 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
14682 switch (nvcfg1) {
14683 case FLASH_5761VENDOR_ATMEL_ADB021D:
14684 case FLASH_5761VENDOR_ATMEL_ADB041D:
14685 case FLASH_5761VENDOR_ATMEL_ADB081D:
14686 case FLASH_5761VENDOR_ATMEL_ADB161D:
14687 case FLASH_5761VENDOR_ATMEL_MDB021D:
14688 case FLASH_5761VENDOR_ATMEL_MDB041D:
14689 case FLASH_5761VENDOR_ATMEL_MDB081D:
14690 case FLASH_5761VENDOR_ATMEL_MDB161D:
14691 tp->nvram_jedecnum = JEDEC_ATMEL;
14692 tg3_flag_set(tp, NVRAM_BUFFERED);
14693 tg3_flag_set(tp, FLASH);
14694 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14695 tp->nvram_pagesize = 256;
14696 break;
14697 case FLASH_5761VENDOR_ST_A_M45PE20:
14698 case FLASH_5761VENDOR_ST_A_M45PE40:
14699 case FLASH_5761VENDOR_ST_A_M45PE80:
14700 case FLASH_5761VENDOR_ST_A_M45PE16:
14701 case FLASH_5761VENDOR_ST_M_M45PE20:
14702 case FLASH_5761VENDOR_ST_M_M45PE40:
14703 case FLASH_5761VENDOR_ST_M_M45PE80:
14704 case FLASH_5761VENDOR_ST_M_M45PE16:
14705 tp->nvram_jedecnum = JEDEC_ST;
14706 tg3_flag_set(tp, NVRAM_BUFFERED);
14707 tg3_flag_set(tp, FLASH);
14708 tp->nvram_pagesize = 256;
14709 break;
14710 }
14711
14712 if (protect) {
14713 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
14714 } else {
14715 switch (nvcfg1) {
14716 case FLASH_5761VENDOR_ATMEL_ADB161D:
14717 case FLASH_5761VENDOR_ATMEL_MDB161D:
14718 case FLASH_5761VENDOR_ST_A_M45PE16:
14719 case FLASH_5761VENDOR_ST_M_M45PE16:
14720 tp->nvram_size = TG3_NVRAM_SIZE_2MB;
14721 break;
14722 case FLASH_5761VENDOR_ATMEL_ADB081D:
14723 case FLASH_5761VENDOR_ATMEL_MDB081D:
14724 case FLASH_5761VENDOR_ST_A_M45PE80:
14725 case FLASH_5761VENDOR_ST_M_M45PE80:
14726 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14727 break;
14728 case FLASH_5761VENDOR_ATMEL_ADB041D:
14729 case FLASH_5761VENDOR_ATMEL_MDB041D:
14730 case FLASH_5761VENDOR_ST_A_M45PE40:
14731 case FLASH_5761VENDOR_ST_M_M45PE40:
14732 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14733 break;
14734 case FLASH_5761VENDOR_ATMEL_ADB021D:
14735 case FLASH_5761VENDOR_ATMEL_MDB021D:
14736 case FLASH_5761VENDOR_ST_A_M45PE20:
14737 case FLASH_5761VENDOR_ST_M_M45PE20:
14738 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14739 break;
14740 }
14741 }
14742 }
14743
14744 static void tg3_get_5906_nvram_info(struct tg3 *tp)
14745 {
14746 tp->nvram_jedecnum = JEDEC_ATMEL;
14747 tg3_flag_set(tp, NVRAM_BUFFERED);
14748 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14749 }
14750
14751 static void tg3_get_57780_nvram_info(struct tg3 *tp)
14752 {
14753 u32 nvcfg1;
14754
14755 nvcfg1 = tr32(NVRAM_CFG1);
14756
14757 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14758 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
14759 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
14760 tp->nvram_jedecnum = JEDEC_ATMEL;
14761 tg3_flag_set(tp, NVRAM_BUFFERED);
14762 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14763
14764 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14765 tw32(NVRAM_CFG1, nvcfg1);
14766 return;
14767 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14768 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
14769 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
14770 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
14771 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
14772 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
14773 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
14774 tp->nvram_jedecnum = JEDEC_ATMEL;
14775 tg3_flag_set(tp, NVRAM_BUFFERED);
14776 tg3_flag_set(tp, FLASH);
14777
14778 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14779 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14780 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
14781 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
14782 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14783 break;
14784 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
14785 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
14786 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14787 break;
14788 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
14789 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
14790 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14791 break;
14792 }
14793 break;
14794 case FLASH_5752VENDOR_ST_M45PE10:
14795 case FLASH_5752VENDOR_ST_M45PE20:
14796 case FLASH_5752VENDOR_ST_M45PE40:
14797 tp->nvram_jedecnum = JEDEC_ST;
14798 tg3_flag_set(tp, NVRAM_BUFFERED);
14799 tg3_flag_set(tp, FLASH);
14800
14801 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14802 case FLASH_5752VENDOR_ST_M45PE10:
14803 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14804 break;
14805 case FLASH_5752VENDOR_ST_M45PE20:
14806 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14807 break;
14808 case FLASH_5752VENDOR_ST_M45PE40:
14809 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14810 break;
14811 }
14812 break;
14813 default:
14814 tg3_flag_set(tp, NO_NVRAM);
14815 return;
14816 }
14817
14818 tg3_nvram_get_pagesize(tp, nvcfg1);
14819 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14820 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14821 }
14822
14823
14824 static void tg3_get_5717_nvram_info(struct tg3 *tp)
14825 {
14826 u32 nvcfg1;
14827
14828 nvcfg1 = tr32(NVRAM_CFG1);
14829
14830 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14831 case FLASH_5717VENDOR_ATMEL_EEPROM:
14832 case FLASH_5717VENDOR_MICRO_EEPROM:
14833 tp->nvram_jedecnum = JEDEC_ATMEL;
14834 tg3_flag_set(tp, NVRAM_BUFFERED);
14835 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14836
14837 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14838 tw32(NVRAM_CFG1, nvcfg1);
14839 return;
14840 case FLASH_5717VENDOR_ATMEL_MDB011D:
14841 case FLASH_5717VENDOR_ATMEL_ADB011B:
14842 case FLASH_5717VENDOR_ATMEL_ADB011D:
14843 case FLASH_5717VENDOR_ATMEL_MDB021D:
14844 case FLASH_5717VENDOR_ATMEL_ADB021B:
14845 case FLASH_5717VENDOR_ATMEL_ADB021D:
14846 case FLASH_5717VENDOR_ATMEL_45USPT:
14847 tp->nvram_jedecnum = JEDEC_ATMEL;
14848 tg3_flag_set(tp, NVRAM_BUFFERED);
14849 tg3_flag_set(tp, FLASH);
14850
14851 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14852 case FLASH_5717VENDOR_ATMEL_MDB021D:
14853 /* Detect size with tg3_nvram_get_size() */
14854 break;
14855 case FLASH_5717VENDOR_ATMEL_ADB021B:
14856 case FLASH_5717VENDOR_ATMEL_ADB021D:
14857 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14858 break;
14859 default:
14860 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14861 break;
14862 }
14863 break;
14864 case FLASH_5717VENDOR_ST_M_M25PE10:
14865 case FLASH_5717VENDOR_ST_A_M25PE10:
14866 case FLASH_5717VENDOR_ST_M_M45PE10:
14867 case FLASH_5717VENDOR_ST_A_M45PE10:
14868 case FLASH_5717VENDOR_ST_M_M25PE20:
14869 case FLASH_5717VENDOR_ST_A_M25PE20:
14870 case FLASH_5717VENDOR_ST_M_M45PE20:
14871 case FLASH_5717VENDOR_ST_A_M45PE20:
14872 case FLASH_5717VENDOR_ST_25USPT:
14873 case FLASH_5717VENDOR_ST_45USPT:
14874 tp->nvram_jedecnum = JEDEC_ST;
14875 tg3_flag_set(tp, NVRAM_BUFFERED);
14876 tg3_flag_set(tp, FLASH);
14877
14878 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14879 case FLASH_5717VENDOR_ST_M_M25PE20:
14880 case FLASH_5717VENDOR_ST_M_M45PE20:
14881 /* Detect size with tg3_nvram_get_size() */
14882 break;
14883 case FLASH_5717VENDOR_ST_A_M25PE20:
14884 case FLASH_5717VENDOR_ST_A_M45PE20:
14885 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14886 break;
14887 default:
14888 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14889 break;
14890 }
14891 break;
14892 default:
14893 tg3_flag_set(tp, NO_NVRAM);
14894 return;
14895 }
14896
14897 tg3_nvram_get_pagesize(tp, nvcfg1);
14898 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14899 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14900 }
14901
14902 static void tg3_get_5720_nvram_info(struct tg3 *tp)
14903 {
14904 u32 nvcfg1, nvmpinstrp, nv_status;
14905
14906 nvcfg1 = tr32(NVRAM_CFG1);
14907 nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
14908
14909 if (tg3_asic_rev(tp) == ASIC_REV_5762) {
14910 if (!(nvcfg1 & NVRAM_CFG1_5762VENDOR_MASK)) {
14911 tg3_flag_set(tp, NO_NVRAM);
14912 return;
14913 }
14914
14915 switch (nvmpinstrp) {
14916 case FLASH_5762_MX25L_100:
14917 case FLASH_5762_MX25L_200:
14918 case FLASH_5762_MX25L_400:
14919 case FLASH_5762_MX25L_800:
14920 case FLASH_5762_MX25L_160_320:
14921 tp->nvram_pagesize = 4096;
14922 tp->nvram_jedecnum = JEDEC_MACRONIX;
14923 tg3_flag_set(tp, NVRAM_BUFFERED);
14924 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14925 tg3_flag_set(tp, FLASH);
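/* Autosense the flash size: the expression below computes
 * (1 << device_id) << AUTOSENSE_SIZE_IN_MB, a power-of-two size scaled
 * up to bytes (presumably a megabyte-unit shift).  Within the inner
 * parentheses, >> binds tighter than &, so the device-ID field is
 * shifted down before being masked.
 */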
14926 nv_status = tr32(NVRAM_AUTOSENSE_STATUS);
14927 tp->nvram_size =
14928 (1 << (nv_status >> AUTOSENSE_DEVID &
14929 AUTOSENSE_DEVID_MASK)
14930 << AUTOSENSE_SIZE_IN_MB);
14931 return;
14932
14933 case FLASH_5762_EEPROM_HD:
14934 nvmpinstrp = FLASH_5720_EEPROM_HD;
14935 break;
14936 case FLASH_5762_EEPROM_LD:
14937 nvmpinstrp = FLASH_5720_EEPROM_LD;
14938 break;
14939 case FLASH_5720VENDOR_M_ST_M45PE20:
14940 /* This pinstrap supports multiple sizes, so force it
14941 * to read the actual size from location 0xf0.
14942 */
14943 nvmpinstrp = FLASH_5720VENDOR_ST_45USPT;
14944 break;
14945 }
14946 }
14947
14948 switch (nvmpinstrp) {
14949 case FLASH_5720_EEPROM_HD:
14950 case FLASH_5720_EEPROM_LD:
14951 tp->nvram_jedecnum = JEDEC_ATMEL;
14952 tg3_flag_set(tp, NVRAM_BUFFERED);
14953
14954 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14955 tw32(NVRAM_CFG1, nvcfg1);
14956 if (nvmpinstrp == FLASH_5720_EEPROM_HD)
14957 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14958 else
14959 tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
14960 return;
14961 case FLASH_5720VENDOR_M_ATMEL_DB011D:
14962 case FLASH_5720VENDOR_A_ATMEL_DB011B:
14963 case FLASH_5720VENDOR_A_ATMEL_DB011D:
14964 case FLASH_5720VENDOR_M_ATMEL_DB021D:
14965 case FLASH_5720VENDOR_A_ATMEL_DB021B:
14966 case FLASH_5720VENDOR_A_ATMEL_DB021D:
14967 case FLASH_5720VENDOR_M_ATMEL_DB041D:
14968 case FLASH_5720VENDOR_A_ATMEL_DB041B:
14969 case FLASH_5720VENDOR_A_ATMEL_DB041D:
14970 case FLASH_5720VENDOR_M_ATMEL_DB081D:
14971 case FLASH_5720VENDOR_A_ATMEL_DB081D:
14972 case FLASH_5720VENDOR_ATMEL_45USPT:
14973 tp->nvram_jedecnum = JEDEC_ATMEL;
14974 tg3_flag_set(tp, NVRAM_BUFFERED);
14975 tg3_flag_set(tp, FLASH);
14976
14977 switch (nvmpinstrp) {
14978 case FLASH_5720VENDOR_M_ATMEL_DB021D:
14979 case FLASH_5720VENDOR_A_ATMEL_DB021B:
14980 case FLASH_5720VENDOR_A_ATMEL_DB021D:
14981 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14982 break;
14983 case FLASH_5720VENDOR_M_ATMEL_DB041D:
14984 case FLASH_5720VENDOR_A_ATMEL_DB041B:
14985 case FLASH_5720VENDOR_A_ATMEL_DB041D:
14986 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14987 break;
14988 case FLASH_5720VENDOR_M_ATMEL_DB081D:
14989 case FLASH_5720VENDOR_A_ATMEL_DB081D:
14990 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14991 break;
14992 default:
14993 if (tg3_asic_rev(tp) != ASIC_REV_5762)
14994 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14995 break;
14996 }
14997 break;
14998 case FLASH_5720VENDOR_M_ST_M25PE10:
14999 case FLASH_5720VENDOR_M_ST_M45PE10:
15000 case FLASH_5720VENDOR_A_ST_M25PE10:
15001 case FLASH_5720VENDOR_A_ST_M45PE10:
15002 case FLASH_5720VENDOR_M_ST_M25PE20:
15003 case FLASH_5720VENDOR_M_ST_M45PE20:
15004 case FLASH_5720VENDOR_A_ST_M25PE20:
15005 case FLASH_5720VENDOR_A_ST_M45PE20:
15006 case FLASH_5720VENDOR_M_ST_M25PE40:
15007 case FLASH_5720VENDOR_M_ST_M45PE40:
15008 case FLASH_5720VENDOR_A_ST_M25PE40:
15009 case FLASH_5720VENDOR_A_ST_M45PE40:
15010 case FLASH_5720VENDOR_M_ST_M25PE80:
15011 case FLASH_5720VENDOR_M_ST_M45PE80:
15012 case FLASH_5720VENDOR_A_ST_M25PE80:
15013 case FLASH_5720VENDOR_A_ST_M45PE80:
15014 case FLASH_5720VENDOR_ST_25USPT:
15015 case FLASH_5720VENDOR_ST_45USPT:
15016 tp->nvram_jedecnum = JEDEC_ST;
15017 tg3_flag_set(tp, NVRAM_BUFFERED);
15018 tg3_flag_set(tp, FLASH);
15019
15020 switch (nvmpinstrp) {
15021 case FLASH_5720VENDOR_M_ST_M25PE20:
15022 case FLASH_5720VENDOR_M_ST_M45PE20:
15023 case FLASH_5720VENDOR_A_ST_M25PE20:
15024 case FLASH_5720VENDOR_A_ST_M45PE20:
15025 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
15026 break;
15027 case FLASH_5720VENDOR_M_ST_M25PE40:
15028 case FLASH_5720VENDOR_M_ST_M45PE40:
15029 case FLASH_5720VENDOR_A_ST_M25PE40:
15030 case FLASH_5720VENDOR_A_ST_M45PE40:
15031 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
15032 break;
15033 case FLASH_5720VENDOR_M_ST_M25PE80:
15034 case FLASH_5720VENDOR_M_ST_M45PE80:
15035 case FLASH_5720VENDOR_A_ST_M25PE80:
15036 case FLASH_5720VENDOR_A_ST_M45PE80:
15037 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
15038 break;
15039 default:
15040 if (tg3_asic_rev(tp) != ASIC_REV_5762)
15041 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
15042 break;
15043 }
15044 break;
15045 default:
15046 tg3_flag_set(tp, NO_NVRAM);
15047 return;
15048 }
15049
15050 tg3_nvram_get_pagesize(tp, nvcfg1);
15051 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
15052 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
15053
15054 if (tg3_asic_rev(tp) == ASIC_REV_5762) {
15055 u32 val;
15056
15057 if (tg3_nvram_read(tp, 0, &val))
15058 return;
15059
15060 if (val != TG3_EEPROM_MAGIC &&
15061 (val & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW)
15062 tg3_flag_set(tp, NO_NVRAM);
15063 }
15064 }
15065
15066 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
15067 static void tg3_nvram_init(struct tg3 *tp)
15068 {
15069 if (tg3_flag(tp, IS_SSB_CORE)) {
15070 /* No NVRAM or EEPROM on the SSB Broadcom GigE core. */
15071 tg3_flag_clear(tp, NVRAM);
15072 tg3_flag_clear(tp, NVRAM_BUFFERED);
15073 tg3_flag_set(tp, NO_NVRAM);
15074 return;
15075 }
15076
15077 tw32_f(GRC_EEPROM_ADDR,
15078 (EEPROM_ADDR_FSM_RESET |
15079 (EEPROM_DEFAULT_CLOCK_PERIOD <<
15080 EEPROM_ADDR_CLKPERD_SHIFT)));
15081
15082 msleep(1);
15083
15084 /* Enable seeprom accesses. */
15085 tw32_f(GRC_LOCAL_CTRL,
15086 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
15087 udelay(100);
15088
15089 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
15090 tg3_asic_rev(tp) != ASIC_REV_5701) {
15091 tg3_flag_set(tp, NVRAM);
15092
15093 if (tg3_nvram_lock(tp)) {
15094 netdev_warn(tp->dev,
15095 "Cannot get nvram lock, %s failed\n",
15096 __func__);
15097 return;
15098 }
15099 tg3_enable_nvram_access(tp);
15100
15101 tp->nvram_size = 0;
15102
15103 if (tg3_asic_rev(tp) == ASIC_REV_5752)
15104 tg3_get_5752_nvram_info(tp);
15105 else if (tg3_asic_rev(tp) == ASIC_REV_5755)
15106 tg3_get_5755_nvram_info(tp);
15107 else if (tg3_asic_rev(tp) == ASIC_REV_5787 ||
15108 tg3_asic_rev(tp) == ASIC_REV_5784 ||
15109 tg3_asic_rev(tp) == ASIC_REV_5785)
15110 tg3_get_5787_nvram_info(tp);
15111 else if (tg3_asic_rev(tp) == ASIC_REV_5761)
15112 tg3_get_5761_nvram_info(tp);
15113 else if (tg3_asic_rev(tp) == ASIC_REV_5906)
15114 tg3_get_5906_nvram_info(tp);
15115 else if (tg3_asic_rev(tp) == ASIC_REV_57780 ||
15116 tg3_flag(tp, 57765_CLASS))
15117 tg3_get_57780_nvram_info(tp);
15118 else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15119 tg3_asic_rev(tp) == ASIC_REV_5719)
15120 tg3_get_5717_nvram_info(tp);
15121 else if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
15122 tg3_asic_rev(tp) == ASIC_REV_5762)
15123 tg3_get_5720_nvram_info(tp);
15124 else
15125 tg3_get_nvram_info(tp);
15126
15127 if (tp->nvram_size == 0)
15128 tg3_get_nvram_size(tp);
15129
15130 tg3_disable_nvram_access(tp);
15131 tg3_nvram_unlock(tp);
15132
15133 } else {
15134 tg3_flag_clear(tp, NVRAM);
15135 tg3_flag_clear(tp, NVRAM_BUFFERED);
15136
15137 tg3_get_eeprom_size(tp);
15138 }
15139 }
15140
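/* Known boards, keyed by PCI subsystem vendor/device ID, mapped to
 * the PHY fitted on them. Used as a fallback when the PHY ID cannot
 * be read from the chip or the eeprom.
 */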
15141 struct subsys_tbl_ent {
15142 u16 subsys_vendor, subsys_devid;
15143 u32 phy_id;
15144 };
15145
15146 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
15147 /* Broadcom boards. */
15148 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15149 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
15150 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15151 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
15152 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15153 TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
15154 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15155 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
15156 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15157 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
15158 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15159 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
15160 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15161 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
15162 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15163 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
15164 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15165 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
15166 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15167 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
15168 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15169 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
15170
15171 /* 3com boards. */
15172 { TG3PCI_SUBVENDOR_ID_3COM,
15173 TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
15174 { TG3PCI_SUBVENDOR_ID_3COM,
15175 TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
15176 { TG3PCI_SUBVENDOR_ID_3COM,
15177 TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
15178 { TG3PCI_SUBVENDOR_ID_3COM,
15179 TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
15180 { TG3PCI_SUBVENDOR_ID_3COM,
15181 TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
15182
15183 /* DELL boards. */
15184 { TG3PCI_SUBVENDOR_ID_DELL,
15185 TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
15186 { TG3PCI_SUBVENDOR_ID_DELL,
15187 TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
15188 { TG3PCI_SUBVENDOR_ID_DELL,
15189 TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
15190 { TG3PCI_SUBVENDOR_ID_DELL,
15191 TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
15192
15193 /* Compaq boards. */
15194 { TG3PCI_SUBVENDOR_ID_COMPAQ,
15195 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
15196 { TG3PCI_SUBVENDOR_ID_COMPAQ,
15197 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
15198 { TG3PCI_SUBVENDOR_ID_COMPAQ,
15199 TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
15200 { TG3PCI_SUBVENDOR_ID_COMPAQ,
15201 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
15202 { TG3PCI_SUBVENDOR_ID_COMPAQ,
15203 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
15204
15205 /* IBM boards. */
15206 { TG3PCI_SUBVENDOR_ID_IBM,
15207 TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
15208 };
15209
15210 static struct subsys_tbl_ent *tg3_lookup_by_subsys(struct tg3 *tp)
15211 {
15212 int i;
15213
15214 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
15215 if ((subsys_id_to_phy_id[i].subsys_vendor ==
15216 tp->pdev->subsystem_vendor) &&
15217 (subsys_id_to_phy_id[i].subsys_devid ==
15218 tp->pdev->subsystem_device))
15219 return &subsys_id_to_phy_id[i];
15220 }
15221 return NULL;
15222 }
15223
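/* Pull the hardware configuration that bootcode left in NIC SRAM:
 * PHY id and type, LED mode, WoL and ASF/APE enables, and assorted
 * per-board PHY workaround flags.
 */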
15224 static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
15225 {
15226 u32 val;
15227
15228 tp->phy_id = TG3_PHY_ID_INVALID;
15229 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15230
15231 /* Assume an onboard device and WOL capable by default. */
15232 tg3_flag_set(tp, EEPROM_WRITE_PROT);
15233 tg3_flag_set(tp, WOL_CAP);
15234
15235 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15236 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
15237 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15238 tg3_flag_set(tp, IS_NIC);
15239 }
15240 val = tr32(VCPU_CFGSHDW);
15241 if (val & VCPU_CFGSHDW_ASPM_DBNC)
15242 tg3_flag_set(tp, ASPM_WORKAROUND);
15243 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
15244 (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
15245 tg3_flag_set(tp, WOL_ENABLE);
15246 device_set_wakeup_enable(&tp->pdev->dev, true);
15247 }
15248 goto done;
15249 }
15250
15251 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
15252 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
15253 u32 nic_cfg, led_cfg;
15254 u32 cfg2 = 0, cfg4 = 0, cfg5 = 0;
15255 u32 nic_phy_id, ver, eeprom_phy_id;
15256 int eeprom_phy_serdes = 0;
15257
15258 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
15259 tp->nic_sram_data_cfg = nic_cfg;
15260
15261 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
15262 ver >>= NIC_SRAM_DATA_VER_SHIFT;
15263 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
15264 tg3_asic_rev(tp) != ASIC_REV_5701 &&
15265 tg3_asic_rev(tp) != ASIC_REV_5703 &&
15266 (ver > 0) && (ver < 0x100))
15267 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
15268
15269 if (tg3_asic_rev(tp) == ASIC_REV_5785)
15270 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
15271
15272 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15273 tg3_asic_rev(tp) == ASIC_REV_5719 ||
15274 tg3_asic_rev(tp) == ASIC_REV_5720)
15275 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_5, &cfg5);
15276
15277 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
15278 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
15279 eeprom_phy_serdes = 1;
15280
15281 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
15282 if (nic_phy_id != 0) {
15283 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
15284 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
15285
15286 eeprom_phy_id = (id1 >> 16) << 10;
15287 eeprom_phy_id |= (id2 & 0xfc00) << 16;
15288 eeprom_phy_id |= (id2 & 0x03ff) << 0;
15289 } else
15290 eeprom_phy_id = 0;
15291
15292 tp->phy_id = eeprom_phy_id;
15293 if (eeprom_phy_serdes) {
15294 if (!tg3_flag(tp, 5705_PLUS))
15295 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15296 else
15297 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
15298 }
15299
15300 if (tg3_flag(tp, 5750_PLUS))
15301 led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
15302 SHASTA_EXT_LED_MODE_MASK);
15303 else
15304 led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
15305
15306 switch (led_cfg) {
15307 default:
15308 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
15309 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15310 break;
15311
15312 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
15313 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
15314 break;
15315
15316 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
15317 tp->led_ctrl = LED_CTRL_MODE_MAC;
15318
15319 /* Default to PHY_1_MODE if 0 (MAC_MODE) is
15320 * read on some older 5700/5701 bootcode.
15321 */
15322 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
15323 tg3_asic_rev(tp) == ASIC_REV_5701)
15324 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15325
15326 break;
15327
15328 case SHASTA_EXT_LED_SHARED:
15329 tp->led_ctrl = LED_CTRL_MODE_SHARED;
15330 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
15331 tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A1)
15332 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
15333 LED_CTRL_MODE_PHY_2);
15334
15335 if (tg3_flag(tp, 5717_PLUS) ||
15336 tg3_asic_rev(tp) == ASIC_REV_5762)
15337 tp->led_ctrl |= LED_CTRL_BLINK_RATE_OVERRIDE |
15338 LED_CTRL_BLINK_RATE_MASK;
15339
15340 break;
15341
15342 case SHASTA_EXT_LED_MAC:
15343 tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
15344 break;
15345
15346 case SHASTA_EXT_LED_COMBO:
15347 tp->led_ctrl = LED_CTRL_MODE_COMBO;
15348 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0)
15349 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
15350 LED_CTRL_MODE_PHY_2);
15351 break;
15352
15353 }
15354
15355 if ((tg3_asic_rev(tp) == ASIC_REV_5700 ||
15356 tg3_asic_rev(tp) == ASIC_REV_5701) &&
15357 tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
15358 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
15359
15360 if (tg3_chip_rev(tp) == CHIPREV_5784_AX)
15361 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15362
15363 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
15364 tg3_flag_set(tp, EEPROM_WRITE_PROT);
15365 if ((tp->pdev->subsystem_vendor ==
15366 PCI_VENDOR_ID_ARIMA) &&
15367 (tp->pdev->subsystem_device == 0x205a ||
15368 tp->pdev->subsystem_device == 0x2063))
15369 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15370 } else {
15371 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15372 tg3_flag_set(tp, IS_NIC);
15373 }
15374
15375 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
15376 tg3_flag_set(tp, ENABLE_ASF);
15377 if (tg3_flag(tp, 5750_PLUS))
15378 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
15379 }
15380
15381 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
15382 tg3_flag(tp, 5750_PLUS))
15383 tg3_flag_set(tp, ENABLE_APE);
15384
15385 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
15386 !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
15387 tg3_flag_clear(tp, WOL_CAP);
15388
15389 if (tg3_flag(tp, WOL_CAP) &&
15390 (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
15391 tg3_flag_set(tp, WOL_ENABLE);
15392 device_set_wakeup_enable(&tp->pdev->dev, true);
15393 }
15394
15395 if (cfg2 & (1 << 17))
15396 tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
15397
15398 /* Serdes signal pre-emphasis in register 0x590 is set by
15399 * the bootcode if bit 18 is set. */
15400 if (cfg2 & (1 << 18))
15401 tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
15402
15403 if ((tg3_flag(tp, 57765_PLUS) ||
15404 (tg3_asic_rev(tp) == ASIC_REV_5784 &&
15405 tg3_chip_rev(tp) != CHIPREV_5784_AX)) &&
15406 (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
15407 tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
15408
15409 if (tg3_flag(tp, PCI_EXPRESS)) {
15410 u32 cfg3;
15411
15412 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
15413 if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
15414 !tg3_flag(tp, 57765_PLUS) &&
15415 (cfg3 & NIC_SRAM_ASPM_DEBOUNCE))
15416 tg3_flag_set(tp, ASPM_WORKAROUND);
15417 if (cfg3 & NIC_SRAM_LNK_FLAP_AVOID)
15418 tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
15419 if (cfg3 & NIC_SRAM_1G_ON_VAUX_OK)
15420 tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
15421 }
15422
15423 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
15424 tg3_flag_set(tp, RGMII_INBAND_DISABLE);
15425 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
15426 tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
15427 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
15428 tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
15429
15430 if (cfg5 & NIC_SRAM_DISABLE_1G_HALF_ADV)
15431 tp->phy_flags |= TG3_PHYFLG_DISABLE_1G_HD_ADV;
15432 }
15433 done:
15434 if (tg3_flag(tp, WOL_CAP))
15435 device_set_wakeup_enable(&tp->pdev->dev,
15436 tg3_flag(tp, WOL_ENABLE));
15437 else
15438 device_set_wakeup_capable(&tp->pdev->dev, false);
15439 }
15440
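/* Read one 32-bit word from the OTP area via the APE OTP controller,
 * holding the NVRAM arbitration lock across the access. Completion
 * is polled for up to ~1 ms; returns 0 on success or -EBUSY on
 * timeout.
 */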
15441 static int tg3_ape_otp_read(struct tg3 *tp, u32 offset, u32 *val)
15442 {
15443 int i, err;
15444 u32 val2, off = offset * 8;
15445
15446 err = tg3_nvram_lock(tp);
15447 if (err)
15448 return err;
15449
15450 tg3_ape_write32(tp, TG3_APE_OTP_ADDR, off | APE_OTP_ADDR_CPU_ENABLE);
15451 tg3_ape_write32(tp, TG3_APE_OTP_CTRL, APE_OTP_CTRL_PROG_EN |
15452 APE_OTP_CTRL_CMD_RD | APE_OTP_CTRL_START);
15453 tg3_ape_read32(tp, TG3_APE_OTP_CTRL);
15454 udelay(10);
15455
15456 for (i = 0; i < 100; i++) {
15457 val2 = tg3_ape_read32(tp, TG3_APE_OTP_STATUS);
15458 if (val2 & APE_OTP_STATUS_CMD_DONE) {
15459 *val = tg3_ape_read32(tp, TG3_APE_OTP_RD_DATA);
15460 break;
15461 }
15462 udelay(10);
15463 }
15464
15465 tg3_ape_write32(tp, TG3_APE_OTP_CTRL, 0);
15466
15467 tg3_nvram_unlock(tp);
15468 if (val2 & APE_OTP_STATUS_CMD_DONE)
15469 return 0;
15470
15471 return -EBUSY;
15472 }
15473
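/* Issue a single command to the OTP controller and poll for its
 * completion.
 */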
15474 static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
15475 {
15476 int i;
15477 u32 val;
15478
15479 tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
15480 tw32(OTP_CTRL, cmd);
15481
15482 /* Wait for up to 1 ms for command to execute. */
15483 for (i = 0; i < 100; i++) {
15484 val = tr32(OTP_STATUS);
15485 if (val & OTP_STATUS_CMD_DONE)
15486 break;
15487 udelay(10);
15488 }
15489
15490 return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
15491 }
15492
15493 /* Read the gphy configuration from the OTP region of the chip. The gphy
15494 * configuration is a 32-bit value that straddles the alignment boundary.
15495 * We do two 32-bit reads and then shift and merge the results.
15496 */
15497 static u32 tg3_read_otp_phycfg(struct tg3 *tp)
15498 {
15499 u32 bhalf_otp, thalf_otp;
15500
15501 tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
15502
15503 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
15504 return 0;
15505
15506 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
15507
15508 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
15509 return 0;
15510
15511 thalf_otp = tr32(OTP_READ_DATA);
15512
15513 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
15514
15515 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
15516 return 0;
15517
15518 bhalf_otp = tr32(OTP_READ_DATA);
15519
15520 return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
15521 }
15522
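/* Establish the default link configuration: autonegotiation enabled,
 * with every speed/duplex mode the PHY flags allow advertised.
 */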
15523 static void tg3_phy_init_link_config(struct tg3 *tp)
15524 {
15525 u32 adv = ADVERTISED_Autoneg;
15526
15527 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
15528 if (!(tp->phy_flags & TG3_PHYFLG_DISABLE_1G_HD_ADV))
15529 adv |= ADVERTISED_1000baseT_Half;
15530 adv |= ADVERTISED_1000baseT_Full;
15531 }
15532
15533 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
15534 adv |= ADVERTISED_100baseT_Half |
15535 ADVERTISED_100baseT_Full |
15536 ADVERTISED_10baseT_Half |
15537 ADVERTISED_10baseT_Full |
15538 ADVERTISED_TP;
15539 else
15540 adv |= ADVERTISED_FIBRE;
15541
15542 tp->link_config.advertising = adv;
15543 tp->link_config.speed = SPEED_UNKNOWN;
15544 tp->link_config.duplex = DUPLEX_UNKNOWN;
15545 tp->link_config.autoneg = AUTONEG_ENABLE;
15546 tp->link_config.active_speed = SPEED_UNKNOWN;
15547 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
15548
15549 tp->old_link = -1;
15550 }
15551
15552 static int tg3_phy_probe(struct tg3 *tp)
15553 {
15554 u32 hw_phy_id_1, hw_phy_id_2;
15555 u32 hw_phy_id, hw_phy_id_masked;
15556 int err;
15557
15558 /* flow control autonegotiation is default behavior */
15559 tg3_flag_set(tp, PAUSE_AUTONEG);
15560 tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
15561
15562 if (tg3_flag(tp, ENABLE_APE)) {
15563 switch (tp->pci_fn) {
15564 case 0:
15565 tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
15566 break;
15567 case 1:
15568 tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
15569 break;
15570 case 2:
15571 tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
15572 break;
15573 case 3:
15574 tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
15575 break;
15576 }
15577 }
15578
15579 if (!tg3_flag(tp, ENABLE_ASF) &&
15580 !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15581 !(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
15582 tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
15583 TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
15584
15585 if (tg3_flag(tp, USE_PHYLIB))
15586 return tg3_phy_init(tp);
15587
15588 /* Reading the PHY ID register can conflict with ASF
15589 * firmware access to the PHY hardware.
15590 */
15591 err = 0;
15592 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
15593 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
15594 } else {
15595 /* Now read the physical PHY_ID from the chip and verify
15596 * that it is sane. If it doesn't look good, we fall back
15597 * to the PHY_ID found in the eeprom area and, failing
15598 * that, to the hard-coded subsys device table.
15599 */
15600 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
15601 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
15602
15603 hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
15604 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
15605 hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
15606
15607 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
15608 }
15609
15610 if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
15611 tp->phy_id = hw_phy_id;
15612 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
15613 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15614 else
15615 tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
15616 } else {
15617 if (tp->phy_id != TG3_PHY_ID_INVALID) {
15618 /* Do nothing, phy ID already set up in
15619 * tg3_get_eeprom_hw_cfg().
15620 */
15621 } else {
15622 struct subsys_tbl_ent *p;
15623
15624 /* No eeprom signature? Try the hardcoded
15625 * subsys device table.
15626 */
15627 p = tg3_lookup_by_subsys(tp);
15628 if (p) {
15629 tp->phy_id = p->phy_id;
15630 } else if (!tg3_flag(tp, IS_SSB_CORE)) {
15631 /* So far we have seen the IDs 0xbc050cd0,
15632 * 0xbc050f80 and 0xbc050c30 on devices
15633 * connected to a BCM4785, and there are
15634 * probably more. For now, just assume that
15635 * the phy is supported when it is connected
15636 * to an SSB core.
15637 */
15638 return -ENODEV;
15639 }
15640
15641 if (!tp->phy_id ||
15642 tp->phy_id == TG3_PHY_ID_BCM8002)
15643 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15644 }
15645 }
15646
15647 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15648 (tg3_asic_rev(tp) == ASIC_REV_5719 ||
15649 tg3_asic_rev(tp) == ASIC_REV_5720 ||
15650 tg3_asic_rev(tp) == ASIC_REV_57766 ||
15651 tg3_asic_rev(tp) == ASIC_REV_5762 ||
15652 (tg3_asic_rev(tp) == ASIC_REV_5717 &&
15653 tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) ||
15654 (tg3_asic_rev(tp) == ASIC_REV_57765 &&
15655 tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0))) {
15656 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
15657
15658 tp->eee.supported = SUPPORTED_100baseT_Full |
15659 SUPPORTED_1000baseT_Full;
15660 tp->eee.advertised = ADVERTISED_100baseT_Full |
15661 ADVERTISED_1000baseT_Full;
15662 tp->eee.eee_enabled = 1;
15663 tp->eee.tx_lpi_enabled = 1;
15664 tp->eee.tx_lpi_timer = TG3_CPMU_DBTMR1_LNKIDLE_2047US;
15665 }
15666
15667 tg3_phy_init_link_config(tp);
15668
15669 if (!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
15670 !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15671 !tg3_flag(tp, ENABLE_APE) &&
15672 !tg3_flag(tp, ENABLE_ASF)) {
15673 u32 bmsr, dummy;
15674
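/* BMSR latches link-down events, so read it twice to get the
 * current link state from the second read.
 */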
15675 tg3_readphy(tp, MII_BMSR, &bmsr);
15676 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
15677 (bmsr & BMSR_LSTATUS))
15678 goto skip_phy_reset;
15679
15680 err = tg3_phy_reset(tp);
15681 if (err)
15682 return err;
15683
15684 tg3_phy_set_wirespeed(tp);
15685
15686 if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
15687 tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
15688 tp->link_config.flowctrl);
15689
15690 tg3_writephy(tp, MII_BMCR,
15691 BMCR_ANENABLE | BMCR_ANRESTART);
15692 }
15693 }
15694
15695 skip_phy_reset:
15696 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
15697 err = tg3_init_5401phy_dsp(tp);
15698 if (err)
15699 return err;
15700
15701 err = tg3_init_5401phy_dsp(tp);
15702 }
15703
15704 return err;
15705 }
15706
15707 static void tg3_read_vpd(struct tg3 *tp)
15708 {
15709 u8 *vpd_data;
15710 unsigned int len, vpdlen;
15711 int i;
15712
15713 vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
15714 if (!vpd_data)
15715 goto out_no_vpd;
15716
15717 i = pci_vpd_find_ro_info_keyword(vpd_data, vpdlen,
15718 PCI_VPD_RO_KEYWORD_MFR_ID, &len);
15719 if (i < 0)
15720 goto partno;
15721
15722 if (len != 4 || memcmp(vpd_data + i, "1028", 4))
15723 goto partno;
15724
15725 i = pci_vpd_find_ro_info_keyword(vpd_data, vpdlen,
15726 PCI_VPD_RO_KEYWORD_VENDOR0, &len);
15727 if (i < 0)
15728 goto partno;
15729
15730 memset(tp->fw_ver, 0, sizeof(tp->fw_ver));
15731 snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len, vpd_data + i);
15732
15733 partno:
15734 i = pci_vpd_find_ro_info_keyword(vpd_data, vpdlen,
15735 PCI_VPD_RO_KEYWORD_PARTNO, &len);
15736 if (i < 0)
15737 goto out_not_found;
15738
15739 if (len > TG3_BPN_SIZE)
15740 goto out_not_found;
15741
15742 memcpy(tp->board_part_number, &vpd_data[i], len);
15743
15744 out_not_found:
15745 kfree(vpd_data);
15746 if (tp->board_part_number[0])
15747 return;
15748
15749 out_no_vpd:
15750 if (tg3_asic_rev(tp) == ASIC_REV_5717) {
15751 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15752 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
15753 strcpy(tp->board_part_number, "BCM5717");
15754 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
15755 strcpy(tp->board_part_number, "BCM5718");
15756 else
15757 goto nomatch;
15758 } else if (tg3_asic_rev(tp) == ASIC_REV_57780) {
15759 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
15760 strcpy(tp->board_part_number, "BCM57780");
15761 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
15762 strcpy(tp->board_part_number, "BCM57760");
15763 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
15764 strcpy(tp->board_part_number, "BCM57790");
15765 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
15766 strcpy(tp->board_part_number, "BCM57788");
15767 else
15768 goto nomatch;
15769 } else if (tg3_asic_rev(tp) == ASIC_REV_57765) {
15770 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
15771 strcpy(tp->board_part_number, "BCM57761");
15772 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
15773 strcpy(tp->board_part_number, "BCM57765");
15774 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
15775 strcpy(tp->board_part_number, "BCM57781");
15776 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
15777 strcpy(tp->board_part_number, "BCM57785");
15778 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
15779 strcpy(tp->board_part_number, "BCM57791");
15780 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
15781 strcpy(tp->board_part_number, "BCM57795");
15782 else
15783 goto nomatch;
15784 } else if (tg3_asic_rev(tp) == ASIC_REV_57766) {
15785 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
15786 strcpy(tp->board_part_number, "BCM57762");
15787 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
15788 strcpy(tp->board_part_number, "BCM57766");
15789 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
15790 strcpy(tp->board_part_number, "BCM57782");
15791 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
15792 strcpy(tp->board_part_number, "BCM57786");
15793 else
15794 goto nomatch;
15795 } else if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15796 strcpy(tp->board_part_number, "BCM95906");
15797 } else {
15798 nomatch:
15799 strcpy(tp->board_part_number, "none");
15800 }
15801 }
15802
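/* A valid firmware image begins with 0x0c000000 in the upper bits of
 * its first word and has a zero second word.
 */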
15803 static int tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
15804 {
15805 u32 val;
15806
15807 if (tg3_nvram_read(tp, offset, &val) ||
15808 (val & 0xfc000000) != 0x0c000000 ||
15809 tg3_nvram_read(tp, offset + 4, &val) ||
15810 val != 0)
15811 return 0;
15812
15813 return 1;
15814 }
15815
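/* Append the bootcode version from NVRAM to tp->fw_ver. Newer images
 * carry a full version string; older ones only a major/minor pair.
 */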
15816 static void tg3_read_bc_ver(struct tg3 *tp)
15817 {
15818 u32 val, offset, start, ver_offset;
15819 int i, dst_off;
15820 bool newver = false;
15821
15822 if (tg3_nvram_read(tp, 0xc, &offset) ||
15823 tg3_nvram_read(tp, 0x4, &start))
15824 return;
15825
15826 offset = tg3_nvram_logical_addr(tp, offset);
15827
15828 if (tg3_nvram_read(tp, offset, &val))
15829 return;
15830
15831 if ((val & 0xfc000000) == 0x0c000000) {
15832 if (tg3_nvram_read(tp, offset + 4, &val))
15833 return;
15834
15835 if (val == 0)
15836 newver = true;
15837 }
15838
15839 dst_off = strlen(tp->fw_ver);
15840
15841 if (newver) {
15842 if (TG3_VER_SIZE - dst_off < 16 ||
15843 tg3_nvram_read(tp, offset + 8, &ver_offset))
15844 return;
15845
15846 offset = offset + ver_offset - start;
15847 for (i = 0; i < 16; i += 4) {
15848 __be32 v;
15849 if (tg3_nvram_read_be32(tp, offset + i, &v))
15850 return;
15851
15852 memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
15853 }
15854 } else {
15855 u32 major, minor;
15856
15857 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
15858 return;
15859
15860 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
15861 TG3_NVM_BCVER_MAJSFT;
15862 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
15863 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
15864 "v%d.%02d", major, minor);
15865 }
15866 }
15867
15868 static void tg3_read_hwsb_ver(struct tg3 *tp)
15869 {
15870 u32 val, major, minor;
15871
15872 /* Use native endian representation */
15873 if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
15874 return;
15875
15876 major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
15877 TG3_NVM_HWSB_CFG1_MAJSFT;
15878 minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
15879 TG3_NVM_HWSB_CFG1_MINSFT;
15880
15881 snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
15882 }
15883
15884 static void tg3_read_sb_ver(struct tg3 *tp, u32 val)
15885 {
15886 u32 offset, major, minor, build;
15887
15888 strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
15889
15890 if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
15891 return;
15892
15893 switch (val & TG3_EEPROM_SB_REVISION_MASK) {
15894 case TG3_EEPROM_SB_REVISION_0:
15895 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
15896 break;
15897 case TG3_EEPROM_SB_REVISION_2:
15898 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
15899 break;
15900 case TG3_EEPROM_SB_REVISION_3:
15901 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
15902 break;
15903 case TG3_EEPROM_SB_REVISION_4:
15904 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
15905 break;
15906 case TG3_EEPROM_SB_REVISION_5:
15907 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
15908 break;
15909 case TG3_EEPROM_SB_REVISION_6:
15910 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
15911 break;
15912 default:
15913 return;
15914 }
15915
15916 if (tg3_nvram_read(tp, offset, &val))
15917 return;
15918
15919 build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
15920 TG3_EEPROM_SB_EDH_BLD_SHFT;
15921 major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
15922 TG3_EEPROM_SB_EDH_MAJ_SHFT;
15923 minor = val & TG3_EEPROM_SB_EDH_MIN_MASK;
15924
15925 if (minor > 99 || build > 26)
15926 return;
15927
15928 offset = strlen(tp->fw_ver);
15929 snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
15930 " v%d.%02d", major, minor);
15931
15932 if (build > 0) {
15933 offset = strlen(tp->fw_ver);
15934 if (offset < TG3_VER_SIZE - 1)
15935 tp->fw_ver[offset] = 'a' + build - 1;
15936 }
15937 }
15938
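/* Locate the ASF management firmware image through the NVRAM
 * directory and append its version to tp->fw_ver.
 */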
15939 static void tg3_read_mgmtfw_ver(struct tg3 *tp)
15940 {
15941 u32 val, offset, start;
15942 int i, vlen;
15943
15944 for (offset = TG3_NVM_DIR_START;
15945 offset < TG3_NVM_DIR_END;
15946 offset += TG3_NVM_DIRENT_SIZE) {
15947 if (tg3_nvram_read(tp, offset, &val))
15948 return;
15949
15950 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
15951 break;
15952 }
15953
15954 if (offset == TG3_NVM_DIR_END)
15955 return;
15956
15957 if (!tg3_flag(tp, 5705_PLUS))
15958 start = 0x08000000;
15959 else if (tg3_nvram_read(tp, offset - 4, &start))
15960 return;
15961
15962 if (tg3_nvram_read(tp, offset + 4, &offset) ||
15963 !tg3_fw_img_is_valid(tp, offset) ||
15964 tg3_nvram_read(tp, offset + 8, &val))
15965 return;
15966
15967 offset += val - start;
15968
15969 vlen = strlen(tp->fw_ver);
15970
15971 tp->fw_ver[vlen++] = ',';
15972 tp->fw_ver[vlen++] = ' ';
15973
15974 for (i = 0; i < 4; i++) {
15975 __be32 v;
15976 if (tg3_nvram_read_be32(tp, offset, &v))
15977 return;
15978
15979 offset += sizeof(v);
15980
15981 if (vlen > TG3_VER_SIZE - sizeof(v)) {
15982 memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
15983 break;
15984 }
15985
15986 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
15987 vlen += sizeof(v);
15988 }
15989 }
15990
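/* Detect NCSI firmware: require the APE segment signature and a
 * ready firmware status before trusting the feature bits.
 */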
15991 static void tg3_probe_ncsi(struct tg3 *tp)
15992 {
15993 u32 apedata;
15994
15995 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
15996 if (apedata != APE_SEG_SIG_MAGIC)
15997 return;
15998
15999 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
16000 if (!(apedata & APE_FW_STATUS_READY))
16001 return;
16002
16003 if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
16004 tg3_flag_set(tp, APE_HAS_NCSI);
16005 }
16006
16007 static void tg3_read_dash_ver(struct tg3 *tp)
16008 {
16009 int vlen;
16010 u32 apedata;
16011 char *fwtype;
16012
16013 apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
16014
16015 if (tg3_flag(tp, APE_HAS_NCSI))
16016 fwtype = "NCSI";
16017 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725)
16018 fwtype = "SMASH";
16019 else
16020 fwtype = "DASH";
16021
16022 vlen = strlen(tp->fw_ver);
16023
16024 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
16025 fwtype,
16026 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
16027 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
16028 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
16029 (apedata & APE_FW_VERSION_BLDMSK));
16030 }
16031
16032 static void tg3_read_otp_ver(struct tg3 *tp)
16033 {
16034 u32 val, val2;
16035
16036 if (tg3_asic_rev(tp) != ASIC_REV_5762)
16037 return;
16038
16039 if (!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0, &val) &&
16040 !tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0 + 4, &val2) &&
16041 TG3_OTP_MAGIC0_VALID(val)) {
16042 u64 val64 = (u64) val << 32 | val2;
16043 u32 ver = 0;
16044 int i, vlen;
16045
16046 for (i = 0; i < 7; i++) {
16047 if ((val64 & 0xff) == 0)
16048 break;
16049 ver = val64 & 0xff;
16050 val64 >>= 8;
16051 }
16052 vlen = strlen(tp->fw_ver);
16053 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " .%02d", ver);
16054 }
16055 }
16056
16057 static void tg3_read_fw_ver(struct tg3 *tp)
16058 {
16059 u32 val;
16060 bool vpd_vers = false;
16061
16062 if (tp->fw_ver[0] != 0)
16063 vpd_vers = true;
16064
16065 if (tg3_flag(tp, NO_NVRAM)) {
16066 strcat(tp->fw_ver, "sb");
16067 tg3_read_otp_ver(tp);
16068 return;
16069 }
16070
16071 if (tg3_nvram_read(tp, 0, &val))
16072 return;
16073
16074 if (val == TG3_EEPROM_MAGIC)
16075 tg3_read_bc_ver(tp);
16076 else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
16077 tg3_read_sb_ver(tp, val);
16078 else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
16079 tg3_read_hwsb_ver(tp);
16080
16081 if (tg3_flag(tp, ENABLE_ASF)) {
16082 if (tg3_flag(tp, ENABLE_APE)) {
16083 tg3_probe_ncsi(tp);
16084 if (!vpd_vers)
16085 tg3_read_dash_ver(tp);
16086 } else if (!vpd_vers) {
16087 tg3_read_mgmtfw_ver(tp);
16088 }
16089 }
16090
16091 tp->fw_ver[TG3_VER_SIZE - 1] = 0;
16092 }
16093
16094 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
16095 {
16096 if (tg3_flag(tp, LRG_PROD_RING_CAP))
16097 return TG3_RX_RET_MAX_SIZE_5717;
16098 else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
16099 return TG3_RX_RET_MAX_SIZE_5700;
16100 else
16101 return TG3_RX_RET_MAX_SIZE_5705;
16102 }
16103
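/* Host bridges known to reorder posted writes to the mailbox
 * registers; see the MBOX_WRITE_REORDER handling in
 * tg3_get_invariants() below.
 */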
16104 static const struct pci_device_id tg3_write_reorder_chipsets[] = {
16105 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
16106 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
16107 { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
16108 { },
16109 };
16110
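/* Find the sibling PCI function of a dual-port (5704/5714) device. */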
16111 static struct pci_dev *tg3_find_peer(struct tg3 *tp)
16112 {
16113 struct pci_dev *peer;
16114 unsigned int func, devnr = tp->pdev->devfn & ~7;
16115
16116 for (func = 0; func < 8; func++) {
16117 peer = pci_get_slot(tp->pdev->bus, devnr | func);
16118 if (peer && peer != tp->pdev)
16119 break;
16120 pci_dev_put(peer);
16121 }
16122 /* The 5704 can be configured in single-port mode; set peer to
16123 * tp->pdev in that case.
16124 */
16125 if (!peer) {
16126 peer = tp->pdev;
16127 return peer;
16128 }
16129
16130 /*
16131 * We don't need to keep the refcount elevated; there's no way
16132 * to remove one half of this device without removing the other.
16133 */
16134 pci_dev_put(peer);
16135
16136 return peer;
16137 }
16138
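/* Determine the chip revision ID, consulting the alternate product
 * ID registers on newer devices, and derive the chip-family flags
 * from it.
 */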
16139 static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
16140 {
16141 tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
16142 if (tg3_asic_rev(tp) == ASIC_REV_USE_PROD_ID_REG) {
16143 u32 reg;
16144
16145 /* All devices that use the alternate
16146 * ASIC REV location have a CPMU.
16147 */
16148 tg3_flag_set(tp, CPMU_PRESENT);
16149
16150 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
16151 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
16152 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
16153 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
16154 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
16155 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
16156 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
16157 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
16158 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
16159 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
16160 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787)
16161 reg = TG3PCI_GEN2_PRODID_ASICREV;
16162 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
16163 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
16164 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
16165 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
16166 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
16167 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
16168 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
16169 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
16170 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
16171 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
16172 reg = TG3PCI_GEN15_PRODID_ASICREV;
16173 else
16174 reg = TG3PCI_PRODID_ASICREV;
16175
16176 pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
16177 }
16178
16179 /* Wrong chip ID in 5752 A0. This code can be removed later
16180 * as A0 is not in production.
16181 */
16182 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5752_A0_HW)
16183 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
16184
16185 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_C0)
16186 tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;
16187
16188 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16189 tg3_asic_rev(tp) == ASIC_REV_5719 ||
16190 tg3_asic_rev(tp) == ASIC_REV_5720)
16191 tg3_flag_set(tp, 5717_PLUS);
16192
16193 if (tg3_asic_rev(tp) == ASIC_REV_57765 ||
16194 tg3_asic_rev(tp) == ASIC_REV_57766)
16195 tg3_flag_set(tp, 57765_CLASS);
16196
16197 if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS) ||
16198 tg3_asic_rev(tp) == ASIC_REV_5762)
16199 tg3_flag_set(tp, 57765_PLUS);
16200
16201 /* Intentionally exclude ASIC_REV_5906 */
16202 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16203 tg3_asic_rev(tp) == ASIC_REV_5787 ||
16204 tg3_asic_rev(tp) == ASIC_REV_5784 ||
16205 tg3_asic_rev(tp) == ASIC_REV_5761 ||
16206 tg3_asic_rev(tp) == ASIC_REV_5785 ||
16207 tg3_asic_rev(tp) == ASIC_REV_57780 ||
16208 tg3_flag(tp, 57765_PLUS))
16209 tg3_flag_set(tp, 5755_PLUS);
16210
16211 if (tg3_asic_rev(tp) == ASIC_REV_5780 ||
16212 tg3_asic_rev(tp) == ASIC_REV_5714)
16213 tg3_flag_set(tp, 5780_CLASS);
16214
16215 if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
16216 tg3_asic_rev(tp) == ASIC_REV_5752 ||
16217 tg3_asic_rev(tp) == ASIC_REV_5906 ||
16218 tg3_flag(tp, 5755_PLUS) ||
16219 tg3_flag(tp, 5780_CLASS))
16220 tg3_flag_set(tp, 5750_PLUS);
16221
16222 if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
16223 tg3_flag(tp, 5750_PLUS))
16224 tg3_flag_set(tp, 5705_PLUS);
16225 }
16226
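/* Return true if this board's PHY supports only 10/100 Mb/s. */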
16227 static bool tg3_10_100_only_device(struct tg3 *tp,
16228 const struct pci_device_id *ent)
16229 {
16230 u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;
16231
16232 if ((tg3_asic_rev(tp) == ASIC_REV_5703 &&
16233 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
16234 (tp->phy_flags & TG3_PHYFLG_IS_FET))
16235 return true;
16236
16237 if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
16238 if (tg3_asic_rev(tp) == ASIC_REV_5705) {
16239 if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
16240 return true;
16241 } else {
16242 return true;
16243 }
16244 }
16245
16246 return false;
16247 }
16248
16249 static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
16250 {
16251 u32 misc_ctrl_reg;
16252 u32 pci_state_reg, grc_misc_cfg;
16253 u32 val;
16254 u16 pci_cmd;
16255 int err;
16256
16257 /* Force memory write invalidate off. If we leave it on,
16258 * then on 5700_BX chips we have to enable a workaround.
16259 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
16260 * to match the cacheline size. The Broadcom driver has this
16261 * workaround but turns MWI off all the time, so it never uses
16262 * it. This suggests that the workaround is insufficient.
16263 */
16264 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16265 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
16266 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16267
16268 /* Important! -- Make sure register accesses are byteswapped
16269 * correctly. Also, for those chips that require it, make
16270 * sure that indirect register accesses are enabled before
16271 * the first operation.
16272 */
16273 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16274 &misc_ctrl_reg);
16275 tp->misc_host_ctrl |= (misc_ctrl_reg &
16276 MISC_HOST_CTRL_CHIPREV);
16277 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16278 tp->misc_host_ctrl);
16279
16280 tg3_detect_asic_rev(tp, misc_ctrl_reg);
16281
16282 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
16283 * we need to disable memory and use config. cycles
16284 * only to access all registers. The 5702/03 chips
16285 * can mistakenly decode the special cycles from the
16286 * ICH chipsets as memory write cycles, causing corruption
16287 * of register and memory space. Only certain ICH bridges
16288 * will drive special cycles with non-zero data during the
16289 * address phase which can fall within the 5703's address
16290 * range. This is not an ICH bug as the PCI spec allows
16291 * non-zero address during special cycles. However, only
16292 * these ICH bridges are known to drive non-zero addresses
16293 * during special cycles.
16294 *
16295 * Since special cycles do not cross PCI bridges, we only
16296 * enable this workaround if the 5703 is on the secondary
16297 * bus of these ICH bridges.
16298 */
16299 if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1) ||
16300 (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A2)) {
16301 static struct tg3_dev_id {
16302 u32 vendor;
16303 u32 device;
16304 u32 rev;
16305 } ich_chipsets[] = {
16306 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
16307 PCI_ANY_ID },
16308 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
16309 PCI_ANY_ID },
16310 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
16311 0xa },
16312 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
16313 PCI_ANY_ID },
16314 { },
16315 };
16316 struct tg3_dev_id *pci_id = &ich_chipsets[0];
16317 struct pci_dev *bridge = NULL;
16318
16319 while (pci_id->vendor != 0) {
16320 bridge = pci_get_device(pci_id->vendor, pci_id->device,
16321 bridge);
16322 if (!bridge) {
16323 pci_id++;
16324 continue;
16325 }
16326 if (pci_id->rev != PCI_ANY_ID) {
16327 if (bridge->revision > pci_id->rev)
16328 continue;
16329 }
16330 if (bridge->subordinate &&
16331 (bridge->subordinate->number ==
16332 tp->pdev->bus->number)) {
16333 tg3_flag_set(tp, ICH_WORKAROUND);
16334 pci_dev_put(bridge);
16335 break;
16336 }
16337 }
16338 }
16339
16340 if (tg3_asic_rev(tp) == ASIC_REV_5701) {
16341 static struct tg3_dev_id {
16342 u32 vendor;
16343 u32 device;
16344 } bridge_chipsets[] = {
16345 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
16346 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
16347 { },
16348 };
16349 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
16350 struct pci_dev *bridge = NULL;
16351
16352 while (pci_id->vendor != 0) {
16353 bridge = pci_get_device(pci_id->vendor,
16354 pci_id->device,
16355 bridge);
16356 if (!bridge) {
16357 pci_id++;
16358 continue;
16359 }
16360 if (bridge->subordinate &&
16361 (bridge->subordinate->number <=
16362 tp->pdev->bus->number) &&
16363 (bridge->subordinate->busn_res.end >=
16364 tp->pdev->bus->number)) {
16365 tg3_flag_set(tp, 5701_DMA_BUG);
16366 pci_dev_put(bridge);
16367 break;
16368 }
16369 }
16370 }
16371
16372 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
16373 * DMA addresses > 40-bit. This bridge may have additional 57xx
16374 * devices behind it in some 4-port NIC designs, for example.
16375 * Any tg3 device found behind the bridge will also need the 40-bit
16376 * DMA workaround.
16377 */
16378 if (tg3_flag(tp, 5780_CLASS)) {
16379 tg3_flag_set(tp, 40BIT_DMA_BUG);
16380 tp->msi_cap = tp->pdev->msi_cap;
16381 } else {
16382 struct pci_dev *bridge = NULL;
16383
16384 do {
16385 bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
16386 PCI_DEVICE_ID_SERVERWORKS_EPB,
16387 bridge);
16388 if (bridge && bridge->subordinate &&
16389 (bridge->subordinate->number <=
16390 tp->pdev->bus->number) &&
16391 (bridge->subordinate->busn_res.end >=
16392 tp->pdev->bus->number)) {
16393 tg3_flag_set(tp, 40BIT_DMA_BUG);
16394 pci_dev_put(bridge);
16395 break;
16396 }
16397 } while (bridge);
16398 }
16399
16400 if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16401 tg3_asic_rev(tp) == ASIC_REV_5714)
16402 tp->pdev_peer = tg3_find_peer(tp);
16403
16404 /* Determine TSO capabilities */
16405 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0)
16406 ; /* Do nothing. HW bug. */
16407 else if (tg3_flag(tp, 57765_PLUS))
16408 tg3_flag_set(tp, HW_TSO_3);
16409 else if (tg3_flag(tp, 5755_PLUS) ||
16410 tg3_asic_rev(tp) == ASIC_REV_5906)
16411 tg3_flag_set(tp, HW_TSO_2);
16412 else if (tg3_flag(tp, 5750_PLUS)) {
16413 tg3_flag_set(tp, HW_TSO_1);
16414 tg3_flag_set(tp, TSO_BUG);
16415 if (tg3_asic_rev(tp) == ASIC_REV_5750 &&
16416 tg3_chip_rev_id(tp) >= CHIPREV_ID_5750_C2)
16417 tg3_flag_clear(tp, TSO_BUG);
16418 } else if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
16419 tg3_asic_rev(tp) != ASIC_REV_5701 &&
16420 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
16421 tg3_flag_set(tp, FW_TSO);
16422 tg3_flag_set(tp, TSO_BUG);
16423 if (tg3_asic_rev(tp) == ASIC_REV_5705)
16424 tp->fw_needed = FIRMWARE_TG3TSO5;
16425 else
16426 tp->fw_needed = FIRMWARE_TG3TSO;
16427 }
16428
16429 /* Selectively allow TSO based on operating conditions */
16430 if (tg3_flag(tp, HW_TSO_1) ||
16431 tg3_flag(tp, HW_TSO_2) ||
16432 tg3_flag(tp, HW_TSO_3) ||
16433 tg3_flag(tp, FW_TSO)) {
16434 /* For firmware TSO, assume ASF is disabled.
16435 * We'll disable TSO later if we discover ASF
16436 * is enabled in tg3_get_eeprom_hw_cfg().
16437 */
16438 tg3_flag_set(tp, TSO_CAPABLE);
16439 } else {
16440 tg3_flag_clear(tp, TSO_CAPABLE);
16441 tg3_flag_clear(tp, TSO_BUG);
16442 tp->fw_needed = NULL;
16443 }
16444
16445 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0)
16446 tp->fw_needed = FIRMWARE_TG3;
16447
16448 if (tg3_asic_rev(tp) == ASIC_REV_57766)
16449 tp->fw_needed = FIRMWARE_TG357766;
16450
16451 tp->irq_max = 1;
16452
16453 if (tg3_flag(tp, 5750_PLUS)) {
16454 tg3_flag_set(tp, SUPPORT_MSI);
16455 if (tg3_chip_rev(tp) == CHIPREV_5750_AX ||
16456 tg3_chip_rev(tp) == CHIPREV_5750_BX ||
16457 (tg3_asic_rev(tp) == ASIC_REV_5714 &&
16458 tg3_chip_rev_id(tp) <= CHIPREV_ID_5714_A2 &&
16459 tp->pdev_peer == tp->pdev))
16460 tg3_flag_clear(tp, SUPPORT_MSI);
16461
16462 if (tg3_flag(tp, 5755_PLUS) ||
16463 tg3_asic_rev(tp) == ASIC_REV_5906) {
16464 tg3_flag_set(tp, 1SHOT_MSI);
16465 }
16466
16467 if (tg3_flag(tp, 57765_PLUS)) {
16468 tg3_flag_set(tp, SUPPORT_MSIX);
16469 tp->irq_max = TG3_IRQ_MAX_VECS;
16470 }
16471 }
16472
16473 tp->txq_max = 1;
16474 tp->rxq_max = 1;
16475 if (tp->irq_max > 1) {
16476 tp->rxq_max = TG3_RSS_MAX_NUM_QS;
16477 tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);
16478
16479 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
16480 tg3_asic_rev(tp) == ASIC_REV_5720)
16481 tp->txq_max = tp->irq_max - 1;
16482 }
16483
16484 if (tg3_flag(tp, 5755_PLUS) ||
16485 tg3_asic_rev(tp) == ASIC_REV_5906)
16486 tg3_flag_set(tp, SHORT_DMA_BUG);
16487
16488 if (tg3_asic_rev(tp) == ASIC_REV_5719)
16489 tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
16490
16491 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16492 tg3_asic_rev(tp) == ASIC_REV_5719 ||
16493 tg3_asic_rev(tp) == ASIC_REV_5720 ||
16494 tg3_asic_rev(tp) == ASIC_REV_5762)
16495 tg3_flag_set(tp, LRG_PROD_RING_CAP);
16496
16497 if (tg3_flag(tp, 57765_PLUS) &&
16498 tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0)
16499 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
16500
16501 if (!tg3_flag(tp, 5705_PLUS) ||
16502 tg3_flag(tp, 5780_CLASS) ||
16503 tg3_flag(tp, USE_JUMBO_BDFLAG))
16504 tg3_flag_set(tp, JUMBO_CAPABLE);
16505
16506 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16507 &pci_state_reg);
16508
16509 if (pci_is_pcie(tp->pdev)) {
16510 u16 lnkctl;
16511
16512 tg3_flag_set(tp, PCI_EXPRESS);
16513
16514 pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
16515 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
16516 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16517 tg3_flag_clear(tp, HW_TSO_2);
16518 tg3_flag_clear(tp, TSO_CAPABLE);
16519 }
16520 if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
16521 tg3_asic_rev(tp) == ASIC_REV_5761 ||
16522 tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A0 ||
16523 tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A1)
16524 tg3_flag_set(tp, CLKREQ_BUG);
16525 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_A0) {
16526 tg3_flag_set(tp, L1PLLPD_EN);
16527 }
16528 } else if (tg3_asic_rev(tp) == ASIC_REV_5785) {
16529 /* BCM5785 devices are effectively PCIe devices, and should
16530 * follow PCIe codepaths, but do not have a PCIe capabilities
16531 * section.
16532 */
16533 tg3_flag_set(tp, PCI_EXPRESS);
16534 } else if (!tg3_flag(tp, 5705_PLUS) ||
16535 tg3_flag(tp, 5780_CLASS)) {
16536 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
16537 if (!tp->pcix_cap) {
16538 dev_err(&tp->pdev->dev,
16539 "Cannot find PCI-X capability, aborting\n");
16540 return -EIO;
16541 }
16542
16543 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
16544 tg3_flag_set(tp, PCIX_MODE);
16545 }
16546
16547 /* If we have an AMD 762 or VIA K8T800 chipset, write
16548 * reordering to the mailbox registers done by the host
16549 * controller can cause major trouble. We read back from
16550 * every mailbox register write to force the writes to be
16551 * posted to the chip in order.
16552 */
16553 if (pci_dev_present(tg3_write_reorder_chipsets) &&
16554 !tg3_flag(tp, PCI_EXPRESS))
16555 tg3_flag_set(tp, MBOX_WRITE_REORDER);
16556
16557 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
16558 &tp->pci_cacheline_sz);
16559 pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16560 &tp->pci_lat_timer);
16561 if (tg3_asic_rev(tp) == ASIC_REV_5703 &&
16562 tp->pci_lat_timer < 64) {
16563 tp->pci_lat_timer = 64;
16564 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16565 tp->pci_lat_timer);
16566 }
16567
16568 /* Important! -- It is critical that the PCI-X hw workaround
16569 * situation is decided before the first MMIO register access.
16570 */
16571 if (tg3_chip_rev(tp) == CHIPREV_5700_BX) {
16572 /* 5700 BX chips need to have their TX producer index
16573 * mailboxes written twice to work around a bug.
16574 */
16575 tg3_flag_set(tp, TXD_MBOX_HWBUG);
16576
16577 /* If we are in PCI-X mode, enable register write workaround.
16578 *
16579 * The workaround is to use indirect register accesses
16580 * for all chip writes not to mailbox registers.
16581 */
16582 if (tg3_flag(tp, PCIX_MODE)) {
16583 u32 pm_reg;
16584
16585 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16586
16587 /* The chip can have its power management PCI config
16588 * space registers clobbered due to this bug.
16589 * So explicitly force the chip into D0 here.
16590 */
16591 pci_read_config_dword(tp->pdev,
16592 tp->pdev->pm_cap + PCI_PM_CTRL,
16593 &pm_reg);
16594 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
16595 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
16596 pci_write_config_dword(tp->pdev,
16597 tp->pdev->pm_cap + PCI_PM_CTRL,
16598 pm_reg);
16599
16600 /* Also, force SERR#/PERR# in PCI command. */
16601 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16602 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
16603 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16604 }
16605 }
16606
16607 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
16608 tg3_flag_set(tp, PCI_HIGH_SPEED);
16609 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
16610 tg3_flag_set(tp, PCI_32BIT);
16611
16612 /* Chip-specific fixup from Broadcom driver */
16613 if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0) &&
16614 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
16615 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
16616 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
16617 }
16618
16619 /* Default fast path register access methods */
16620 tp->read32 = tg3_read32;
16621 tp->write32 = tg3_write32;
16622 tp->read32_mbox = tg3_read32;
16623 tp->write32_mbox = tg3_write32;
16624 tp->write32_tx_mbox = tg3_write32;
16625 tp->write32_rx_mbox = tg3_write32;
16626
16627 /* Various workaround register access methods */
16628 if (tg3_flag(tp, PCIX_TARGET_HWBUG))
16629 tp->write32 = tg3_write_indirect_reg32;
16630 else if (tg3_asic_rev(tp) == ASIC_REV_5701 ||
16631 (tg3_flag(tp, PCI_EXPRESS) &&
16632 tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0)) {
16633 /*
16634 * Back-to-back register writes can cause problems on these
16635 * chips; the workaround is to read back all reg writes
16636 * except those to mailbox regs.
16637 *
16638 * See tg3_write_indirect_reg32().
16639 */
16640 tp->write32 = tg3_write_flush_reg32;
16641 }
16642
16643 if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
16644 tp->write32_tx_mbox = tg3_write32_tx_mbox;
16645 if (tg3_flag(tp, MBOX_WRITE_REORDER))
16646 tp->write32_rx_mbox = tg3_write_flush_reg32;
16647 }
16648
16649 if (tg3_flag(tp, ICH_WORKAROUND)) {
16650 tp->read32 = tg3_read_indirect_reg32;
16651 tp->write32 = tg3_write_indirect_reg32;
16652 tp->read32_mbox = tg3_read_indirect_mbox;
16653 tp->write32_mbox = tg3_write_indirect_mbox;
16654 tp->write32_tx_mbox = tg3_write_indirect_mbox;
16655 tp->write32_rx_mbox = tg3_write_indirect_mbox;
16656
16657 iounmap(tp->regs);
16658 tp->regs = NULL;
16659
16660 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16661 pci_cmd &= ~PCI_COMMAND_MEMORY;
16662 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16663 }
16664 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16665 tp->read32_mbox = tg3_read32_mbox_5906;
16666 tp->write32_mbox = tg3_write32_mbox_5906;
16667 tp->write32_tx_mbox = tg3_write32_mbox_5906;
16668 tp->write32_rx_mbox = tg3_write32_mbox_5906;
16669 }
16670
16671 if (tp->write32 == tg3_write_indirect_reg32 ||
16672 (tg3_flag(tp, PCIX_MODE) &&
16673 (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16674 tg3_asic_rev(tp) == ASIC_REV_5701)))
16675 tg3_flag_set(tp, SRAM_USE_CONFIG);
16676
16677 /* The memory arbiter has to be enabled in order for SRAM accesses
16678 * to succeed. Normally on powerup the tg3 chip firmware will make
16679 * sure it is enabled, but other entities such as system netboot
16680 * code might disable it.
16681 */
16682 val = tr32(MEMARB_MODE);
16683 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
16684
16685 tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
16686 if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16687 tg3_flag(tp, 5780_CLASS)) {
16688 if (tg3_flag(tp, PCIX_MODE)) {
16689 pci_read_config_dword(tp->pdev,
16690 tp->pcix_cap + PCI_X_STATUS,
16691 &val);
16692 tp->pci_fn = val & 0x7;
16693 }
16694 } else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16695 tg3_asic_rev(tp) == ASIC_REV_5719 ||
16696 tg3_asic_rev(tp) == ASIC_REV_5720) {
16697 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
16698 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) != NIC_SRAM_CPMUSTAT_SIG)
16699 val = tr32(TG3_CPMU_STATUS);
16700
16701 if (tg3_asic_rev(tp) == ASIC_REV_5717)
16702 tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5717) ? 1 : 0;
16703 else
16704 tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
16705 TG3_CPMU_STATUS_FSHFT_5719;
16706 }
16707
16708 if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
16709 tp->write32_tx_mbox = tg3_write_flush_reg32;
16710 tp->write32_rx_mbox = tg3_write_flush_reg32;
16711 }
16712
16713 /* Get eeprom hw config before calling tg3_set_power_state().
16714 * In particular, the TG3_FLAG_IS_NIC flag must be
16715 * determined before calling tg3_set_power_state() so that
16716 * we know whether or not to switch out of Vaux power.
16717 * When the flag is set, it means that GPIO1 is used for eeprom
16718 * write protect and also implies that it is a LOM where GPIOs
16719 * are not used to switch power.
16720 */
16721 tg3_get_eeprom_hw_cfg(tp);
16722
16723 if (tg3_flag(tp, FW_TSO) && tg3_flag(tp, ENABLE_ASF)) {
16724 tg3_flag_clear(tp, TSO_CAPABLE);
16725 tg3_flag_clear(tp, TSO_BUG);
16726 tp->fw_needed = NULL;
16727 }
16728
16729 if (tg3_flag(tp, ENABLE_APE)) {
16730 /* Allow reads and writes to the
16731 * APE register and memory space.
16732 */
16733 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
16734 PCISTATE_ALLOW_APE_SHMEM_WR |
16735 PCISTATE_ALLOW_APE_PSPACE_WR;
16736 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
16737 pci_state_reg);
16738
16739 tg3_ape_lock_init(tp);
16740 tp->ape_hb_interval =
16741 msecs_to_jiffies(APE_HOST_HEARTBEAT_INT_5SEC);
16742 }
16743
16744 /* Set up tp->grc_local_ctrl before calling
16745 * tg3_pwrsrc_switch_to_vmain(). GPIO1 driven high
16746 * will bring 5700's external PHY out of reset.
16747 * It is also used as eeprom write protect on LOMs.
16748 */
16749 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
16750 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16751 tg3_flag(tp, EEPROM_WRITE_PROT))
16752 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
16753 GRC_LCLCTRL_GPIO_OUTPUT1);
16754 /* Unused GPIO3 must be driven as output on 5752 because there
16755 * are no pull-up resistors on unused GPIO pins.
16756 */
16757 else if (tg3_asic_rev(tp) == ASIC_REV_5752)
16758 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
16759
16760 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16761 tg3_asic_rev(tp) == ASIC_REV_57780 ||
16762 tg3_flag(tp, 57765_CLASS))
16763 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16764
16765 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
16766 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
16767 /* Turn off the debug UART. */
16768 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16769 if (tg3_flag(tp, IS_NIC))
16770 /* Keep VMain power. */
16771 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
16772 GRC_LCLCTRL_GPIO_OUTPUT0;
16773 }
16774
16775 if (tg3_asic_rev(tp) == ASIC_REV_5762)
16776 tp->grc_local_ctrl |=
16777 tr32(GRC_LOCAL_CTRL) & GRC_LCLCTRL_GPIO_UART_SEL;
16778
16779 /* Switch out of Vaux if it is a NIC */
16780 tg3_pwrsrc_switch_to_vmain(tp);
16781
16782 /* Derive initial jumbo mode from MTU assigned in
16783 * ether_setup() via the alloc_etherdev() call.
16784 */
16785 if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
16786 tg3_flag_set(tp, JUMBO_RING_ENABLE);
16787
16788 /* Determine WakeOnLan speed to use. */
16789 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16790 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16791 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16792 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2) {
16793 tg3_flag_clear(tp, WOL_SPEED_100MB);
16794 } else {
16795 tg3_flag_set(tp, WOL_SPEED_100MB);
16796 }
16797
16798 if (tg3_asic_rev(tp) == ASIC_REV_5906)
16799 tp->phy_flags |= TG3_PHYFLG_IS_FET;
16800
16801 /* A few boards don't want Ethernet@WireSpeed phy feature */
16802 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16803 (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16804 (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) &&
16805 (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A1)) ||
16806 (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
16807 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
16808 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
16809
16810 if (tg3_chip_rev(tp) == CHIPREV_5703_AX ||
16811 tg3_chip_rev(tp) == CHIPREV_5704_AX)
16812 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
16813 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0)
16814 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
16815
16816 if (tg3_flag(tp, 5705_PLUS) &&
16817 !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
16818 tg3_asic_rev(tp) != ASIC_REV_5785 &&
16819 tg3_asic_rev(tp) != ASIC_REV_57780 &&
16820 !tg3_flag(tp, 57765_PLUS)) {
16821 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16822 tg3_asic_rev(tp) == ASIC_REV_5787 ||
16823 tg3_asic_rev(tp) == ASIC_REV_5784 ||
16824 tg3_asic_rev(tp) == ASIC_REV_5761) {
16825 if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
16826 tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
16827 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
16828 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
16829 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
16830 } else
16831 tp->phy_flags |= TG3_PHYFLG_BER_BUG;
16832 }
16833
16834 if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
16835 tg3_chip_rev(tp) != CHIPREV_5784_AX) {
16836 tp->phy_otp = tg3_read_otp_phycfg(tp);
16837 if (tp->phy_otp == 0)
16838 tp->phy_otp = TG3_OTP_DEFAULT;
16839 }
16840
16841 if (tg3_flag(tp, CPMU_PRESENT))
16842 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
16843 else
16844 tp->mi_mode = MAC_MI_MODE_BASE;
16845
16846 tp->coalesce_mode = 0;
16847 if (tg3_chip_rev(tp) != CHIPREV_5700_AX &&
16848 tg3_chip_rev(tp) != CHIPREV_5700_BX)
16849 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
16850
16851 /* Set these bits to enable statistics workaround. */
16852 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16853 tg3_asic_rev(tp) == ASIC_REV_5762 ||
16854 tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
16855 tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) {
16856 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
16857 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
16858 }
16859
16860 if (tg3_asic_rev(tp) == ASIC_REV_5785 ||
16861 tg3_asic_rev(tp) == ASIC_REV_57780)
16862 tg3_flag_set(tp, USE_PHYLIB);
16863
16864 err = tg3_mdio_init(tp);
16865 if (err)
16866 return err;
16867
16868 /* Initialize data/descriptor byte/word swapping. */
16869 val = tr32(GRC_MODE);
16870 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
16871 tg3_asic_rev(tp) == ASIC_REV_5762)
16872 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
16873 GRC_MODE_WORD_SWAP_B2HRX_DATA |
16874 GRC_MODE_B2HRX_ENABLE |
16875 GRC_MODE_HTX2B_ENABLE |
16876 GRC_MODE_HOST_STACKUP);
16877 else
16878 val &= GRC_MODE_HOST_STACKUP;
16879
16880 tw32(GRC_MODE, val | tp->grc_mode);
16881
16882 tg3_switch_clocks(tp);
16883
16884 /* Clear this out for sanity. */
16885 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
16886
16887 /* Clear TG3PCI_REG_BASE_ADDR to prevent hangs. */
16888 tw32(TG3PCI_REG_BASE_ADDR, 0);
16889
16890 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16891 &pci_state_reg);
16892 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
16893 !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
16894 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16895 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16896 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2 ||
16897 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B5) {
16898 void __iomem *sram_base;
16899
16900 /* Write some dummy words into the SRAM status block
16901 * area and see if they read back correctly. If the read-back
16902 * value is bad, force enable the PCIX workaround.
16903 */
16904 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
16905
16906 writel(0x00000000, sram_base);
16907 writel(0x00000000, sram_base + 4);
16908 writel(0xffffffff, sram_base + 4);
16909 if (readl(sram_base) != 0x00000000)
16910 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16911 }
16912 }
16913
16914 udelay(50);
16915 tg3_nvram_init(tp);
16916
16917 /* If the device has an NVRAM, no need to load patch firmware */
16918 if (tg3_asic_rev(tp) == ASIC_REV_57766 &&
16919 !tg3_flag(tp, NO_NVRAM))
16920 tp->fw_needed = NULL;
16921
16922 grc_misc_cfg = tr32(GRC_MISC_CFG);
16923 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
16924
16925 if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16926 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
16927 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
16928 tg3_flag_set(tp, IS_5788);
16929
16930 if (!tg3_flag(tp, IS_5788) &&
16931 tg3_asic_rev(tp) != ASIC_REV_5700)
16932 tg3_flag_set(tp, TAGGED_STATUS);
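/* The idea behind tagged status (explanatory note): the chip stamps
 * each status block update with a tag, and the driver echoes that tag
 * back through the interrupt mailbox when it finishes a pass, so the
 * hardware can tell whether new events arrived while the block was
 * being processed and re-assert the interrupt instead of losing them.
 */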
16933 if (tg3_flag(tp, TAGGED_STATUS)) {
16934 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
16935 HOSTCC_MODE_CLRTICK_TXBD);
16936
16937 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
16938 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16939 tp->misc_host_ctrl);
16940 }
16941
16942 /* Preserve the APE MAC_MODE bits */
16943 if (tg3_flag(tp, ENABLE_APE))
16944 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
16945 else
16946 tp->mac_mode = 0;
16947
16948 if (tg3_10_100_only_device(tp, ent))
16949 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
16950
16951 err = tg3_phy_probe(tp);
16952 if (err) {
16953 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
16954 /* ... but do not return immediately ... */
16955 tg3_mdio_fini(tp);
16956 }
16957
16958 tg3_read_vpd(tp);
16959 tg3_read_fw_ver(tp);
16960
16961 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
16962 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16963 } else {
16964 if (tg3_asic_rev(tp) == ASIC_REV_5700)
16965 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16966 else
16967 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16968 }
16969
16970 /* 5700 {AX,BX} chips have a broken status block link
16971 * change bit implementation, so we must use the
16972 * status register in those cases.
16973 */
16974 if (tg3_asic_rev(tp) == ASIC_REV_5700)
16975 tg3_flag_set(tp, USE_LINKCHG_REG);
16976 else
16977 tg3_flag_clear(tp, USE_LINKCHG_REG);
16978
16979 /* The led_ctrl is set during tg3_phy_probe, here we might
16980 * have to force the link status polling mechanism based
16981 * upon subsystem IDs.
16982 */
16983 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
16984 tg3_asic_rev(tp) == ASIC_REV_5701 &&
16985 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
16986 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16987 tg3_flag_set(tp, USE_LINKCHG_REG);
16988 }
16989
16990 /* For all SERDES we poll the MAC status register. */
16991 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
16992 tg3_flag_set(tp, POLL_SERDES);
16993 else
16994 tg3_flag_clear(tp, POLL_SERDES);
16995
16996 if (tg3_flag(tp, ENABLE_APE) && tg3_flag(tp, ENABLE_ASF))
16997 tg3_flag_set(tp, POLL_CPMU_LINK);
16998
16999 tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
17000 tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
17001 if (tg3_asic_rev(tp) == ASIC_REV_5701 &&
17002 tg3_flag(tp, PCIX_MODE)) {
17003 tp->rx_offset = NET_SKB_PAD;
17004 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
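/* With rx_offset forced back to NET_SKB_PAD above, the IP header
 * presumably lands on a misaligned address, so max out the copy
 * threshold and copy every received packet into an aligned buffer
 * on architectures without efficient unaligned access.
 */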
17005 tp->rx_copy_thresh = ~(u16)0;
17006 #endif
17007 }
17008
17009 tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
17010 tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
17011 tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
17012
17013 tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
17014
17015 /* Increment the rx prod index on the rx std ring by at most
17016 * 8 for these chips to work around hw errata.
17017 */
17018 if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
17019 tg3_asic_rev(tp) == ASIC_REV_5752 ||
17020 tg3_asic_rev(tp) == ASIC_REV_5755)
17021 tp->rx_std_max_post = 8;
17022
17023 if (tg3_flag(tp, ASPM_WORKAROUND))
17024 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
17025 PCIE_PWR_MGMT_L1_THRESH_MSK;
17026
17027 return err;
17028 }
17029
17030 static int tg3_get_device_address(struct tg3 *tp, u8 *addr)
17031 {
17032 u32 hi, lo, mac_offset;
17033 int addr_ok = 0;
17034 int err;
17035
17036 if (!eth_platform_get_mac_address(&tp->pdev->dev, addr))
17037 return 0;
17038
17039 if (tg3_flag(tp, IS_SSB_CORE)) {
17040 err = ssb_gige_get_macaddr(tp->pdev, addr);
17041 if (!err && is_valid_ether_addr(addr))
17042 return 0;
17043 }
17044
17045 mac_offset = 0x7c;
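/* 0x7c is the default NVRAM offset of the MAC address; the
 * multi-port/multi-function adjustments below select the copy that
 * belongs to this PCI function (e.g. 0xcc for the second one).
 */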
17046 if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
17047 tg3_flag(tp, 5780_CLASS)) {
17048 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
17049 mac_offset = 0xcc;
17050 if (tg3_nvram_lock(tp))
17051 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
17052 else
17053 tg3_nvram_unlock(tp);
17054 } else if (tg3_flag(tp, 5717_PLUS)) {
17055 if (tp->pci_fn & 1)
17056 mac_offset = 0xcc;
17057 if (tp->pci_fn > 1)
17058 mac_offset += 0x18c;
17059 } else if (tg3_asic_rev(tp) == ASIC_REV_5906)
17060 mac_offset = 0x10;
17061
17062 /* First try to get it from MAC address mailbox. */
17063 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
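/* Bootcode leaves the ASCII signature "HK" (0x48 'H', 0x4b 'K') in
 * the top 16 bits once it has populated the mailbox.
 */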
17064 if ((hi >> 16) == 0x484b) {
17065 addr[0] = (hi >> 8) & 0xff;
17066 addr[1] = (hi >> 0) & 0xff;
17067
17068 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
17069 addr[2] = (lo >> 24) & 0xff;
17070 addr[3] = (lo >> 16) & 0xff;
17071 addr[4] = (lo >> 8) & 0xff;
17072 addr[5] = (lo >> 0) & 0xff;
17073
17074 /* Some old bootcode may report a 0 MAC address in SRAM */
17075 addr_ok = is_valid_ether_addr(addr);
17076 }
17077 if (!addr_ok) {
17078 /* Next, try NVRAM. */
17079 if (!tg3_flag(tp, NO_NVRAM) &&
17080 !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
17081 !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
17082 memcpy(&addr[0], ((char *)&hi) + 2, 2);
17083 memcpy(&addr[2], (char *)&lo, sizeof(lo));
17084 }
17085 /* Finally just fetch it out of the MAC control regs. */
17086 else {
17087 hi = tr32(MAC_ADDR_0_HIGH);
17088 lo = tr32(MAC_ADDR_0_LOW);
17089
17090 addr[5] = lo & 0xff;
17091 addr[4] = (lo >> 8) & 0xff;
17092 addr[3] = (lo >> 16) & 0xff;
17093 addr[2] = (lo >> 24) & 0xff;
17094 addr[1] = hi & 0xff;
17095 addr[0] = (hi >> 8) & 0xff;
17096 }
17097 }
17098
17099 if (!is_valid_ether_addr(addr))
17100 return -EINVAL;
17101 return 0;
17102 }
17103
17104 #define BOUNDARY_SINGLE_CACHELINE 1
17105 #define BOUNDARY_MULTI_CACHELINE 2
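/* Goal hints for tg3_calc_dma_bndry(): break DMA bursts at every
 * cache line, or only at multi-cache-line boundaries; a goal of 0
 * leaves the chip's default behavior untouched.
 */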
17106
17107 static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
17108 {
17109 int cacheline_size;
17110 u8 byte;
17111 int goal;
17112
17113 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
17114 if (byte == 0)
17115 cacheline_size = 1024;
17116 else
17117 cacheline_size = (int) byte * 4;
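/* PCI_CACHE_LINE_SIZE is expressed in 32-bit words, so a register
 * value of 16, for example, means a 64-byte cache line; an
 * unprogrammed value of 0 is treated as the 1024-byte worst case.
 */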
17118
17119 /* On 5703 and later chips, the boundary bits have no effect,
17120 * except on PCI Express devices, which retain limited write-side controls.
17121 */
17122 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
17123 tg3_asic_rev(tp) != ASIC_REV_5701 &&
17124 !tg3_flag(tp, PCI_EXPRESS))
17125 goto out;
17126
17127 #if defined(CONFIG_PPC64) || defined(CONFIG_PARISC)
17128 goal = BOUNDARY_MULTI_CACHELINE;
17129 #else
17130 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
17131 goal = BOUNDARY_SINGLE_CACHELINE;
17132 #else
17133 goal = 0;
17134 #endif
17135 #endif
17136
17137 if (tg3_flag(tp, 57765_PLUS)) {
17138 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
17139 goto out;
17140 }
17141
17142 if (!goal)
17143 goto out;
17144
17145 /* PCI controllers on most RISC systems tend to disconnect
17146 * when a device tries to burst across a cache-line boundary.
17147 * Therefore, letting tg3 do so just wastes PCI bandwidth.
17148 *
17149 * Unfortunately, for PCI-E there are only limited
17150 * write-side controls for this, and thus for reads
17151 * we will still get the disconnects. We'll also waste
17152 * these PCI cycles for both read and write for chips
17153 * other than 5700 and 5701 which do not implement the
17154 * boundary bits.
17155 */
17156 if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
17157 switch (cacheline_size) {
17158 case 16:
17159 case 32:
17160 case 64:
17161 case 128:
17162 if (goal == BOUNDARY_SINGLE_CACHELINE) {
17163 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
17164 DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
17165 } else {
17166 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
17167 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
17168 }
17169 break;
17170
17171 case 256:
17172 val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
17173 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
17174 break;
17175
17176 default:
17177 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
17178 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
17179 break;
17180 }
17181 } else if (tg3_flag(tp, PCI_EXPRESS)) {
17182 switch (cacheline_size) {
17183 case 16:
17184 case 32:
17185 case 64:
17186 if (goal == BOUNDARY_SINGLE_CACHELINE) {
17187 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
17188 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
17189 break;
17190 }
17191 fallthrough;
17192 case 128:
17193 default:
17194 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
17195 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
17196 break;
17197 }
17198 } else {
17199 switch (cacheline_size) {
17200 case 16:
17201 if (goal == BOUNDARY_SINGLE_CACHELINE) {
17202 val |= (DMA_RWCTRL_READ_BNDRY_16 |
17203 DMA_RWCTRL_WRITE_BNDRY_16);
17204 break;
17205 }
17206 fallthrough;
17207 case 32:
17208 if (goal == BOUNDARY_SINGLE_CACHELINE) {
17209 val |= (DMA_RWCTRL_READ_BNDRY_32 |
17210 DMA_RWCTRL_WRITE_BNDRY_32);
17211 break;
17212 }
17213 fallthrough;
17214 case 64:
17215 if (goal == BOUNDARY_SINGLE_CACHELINE) {
17216 val |= (DMA_RWCTRL_READ_BNDRY_64 |
17217 DMA_RWCTRL_WRITE_BNDRY_64);
17218 break;
17219 }
17220 fallthrough;
17221 case 128:
17222 if (goal == BOUNDARY_SINGLE_CACHELINE) {
17223 val |= (DMA_RWCTRL_READ_BNDRY_128 |
17224 DMA_RWCTRL_WRITE_BNDRY_128);
17225 break;
17226 }
17227 fallthrough;
17228 case 256:
17229 val |= (DMA_RWCTRL_READ_BNDRY_256 |
17230 DMA_RWCTRL_WRITE_BNDRY_256);
17231 break;
17232 case 512:
17233 val |= (DMA_RWCTRL_READ_BNDRY_512 |
17234 DMA_RWCTRL_WRITE_BNDRY_512);
17235 break;
17236 case 1024:
17237 default:
17238 val |= (DMA_RWCTRL_READ_BNDRY_1024 |
17239 DMA_RWCTRL_WRITE_BNDRY_1024);
17240 break;
17241 }
17242 }
17243
17244 out:
17245 return val;
17246 }
17247
17248 static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma,
17249 int size, bool to_device)
17250 {
17251 struct tg3_internal_buffer_desc test_desc;
17252 u32 sram_dma_descs;
17253 int i, ret;
17254
17255 sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
17256
17257 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
17258 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
17259 tw32(RDMAC_STATUS, 0);
17260 tw32(WDMAC_STATUS, 0);
17261
17262 tw32(BUFMGR_MODE, 0);
17263 tw32(FTQ_RESET, 0);
17264
17265 test_desc.addr_hi = ((u64) buf_dma) >> 32;
17266 test_desc.addr_lo = buf_dma & 0xffffffff;
17267 test_desc.nic_mbuf = 0x00002100;
17268 test_desc.len = size;
17269
17270 /*
17271 * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
17272 * the *second* time the tg3 driver was getting loaded after an
17273 * initial scan.
17274 *
17275 * Broadcom tells me:
17276 * ...the DMA engine is connected to the GRC block and a DMA
17277 * reset may affect the GRC block in some unpredictable way...
17278 * The behavior of resets to individual blocks has not been tested.
17279 *
17280 * Broadcom noted the GRC reset will also reset all sub-components.
17281 */
17282 if (to_device) {
17283 test_desc.cqid_sqid = (13 << 8) | 2;
17284
17285 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
17286 udelay(40);
17287 } else {
17288 test_desc.cqid_sqid = (16 << 8) | 7;
17289
17290 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
17291 udelay(40);
17292 }
17293 test_desc.flags = 0x00000005;
17294
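/* Copy the descriptor into NIC SRAM one word at a time through the
 * indirect config-space window: point MEM_WIN_BASE_ADDR at the
 * target word, write it via MEM_WIN_DATA, and park the window back
 * at offset 0 when done.
 */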
17295 for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
17296 u32 val;
17297
17298 val = *(((u32 *)&test_desc) + i);
17299 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
17300 sram_dma_descs + (i * sizeof(u32)));
17301 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
17302 }
17303 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
17304
17305 if (to_device)
17306 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
17307 else
17308 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
17309
17310 ret = -ENODEV;
17311 for (i = 0; i < 40; i++) {
17312 u32 val;
17313
17314 if (to_device)
17315 val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
17316 else
17317 val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
17318 if ((val & 0xffff) == sram_dma_descs) {
17319 ret = 0;
17320 break;
17321 }
17322
17323 udelay(100);
17324 }
17325
17326 return ret;
17327 }
17328
17329 #define TEST_BUFFER_SIZE 0x2000
17330
17331 static const struct pci_device_id tg3_dma_wait_state_chipsets[] = {
17332 { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
17333 { },
17334 };
17335
17336 static int tg3_test_dma(struct tg3 *tp)
17337 {
17338 dma_addr_t buf_dma;
17339 u32 *buf, saved_dma_rwctrl;
17340 int ret = 0;
17341
17342 buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
17343 &buf_dma, GFP_KERNEL);
17344 if (!buf) {
17345 ret = -ENOMEM;
17346 goto out_nofree;
17347 }
17348
17349 tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
17350 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
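/* 0x7 and 0x6 appear to be the PCI bus command codes the DMA engines
 * drive for writes and reads respectively; the watermark and boundary
 * fields of dma_rwctrl are filled in below.
 */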
17351
17352 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
17353
17354 if (tg3_flag(tp, 57765_PLUS))
17355 goto out;
17356
17357 if (tg3_flag(tp, PCI_EXPRESS)) {
17358 /* DMA read watermark not used on PCIE */
17359 tp->dma_rwctrl |= 0x00180000;
17360 } else if (!tg3_flag(tp, PCIX_MODE)) {
17361 if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
17362 tg3_asic_rev(tp) == ASIC_REV_5750)
17363 tp->dma_rwctrl |= 0x003f0000;
17364 else
17365 tp->dma_rwctrl |= 0x003f000f;
17366 } else {
17367 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
17368 tg3_asic_rev(tp) == ASIC_REV_5704) {
17369 u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
17370 u32 read_water = 0x7;
17371
17372 /* If the 5704 is behind the EPB bridge, we can
17373 * do the less restrictive ONE_DMA workaround for
17374 * better performance.
17375 */
17376 if (tg3_flag(tp, 40BIT_DMA_BUG) &&
17377 tg3_asic_rev(tp) == ASIC_REV_5704)
17378 tp->dma_rwctrl |= 0x8000;
17379 else if (ccval == 0x6 || ccval == 0x7)
17380 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
17381
17382 if (tg3_asic_rev(tp) == ASIC_REV_5703)
17383 read_water = 4;
17384 /* Set bit 23 to enable PCIX hw bug fix */
17385 tp->dma_rwctrl |=
17386 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
17387 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
17388 (1 << 23);
17389 } else if (tg3_asic_rev(tp) == ASIC_REV_5780) {
17390 /* 5780 always in PCIX mode */
17391 tp->dma_rwctrl |= 0x00144000;
17392 } else if (tg3_asic_rev(tp) == ASIC_REV_5714) {
17393 /* 5714 always in PCIX mode */
17394 tp->dma_rwctrl |= 0x00148000;
17395 } else {
17396 tp->dma_rwctrl |= 0x001b000f;
17397 }
17398 }
17399 if (tg3_flag(tp, ONE_DMA_AT_ONCE))
17400 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
17401
17402 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
17403 tg3_asic_rev(tp) == ASIC_REV_5704)
17404 tp->dma_rwctrl &= 0xfffffff0;
17405
17406 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
17407 tg3_asic_rev(tp) == ASIC_REV_5701) {
17408 /* Remove this if it causes problems for some boards. */
17409 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
17410
17411 /* On 5700/5701 chips, we need to set this bit.
17412 * Otherwise the chip will issue cacheline transactions
17413 * to streamable DMA memory without all of the byte
17414 * enables turned on. This is an error on several
17415 * RISC PCI controllers, in particular sparc64.
17416 *
17417 * On 5703/5704 chips, this bit has been reassigned
17418 * a different meaning. In particular, it is used
17419 * on those chips to enable a PCI-X workaround.
17420 */
17421 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
17422 }
17423
17424 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17425
17426
17427 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
17428 tg3_asic_rev(tp) != ASIC_REV_5701)
17429 goto out;
17430
17431 /* It is best to perform the DMA test with maximum write burst size
17432 * to expose the 5700/5701 write DMA bug.
17433 */
17434 saved_dma_rwctrl = tp->dma_rwctrl;
17435 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17436 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17437
17438 while (1) {
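/* Each pass fills the buffer with a known pattern, DMAs it to the
 * chip and back, then verifies it. On corruption the write boundary
 * is tightened to 16 bytes and the pass is retried; a clean pass, or
 * corruption with an already-tight boundary, ends the loop.
 */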
17439 u32 *p = buf, i;
17440
17441 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
17442 p[i] = i;
17443
17444 /* Send the buffer to the chip. */
17445 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, true);
17446 if (ret) {
17447 dev_err(&tp->pdev->dev,
17448 "%s: Buffer write failed. err = %d\n",
17449 __func__, ret);
17450 break;
17451 }
17452
17453 /* Now read it back. */
17454 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, false);
17455 if (ret) {
17456 dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
17457 "err = %d\n", __func__, ret);
17458 break;
17459 }
17460
17461 /* Verify it. */
17462 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
17463 if (p[i] == i)
17464 continue;
17465
17466 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
17467 DMA_RWCTRL_WRITE_BNDRY_16) {
17468 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17469 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
17470 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17471 break;
17472 } else {
17473 dev_err(&tp->pdev->dev,
17474 "%s: Buffer corrupted on read back! "
17475 "(%d != %d)\n", __func__, p[i], i);
17476 ret = -ENODEV;
17477 goto out;
17478 }
17479 }
17480
17481 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
17482 /* Success. */
17483 ret = 0;
17484 break;
17485 }
17486 }
17487 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
17488 DMA_RWCTRL_WRITE_BNDRY_16) {
17489 /* DMA test passed without adjusting the DMA boundary;
17490 * now look for chipsets that are known to expose the
17491 * DMA bug without failing the test.
17492 */
17493 if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
17494 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17495 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
17496 } else {
17497 /* Safe to use the calculated DMA boundary. */
17498 tp->dma_rwctrl = saved_dma_rwctrl;
17499 }
17500
17501 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17502 }
17503
17504 out:
17505 dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
17506 out_nofree:
17507 return ret;
17508 }
17509
17510 static void tg3_init_bufmgr_config(struct tg3 *tp)
17511 {
17512 if (tg3_flag(tp, 57765_PLUS)) {
17513 tp->bufmgr_config.mbuf_read_dma_low_water =
17514 DEFAULT_MB_RDMA_LOW_WATER_5705;
17515 tp->bufmgr_config.mbuf_mac_rx_low_water =
17516 DEFAULT_MB_MACRX_LOW_WATER_57765;
17517 tp->bufmgr_config.mbuf_high_water =
17518 DEFAULT_MB_HIGH_WATER_57765;
17519
17520 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17521 DEFAULT_MB_RDMA_LOW_WATER_5705;
17522 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17523 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
17524 tp->bufmgr_config.mbuf_high_water_jumbo =
17525 DEFAULT_MB_HIGH_WATER_JUMBO_57765;
17526 } else if (tg3_flag(tp, 5705_PLUS)) {
17527 tp->bufmgr_config.mbuf_read_dma_low_water =
17528 DEFAULT_MB_RDMA_LOW_WATER_5705;
17529 tp->bufmgr_config.mbuf_mac_rx_low_water =
17530 DEFAULT_MB_MACRX_LOW_WATER_5705;
17531 tp->bufmgr_config.mbuf_high_water =
17532 DEFAULT_MB_HIGH_WATER_5705;
17533 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
17534 tp->bufmgr_config.mbuf_mac_rx_low_water =
17535 DEFAULT_MB_MACRX_LOW_WATER_5906;
17536 tp->bufmgr_config.mbuf_high_water =
17537 DEFAULT_MB_HIGH_WATER_5906;
17538 }
17539
17540 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17541 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
17542 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17543 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
17544 tp->bufmgr_config.mbuf_high_water_jumbo =
17545 DEFAULT_MB_HIGH_WATER_JUMBO_5780;
17546 } else {
17547 tp->bufmgr_config.mbuf_read_dma_low_water =
17548 DEFAULT_MB_RDMA_LOW_WATER;
17549 tp->bufmgr_config.mbuf_mac_rx_low_water =
17550 DEFAULT_MB_MACRX_LOW_WATER;
17551 tp->bufmgr_config.mbuf_high_water =
17552 DEFAULT_MB_HIGH_WATER;
17553
17554 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17555 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
17556 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17557 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
17558 tp->bufmgr_config.mbuf_high_water_jumbo =
17559 DEFAULT_MB_HIGH_WATER_JUMBO;
17560 }
17561
17562 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
17563 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
17564 }
17565
17566 static char *tg3_phy_string(struct tg3 *tp)
17567 {
17568 switch (tp->phy_id & TG3_PHY_ID_MASK) {
17569 case TG3_PHY_ID_BCM5400: return "5400";
17570 case TG3_PHY_ID_BCM5401: return "5401";
17571 case TG3_PHY_ID_BCM5411: return "5411";
17572 case TG3_PHY_ID_BCM5701: return "5701";
17573 case TG3_PHY_ID_BCM5703: return "5703";
17574 case TG3_PHY_ID_BCM5704: return "5704";
17575 case TG3_PHY_ID_BCM5705: return "5705";
17576 case TG3_PHY_ID_BCM5750: return "5750";
17577 case TG3_PHY_ID_BCM5752: return "5752";
17578 case TG3_PHY_ID_BCM5714: return "5714";
17579 case TG3_PHY_ID_BCM5780: return "5780";
17580 case TG3_PHY_ID_BCM5755: return "5755";
17581 case TG3_PHY_ID_BCM5787: return "5787";
17582 case TG3_PHY_ID_BCM5784: return "5784";
17583 case TG3_PHY_ID_BCM5756: return "5722/5756";
17584 case TG3_PHY_ID_BCM5906: return "5906";
17585 case TG3_PHY_ID_BCM5761: return "5761";
17586 case TG3_PHY_ID_BCM5718C: return "5718C";
17587 case TG3_PHY_ID_BCM5718S: return "5718S";
17588 case TG3_PHY_ID_BCM57765: return "57765";
17589 case TG3_PHY_ID_BCM5719C: return "5719C";
17590 case TG3_PHY_ID_BCM5720C: return "5720C";
17591 case TG3_PHY_ID_BCM5762: return "5762C";
17592 case TG3_PHY_ID_BCM8002: return "8002/serdes";
17593 case 0: return "serdes";
17594 default: return "unknown";
17595 }
17596 }
17597
17598 static char *tg3_bus_string(struct tg3 *tp, char *str)
17599 {
17600 if (tg3_flag(tp, PCI_EXPRESS)) {
17601 strcpy(str, "PCI Express");
17602 return str;
17603 } else if (tg3_flag(tp, PCIX_MODE)) {
17604 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
17605
17606 strcpy(str, "PCIX:");
17607
17608 if ((clock_ctrl == 7) ||
17609 ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
17610 GRC_MISC_CFG_BOARD_ID_5704CIOBE))
17611 strcat(str, "133MHz");
17612 else if (clock_ctrl == 0)
17613 strcat(str, "33MHz");
17614 else if (clock_ctrl == 2)
17615 strcat(str, "50MHz");
17616 else if (clock_ctrl == 4)
17617 strcat(str, "66MHz");
17618 else if (clock_ctrl == 6)
17619 strcat(str, "100MHz");
17620 } else {
17621 strcpy(str, "PCI:");
17622 if (tg3_flag(tp, PCI_HIGH_SPEED))
17623 strcat(str, "66MHz");
17624 else
17625 strcat(str, "33MHz");
17626 }
17627 if (tg3_flag(tp, PCI_32BIT))
17628 strcat(str, ":32-bit");
17629 else
17630 strcat(str, ":64-bit");
17631 return str;
17632 }
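/* Illustrative outputs: "PCI Express", "PCIX:133MHz:64-bit",
 * "PCI:66MHz:32-bit". tg3_init_one() passes a char str[40], which
 * comfortably holds the longest combination.
 */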
17633
17634 static void tg3_init_coal(struct tg3 *tp)
17635 {
17636 struct ethtool_coalesce *ec = &tp->coal;
17637
17638 memset(ec, 0, sizeof(*ec));
17639 ec->cmd = ETHTOOL_GCOALESCE;
17640 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
17641 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
17642 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
17643 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
17644 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
17645 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
17646 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
17647 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
17648 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
17649
17650 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
17651 HOSTCC_MODE_CLRTICK_TXBD)) {
17652 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
17653 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
17654 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
17655 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
17656 }
17657
17658 if (tg3_flag(tp, 5705_PLUS)) {
17659 ec->rx_coalesce_usecs_irq = 0;
17660 ec->tx_coalesce_usecs_irq = 0;
17661 ec->stats_block_coalesce_usecs = 0;
17662 }
17663 }
17664
17665 static int tg3_init_one(struct pci_dev *pdev,
17666 const struct pci_device_id *ent)
17667 {
17668 struct net_device *dev;
17669 struct tg3 *tp;
17670 int i, err;
17671 u32 sndmbx, rcvmbx, intmbx;
17672 char str[40];
17673 u64 dma_mask, persist_dma_mask;
17674 netdev_features_t features = 0;
17675 u8 addr[ETH_ALEN] __aligned(2);
17676
17677 err = pci_enable_device(pdev);
17678 if (err) {
17679 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
17680 return err;
17681 }
17682
17683 err = pci_request_regions(pdev, DRV_MODULE_NAME);
17684 if (err) {
17685 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
17686 goto err_out_disable_pdev;
17687 }
17688
17689 pci_set_master(pdev);
17690
17691 dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
17692 if (!dev) {
17693 err = -ENOMEM;
17694 goto err_out_free_res;
17695 }
17696
17697 SET_NETDEV_DEV(dev, &pdev->dev);
17698
17699 tp = netdev_priv(dev);
17700 tp->pdev = pdev;
17701 tp->dev = dev;
17702 tp->rx_mode = TG3_DEF_RX_MODE;
17703 tp->tx_mode = TG3_DEF_TX_MODE;
17704 tp->irq_sync = 1;
17705 tp->pcierr_recovery = false;
17706
17707 if (tg3_debug > 0)
17708 tp->msg_enable = tg3_debug;
17709 else
17710 tp->msg_enable = TG3_DEF_MSG_ENABLE;
17711
17712 if (pdev_is_ssb_gige_core(pdev)) {
17713 tg3_flag_set(tp, IS_SSB_CORE);
17714 if (ssb_gige_must_flush_posted_writes(pdev))
17715 tg3_flag_set(tp, FLUSH_POSTED_WRITES);
17716 if (ssb_gige_one_dma_at_once(pdev))
17717 tg3_flag_set(tp, ONE_DMA_AT_ONCE);
17718 if (ssb_gige_have_roboswitch(pdev)) {
17719 tg3_flag_set(tp, USE_PHYLIB);
17720 tg3_flag_set(tp, ROBOSWITCH);
17721 }
17722 if (ssb_gige_is_rgmii(pdev))
17723 tg3_flag_set(tp, RGMII_MODE);
17724 }
17725
17726 /* The word/byte swap controls here govern register access byte
17727 * swapping. DMA data byte swapping is controlled in the GRC_MODE
17728 * setting below.
17729 */
17730 tp->misc_host_ctrl =
17731 MISC_HOST_CTRL_MASK_PCI_INT |
17732 MISC_HOST_CTRL_WORD_SWAP |
17733 MISC_HOST_CTRL_INDIR_ACCESS |
17734 MISC_HOST_CTRL_PCISTATE_RW;
17735
17736 /* The NONFRM (non-frame) byte/word swap controls take effect
17737 * on descriptor entries, anything which isn't packet data.
17738 *
17739 * The StrongARM chips on the board (one for tx, one for rx)
17740 * are running in big-endian mode.
17741 */
17742 tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
17743 GRC_MODE_WSWAP_NONFRM_DATA);
17744 #ifdef __BIG_ENDIAN
17745 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
17746 #endif
17747 spin_lock_init(&tp->lock);
17748 spin_lock_init(&tp->indirect_lock);
17749 INIT_WORK(&tp->reset_task, tg3_reset_task);
17750
17751 tp->regs = pci_ioremap_bar(pdev, BAR_0);
17752 if (!tp->regs) {
17753 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
17754 err = -ENOMEM;
17755 goto err_out_free_dev;
17756 }
17757
17758 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
17759 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
17760 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
17761 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
17762 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
17763 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
17764 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
17765 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
17766 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
17767 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
17768 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
17769 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
17770 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
17771 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
17772 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787) {
17773 tg3_flag_set(tp, ENABLE_APE);
17774 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
17775 if (!tp->aperegs) {
17776 dev_err(&pdev->dev,
17777 "Cannot map APE registers, aborting\n");
17778 err = -ENOMEM;
17779 goto err_out_iounmap;
17780 }
17781 }
17782
17783 tp->rx_pending = TG3_DEF_RX_RING_PENDING;
17784 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
17785
17786 dev->ethtool_ops = &tg3_ethtool_ops;
17787 dev->watchdog_timeo = TG3_TX_TIMEOUT;
17788 dev->netdev_ops = &tg3_netdev_ops;
17789 dev->irq = pdev->irq;
17790
17791 err = tg3_get_invariants(tp, ent);
17792 if (err) {
17793 dev_err(&pdev->dev,
17794 "Problem fetching invariants of chip, aborting\n");
17795 goto err_out_apeunmap;
17796 }
17797
17798 /* The EPB bridge inside 5714, 5715, and 5780 and any
17799 * device behind the EPB cannot support DMA addresses > 40-bit.
17800 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
17801 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
17802 * do DMA address check in __tg3_start_xmit().
17803 */
17804 if (tg3_flag(tp, IS_5788))
17805 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
17806 else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
17807 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
17808 #ifdef CONFIG_HIGHMEM
17809 dma_mask = DMA_BIT_MASK(64);
17810 #endif
17811 } else
17812 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
17813
17814 /* Configure DMA attributes. */
17815 if (dma_mask > DMA_BIT_MASK(32)) {
17816 err = dma_set_mask(&pdev->dev, dma_mask);
17817 if (!err) {
17818 features |= NETIF_F_HIGHDMA;
17819 err = dma_set_coherent_mask(&pdev->dev,
17820 persist_dma_mask);
17821 if (err < 0) {
17822 dev_err(&pdev->dev, "Unable to obtain 64 bit "
17823 "DMA for consistent allocations\n");
17824 goto err_out_apeunmap;
17825 }
17826 }
17827 }
17828 if (err || dma_mask == DMA_BIT_MASK(32)) {
17829 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
17830 if (err) {
17831 dev_err(&pdev->dev,
17832 "No usable DMA configuration, aborting\n");
17833 goto err_out_apeunmap;
17834 }
17835 }
17836
17837 tg3_init_bufmgr_config(tp);
17838
17839 /* 5700 B0 chips do not support checksumming correctly due
17840 * to hardware bugs.
17841 */
17842 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5700_B0) {
17843 features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
17844
17845 if (tg3_flag(tp, 5755_PLUS))
17846 features |= NETIF_F_IPV6_CSUM;
17847 }
17848
17849 /* TSO is on by default on chips that support hardware TSO.
17850 * Firmware TSO on older chips gives lower performance, so it
17851 * is off by default, but can be enabled using ethtool.
17852 */
17853 if ((tg3_flag(tp, HW_TSO_1) ||
17854 tg3_flag(tp, HW_TSO_2) ||
17855 tg3_flag(tp, HW_TSO_3)) &&
17856 (features & NETIF_F_IP_CSUM))
17857 features |= NETIF_F_TSO;
17858 if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
17859 if (features & NETIF_F_IPV6_CSUM)
17860 features |= NETIF_F_TSO6;
17861 if (tg3_flag(tp, HW_TSO_3) ||
17862 tg3_asic_rev(tp) == ASIC_REV_5761 ||
17863 (tg3_asic_rev(tp) == ASIC_REV_5784 &&
17864 tg3_chip_rev(tp) != CHIPREV_5784_AX) ||
17865 tg3_asic_rev(tp) == ASIC_REV_5785 ||
17866 tg3_asic_rev(tp) == ASIC_REV_57780)
17867 features |= NETIF_F_TSO_ECN;
17868 }
17869
17870 dev->features |= features | NETIF_F_HW_VLAN_CTAG_TX |
17871 NETIF_F_HW_VLAN_CTAG_RX;
17872 dev->vlan_features |= features;
17873
17874 /*
17875 * Add loopback capability only for a subset of devices that support
17876 * MAC-LOOPBACK. Eventually this needs to be enhanced to allow INT-PHY
17877 * loopback for the remaining devices.
17878 */
17879 if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
17880 !tg3_flag(tp, CPMU_PRESENT))
17881 /* Add the loopback capability */
17882 features |= NETIF_F_LOOPBACK;
17883
17884 dev->hw_features |= features;
17885 dev->priv_flags |= IFF_UNICAST_FLT;
17886
17887 /* MTU range: 60 - 9000 or 1500, depending on hardware */
17888 dev->min_mtu = TG3_MIN_MTU;
17889 dev->max_mtu = TG3_MAX_MTU(tp);
17890
17891 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 &&
17892 !tg3_flag(tp, TSO_CAPABLE) &&
17893 !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
17894 tg3_flag_set(tp, MAX_RXPEND_64);
17895 tp->rx_pending = 63;
17896 }
17897
17898 err = tg3_get_device_address(tp, addr);
17899 if (err) {
17900 dev_err(&pdev->dev,
17901 "Could not obtain valid ethernet address, aborting\n");
17902 goto err_out_apeunmap;
17903 }
17904 eth_hw_addr_set(dev, addr);
17905
17906 intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
17907 rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
17908 sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
17909 for (i = 0; i < tp->irq_max; i++) {
17910 struct tg3_napi *tnapi = &tp->napi[i];
17911
17912 tnapi->tp = tp;
17913 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
17914
17915 tnapi->int_mbox = intmbx;
17916 intmbx += 0x8;
17917
17918 tnapi->consmbox = rcvmbx;
17919 tnapi->prodmbox = sndmbx;
17920
17921 if (i)
17922 tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
17923 else
17924 tnapi->coal_now = HOSTCC_MODE_NOW;
17925
17926 if (!tg3_flag(tp, SUPPORT_MSIX))
17927 break;
17928
17929 /*
17930 * If we support MSIX, we'll be using RSS. If we're using
17931 * RSS, the first vector only handles link interrupts and the
17932 * remaining vectors handle rx and tx interrupts. Reuse the
17933 * mailbox values for the next iteration. The values we setup
17934 * above are still useful for the single vectored mode.
17935 */
17936 if (!i)
17937 continue;
17938
17939 rcvmbx += 0x8;
17940
17941 if (sndmbx & 0x4)
17942 sndmbx -= 0x4;
17943 else
17944 sndmbx += 0xc;
17945 }
17946
17947 /*
17948 * Reset chip in case UNDI or EFI driver did not shut it down.
17949 * The DMA self test will enable WDMAC and we'll see (spurious)
17950 * pending DMA on the PCI bus at that point.
17951 */
17952 if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
17953 (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
17954 tg3_full_lock(tp, 0);
17955 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
17956 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
17957 tg3_full_unlock(tp);
17958 }
17959
17960 err = tg3_test_dma(tp);
17961 if (err) {
17962 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
17963 goto err_out_apeunmap;
17964 }
17965
17966 tg3_init_coal(tp);
17967
17968 pci_set_drvdata(pdev, dev);
17969
17970 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
17971 tg3_asic_rev(tp) == ASIC_REV_5720 ||
17972 tg3_asic_rev(tp) == ASIC_REV_5762)
17973 tg3_flag_set(tp, PTP_CAPABLE);
17974
17975 tg3_timer_init(tp);
17976
17977 tg3_carrier_off(tp);
17978
17979 err = register_netdev(dev);
17980 if (err) {
17981 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
17982 goto err_out_apeunmap;
17983 }
17984
17985 if (tg3_flag(tp, PTP_CAPABLE)) {
17986 tg3_ptp_init(tp);
17987 tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
17988 &tp->pdev->dev);
17989 if (IS_ERR(tp->ptp_clock))
17990 tp->ptp_clock = NULL;
17991 }
17992
17993 netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
17994 tp->board_part_number,
17995 tg3_chip_rev_id(tp),
17996 tg3_bus_string(tp, str),
17997 dev->dev_addr);
17998
17999 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) {
18000 char *ethtype;
18001
18002 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
18003 ethtype = "10/100Base-TX";
18004 else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
18005 ethtype = "1000Base-SX";
18006 else
18007 ethtype = "10/100/1000Base-T";
18008
18009 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
18010 "(WireSpeed[%d], EEE[%d])\n",
18011 tg3_phy_string(tp), ethtype,
18012 (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
18013 (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
18014 }
18015
18016 netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
18017 (dev->features & NETIF_F_RXCSUM) != 0,
18018 tg3_flag(tp, USE_LINKCHG_REG) != 0,
18019 (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
18020 tg3_flag(tp, ENABLE_ASF) != 0,
18021 tg3_flag(tp, TSO_CAPABLE) != 0);
18022 netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
18023 tp->dma_rwctrl,
18024 pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
18025 ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
18026
18027 pci_save_state(pdev);
18028
18029 return 0;
18030
18031 err_out_apeunmap:
18032 if (tp->aperegs) {
18033 iounmap(tp->aperegs);
18034 tp->aperegs = NULL;
18035 }
18036
18037 err_out_iounmap:
18038 if (tp->regs) {
18039 iounmap(tp->regs);
18040 tp->regs = NULL;
18041 }
18042
18043 err_out_free_dev:
18044 free_netdev(dev);
18045
18046 err_out_free_res:
18047 pci_release_regions(pdev);
18048
18049 err_out_disable_pdev:
18050 if (pci_is_enabled(pdev))
18051 pci_disable_device(pdev);
18052 return err;
18053 }
18054
18055 static void tg3_remove_one(struct pci_dev *pdev)
18056 {
18057 struct net_device *dev = pci_get_drvdata(pdev);
18058
18059 if (dev) {
18060 struct tg3 *tp = netdev_priv(dev);
18061
18062 tg3_ptp_fini(tp);
18063
18064 release_firmware(tp->fw);
18065
18066 tg3_reset_task_cancel(tp);
18067
18068 if (tg3_flag(tp, USE_PHYLIB)) {
18069 tg3_phy_fini(tp);
18070 tg3_mdio_fini(tp);
18071 }
18072
18073 unregister_netdev(dev);
18074 if (tp->aperegs) {
18075 iounmap(tp->aperegs);
18076 tp->aperegs = NULL;
18077 }
18078 if (tp->regs) {
18079 iounmap(tp->regs);
18080 tp->regs = NULL;
18081 }
18082 free_netdev(dev);
18083 pci_release_regions(pdev);
18084 pci_disable_device(pdev);
18085 }
18086 }
18087
18088 #ifdef CONFIG_PM_SLEEP
18089 static int tg3_suspend(struct device *device)
18090 {
18091 struct net_device *dev = dev_get_drvdata(device);
18092 struct tg3 *tp = netdev_priv(dev);
18093 int err = 0;
18094
18095 rtnl_lock();
18096
18097 if (!netif_running(dev))
18098 goto unlock;
18099
18100 tg3_reset_task_cancel(tp);
18101 tg3_phy_stop(tp);
18102 tg3_netif_stop(tp);
18103
18104 tg3_timer_stop(tp);
18105
18106 tg3_full_lock(tp, 1);
18107 tg3_disable_ints(tp);
18108 tg3_full_unlock(tp);
18109
18110 netif_device_detach(dev);
18111
18112 tg3_full_lock(tp, 0);
18113 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
18114 tg3_flag_clear(tp, INIT_COMPLETE);
18115 tg3_full_unlock(tp);
18116
18117 err = tg3_power_down_prepare(tp);
18118 if (err) {
18119 int err2;
18120
18121 tg3_full_lock(tp, 0);
18122
18123 tg3_flag_set(tp, INIT_COMPLETE);
18124 err2 = tg3_restart_hw(tp, true);
18125 if (err2)
18126 goto out;
18127
18128 tg3_timer_start(tp);
18129
18130 netif_device_attach(dev);
18131 tg3_netif_start(tp);
18132
18133 out:
18134 tg3_full_unlock(tp);
18135
18136 if (!err2)
18137 tg3_phy_start(tp);
18138 }
18139
18140 unlock:
18141 rtnl_unlock();
18142 return err;
18143 }
18144
18145 static int tg3_resume(struct device *device)
18146 {
18147 struct net_device *dev = dev_get_drvdata(device);
18148 struct tg3 *tp = netdev_priv(dev);
18149 int err = 0;
18150
18151 rtnl_lock();
18152
18153 if (!netif_running(dev))
18154 goto unlock;
18155
18156 netif_device_attach(dev);
18157
18158 tg3_full_lock(tp, 0);
18159
18160 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
18161
18162 tg3_flag_set(tp, INIT_COMPLETE);
18163 err = tg3_restart_hw(tp,
18164 !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN));
18165 if (err)
18166 goto out;
18167
18168 tg3_timer_start(tp);
18169
18170 tg3_netif_start(tp);
18171
18172 out:
18173 tg3_full_unlock(tp);
18174
18175 if (!err)
18176 tg3_phy_start(tp);
18177
18178 unlock:
18179 rtnl_unlock();
18180 return err;
18181 }
18182 #endif /* CONFIG_PM_SLEEP */
18183
18184 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
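/* SIMPLE_DEV_PM_OPS() expands to a struct dev_pm_ops that routes the
 * suspend/resume, freeze/thaw and poweroff/restore hooks to
 * tg3_suspend()/tg3_resume(); with CONFIG_PM_SLEEP disabled it
 * expands to an empty table, hence the #ifdef guard above.
 */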
18185
18186 static void tg3_shutdown(struct pci_dev *pdev)
18187 {
18188 struct net_device *dev = pci_get_drvdata(pdev);
18189 struct tg3 *tp = netdev_priv(dev);
18190
18191 tg3_reset_task_cancel(tp);
18192
18193 rtnl_lock();
18194
18195 netif_device_detach(dev);
18196
18197 if (netif_running(dev))
18198 dev_close(dev);
18199
18200 if (system_state == SYSTEM_POWER_OFF)
18201 tg3_power_down(tp);
18202
18203 rtnl_unlock();
18204
18205 pci_disable_device(pdev);
18206 }
18207
18208 /**
18209 * tg3_io_error_detected - called when PCI error is detected
18210 * @pdev: Pointer to PCI device
18211 * @state: The current pci connection state
18212 *
18213 * This function is called after a PCI bus error affecting
18214 * this device has been detected.
18215 */
18216 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
18217 pci_channel_state_t state)
18218 {
18219 struct net_device *netdev = pci_get_drvdata(pdev);
18220 struct tg3 *tp = netdev_priv(netdev);
18221 pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
18222
18223 netdev_info(netdev, "PCI I/O error detected\n");
18224
18225 /* Want to make sure that the reset task doesn't run */
18226 tg3_reset_task_cancel(tp);
18227
18228 rtnl_lock();
18229
18230 /* Could be a second call, or maybe we don't have a netdev yet */
18231 if (!netdev || tp->pcierr_recovery || !netif_running(netdev))
18232 goto done;
18233
18234 /* We needn't recover from permanent error */
18235 if (state == pci_channel_io_frozen)
18236 tp->pcierr_recovery = true;
18237
18238 tg3_phy_stop(tp);
18239
18240 tg3_netif_stop(tp);
18241
18242 tg3_timer_stop(tp);
18243
18244 netif_device_detach(netdev);
18245
18246 /* Clean up software state, even if MMIO is blocked */
18247 tg3_full_lock(tp, 0);
18248 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
18249 tg3_full_unlock(tp);
18250
18251 done:
18252 if (state == pci_channel_io_perm_failure) {
18253 if (netdev) {
18254 tg3_napi_enable(tp);
18255 dev_close(netdev);
18256 }
18257 err = PCI_ERS_RESULT_DISCONNECT;
18258 } else {
18259 pci_disable_device(pdev);
18260 }
18261
18262 rtnl_unlock();
18263
18264 return err;
18265 }
18266
18267 /**
18268 * tg3_io_slot_reset - called after the pci bus has been reset.
18269 * @pdev: Pointer to PCI device
18270 *
18271 * Restart the card from scratch, as if from a cold-boot.
18272 * At this point, the card has experienced a hard reset,
18273 * followed by fixups by BIOS, and has its config space
18274 * set up identically to what it was at cold boot.
18275 */
18276 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
18277 {
18278 struct net_device *netdev = pci_get_drvdata(pdev);
18279 struct tg3 *tp = netdev_priv(netdev);
18280 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
18281 int err;
18282
18283 rtnl_lock();
18284
18285 if (pci_enable_device(pdev)) {
18286 dev_err(&pdev->dev,
18287 "Cannot re-enable PCI device after reset.\n");
18288 goto done;
18289 }
18290
18291 pci_set_master(pdev);
18292 pci_restore_state(pdev);
18293 pci_save_state(pdev);
18294
18295 if (!netdev || !netif_running(netdev)) {
18296 rc = PCI_ERS_RESULT_RECOVERED;
18297 goto done;
18298 }
18299
18300 err = tg3_power_up(tp);
18301 if (err)
18302 goto done;
18303
18304 rc = PCI_ERS_RESULT_RECOVERED;
18305
18306 done:
18307 if (rc != PCI_ERS_RESULT_RECOVERED && netdev && netif_running(netdev)) {
18308 tg3_napi_enable(tp);
18309 dev_close(netdev);
18310 }
18311 rtnl_unlock();
18312
18313 return rc;
18314 }
18315
18316 /**
18317 * tg3_io_resume - called when traffic can start flowing again.
18318 * @pdev: Pointer to PCI device
18319 *
18320 * This callback is called when the error recovery driver tells
18321 * us that it's OK to resume normal operation.
18322 */
18323 static void tg3_io_resume(struct pci_dev *pdev)
18324 {
18325 struct net_device *netdev = pci_get_drvdata(pdev);
18326 struct tg3 *tp = netdev_priv(netdev);
18327 int err;
18328
18329 rtnl_lock();
18330
18331 if (!netdev || !netif_running(netdev))
18332 goto done;
18333
18334 tg3_full_lock(tp, 0);
18335 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
18336 tg3_flag_set(tp, INIT_COMPLETE);
18337 err = tg3_restart_hw(tp, true);
18338 if (err) {
18339 tg3_full_unlock(tp);
18340 netdev_err(netdev, "Cannot restart hardware after reset.\n");
18341 goto done;
18342 }
18343
18344 netif_device_attach(netdev);
18345
18346 tg3_timer_start(tp);
18347
18348 tg3_netif_start(tp);
18349
18350 tg3_full_unlock(tp);
18351
18352 tg3_phy_start(tp);
18353
18354 done:
18355 tp->pcierr_recovery = false;
18356 rtnl_unlock();
18357 }
18358
18359 static const struct pci_error_handlers tg3_err_handler = {
18360 .error_detected = tg3_io_error_detected,
18361 .slot_reset = tg3_io_slot_reset,
18362 .resume = tg3_io_resume
18363 };
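/* Recovery flow as driven by the PCI core: .error_detected runs
 * first; answering PCI_ERS_RESULT_NEED_RESET leads to .slot_reset
 * after the slot/link reset, and .resume is invoked once recovery
 * has succeeded.
 */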
18364
18365 static struct pci_driver tg3_driver = {
18366 .name = DRV_MODULE_NAME,
18367 .id_table = tg3_pci_tbl,
18368 .probe = tg3_init_one,
18369 .remove = tg3_remove_one,
18370 .err_handler = &tg3_err_handler,
18371 .driver.pm = &tg3_pm_ops,
18372 .shutdown = tg3_shutdown,
18373 };
18374
18375 module_pci_driver(tg3_driver);
18376