1 #include <linux/kernel.h>
2 #include <linux/init.h>
3 #include <linux/types.h>
4 #include <linux/module.h>
5 #include <linux/list.h>
6 #include <linux/pci.h>
7 #include <linux/dma-mapping.h>
8 #include <linux/pagemap.h>
9 #include <linux/sched.h>
10 #include <linux/dmapool.h>
11 #include <linux/mempool.h>
12 #include <linux/spinlock.h>
13 #include <linux/kthread.h>
14 #include <linux/interrupt.h>
15 #include <linux/errno.h>
16 #include <linux/ioport.h>
17 #include <linux/in.h>
18 #include <linux/ip.h>
19 #include <linux/ipv6.h>
20 #include <net/ipv6.h>
21 #include <linux/tcp.h>
22 #include <linux/udp.h>
23 #include <linux/if_arp.h>
24 #include <linux/if_ether.h>
25 #include <linux/netdevice.h>
26 #include <linux/etherdevice.h>
27 #include <linux/ethtool.h>
28 #include <linux/skbuff.h>
29 #include <linux/rtnetlink.h>
30 #include <linux/if_vlan.h>
31 #include <linux/delay.h>
32 #include <linux/mm.h>
33 #include <linux/vmalloc.h>
34 
35 
36 #include "qlge.h"
37 
/* Names for the ethtool self-test results (ETH_SS_TEST string set).
 * Must stay in step with the tests run by ql_self_test().
 */
static const char ql_gstrings_test[][ETH_GSTRING_LEN] = {
	"Loopback test  (offline)"
};
/* Number of entries in ql_gstrings_test, reported via get_sset_count. */
#define QLGE_TEST_LEN (sizeof(ql_gstrings_test) / ETH_GSTRING_LEN)
42 
ql_update_ring_coalescing(struct ql_adapter * qdev)43 static int ql_update_ring_coalescing(struct ql_adapter *qdev)
44 {
45 	int i, status = 0;
46 	struct rx_ring *rx_ring;
47 	struct cqicb *cqicb;
48 
49 	if (!netif_running(qdev->ndev))
50 		return status;
51 
52 	/* Skip the default queue, and update the outbound handler
53 	 * queues if they changed.
54 	 */
55 	cqicb = (struct cqicb *)&qdev->rx_ring[qdev->rss_ring_count];
56 	if (le16_to_cpu(cqicb->irq_delay) != qdev->tx_coalesce_usecs ||
57 		le16_to_cpu(cqicb->pkt_delay) !=
58 				qdev->tx_max_coalesced_frames) {
59 		for (i = qdev->rss_ring_count; i < qdev->rx_ring_count; i++) {
60 			rx_ring = &qdev->rx_ring[i];
61 			cqicb = (struct cqicb *)rx_ring;
62 			cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs);
63 			cqicb->pkt_delay =
64 			    cpu_to_le16(qdev->tx_max_coalesced_frames);
65 			cqicb->flags = FLAGS_LI;
66 			status = ql_write_cfg(qdev, cqicb, sizeof(*cqicb),
67 						CFG_LCQ, rx_ring->cq_id);
68 			if (status) {
69 				netif_err(qdev, ifup, qdev->ndev,
70 					  "Failed to load CQICB.\n");
71 				goto exit;
72 			}
73 		}
74 	}
75 
76 	/* Update the inbound (RSS) handler queues if they changed. */
77 	cqicb = (struct cqicb *)&qdev->rx_ring[0];
78 	if (le16_to_cpu(cqicb->irq_delay) != qdev->rx_coalesce_usecs ||
79 		le16_to_cpu(cqicb->pkt_delay) !=
80 					qdev->rx_max_coalesced_frames) {
81 		for (i = 0; i < qdev->rss_ring_count; i++, rx_ring++) {
82 			rx_ring = &qdev->rx_ring[i];
83 			cqicb = (struct cqicb *)rx_ring;
84 			cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs);
85 			cqicb->pkt_delay =
86 			    cpu_to_le16(qdev->rx_max_coalesced_frames);
87 			cqicb->flags = FLAGS_LI;
88 			status = ql_write_cfg(qdev, cqicb, sizeof(*cqicb),
89 						CFG_LCQ, rx_ring->cq_id);
90 			if (status) {
91 				netif_err(qdev, ifup, qdev->ndev,
92 					  "Failed to load CQICB.\n");
93 				goto exit;
94 			}
95 		}
96 	}
97 exit:
98 	return status;
99 }
100 
ql_update_stats(struct ql_adapter * qdev)101 static void ql_update_stats(struct ql_adapter *qdev)
102 {
103 	u32 i;
104 	u64 data;
105 	u64 *iter = &qdev->nic_stats.tx_pkts;
106 
107 	spin_lock(&qdev->stats_lock);
108 	if (ql_sem_spinlock(qdev, qdev->xg_sem_mask)) {
109 			netif_err(qdev, drv, qdev->ndev,
110 				  "Couldn't get xgmac sem.\n");
111 		goto quit;
112 	}
113 	/*
114 	 * Get TX statistics.
115 	 */
116 	for (i = 0x200; i < 0x280; i += 8) {
117 		if (ql_read_xgmac_reg64(qdev, i, &data)) {
118 			netif_err(qdev, drv, qdev->ndev,
119 				  "Error reading status register 0x%.04x.\n",
120 				  i);
121 			goto end;
122 		} else
123 			*iter = data;
124 		iter++;
125 	}
126 
127 	/*
128 	 * Get RX statistics.
129 	 */
130 	for (i = 0x300; i < 0x3d0; i += 8) {
131 		if (ql_read_xgmac_reg64(qdev, i, &data)) {
132 			netif_err(qdev, drv, qdev->ndev,
133 				  "Error reading status register 0x%.04x.\n",
134 				  i);
135 			goto end;
136 		} else
137 			*iter = data;
138 		iter++;
139 	}
140 
141 	/*
142 	 * Get Per-priority TX pause frame counter statistics.
143 	 */
144 	for (i = 0x500; i < 0x540; i += 8) {
145 		if (ql_read_xgmac_reg64(qdev, i, &data)) {
146 			netif_err(qdev, drv, qdev->ndev,
147 				  "Error reading status register 0x%.04x.\n",
148 				  i);
149 			goto end;
150 		} else
151 			*iter = data;
152 		iter++;
153 	}
154 
155 	/*
156 	 * Get Per-priority RX pause frame counter statistics.
157 	 */
158 	for (i = 0x568; i < 0x5a8; i += 8) {
159 		if (ql_read_xgmac_reg64(qdev, i, &data)) {
160 			netif_err(qdev, drv, qdev->ndev,
161 				  "Error reading status register 0x%.04x.\n",
162 				  i);
163 			goto end;
164 		} else
165 			*iter = data;
166 		iter++;
167 	}
168 
169 	/*
170 	 * Get RX NIC FIFO DROP statistics.
171 	 */
172 	if (ql_read_xgmac_reg64(qdev, 0x5b8, &data)) {
173 		netif_err(qdev, drv, qdev->ndev,
174 			  "Error reading status register 0x%.04x.\n", i);
175 		goto end;
176 	} else
177 		*iter = data;
178 end:
179 	ql_sem_unlock(qdev, qdev->xg_sem_mask);
180 quit:
181 	spin_unlock(&qdev->stats_lock);
182 
183 	QL_DUMP_STAT(qdev);
184 }
185 
/* Statistics names for the ETH_SS_STATS string set.  The order here
 * must match the order the values are emitted in ql_get_ethtool_stats()
 * (which in turn mirrors the layout of struct nic_stats).
 */
static char ql_stats_str_arr[][ETH_GSTRING_LEN] = {
	{"tx_pkts"},
	{"tx_bytes"},
	{"tx_mcast_pkts"},
	{"tx_bcast_pkts"},
	{"tx_ucast_pkts"},
	{"tx_ctl_pkts"},
	{"tx_pause_pkts"},
	{"tx_64_pkts"},
	{"tx_65_to_127_pkts"},
	{"tx_128_to_255_pkts"},
	{"tx_256_511_pkts"},
	{"tx_512_to_1023_pkts"},
	{"tx_1024_to_1518_pkts"},
	{"tx_1519_to_max_pkts"},
	{"tx_undersize_pkts"},
	{"tx_oversize_pkts"},
	{"rx_bytes"},
	{"rx_bytes_ok"},
	{"rx_pkts"},
	{"rx_pkts_ok"},
	{"rx_bcast_pkts"},
	{"rx_mcast_pkts"},
	{"rx_ucast_pkts"},
	{"rx_undersize_pkts"},
	{"rx_oversize_pkts"},
	{"rx_jabber_pkts"},
	{"rx_undersize_fcerr_pkts"},
	{"rx_drop_events"},
	{"rx_fcerr_pkts"},
	{"rx_align_err"},
	{"rx_symbol_err"},
	{"rx_mac_err"},
	{"rx_ctl_pkts"},
	{"rx_pause_pkts"},
	{"rx_64_pkts"},
	{"rx_65_to_127_pkts"},
	{"rx_128_255_pkts"},
	{"rx_256_511_pkts"},
	{"rx_512_to_1023_pkts"},
	{"rx_1024_to_1518_pkts"},
	{"rx_1519_to_max_pkts"},
	{"rx_len_err_pkts"},
	{"tx_cbfc_pause_frames0"},
	{"tx_cbfc_pause_frames1"},
	{"tx_cbfc_pause_frames2"},
	{"tx_cbfc_pause_frames3"},
	{"tx_cbfc_pause_frames4"},
	{"tx_cbfc_pause_frames5"},
	{"tx_cbfc_pause_frames6"},
	{"tx_cbfc_pause_frames7"},
	{"rx_cbfc_pause_frames0"},
	{"rx_cbfc_pause_frames1"},
	{"rx_cbfc_pause_frames2"},
	{"rx_cbfc_pause_frames3"},
	{"rx_cbfc_pause_frames4"},
	{"rx_cbfc_pause_frames5"},
	{"rx_cbfc_pause_frames6"},
	{"rx_cbfc_pause_frames7"},
	{"rx_nic_fifo_drop"},
};
247 
/* ethtool get_strings: copy out the names for the requested string set.
 * Only ETH_SS_STATS is populated here; other sets are left untouched.
 */
static void ql_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	if (stringset == ETH_SS_STATS)
		memcpy(buf, ql_stats_str_arr, sizeof(ql_stats_str_arr));
}
256 
ql_get_sset_count(struct net_device * dev,int sset)257 static int ql_get_sset_count(struct net_device *dev, int sset)
258 {
259 	switch (sset) {
260 	case ETH_SS_TEST:
261 		return QLGE_TEST_LEN;
262 	case ETH_SS_STATS:
263 		return ARRAY_SIZE(ql_stats_str_arr);
264 	default:
265 		return -EOPNOTSUPP;
266 	}
267 }
268 
/* ethtool get_ethtool_stats: refresh the hardware counters and copy
 * them into the caller's u64 array.  The emission order below must
 * match ql_stats_str_arr exactly — do not reorder these assignments.
 */
static void
ql_get_ethtool_stats(struct net_device *ndev,
		     struct ethtool_stats *stats, u64 *data)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	struct nic_stats *s = &qdev->nic_stats;

	/* Pull fresh counters from the XGMAC registers first. */
	ql_update_stats(qdev);

	*data++ = s->tx_pkts;
	*data++ = s->tx_bytes;
	*data++ = s->tx_mcast_pkts;
	*data++ = s->tx_bcast_pkts;
	*data++ = s->tx_ucast_pkts;
	*data++ = s->tx_ctl_pkts;
	*data++ = s->tx_pause_pkts;
	*data++ = s->tx_64_pkt;
	*data++ = s->tx_65_to_127_pkt;
	*data++ = s->tx_128_to_255_pkt;
	*data++ = s->tx_256_511_pkt;
	*data++ = s->tx_512_to_1023_pkt;
	*data++ = s->tx_1024_to_1518_pkt;
	*data++ = s->tx_1519_to_max_pkt;
	*data++ = s->tx_undersize_pkt;
	*data++ = s->tx_oversize_pkt;
	*data++ = s->rx_bytes;
	*data++ = s->rx_bytes_ok;
	*data++ = s->rx_pkts;
	*data++ = s->rx_pkts_ok;
	*data++ = s->rx_bcast_pkts;
	*data++ = s->rx_mcast_pkts;
	*data++ = s->rx_ucast_pkts;
	*data++ = s->rx_undersize_pkts;
	*data++ = s->rx_oversize_pkts;
	*data++ = s->rx_jabber_pkts;
	*data++ = s->rx_undersize_fcerr_pkts;
	*data++ = s->rx_drop_events;
	*data++ = s->rx_fcerr_pkts;
	*data++ = s->rx_align_err;
	*data++ = s->rx_symbol_err;
	*data++ = s->rx_mac_err;
	*data++ = s->rx_ctl_pkts;
	*data++ = s->rx_pause_pkts;
	*data++ = s->rx_64_pkts;
	*data++ = s->rx_65_to_127_pkts;
	*data++ = s->rx_128_255_pkts;
	*data++ = s->rx_256_511_pkts;
	*data++ = s->rx_512_to_1023_pkts;
	*data++ = s->rx_1024_to_1518_pkts;
	*data++ = s->rx_1519_to_max_pkts;
	*data++ = s->rx_len_err_pkts;
	*data++ = s->tx_cbfc_pause_frames0;
	*data++ = s->tx_cbfc_pause_frames1;
	*data++ = s->tx_cbfc_pause_frames2;
	*data++ = s->tx_cbfc_pause_frames3;
	*data++ = s->tx_cbfc_pause_frames4;
	*data++ = s->tx_cbfc_pause_frames5;
	*data++ = s->tx_cbfc_pause_frames6;
	*data++ = s->tx_cbfc_pause_frames7;
	*data++ = s->rx_cbfc_pause_frames0;
	*data++ = s->rx_cbfc_pause_frames1;
	*data++ = s->rx_cbfc_pause_frames2;
	*data++ = s->rx_cbfc_pause_frames3;
	*data++ = s->rx_cbfc_pause_frames4;
	*data++ = s->rx_cbfc_pause_frames5;
	*data++ = s->rx_cbfc_pause_frames6;
	*data++ = s->rx_cbfc_pause_frames7;
	*data++ = s->rx_nic_fifo_drop;
}
338 
ql_get_settings(struct net_device * ndev,struct ethtool_cmd * ecmd)339 static int ql_get_settings(struct net_device *ndev,
340 			      struct ethtool_cmd *ecmd)
341 {
342 	struct ql_adapter *qdev = netdev_priv(ndev);
343 
344 	ecmd->supported = SUPPORTED_10000baseT_Full;
345 	ecmd->advertising = ADVERTISED_10000baseT_Full;
346 	ecmd->autoneg = AUTONEG_ENABLE;
347 	ecmd->transceiver = XCVR_EXTERNAL;
348 	if ((qdev->link_status & STS_LINK_TYPE_MASK) ==
349 				STS_LINK_TYPE_10GBASET) {
350 		ecmd->supported |= (SUPPORTED_TP | SUPPORTED_Autoneg);
351 		ecmd->advertising |= (ADVERTISED_TP | ADVERTISED_Autoneg);
352 		ecmd->port = PORT_TP;
353 	} else {
354 		ecmd->supported |= SUPPORTED_FIBRE;
355 		ecmd->advertising |= ADVERTISED_FIBRE;
356 		ecmd->port = PORT_FIBRE;
357 	}
358 
359 	ethtool_cmd_speed_set(ecmd, SPEED_10000);
360 	ecmd->duplex = DUPLEX_FULL;
361 
362 	return 0;
363 }
364 
ql_get_drvinfo(struct net_device * ndev,struct ethtool_drvinfo * drvinfo)365 static void ql_get_drvinfo(struct net_device *ndev,
366 			   struct ethtool_drvinfo *drvinfo)
367 {
368 	struct ql_adapter *qdev = netdev_priv(ndev);
369 	strlcpy(drvinfo->driver, qlge_driver_name, sizeof(drvinfo->driver));
370 	strlcpy(drvinfo->version, qlge_driver_version,
371 		sizeof(drvinfo->version));
372 	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
373 		 "v%d.%d.%d",
374 		 (qdev->fw_rev_id & 0x00ff0000) >> 16,
375 		 (qdev->fw_rev_id & 0x0000ff00) >> 8,
376 		 (qdev->fw_rev_id & 0x000000ff));
377 	strlcpy(drvinfo->bus_info, pci_name(qdev->pdev),
378 		sizeof(drvinfo->bus_info));
379 	drvinfo->n_stats = 0;
380 	drvinfo->testinfo_len = 0;
381 	if (!test_bit(QL_FRC_COREDUMP, &qdev->flags))
382 		drvinfo->regdump_len = sizeof(struct ql_mpi_coredump);
383 	else
384 		drvinfo->regdump_len = sizeof(struct ql_reg_dump);
385 	drvinfo->eedump_len = 0;
386 }
387 
ql_get_wol(struct net_device * ndev,struct ethtool_wolinfo * wol)388 static void ql_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
389 {
390 	struct ql_adapter *qdev = netdev_priv(ndev);
391 	/* What we support. */
392 	wol->supported = WAKE_MAGIC;
393 	/* What we've currently got set. */
394 	wol->wolopts = qdev->wol;
395 }
396 
ql_set_wol(struct net_device * ndev,struct ethtool_wolinfo * wol)397 static int ql_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
398 {
399 	struct ql_adapter *qdev = netdev_priv(ndev);
400 	int status;
401 
402 	if (wol->wolopts & ~WAKE_MAGIC)
403 		return -EINVAL;
404 	qdev->wol = wol->wolopts;
405 
406 	netif_info(qdev, drv, qdev->ndev, "Set wol option 0x%x\n", qdev->wol);
407 	if (!qdev->wol) {
408 		u32 wol = 0;
409 		status = ql_mb_wol_mode(qdev, wol);
410 		netif_err(qdev, drv, qdev->ndev, "WOL %s (wol code 0x%x)\n",
411 			  status == 0 ? "cleared successfully" : "clear failed",
412 			  wol);
413 	}
414 
415 	return 0;
416 }
417 
ql_set_phys_id(struct net_device * ndev,enum ethtool_phys_id_state state)418 static int ql_set_phys_id(struct net_device *ndev,
419 			  enum ethtool_phys_id_state state)
420 
421 {
422 	struct ql_adapter *qdev = netdev_priv(ndev);
423 
424 	switch (state) {
425 	case ETHTOOL_ID_ACTIVE:
426 		/* Save the current LED settings */
427 		if (ql_mb_get_led_cfg(qdev))
428 			return -EIO;
429 
430 		/* Start blinking */
431 		ql_mb_set_led_cfg(qdev, QL_LED_BLINK);
432 		return 0;
433 
434 	case ETHTOOL_ID_INACTIVE:
435 		/* Restore LED settings */
436 		if (ql_mb_set_led_cfg(qdev, qdev->led_config))
437 			return -EIO;
438 		return 0;
439 
440 	default:
441 		return -EINVAL;
442 	}
443 }
444 
ql_start_loopback(struct ql_adapter * qdev)445 static int ql_start_loopback(struct ql_adapter *qdev)
446 {
447 	if (netif_carrier_ok(qdev->ndev)) {
448 		set_bit(QL_LB_LINK_UP, &qdev->flags);
449 		netif_carrier_off(qdev->ndev);
450 	} else
451 		clear_bit(QL_LB_LINK_UP, &qdev->flags);
452 	qdev->link_config |= CFG_LOOPBACK_PCS;
453 	return ql_mb_set_port_cfg(qdev);
454 }
455 
ql_stop_loopback(struct ql_adapter * qdev)456 static void ql_stop_loopback(struct ql_adapter *qdev)
457 {
458 	qdev->link_config &= ~CFG_LOOPBACK_PCS;
459 	ql_mb_set_port_cfg(qdev);
460 	if (test_bit(QL_LB_LINK_UP, &qdev->flags)) {
461 		netif_carrier_on(qdev->ndev);
462 		clear_bit(QL_LB_LINK_UP, &qdev->flags);
463 	}
464 }
465 
ql_create_lb_frame(struct sk_buff * skb,unsigned int frame_size)466 static void ql_create_lb_frame(struct sk_buff *skb,
467 					unsigned int frame_size)
468 {
469 	memset(skb->data, 0xFF, frame_size);
470 	frame_size &= ~1;
471 	memset(&skb->data[frame_size / 2], 0xAA, frame_size / 2 - 1);
472 	memset(&skb->data[frame_size / 2 + 10], 0xBE, 1);
473 	memset(&skb->data[frame_size / 2 + 12], 0xAF, 1);
474 }
475 
ql_check_lb_frame(struct ql_adapter * qdev,struct sk_buff * skb)476 void ql_check_lb_frame(struct ql_adapter *qdev,
477 					struct sk_buff *skb)
478 {
479 	unsigned int frame_size = skb->len;
480 
481 	if ((*(skb->data + 3) == 0xFF) &&
482 		(*(skb->data + frame_size / 2 + 10) == 0xBE) &&
483 		(*(skb->data + frame_size / 2 + 12) == 0xAF)) {
484 			atomic_dec(&qdev->lb_count);
485 			return;
486 	}
487 }
488 
ql_run_loopback_test(struct ql_adapter * qdev)489 static int ql_run_loopback_test(struct ql_adapter *qdev)
490 {
491 	int i;
492 	netdev_tx_t rc;
493 	struct sk_buff *skb;
494 	unsigned int size = SMALL_BUF_MAP_SIZE;
495 
496 	for (i = 0; i < 64; i++) {
497 		skb = netdev_alloc_skb(qdev->ndev, size);
498 		if (!skb)
499 			return -ENOMEM;
500 
501 		skb->queue_mapping = 0;
502 		skb_put(skb, size);
503 		ql_create_lb_frame(skb, size);
504 		rc = ql_lb_send(skb, qdev->ndev);
505 		if (rc != NETDEV_TX_OK)
506 			return -EPIPE;
507 		atomic_inc(&qdev->lb_count);
508 	}
509 	/* Give queue time to settle before testing results. */
510 	msleep(2);
511 	ql_clean_lb_rx_ring(&qdev->rx_ring[0], 128);
512 	return atomic_read(&qdev->lb_count) ? -EIO : 0;
513 }
514 
/* Run the full loopback self-test: enter loopback mode, run the frame
 * test only if setup succeeded, and always leave loopback mode again.
 * The result (0 or a negative errno) is stored in *data and returned.
 */
static int ql_loopback_test(struct ql_adapter *qdev, u64 *data)
{
	*data = ql_start_loopback(qdev);
	if (!*data)
		*data = ql_run_loopback_test(qdev);
	ql_stop_loopback(qdev);
	return *data;
}
525 
/* ethtool self_test entry point.  Offline mode runs the loopback test;
 * online mode has nothing to run and just reports success.  The test
 * requires the interface to be up.
 */
static void ql_self_test(struct net_device *ndev,
				struct ethtool_test *eth_test, u64 *data)
{
	struct ql_adapter *qdev = netdev_priv(ndev);

	if (!netif_running(ndev)) {
		netif_err(qdev, drv, qdev->ndev,
			  "is down, Loopback test will fail.\n");
		eth_test->flags |= ETH_TEST_FL_FAILED;
		return;
	}

	set_bit(QL_SELFTEST, &qdev->flags);
	if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
		/* Offline tests */
		if (ql_loopback_test(qdev, &data[0]))
			eth_test->flags |= ETH_TEST_FL_FAILED;
	} else {
		/* Online tests */
		data[0] = 0;
	}
	clear_bit(QL_SELFTEST, &qdev->flags);

	/* Give link time to come up after
	 * port configuration changes.
	 */
	msleep_interruptible(4 * 1000);
}
553 
ql_get_regs_len(struct net_device * ndev)554 static int ql_get_regs_len(struct net_device *ndev)
555 {
556 	struct ql_adapter *qdev = netdev_priv(ndev);
557 
558 	if (!test_bit(QL_FRC_COREDUMP, &qdev->flags))
559 		return sizeof(struct ql_mpi_coredump);
560 	else
561 		return sizeof(struct ql_reg_dump);
562 }
563 
ql_get_regs(struct net_device * ndev,struct ethtool_regs * regs,void * p)564 static void ql_get_regs(struct net_device *ndev,
565 			struct ethtool_regs *regs, void *p)
566 {
567 	struct ql_adapter *qdev = netdev_priv(ndev);
568 
569 	ql_get_dump(qdev, p);
570 	qdev->core_is_dumped = 0;
571 	if (!test_bit(QL_FRC_COREDUMP, &qdev->flags))
572 		regs->len = sizeof(struct ql_mpi_coredump);
573 	else
574 		regs->len = sizeof(struct ql_reg_dump);
575 }
576 
ql_get_coalesce(struct net_device * dev,struct ethtool_coalesce * c)577 static int ql_get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
578 {
579 	struct ql_adapter *qdev = netdev_priv(dev);
580 
581 	c->rx_coalesce_usecs = qdev->rx_coalesce_usecs;
582 	c->tx_coalesce_usecs = qdev->tx_coalesce_usecs;
583 
584 	/* This chip coalesces as follows:
585 	 * If a packet arrives, hold off interrupts until
586 	 * cqicb->int_delay expires, but if no other packets arrive don't
587 	 * wait longer than cqicb->pkt_int_delay. But ethtool doesn't use a
588 	 * timer to coalesce on a frame basis.  So, we have to take ethtool's
589 	 * max_coalesced_frames value and convert it to a delay in microseconds.
590 	 * We do this by using a basic thoughput of 1,000,000 frames per
591 	 * second @ (1024 bytes).  This means one frame per usec. So it's a
592 	 * simple one to one ratio.
593 	 */
594 	c->rx_max_coalesced_frames = qdev->rx_max_coalesced_frames;
595 	c->tx_max_coalesced_frames = qdev->tx_max_coalesced_frames;
596 
597 	return 0;
598 }
599 
ql_set_coalesce(struct net_device * ndev,struct ethtool_coalesce * c)600 static int ql_set_coalesce(struct net_device *ndev, struct ethtool_coalesce *c)
601 {
602 	struct ql_adapter *qdev = netdev_priv(ndev);
603 
604 	/* Validate user parameters. */
605 	if (c->rx_coalesce_usecs > qdev->rx_ring_size / 2)
606 		return -EINVAL;
607        /* Don't wait more than 10 usec. */
608 	if (c->rx_max_coalesced_frames > MAX_INTER_FRAME_WAIT)
609 		return -EINVAL;
610 	if (c->tx_coalesce_usecs > qdev->tx_ring_size / 2)
611 		return -EINVAL;
612 	if (c->tx_max_coalesced_frames > MAX_INTER_FRAME_WAIT)
613 		return -EINVAL;
614 
615 	/* Verify a change took place before updating the hardware. */
616 	if (qdev->rx_coalesce_usecs == c->rx_coalesce_usecs &&
617 	    qdev->tx_coalesce_usecs == c->tx_coalesce_usecs &&
618 	    qdev->rx_max_coalesced_frames == c->rx_max_coalesced_frames &&
619 	    qdev->tx_max_coalesced_frames == c->tx_max_coalesced_frames)
620 		return 0;
621 
622 	qdev->rx_coalesce_usecs = c->rx_coalesce_usecs;
623 	qdev->tx_coalesce_usecs = c->tx_coalesce_usecs;
624 	qdev->rx_max_coalesced_frames = c->rx_max_coalesced_frames;
625 	qdev->tx_max_coalesced_frames = c->tx_max_coalesced_frames;
626 
627 	return ql_update_ring_coalescing(qdev);
628 }
629 
ql_get_pauseparam(struct net_device * netdev,struct ethtool_pauseparam * pause)630 static void ql_get_pauseparam(struct net_device *netdev,
631 			struct ethtool_pauseparam *pause)
632 {
633 	struct ql_adapter *qdev = netdev_priv(netdev);
634 
635 	ql_mb_get_port_cfg(qdev);
636 	if (qdev->link_config & CFG_PAUSE_STD) {
637 		pause->rx_pause = 1;
638 		pause->tx_pause = 1;
639 	}
640 }
641 
ql_set_pauseparam(struct net_device * netdev,struct ethtool_pauseparam * pause)642 static int ql_set_pauseparam(struct net_device *netdev,
643 			struct ethtool_pauseparam *pause)
644 {
645 	struct ql_adapter *qdev = netdev_priv(netdev);
646 	int status = 0;
647 
648 	if ((pause->rx_pause) && (pause->tx_pause))
649 		qdev->link_config |= CFG_PAUSE_STD;
650 	else if (!pause->rx_pause && !pause->tx_pause)
651 		qdev->link_config &= ~CFG_PAUSE_STD;
652 	else
653 		return -EINVAL;
654 
655 	status = ql_mb_set_port_cfg(qdev);
656 	return status;
657 }
658 
ql_get_msglevel(struct net_device * ndev)659 static u32 ql_get_msglevel(struct net_device *ndev)
660 {
661 	struct ql_adapter *qdev = netdev_priv(ndev);
662 	return qdev->msg_enable;
663 }
664 
/* ethtool set_msglevel: store the new netif message-enable bitmask. */
static void ql_set_msglevel(struct net_device *ndev, u32 value)
{
	struct ql_adapter *qdev = netdev_priv(ndev);

	qdev->msg_enable = value;
}
670 
671 const struct ethtool_ops qlge_ethtool_ops = {
672 	.get_settings = ql_get_settings,
673 	.get_drvinfo = ql_get_drvinfo,
674 	.get_wol = ql_get_wol,
675 	.set_wol = ql_set_wol,
676 	.get_regs_len	= ql_get_regs_len,
677 	.get_regs = ql_get_regs,
678 	.get_msglevel = ql_get_msglevel,
679 	.set_msglevel = ql_set_msglevel,
680 	.get_link = ethtool_op_get_link,
681 	.set_phys_id		 = ql_set_phys_id,
682 	.self_test		 = ql_self_test,
683 	.get_pauseparam		 = ql_get_pauseparam,
684 	.set_pauseparam		 = ql_set_pauseparam,
685 	.get_coalesce = ql_get_coalesce,
686 	.set_coalesce = ql_set_coalesce,
687 	.get_sset_count = ql_get_sset_count,
688 	.get_strings = ql_get_strings,
689 	.get_ethtool_stats = ql_get_ethtool_stats,
690 };
691 
692