1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2015 - 2022 Beijing WangXun Technology Co., Ltd. */
3
4 #include <linux/types.h>
5 #include <linux/module.h>
6 #include <linux/pci.h>
7 #include <linux/netdevice.h>
8 #include <linux/string.h>
9 #include <linux/etherdevice.h>
10 #include <linux/phylink.h>
11 #include <net/ip.h>
12 #include <linux/if_vlan.h>
13
14 #include "../libwx/wx_type.h"
15 #include "../libwx/wx_lib.h"
16 #include "../libwx/wx_ptp.h"
17 #include "../libwx/wx_hw.h"
18 #include "txgbe_type.h"
19 #include "txgbe_hw.h"
20 #include "txgbe_phy.h"
21 #include "txgbe_irq.h"
22 #include "txgbe_fdir.h"
23 #include "txgbe_ethtool.h"
24
25 char txgbe_driver_name[] = "txgbe";
26
27 /* txgbe_pci_tbl - PCI Device ID Table
28 *
29 * Wildcard entries (PCI_ANY_ID) should come last
30 * Last entry must be all 0s
31 *
32 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
33 * Class, Class Mask, private data (not used) }
34 */
35 static const struct pci_device_id txgbe_pci_tbl[] = {
36 { PCI_VDEVICE(WANGXUN, TXGBE_DEV_ID_SP1000), 0},
37 { PCI_VDEVICE(WANGXUN, TXGBE_DEV_ID_WX1820), 0},
38 { PCI_VDEVICE(WANGXUN, TXGBE_DEV_ID_AML5010), 0},
39 { PCI_VDEVICE(WANGXUN, TXGBE_DEV_ID_AML5110), 0},
40 { PCI_VDEVICE(WANGXUN, TXGBE_DEV_ID_AML5025), 0},
41 { PCI_VDEVICE(WANGXUN, TXGBE_DEV_ID_AML5125), 0},
42 { PCI_VDEVICE(WANGXUN, TXGBE_DEV_ID_AML5040), 0},
43 { PCI_VDEVICE(WANGXUN, TXGBE_DEV_ID_AML5140), 0},
44 /* required last entry */
45 { .device = 0 }
46 };
47
48 #define DEFAULT_DEBUG_LEVEL_SHIFT 3
49
txgbe_check_minimum_link(struct wx * wx)50 static void txgbe_check_minimum_link(struct wx *wx)
51 {
52 struct pci_dev *pdev;
53
54 pdev = wx->pdev;
55 pcie_print_link_status(pdev);
56 }
57
58 /**
59 * txgbe_enumerate_functions - Get the number of ports this device has
60 * @wx: wx structure
61 *
 * This function enumerates the physical functions co-located on a single slot,
63 * in order to determine how many ports a device has. This is most useful in
64 * determining the required GT/s of PCIe bandwidth necessary for optimal
65 * performance.
66 **/
txgbe_enumerate_functions(struct wx * wx)67 static int txgbe_enumerate_functions(struct wx *wx)
68 {
69 struct pci_dev *entry, *pdev = wx->pdev;
70 int physfns = 0;
71
72 list_for_each_entry(entry, &pdev->bus->devices, bus_list) {
73 /* When the devices on the bus don't all match our device ID,
74 * we can't reliably determine the correct number of
75 * functions. This can occur if a function has been direct
76 * attached to a virtual machine using VT-d.
77 */
78 if (entry->vendor != pdev->vendor ||
79 entry->device != pdev->device)
80 return -EINVAL;
81
82 physfns++;
83 }
84
85 return physfns;
86 }
87
/* Final stage of bringing the interface up: hand control to the driver,
 * program interrupt vectors, enable NAPI/link/interrupts and start the
 * transmit queues.
 */
static void txgbe_up_complete(struct wx *wx)
{
	struct net_device *netdev = wx->netdev;

	wx_control_hw(wx, true);
	wx_configure_vectors(wx);

	/* make sure to complete pre-operations */
	smp_mb__before_atomic();
	wx_napi_enable_all(wx);

	if (wx->mac.type == wx_mac_aml) {
		u32 reg;

		/* Force the MAC TX speed field to 25G on AML parts.
		 * NOTE(review): the read uses TXGBE_AML_MAC_TX_CFG while
		 * the write targets WX_MAC_TX_CFG — presumably the same
		 * register offset; confirm against the register map.
		 */
		reg = rd32(wx, TXGBE_AML_MAC_TX_CFG);
		reg &= ~TXGBE_AML_MAC_TX_CFG_SPEED_MASK;
		reg |= TXGBE_AML_MAC_TX_CFG_SPEED_25G;
		wr32(wx, WX_MAC_TX_CFG, reg);
		txgbe_enable_sec_tx_path(wx);
		netif_carrier_on(wx->netdev);
	} else {
		/* SP parts manage link state through phylink */
		phylink_start(wx->phylink);
	}

	/* clear any pending interrupts, may auto mask */
	rd32(wx, WX_PX_IC(0));
	rd32(wx, WX_PX_IC(1));
	rd32(wx, WX_PX_MISC_IC);
	txgbe_irq_enable(wx, true);

	/* enable transmits */
	netif_tx_start_all_queues(netdev);
}
121
/* Reset the hardware while preserving the user-configured MAC filter
 * and restarting PTP if it was active.
 */
static void txgbe_reset(struct wx *wx)
{
	struct net_device *netdev = wx->netdev;
	u8 old_addr[ETH_ALEN];
	int err;

	err = txgbe_reset_hw(wx);
	if (err != 0)
		wx_err(wx, "Hardware Error: %d\n", err);

	wx_start_hw(wx);
	/* do not flush user set addresses */
	memcpy(old_addr, &wx->mac_table[0].addr, netdev->addr_len);
	wx_flush_sw_mac_table(wx);
	wx_mac_set_default_filter(wx, old_addr);

	/* re-arm the PTP clock only if it was running before the reset */
	if (test_bit(WX_STATE_PTP_RUNNING, wx->state))
		wx_ptp_reset(wx);
}
141
/* Quiesce the device: stop RX/TX queues, interrupts, NAPI and DMA so
 * the hardware can be reset or the interface brought down safely.
 * Statement order matters here — RX is stopped before TX, and queue
 * flushes happen only after interrupts are off.
 */
static void txgbe_disable_device(struct wx *wx)
{
	struct net_device *netdev = wx->netdev;
	u32 i;

	/* stop the device from issuing new bus-master requests */
	wx_disable_pcie_master(wx);
	/* disable receives */
	wx_disable_rx(wx);

	/* disable all enabled rx queues */
	for (i = 0; i < wx->num_rx_queues; i++)
		/* this call also flushes the previous write */
		wx_disable_rx_queue(wx, wx->rx_ring[i]);

	netif_tx_stop_all_queues(netdev);
	netif_tx_disable(netdev);

	wx_irq_disable(wx);
	wx_napi_disable_all(wx);

	/* clear the per-LAN probe control bit; only functions 0/1 exist */
	if (wx->bus.func < 2)
		wr32m(wx, TXGBE_MIS_PRB_CTL, TXGBE_MIS_PRB_CTL_LAN_UP(wx->bus.func), 0);
	else
		wx_err(wx, "%s: invalid bus lan id %d\n",
		       __func__, wx->bus.func);

	/* keep the MAC transmitter enabled when NCSI or WoL management
	 * support needs it; otherwise shut it off
	 */
	if (!(((wx->subsystem_device_id & WX_NCSI_MASK) == WX_NCSI_SUP) ||
	      ((wx->subsystem_device_id & WX_WOL_MASK) == WX_WOL_SUP))) {
		/* disable mac transmitter */
		wr32m(wx, WX_MAC_TX_CFG, WX_MAC_TX_CFG_TE, 0);
	}

	/* disable transmits in the hardware now that interrupts are off */
	for (i = 0; i < wx->num_tx_queues; i++) {
		u8 reg_idx = wx->tx_ring[i]->reg_idx;

		wr32(wx, WX_PX_TR_CFG(reg_idx), WX_PX_TR_CFG_SWFLSH);
	}

	/* Disable the Tx DMA engine */
	wr32m(wx, WX_TDM_CTL, WX_TDM_CTL_TE, 0);

	wx_update_stats(wx);
}
186
/* Bring the interface fully down: quiesce and reset the hardware, drop
 * the link indication, then drain all descriptor rings.
 */
void txgbe_down(struct wx *wx)
{
	txgbe_disable_device(wx);
	txgbe_reset(wx);
	/* AML parts drive carrier state directly; SP parts use phylink */
	if (wx->mac.type == wx_mac_aml)
		netif_carrier_off(wx->netdev);
	else
		phylink_stop(wx->phylink);

	wx_clean_all_tx_rings(wx);
	wx_clean_all_rx_rings(wx);
}
199
/* Counterpart to txgbe_down(): reprogram the hardware, re-init PTP and
 * re-enable queues/interrupts via txgbe_up_complete().
 */
void txgbe_up(struct wx *wx)
{
	wx_configure(wx);
	wx_ptp_init(wx);
	txgbe_up_complete(wx);
}
206
207 /**
208 * txgbe_init_type_code - Initialize the shared code
209 * @wx: pointer to hardware structure
210 **/
txgbe_init_type_code(struct wx * wx)211 static void txgbe_init_type_code(struct wx *wx)
212 {
213 u8 device_type = wx->subsystem_device_id & 0xF0;
214
215 switch (wx->device_id) {
216 case TXGBE_DEV_ID_SP1000:
217 case TXGBE_DEV_ID_WX1820:
218 wx->mac.type = wx_mac_sp;
219 break;
220 case TXGBE_DEV_ID_AML5010:
221 case TXGBE_DEV_ID_AML5110:
222 case TXGBE_DEV_ID_AML5025:
223 case TXGBE_DEV_ID_AML5125:
224 case TXGBE_DEV_ID_AML5040:
225 case TXGBE_DEV_ID_AML5140:
226 wx->mac.type = wx_mac_aml;
227 break;
228 default:
229 wx->mac.type = wx_mac_unknown;
230 break;
231 }
232
233 switch (device_type) {
234 case TXGBE_ID_SFP:
235 wx->media_type = sp_media_fiber;
236 break;
237 case TXGBE_ID_XAUI:
238 case TXGBE_ID_SGMII:
239 wx->media_type = sp_media_copper;
240 break;
241 case TXGBE_ID_KR_KX_KX4:
242 case TXGBE_ID_MAC_XAUI:
243 case TXGBE_ID_MAC_SGMII:
244 wx->media_type = sp_media_backplane;
245 break;
246 case TXGBE_ID_SFI_XAUI:
247 if (wx->bus.func == 0)
248 wx->media_type = sp_media_fiber;
249 else
250 wx->media_type = sp_media_copper;
251 break;
252 default:
253 wx->media_type = sp_media_unknown;
254 break;
255 }
256 }
257
258 /**
259 * txgbe_sw_init - Initialize general software structures (struct wx)
260 * @wx: board private structure to initialize
261 **/
static int txgbe_sw_init(struct wx *wx)
{
	u16 msix_count = 0;
	int err;

	/* hardware capability limits for this device family */
	wx->mac.num_rar_entries = TXGBE_SP_RAR_ENTRIES;
	wx->mac.max_tx_queues = TXGBE_SP_MAX_TX_QUEUES;
	wx->mac.max_rx_queues = TXGBE_SP_MAX_RX_QUEUES;
	wx->mac.mcft_size = TXGBE_SP_MC_TBL_SIZE;
	wx->mac.vft_size = TXGBE_SP_VFT_TBL_SIZE;
	wx->mac.rx_pb_size = TXGBE_SP_RX_PB_SIZE;
	wx->mac.tx_pb_size = TXGBE_SP_TDB_PB_SZ;

	/* PCI config space info */
	err = wx_sw_init(wx);
	if (err < 0)
		return err;

	/* classify MAC generation and media type from the device IDs */
	txgbe_init_type_code(wx);

	/* Set common capability flags and settings */
	wx->max_q_vectors = TXGBE_MAX_MSIX_VECTORS;
	err = wx_get_pcie_msix_counts(wx, &msix_count, TXGBE_MAX_MSIX_VECTORS);
	if (err)
		/* non-fatal: continue with whatever msix_count holds (0) */
		wx_err(wx, "Do not support MSI-X\n");
	wx->mac.max_msix_vectors = msix_count;

	/* RSS/FDIR queue counts are capped by the online CPU count */
	wx->ring_feature[RING_F_RSS].limit = min_t(int, TXGBE_MAX_RSS_INDICES,
						   num_online_cpus());
	wx->rss_enabled = true;

	wx->ring_feature[RING_F_FDIR].limit = min_t(int, TXGBE_MAX_FDIR_INDICES,
						    num_online_cpus());
	set_bit(WX_FLAG_FDIR_CAPABLE, wx->flags);
	set_bit(WX_FLAG_FDIR_HASH, wx->flags);
	wx->atr_sample_rate = TXGBE_DEFAULT_ATR_SAMPLE_RATE;
	/* flow-director callbacks used by the shared libwx code */
	wx->atr = txgbe_atr;
	wx->configure_fdir = txgbe_configure_fdir;

	set_bit(WX_FLAG_RSC_CAPABLE, wx->flags);

	/* enable itr by default in dynamic mode */
	wx->rx_itr_setting = 1;
	wx->tx_itr_setting = 1;

	/* set default ring sizes */
	wx->tx_ring_count = TXGBE_DEFAULT_TXD;
	wx->rx_ring_count = TXGBE_DEFAULT_RXD;

	/* set default work limits */
	wx->tx_work_limit = TXGBE_DEFAULT_TX_WORK;
	wx->rx_work_limit = TXGBE_DEFAULT_RX_WORK;

	/* reset callback used by the shared libwx code */
	wx->do_reset = txgbe_do_reset;

	switch (wx->mac.type) {
	case wx_mac_sp:
		break;
	case wx_mac_aml:
		/* AML parts use the ring-based software/firmware mailbox */
		set_bit(WX_FLAG_SWFW_RING, wx->flags);
		wx->swfw_index = 0;
		break;
	default:
		break;
	}

	return 0;
}
330
txgbe_init_fdir(struct txgbe * txgbe)331 static void txgbe_init_fdir(struct txgbe *txgbe)
332 {
333 txgbe->fdir_filter_count = 0;
334 spin_lock_init(&txgbe->fdir_perfect_lock);
335 }
336
337 /**
338 * txgbe_open - Called when a network interface is made active
339 * @netdev: network interface device structure
340 *
341 * Returns 0 on success, negative value on failure
342 *
343 * The open entry point is called when a network interface is made
344 * active by the system (IFF_UP).
345 **/
static int txgbe_open(struct net_device *netdev)
{
	struct wx *wx = netdev_priv(netdev);
	int err;

	/* allocate TX/RX rings and ISB memory */
	err = wx_setup_resources(wx);
	if (err)
		goto err_reset;

	wx_configure(wx);

	err = txgbe_request_queue_irqs(wx);
	if (err)
		goto err_free_resources;

	/* Notify the stack of the actual queue counts. */
	err = netif_set_real_num_tx_queues(netdev, wx->num_tx_queues);
	if (err)
		goto err_free_irq;

	err = netif_set_real_num_rx_queues(netdev, wx->num_rx_queues);
	if (err)
		goto err_free_irq;

	wx_ptp_init(wx);

	txgbe_up_complete(wx);

	return 0;

	/* unwind in reverse order of acquisition */
err_free_irq:
	wx_free_irq(wx);
err_free_resources:
	wx_free_resources(wx);
err_reset:
	txgbe_reset(wx);

	return err;
}
385
386 /**
387 * txgbe_close_suspend - actions necessary to both suspend and close flows
388 * @wx: the private wx struct
389 *
390 * This function should contain the necessary work common to both suspending
391 * and closing of the device.
392 */
static void txgbe_close_suspend(struct wx *wx)
{
	/* pause PTP before the hardware is quiesced */
	wx_ptp_suspend(wx);
	txgbe_disable_device(wx);
	wx_free_resources(wx);
}
399
400 /**
401 * txgbe_close - Disables a network interface
402 * @netdev: network interface device structure
403 *
404 * Returns 0, this is not allowed to fail
405 *
406 * The close entry point is called when an interface is de-activated
407 * by the OS. The hardware is still under the drivers control, but
408 * needs to be disabled. A global MAC reset is issued to stop the
409 * hardware, and all transmit and receive resources are freed.
410 **/
static int txgbe_close(struct net_device *netdev)
{
	struct wx *wx = netdev_priv(netdev);

	/* stop PTP first so no timestamps arrive during teardown */
	wx_ptp_stop(wx);
	txgbe_down(wx);
	wx_free_irq(wx);
	wx_free_resources(wx);
	/* drop any installed flow-director filters */
	txgbe_fdir_filter_exit(wx);
	/* return control of the hardware to firmware */
	wx_control_hw(wx, false);

	return 0;
}
424
/* Common shutdown path: detach the netdev, quiesce a running interface
 * under RTNL, release hardware control and disable the PCI device.
 */
static void txgbe_dev_shutdown(struct pci_dev *pdev)
{
	struct wx *wx = pci_get_drvdata(pdev);
	struct net_device *netdev;

	netdev = wx->netdev;
	netif_device_detach(netdev);

	/* netif_running() and the close path require RTNL */
	rtnl_lock();
	if (netif_running(netdev))
		txgbe_close_suspend(wx);
	rtnl_unlock();

	wx_control_hw(wx, false);

	pci_disable_device(pdev);
}
442
static void txgbe_shutdown(struct pci_dev *pdev)
{
	txgbe_dev_shutdown(pdev);

	/* on power-off, drop to D3hot without wake capability */
	if (system_state == SYSTEM_POWER_OFF) {
		pci_wake_from_d3(pdev, false);
		pci_set_power_state(pdev, PCI_D3hot);
	}
}
452
453 /**
454 * txgbe_setup_tc - routine to configure net_device for multiple traffic
455 * classes.
456 *
457 * @dev: net device to configure
458 * @tc: number of traffic classes to enable
459 */
int txgbe_setup_tc(struct net_device *dev, u8 tc)
{
	struct wx *wx = netdev_priv(dev);
	struct txgbe *txgbe = wx->priv;

	/* Hardware has to reinitialize queues and interrupts to
	 * match packet buffer alignment. Unfortunately, the
	 * hardware is not flexible enough to do this dynamically.
	 */
	if (netif_running(dev))
		txgbe_close(dev);
	else
		txgbe_reset(wx);

	/* tear down the interrupt scheme before re-partitioning queues */
	txgbe_free_misc_irq(txgbe);
	wx_clear_interrupt_scheme(wx);

	if (tc)
		netdev_set_num_tc(dev, tc);
	else
		netdev_reset_tc(dev);

	wx_init_interrupt_scheme(wx);
	txgbe_setup_misc_irq(txgbe);

	/* NOTE(review): txgbe_open()'s return value is ignored here, so a
	 * failed re-open leaves the interface down while 0 is returned —
	 * confirm whether this is intentional best-effort behavior.
	 */
	if (netif_running(dev))
		txgbe_open(dev);

	return 0;
}
490
/* Restart a running interface with a full down/up cycle, serialized
 * against concurrent resets via the WX_STATE_RESETTING bit.
 */
static void txgbe_reinit_locked(struct wx *wx)
{
	int err = 0;

	/* refresh the TX watchdog so the restart isn't flagged as a hang */
	netif_trans_update(wx->netdev);

	/* acquire the reset state bit; bail out on timeout */
	err = wx_set_state_reset(wx);
	if (err) {
		wx_err(wx, "wait device reset timeout\n");
		return;
	}

	txgbe_down(wx);
	txgbe_up(wx);

	clear_bit(WX_STATE_RESETTING, wx->state);
}
508
/* Reset entry point used by the shared libwx code: a running interface
 * needs a full serialized down/up cycle, otherwise a bare hardware
 * reset suffices.
 */
void txgbe_do_reset(struct net_device *netdev)
{
	struct wx *wx = netdev_priv(netdev);

	if (!netif_running(netdev))
		txgbe_reset(wx);
	else
		txgbe_reinit_locked(wx);
}
518
/* Net device callbacks; most operations are delegated to the shared
 * libwx helpers, with open/close implemented locally.
 */
static const struct net_device_ops txgbe_netdev_ops = {
	.ndo_open = txgbe_open,
	.ndo_stop = txgbe_close,
	.ndo_change_mtu = wx_change_mtu,
	.ndo_start_xmit = wx_xmit_frame,
	.ndo_set_rx_mode = wx_set_rx_mode,
	.ndo_set_features = wx_set_features,
	.ndo_fix_features = wx_fix_features,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_mac_address = wx_set_mac,
	.ndo_get_stats64 = wx_get_stats64,
	.ndo_vlan_rx_add_vid = wx_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = wx_vlan_rx_kill_vid,
	.ndo_hwtstamp_set = wx_hwtstamp_set,
	.ndo_hwtstamp_get = wx_hwtstamp_get,
};
535
536 /**
537 * txgbe_probe - Device Initialization Routine
538 * @pdev: PCI device information struct
539 * @ent: entry in txgbe_pci_tbl
540 *
541 * Returns 0 on success, negative on failure
542 *
543 * txgbe_probe initializes an adapter identified by a pci_dev structure.
544 * The OS initialization, configuring of the wx private structure,
545 * and a hardware reset occur.
546 **/
static int txgbe_probe(struct pci_dev *pdev,
		       const struct pci_device_id __always_unused *ent)
{
	struct net_device *netdev;
	int err, expected_gts;
	struct wx *wx = NULL;
	struct txgbe *txgbe;

	u16 eeprom_verh = 0, eeprom_verl = 0, offset = 0;
	u16 eeprom_cfg_blkh = 0, eeprom_cfg_blkl = 0;
	u16 build = 0, major = 0, patch = 0;
	u32 etrack_id = 0;

	err = pci_enable_device_mem(pdev);
	if (err)
		return err;

	/* the device must be capable of 64-bit DMA */
	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (err) {
		dev_err(&pdev->dev,
			"No usable DMA configuration, aborting\n");
		goto err_pci_disable_dev;
	}

	err = pci_request_selected_regions(pdev,
					   pci_select_bars(pdev, IORESOURCE_MEM),
					   txgbe_driver_name);
	if (err) {
		dev_err(&pdev->dev,
			"pci_request_selected_regions failed 0x%x\n", err);
		goto err_pci_disable_dev;
	}

	pci_set_master(pdev);

	/* netdev (and its struct wx private area) is device-managed */
	netdev = devm_alloc_etherdev_mqs(&pdev->dev,
					 sizeof(struct wx),
					 TXGBE_MAX_TX_QUEUES,
					 TXGBE_MAX_RX_QUEUES);
	if (!netdev) {
		err = -ENOMEM;
		goto err_pci_release_regions;
	}

	SET_NETDEV_DEV(netdev, &pdev->dev);

	wx = netdev_priv(netdev);
	wx->netdev = netdev;
	wx->pdev = pdev;

	wx->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1;

	/* map BAR 0 registers (device-managed) */
	wx->hw_addr = devm_ioremap(&pdev->dev,
				   pci_resource_start(pdev, 0),
				   pci_resource_len(pdev, 0));
	if (!wx->hw_addr) {
		err = -EIO;
		goto err_pci_release_regions;
	}

	wx->driver_name = txgbe_driver_name;
	txgbe_set_ethtool_ops(netdev);
	netdev->netdev_ops = &txgbe_netdev_ops;

	/* setup the private structure */
	err = txgbe_sw_init(wx);
	if (err)
		goto err_pci_release_regions;

	/* check if flash load is done after hw power up */
	err = wx_check_flash_load(wx, TXGBE_SPI_ILDR_STATUS_PERST);
	if (err)
		goto err_free_mac_table;
	err = wx_check_flash_load(wx, TXGBE_SPI_ILDR_STATUS_PWRRST);
	if (err)
		goto err_free_mac_table;

	err = wx_mng_present(wx);
	if (err) {
		dev_err(&pdev->dev, "Management capability is not present\n");
		goto err_free_mac_table;
	}

	err = txgbe_reset_hw(wx);
	if (err) {
		dev_err(&pdev->dev, "HW Init failed: %d\n", err);
		goto err_free_mac_table;
	}

	/* advertise offload capabilities to the stack */
	netdev->features = NETIF_F_SG |
			   NETIF_F_TSO |
			   NETIF_F_TSO6 |
			   NETIF_F_RXHASH |
			   NETIF_F_RXCSUM |
			   NETIF_F_HW_CSUM;

	netdev->gso_partial_features = NETIF_F_GSO_ENCAP_ALL;
	netdev->features |= netdev->gso_partial_features;
	netdev->features |= NETIF_F_SCTP_CRC;
	netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID;
	netdev->hw_enc_features |= netdev->vlan_features;
	netdev->features |= NETIF_F_VLAN_FEATURES;
	/* copy netdev features into list of user selectable features */
	netdev->hw_features |= netdev->features | NETIF_F_RXALL;
	netdev->hw_features |= NETIF_F_NTUPLE | NETIF_F_HW_TC;
	netdev->features |= NETIF_F_HIGHDMA;
	netdev->hw_features |= NETIF_F_GRO;
	netdev->features |= NETIF_F_GRO;

	netdev->priv_flags |= IFF_UNICAST_FLT;
	netdev->priv_flags |= IFF_SUPP_NOFCS;
	netdev->priv_flags |= IFF_LIVE_ADDR_CHANGE;

	netdev->min_mtu = ETH_MIN_MTU;
	netdev->max_mtu = WX_MAX_JUMBO_FRAME_SIZE -
			  (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);

	/* make sure the EEPROM is good */
	err = txgbe_validate_eeprom_checksum(wx, NULL);
	if (err != 0) {
		dev_err(&pdev->dev, "The EEPROM Checksum Is Not Valid\n");
		/* trigger a software reset so the device is left sane */
		wr32(wx, WX_MIS_RST, WX_MIS_RST_SW_RST);
		err = -EIO;
		goto err_free_mac_table;
	}

	eth_hw_addr_set(netdev, wx->mac.perm_addr);
	wx_mac_set_default_filter(wx, wx->mac.perm_addr);

	err = wx_init_interrupt_scheme(wx);
	if (err)
		goto err_free_mac_table;

	/* Save off EEPROM version number and Option Rom version which
	 * together make a unique identify for the eeprom
	 */
	wx_read_ee_hostif(wx,
			  wx->eeprom.sw_region_offset + TXGBE_EEPROM_VERSION_H,
			  &eeprom_verh);
	wx_read_ee_hostif(wx,
			  wx->eeprom.sw_region_offset + TXGBE_EEPROM_VERSION_L,
			  &eeprom_verl);
	etrack_id = (eeprom_verh << 16) | eeprom_verl;

	wx_read_ee_hostif(wx,
			  wx->eeprom.sw_region_offset + TXGBE_ISCSI_BOOT_CONFIG,
			  &offset);

	/* Make sure offset to SCSI block is valid */
	if (!(offset == 0x0) && !(offset == 0xffff)) {
		wx_read_ee_hostif(wx, offset + 0x84, &eeprom_cfg_blkh);
		wx_read_ee_hostif(wx, offset + 0x83, &eeprom_cfg_blkl);

		/* Only display Option Rom if exist */
		if (eeprom_cfg_blkl && eeprom_cfg_blkh) {
			major = eeprom_cfg_blkl >> 8;
			build = (eeprom_cfg_blkl << 8) | (eeprom_cfg_blkh >> 8);
			patch = eeprom_cfg_blkh & 0x00ff;

			snprintf(wx->eeprom_id, sizeof(wx->eeprom_id),
				 "0x%08x, %d.%d.%d", etrack_id, major, build,
				 patch);
		} else {
			snprintf(wx->eeprom_id, sizeof(wx->eeprom_id),
				 "0x%08x", etrack_id);
		}
	} else {
		snprintf(wx->eeprom_id, sizeof(wx->eeprom_id),
			 "0x%08x", etrack_id);
	}

	if (etrack_id < 0x20010)
		dev_warn(&pdev->dev, "Please upgrade the firmware to 0x20010 or above.\n");

	txgbe = devm_kzalloc(&pdev->dev, sizeof(*txgbe), GFP_KERNEL);
	if (!txgbe) {
		err = -ENOMEM;
		goto err_release_hw;
	}

	txgbe->wx = wx;
	wx->priv = txgbe;

	txgbe_init_fdir(txgbe);

	err = txgbe_setup_misc_irq(txgbe);
	if (err)
		goto err_release_hw;

	err = txgbe_init_phy(txgbe);
	if (err)
		goto err_free_misc_irq;

	err = register_netdev(netdev);
	if (err)
		goto err_remove_phy;

	pci_set_drvdata(pdev, wx);

	/* queues stay stopped until ndo_open */
	netif_tx_stop_all_queues(netdev);

	/* calculate the expected PCIe bandwidth required for optimal
	 * performance. Note that some older parts will never have enough
	 * bandwidth due to being older generation PCIe parts. We clamp these
	 * parts to ensure that no warning is displayed, as this could confuse
	 * users otherwise.
	 */
	expected_gts = txgbe_enumerate_functions(wx) * 10;

	/* don't check link if we failed to enumerate functions */
	if (expected_gts > 0)
		txgbe_check_minimum_link(wx);
	else
		dev_warn(&pdev->dev, "Failed to enumerate PF devices.\n");

	return 0;

	/* unwind in reverse order of acquisition; devm-managed resources
	 * (netdev, BAR mapping, txgbe struct) are released automatically
	 */
err_remove_phy:
	txgbe_remove_phy(txgbe);
err_free_misc_irq:
	txgbe_free_misc_irq(txgbe);
err_release_hw:
	wx_clear_interrupt_scheme(wx);
	wx_control_hw(wx, false);
err_free_mac_table:
	kfree(wx->rss_key);
	kfree(wx->mac_table);
err_pci_release_regions:
	pci_release_selected_regions(pdev,
				     pci_select_bars(pdev, IORESOURCE_MEM));
err_pci_disable_dev:
	pci_disable_device(pdev);
	return err;
}
781
782 /**
783 * txgbe_remove - Device Removal Routine
784 * @pdev: PCI device information struct
785 *
786 * txgbe_remove is called by the PCI subsystem to alert the driver
787 * that it should release a PCI device. The could be caused by a
788 * Hot-Plug event, or because the driver is going to be removed from
789 * memory.
790 **/
static void txgbe_remove(struct pci_dev *pdev)
{
	struct wx *wx = pci_get_drvdata(pdev);
	struct txgbe *txgbe = wx->priv;
	struct net_device *netdev;

	netdev = wx->netdev;
	/* stop new traffic and ndo callbacks before tearing down */
	unregister_netdev(netdev);

	txgbe_remove_phy(txgbe);
	txgbe_free_misc_irq(txgbe);
	wx_free_isb_resources(wx);

	pci_release_selected_regions(pdev,
				     pci_select_bars(pdev, IORESOURCE_MEM));

	kfree(wx->rss_key);
	kfree(wx->mac_table);
	wx_clear_interrupt_scheme(wx);

	pci_disable_device(pdev);
}
813
/* PCI driver registration; bound to the device IDs in txgbe_pci_tbl */
static struct pci_driver txgbe_driver = {
	.name = txgbe_driver_name,
	.id_table = txgbe_pci_tbl,
	.probe = txgbe_probe,
	.remove = txgbe_remove,
	.shutdown = txgbe_shutdown,
};
821
822 module_pci_driver(txgbe_driver);
823
824 MODULE_DEVICE_TABLE(pci, txgbe_pci_tbl);
825 MODULE_AUTHOR("Beijing WangXun Technology Co., Ltd, <software@trustnetic.com>");
826 MODULE_DESCRIPTION("WangXun(R) 10 Gigabit PCI Express Network Driver");
827 MODULE_LICENSE("GPL");
828