1 /*
2 * QLogic qlcnic NIC Driver
3 * Copyright (c) 2009-2010 QLogic Corporation
4 *
5 * See LICENSE.qlcnic for copyright and licensing details.
6 */
7
8 #include <linux/slab.h>
9 #include <linux/vmalloc.h>
10 #include <linux/interrupt.h>
11
12 #include "qlcnic.h"
13
14 #include <linux/swab.h>
15 #include <linux/dma-mapping.h>
16 #include <net/ip.h>
17 #include <linux/ipv6.h>
18 #include <linux/inetdevice.h>
19 #include <linux/sysfs.h>
20 #include <linux/aer.h>
21 #include <linux/log2.h>
22
23 MODULE_DESCRIPTION("QLogic 1/10 GbE Converged/Intelligent Ethernet Driver");
24 MODULE_LICENSE("GPL");
25 MODULE_VERSION(QLCNIC_LINUX_VERSIONID);
26 MODULE_FIRMWARE(QLCNIC_UNIFIED_ROMIMAGE_NAME);
27
28 char qlcnic_driver_name[] = "qlcnic";
29 static const char qlcnic_driver_string[] = "QLogic 1/10 GbE "
30 "Converged/Intelligent Ethernet Driver v" QLCNIC_LINUX_VERSIONID;
31
32 static struct workqueue_struct *qlcnic_wq;
33 static int qlcnic_mac_learn;
34 module_param(qlcnic_mac_learn, int, 0444);
35 MODULE_PARM_DESC(qlcnic_mac_learn, "Mac Filter (0=disabled, 1=enabled)");
36
37 static int use_msi = 1;
38 module_param(use_msi, int, 0444);
39 MODULE_PARM_DESC(use_msi, "MSI interrupt (0=disabled, 1=enabled)");
40
41 static int use_msi_x = 1;
42 module_param(use_msi_x, int, 0444);
43 MODULE_PARM_DESC(use_msi_x, "MSI-X interrupt (0=disabled, 1=enabled)");
44
45 static int auto_fw_reset = 1;
46 module_param(auto_fw_reset, int, 0644);
47 MODULE_PARM_DESC(auto_fw_reset, "Auto firmware reset (0=disabled, 1=enabled)");
48
49 static int load_fw_file;
50 module_param(load_fw_file, int, 0444);
51 MODULE_PARM_DESC(load_fw_file, "Load firmware from (0=flash, 1=file)");
52
53 static int qlcnic_config_npars;
54 module_param(qlcnic_config_npars, int, 0444);
55 MODULE_PARM_DESC(qlcnic_config_npars, "Configure NPARs (0=disabled, 1=enabled)");
56
57 static int __devinit qlcnic_probe(struct pci_dev *pdev,
58 const struct pci_device_id *ent);
59 static void __devexit qlcnic_remove(struct pci_dev *pdev);
60 static int qlcnic_open(struct net_device *netdev);
61 static int qlcnic_close(struct net_device *netdev);
62 static void qlcnic_tx_timeout(struct net_device *netdev);
63 static void qlcnic_attach_work(struct work_struct *work);
64 static void qlcnic_fwinit_work(struct work_struct *work);
65 static void qlcnic_fw_poll_work(struct work_struct *work);
66 static void qlcnic_schedule_work(struct qlcnic_adapter *adapter,
67 work_func_t func, int delay);
68 static void qlcnic_cancel_fw_work(struct qlcnic_adapter *adapter);
69 static int qlcnic_poll(struct napi_struct *napi, int budget);
70 static int qlcnic_rx_poll(struct napi_struct *napi, int budget);
71 #ifdef CONFIG_NET_POLL_CONTROLLER
72 static void qlcnic_poll_controller(struct net_device *netdev);
73 #endif
74
75 static void qlcnic_create_sysfs_entries(struct qlcnic_adapter *adapter);
76 static void qlcnic_remove_sysfs_entries(struct qlcnic_adapter *adapter);
77 static void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter);
78 static void qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter);
79
80 static void qlcnic_idc_debug_info(struct qlcnic_adapter *adapter, u8 encoding);
81 static void qlcnic_clr_all_drv_state(struct qlcnic_adapter *adapter, u8);
82 static int qlcnic_can_start_firmware(struct qlcnic_adapter *adapter);
83
84 static irqreturn_t qlcnic_tmp_intr(int irq, void *data);
85 static irqreturn_t qlcnic_intr(int irq, void *data);
86 static irqreturn_t qlcnic_msi_intr(int irq, void *data);
87 static irqreturn_t qlcnic_msix_intr(int irq, void *data);
88
89 static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev);
90 static void qlcnic_restore_indev_addr(struct net_device *dev, unsigned long);
91 static int qlcnic_start_firmware(struct qlcnic_adapter *);
92
93 static void qlcnic_free_lb_filters_mem(struct qlcnic_adapter *adapter);
94 static void qlcnic_dev_set_npar_ready(struct qlcnic_adapter *);
95 static int qlcnicvf_config_led(struct qlcnic_adapter *, u32, u32);
96 static int qlcnicvf_config_bridged_mode(struct qlcnic_adapter *, u32);
97 static int qlcnicvf_start_firmware(struct qlcnic_adapter *);
98 static void qlcnic_set_netdev_features(struct qlcnic_adapter *,
99 struct qlcnic_esw_func_cfg *);
100 static int qlcnic_vlan_rx_add(struct net_device *, u16);
101 static int qlcnic_vlan_rx_del(struct net_device *, u16);
102
103 /* PCI Device ID Table */
104 #define ENTRY(device) \
105 {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, (device)), \
106 .class = PCI_CLASS_NETWORK_ETHERNET << 8, .class_mask = ~0}
107
108 #define PCI_DEVICE_ID_QLOGIC_QLE824X 0x8020
109
110 static DEFINE_PCI_DEVICE_TABLE(qlcnic_pci_tbl) = {
111 ENTRY(PCI_DEVICE_ID_QLOGIC_QLE824X),
112 {0,}
113 };
114
115 MODULE_DEVICE_TABLE(pci, qlcnic_pci_tbl);
116
117
118 inline void
119 qlcnic_update_cmd_producer(struct qlcnic_adapter *adapter,
120 struct qlcnic_host_tx_ring *tx_ring)
121 {
122 writel(tx_ring->producer, tx_ring->crb_cmd_producer);
123 }
124
125 static const u32 msi_tgt_status[8] = {
126 ISR_INT_TARGET_STATUS, ISR_INT_TARGET_STATUS_F1,
127 ISR_INT_TARGET_STATUS_F2, ISR_INT_TARGET_STATUS_F3,
128 ISR_INT_TARGET_STATUS_F4, ISR_INT_TARGET_STATUS_F5,
129 ISR_INT_TARGET_STATUS_F6, ISR_INT_TARGET_STATUS_F7
130 };
131
132 static const
133 struct qlcnic_legacy_intr_set legacy_intr[] = QLCNIC_LEGACY_INTR_CONFIG;
134
135 static inline void qlcnic_disable_int(struct qlcnic_host_sds_ring *sds_ring)
136 {
137 writel(0, sds_ring->crb_intr_mask);
138 }
139
140 static inline void qlcnic_enable_int(struct qlcnic_host_sds_ring *sds_ring)
141 {
142 struct qlcnic_adapter *adapter = sds_ring->adapter;
143
144 writel(0x1, sds_ring->crb_intr_mask);
145
146 if (!QLCNIC_IS_MSI_FAMILY(adapter))
147 writel(0xfbff, adapter->tgt_mask_reg);
148 }
149
150 static int
151 qlcnic_alloc_sds_rings(struct qlcnic_recv_context *recv_ctx, int count)
152 {
153 int size = sizeof(struct qlcnic_host_sds_ring) * count;
154
155 recv_ctx->sds_rings = kzalloc(size, GFP_KERNEL);
156
157 return recv_ctx->sds_rings == NULL;
158 }
159
160 static void
161 qlcnic_free_sds_rings(struct qlcnic_recv_context *recv_ctx)
162 {
163 if (recv_ctx->sds_rings != NULL)
164 kfree(recv_ctx->sds_rings);
165
166 recv_ctx->sds_rings = NULL;
167 }
168
169 static int
170 qlcnic_napi_add(struct qlcnic_adapter *adapter, struct net_device *netdev)
171 {
172 int ring;
173 struct qlcnic_host_sds_ring *sds_ring;
174 struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
175
176 if (qlcnic_alloc_sds_rings(recv_ctx, adapter->max_sds_rings))
177 return -ENOMEM;
178
179 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
180 sds_ring = &recv_ctx->sds_rings[ring];
181
182 if (ring == adapter->max_sds_rings - 1)
183 netif_napi_add(netdev, &sds_ring->napi, qlcnic_poll,
184 QLCNIC_NETDEV_WEIGHT/adapter->max_sds_rings);
185 else
186 netif_napi_add(netdev, &sds_ring->napi,
187 qlcnic_rx_poll, QLCNIC_NETDEV_WEIGHT*2);
188 }
189
190 return 0;
191 }
192
193 static void
194 qlcnic_napi_del(struct qlcnic_adapter *adapter)
195 {
196 int ring;
197 struct qlcnic_host_sds_ring *sds_ring;
198 struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
199
200 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
201 sds_ring = &recv_ctx->sds_rings[ring];
202 netif_napi_del(&sds_ring->napi);
203 }
204
205 qlcnic_free_sds_rings(adapter->recv_ctx);
206 }
207
208 static void
209 qlcnic_napi_enable(struct qlcnic_adapter *adapter)
210 {
211 int ring;
212 struct qlcnic_host_sds_ring *sds_ring;
213 struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
214
215 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
216 return;
217
218 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
219 sds_ring = &recv_ctx->sds_rings[ring];
220 napi_enable(&sds_ring->napi);
221 qlcnic_enable_int(sds_ring);
222 }
223 }
224
225 static void
226 qlcnic_napi_disable(struct qlcnic_adapter *adapter)
227 {
228 int ring;
229 struct qlcnic_host_sds_ring *sds_ring;
230 struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
231
232 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
233 return;
234
235 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
236 sds_ring = &recv_ctx->sds_rings[ring];
237 qlcnic_disable_int(sds_ring);
238 napi_synchronize(&sds_ring->napi);
239 napi_disable(&sds_ring->napi);
240 }
241 }
242
243 static void qlcnic_clear_stats(struct qlcnic_adapter *adapter)
244 {
245 memset(&adapter->stats, 0, sizeof(adapter->stats));
246 }
247
248 static void qlcnic_set_msix_bit(struct pci_dev *pdev, int enable)
249 {
250 u32 control;
251 int pos;
252
253 pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
254 if (pos) {
255 pci_read_config_dword(pdev, pos, &control);
256 if (enable)
257 control |= PCI_MSIX_FLAGS_ENABLE;
258 else
259 control = 0;
260 pci_write_config_dword(pdev, pos, control);
261 }
262 }
263
264 static void qlcnic_init_msix_entries(struct qlcnic_adapter *adapter, int count)
265 {
266 int i;
267
268 for (i = 0; i < count; i++)
269 adapter->msix_entries[i].entry = i;
270 }
271
272 static int
273 qlcnic_read_mac_addr(struct qlcnic_adapter *adapter)
274 {
275 u8 mac_addr[ETH_ALEN];
276 struct net_device *netdev = adapter->netdev;
277 struct pci_dev *pdev = adapter->pdev;
278
279 if (qlcnic_get_mac_address(adapter, mac_addr) != 0)
280 return -EIO;
281
282 memcpy(netdev->dev_addr, mac_addr, ETH_ALEN);
283 memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len);
284 memcpy(adapter->mac_addr, netdev->dev_addr, netdev->addr_len);
285
286 /* set station address */
287
288 if (!is_valid_ether_addr(netdev->perm_addr))
289 dev_warn(&pdev->dev, "Bad MAC address %pM.\n",
290 netdev->dev_addr);
291
292 return 0;
293 }
294
295 static int qlcnic_set_mac(struct net_device *netdev, void *p)
296 {
297 struct qlcnic_adapter *adapter = netdev_priv(netdev);
298 struct sockaddr *addr = p;
299
300 if ((adapter->flags & QLCNIC_MAC_OVERRIDE_DISABLED))
301 return -EOPNOTSUPP;
302
303 if (!is_valid_ether_addr(addr->sa_data))
304 return -EINVAL;
305
306 if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
307 netif_device_detach(netdev);
308 qlcnic_napi_disable(adapter);
309 }
310
311 memcpy(adapter->mac_addr, addr->sa_data, netdev->addr_len);
312 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
313 qlcnic_set_multi(adapter->netdev);
314
315 if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
316 netif_device_attach(netdev);
317 qlcnic_napi_enable(adapter);
318 }
319 return 0;
320 }
321
322 static const struct net_device_ops qlcnic_netdev_ops = {
323 .ndo_open = qlcnic_open,
324 .ndo_stop = qlcnic_close,
325 .ndo_start_xmit = qlcnic_xmit_frame,
326 .ndo_get_stats = qlcnic_get_stats,
327 .ndo_validate_addr = eth_validate_addr,
328 .ndo_set_rx_mode = qlcnic_set_multi,
329 .ndo_set_mac_address = qlcnic_set_mac,
330 .ndo_change_mtu = qlcnic_change_mtu,
331 .ndo_fix_features = qlcnic_fix_features,
332 .ndo_set_features = qlcnic_set_features,
333 .ndo_tx_timeout = qlcnic_tx_timeout,
334 .ndo_vlan_rx_add_vid = qlcnic_vlan_rx_add,
335 .ndo_vlan_rx_kill_vid = qlcnic_vlan_rx_del,
336 #ifdef CONFIG_NET_POLL_CONTROLLER
337 .ndo_poll_controller = qlcnic_poll_controller,
338 #endif
339 };
340
341 static struct qlcnic_nic_template qlcnic_ops = {
342 .config_bridged_mode = qlcnic_config_bridged_mode,
343 .config_led = qlcnic_config_led,
344 .start_firmware = qlcnic_start_firmware
345 };
346
347 static struct qlcnic_nic_template qlcnic_vf_ops = {
348 .config_bridged_mode = qlcnicvf_config_bridged_mode,
349 .config_led = qlcnicvf_config_led,
350 .start_firmware = qlcnicvf_start_firmware
351 };
352
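/*
 * Request MSI-X vectors for the adapter.  A positive return from
 * pci_enable_msix() reports how many vectors are actually available,
 * so retry with the next lower power of two until enabling succeeds
 * or no vectors remain.
 */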
353 static int qlcnic_enable_msix(struct qlcnic_adapter *adapter, u32 num_msix)
354 {
355 struct pci_dev *pdev = adapter->pdev;
356 int err = -1;
357
358 adapter->max_sds_rings = 1;
359 adapter->flags &= ~(QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED);
360 qlcnic_set_msix_bit(pdev, 0);
361
362 if (adapter->msix_supported) {
363 enable_msix:
364 qlcnic_init_msix_entries(adapter, num_msix);
365 err = pci_enable_msix(pdev, adapter->msix_entries, num_msix);
366 if (err == 0) {
367 adapter->flags |= QLCNIC_MSIX_ENABLED;
368 qlcnic_set_msix_bit(pdev, 1);
369
370 adapter->max_sds_rings = num_msix;
371
372 dev_info(&pdev->dev, "using msi-x interrupts\n");
373 return err;
374 }
375 if (err > 0) {
376 num_msix = rounddown_pow_of_two(err);
377 if (num_msix)
378 goto enable_msix;
379 }
380 }
381 return err;
382 }
383
384
385 static void qlcnic_enable_msi_legacy(struct qlcnic_adapter *adapter)
386 {
387 const struct qlcnic_legacy_intr_set *legacy_intrp;
388 struct pci_dev *pdev = adapter->pdev;
389
390 if (use_msi && !pci_enable_msi(pdev)) {
391 adapter->flags |= QLCNIC_MSI_ENABLED;
392 adapter->tgt_status_reg = qlcnic_get_ioaddr(adapter,
393 msi_tgt_status[adapter->ahw->pci_func]);
394 dev_info(&pdev->dev, "using msi interrupts\n");
395 adapter->msix_entries[0].vector = pdev->irq;
396 return;
397 }
398
399 legacy_intrp = &legacy_intr[adapter->ahw->pci_func];
400
401 adapter->int_vec_bit = legacy_intrp->int_vec_bit;
402 adapter->tgt_status_reg = qlcnic_get_ioaddr(adapter,
403 legacy_intrp->tgt_status_reg);
404 adapter->tgt_mask_reg = qlcnic_get_ioaddr(adapter,
405 legacy_intrp->tgt_mask_reg);
406 adapter->isr_int_vec = qlcnic_get_ioaddr(adapter, ISR_INT_VECTOR);
407
408 adapter->crb_int_state_reg = qlcnic_get_ioaddr(adapter,
409 ISR_INT_STATE_REG);
410 dev_info(&pdev->dev, "using legacy interrupts\n");
411 adapter->msix_entries[0].vector = pdev->irq;
412 }
413
414 static void
415 qlcnic_setup_intr(struct qlcnic_adapter *adapter)
416 {
417 int num_msix;
418
419 if (adapter->msix_supported) {
420 num_msix = rounddown_pow_of_two(min_t(int, num_online_cpus(),
421 QLCNIC_DEF_NUM_STS_DESC_RINGS));
422 } else
423 num_msix = 1;
424
425 if (!qlcnic_enable_msix(adapter, num_msix))
426 return;
427
428 qlcnic_enable_msi_legacy(adapter);
429 }
430
431 static void
432 qlcnic_teardown_intr(struct qlcnic_adapter *adapter)
433 {
434 if (adapter->flags & QLCNIC_MSIX_ENABLED)
435 pci_disable_msix(adapter->pdev);
436 if (adapter->flags & QLCNIC_MSI_ENABLED)
437 pci_disable_msi(adapter->pdev);
438 }
439
440 static void
441 qlcnic_cleanup_pci_map(struct qlcnic_adapter *adapter)
442 {
443 if (adapter->ahw->pci_base0 != NULL)
444 iounmap(adapter->ahw->pci_base0);
445 }
446
447 static int
448 qlcnic_init_pci_info(struct qlcnic_adapter *adapter)
449 {
450 struct qlcnic_pci_info *pci_info;
451 int i, ret = 0;
452 u8 pfn;
453
454 pci_info = kcalloc(QLCNIC_MAX_PCI_FUNC, sizeof(*pci_info), GFP_KERNEL);
455 if (!pci_info)
456 return -ENOMEM;
457
458 adapter->npars = kzalloc(sizeof(struct qlcnic_npar_info) *
459 QLCNIC_MAX_PCI_FUNC, GFP_KERNEL);
460 if (!adapter->npars) {
461 ret = -ENOMEM;
462 goto err_pci_info;
463 }
464
465 adapter->eswitch = kzalloc(sizeof(struct qlcnic_eswitch) *
466 QLCNIC_NIU_MAX_XG_PORTS, GFP_KERNEL);
467 if (!adapter->eswitch) {
468 ret = -ENOMEM;
469 goto err_npars;
470 }
471
472 ret = qlcnic_get_pci_info(adapter, pci_info);
473 if (ret)
474 goto err_eswitch;
475
476 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
477 pfn = pci_info[i].id;
478 if (pfn > QLCNIC_MAX_PCI_FUNC) {
479 ret = QL_STATUS_INVALID_PARAM;
480 goto err_eswitch;
481 }
482 adapter->npars[pfn].active = (u8)pci_info[i].active;
483 adapter->npars[pfn].type = (u8)pci_info[i].type;
484 adapter->npars[pfn].phy_port = (u8)pci_info[i].default_port;
485 adapter->npars[pfn].min_bw = pci_info[i].tx_min_bw;
486 adapter->npars[pfn].max_bw = pci_info[i].tx_max_bw;
487 }
488
489 for (i = 0; i < QLCNIC_NIU_MAX_XG_PORTS; i++)
490 adapter->eswitch[i].flags |= QLCNIC_SWITCH_ENABLE;
491
492 kfree(pci_info);
493 return 0;
494
495 err_eswitch:
496 kfree(adapter->eswitch);
497 adapter->eswitch = NULL;
498 err_npars:
499 kfree(adapter->npars);
500 adapter->npars = NULL;
501 err_pci_info:
502 kfree(pci_info);
503
504 return ret;
505 }
506
507 static int
508 qlcnic_set_function_modes(struct qlcnic_adapter *adapter)
509 {
510 u8 id;
511 u32 ref_count;
512 int i, ret = 1;
513 u32 data = QLCNIC_MGMT_FUNC;
514 void __iomem *priv_op = adapter->ahw->pci_base0 + QLCNIC_DRV_OP_MODE;
515
516 /* If other drivers are not in use, set their privilege level */
517 ref_count = QLCRD32(adapter, QLCNIC_CRB_DRV_ACTIVE);
518 ret = qlcnic_api_lock(adapter);
519 if (ret)
520 goto err_lock;
521
522 if (qlcnic_config_npars) {
523 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
524 id = i;
525 if (adapter->npars[i].type != QLCNIC_TYPE_NIC ||
526 id == adapter->ahw->pci_func)
527 continue;
528 data |= (qlcnic_config_npars &
529 QLC_DEV_SET_DRV(0xf, id));
530 }
531 } else {
532 data = readl(priv_op);
533 data = (data & ~QLC_DEV_SET_DRV(0xf, adapter->ahw->pci_func)) |
534 (QLC_DEV_SET_DRV(QLCNIC_MGMT_FUNC,
535 adapter->ahw->pci_func));
536 }
537 writel(data, priv_op);
538 qlcnic_api_unlock(adapter);
539 err_lock:
540 return ret;
541 }
542
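/*
 * Read the firmware HAL version, derive this function's PCI function
 * number from its MSI-X table offset, and select the VF ops table when
 * the function has no privileged role.
 */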
543 static void
544 qlcnic_check_vf(struct qlcnic_adapter *adapter)
545 {
546 void __iomem *msix_base_addr;
547 void __iomem *priv_op;
548 u32 func;
549 u32 msix_base;
550 u32 op_mode, priv_level;
551
552 /* Determine FW API version */
553 adapter->fw_hal_version = readl(adapter->ahw->pci_base0 +
554 QLCNIC_FW_API);
555
556 /* Find PCI function number */
557 pci_read_config_dword(adapter->pdev, QLCNIC_MSIX_TABLE_OFFSET, &func);
558 msix_base_addr = adapter->ahw->pci_base0 + QLCNIC_MSIX_BASE;
559 msix_base = readl(msix_base_addr);
560 func = (func - msix_base)/QLCNIC_MSIX_TBL_PGSIZE;
561 adapter->ahw->pci_func = func;
562
563 /* Determine function privilege level */
564 priv_op = adapter->ahw->pci_base0 + QLCNIC_DRV_OP_MODE;
565 op_mode = readl(priv_op);
566 if (op_mode == QLC_DEV_DRV_DEFAULT)
567 priv_level = QLCNIC_MGMT_FUNC;
568 else
569 priv_level = QLC_DEV_GET_DRV(op_mode, adapter->ahw->pci_func);
570
571 if (priv_level == QLCNIC_NON_PRIV_FUNC) {
572 adapter->op_mode = QLCNIC_NON_PRIV_FUNC;
573 dev_info(&adapter->pdev->dev,
574 "HAL Version: %d Non Privileged function\n",
575 adapter->fw_hal_version);
576 adapter->nic_ops = &qlcnic_vf_ops;
577 } else
578 adapter->nic_ops = &qlcnic_ops;
579 }
580
581 static int
582 qlcnic_setup_pci_map(struct qlcnic_adapter *adapter)
583 {
584 void __iomem *mem_ptr0 = NULL;
585 resource_size_t mem_base;
586 unsigned long mem_len, pci_len0 = 0;
587
588 struct pci_dev *pdev = adapter->pdev;
589
590 /* remap phys address */
591 mem_base = pci_resource_start(pdev, 0); /* 0 is for BAR 0 */
592 mem_len = pci_resource_len(pdev, 0);
593
594 if (mem_len == QLCNIC_PCI_2MB_SIZE) {
595
596 mem_ptr0 = pci_ioremap_bar(pdev, 0);
597 if (mem_ptr0 == NULL) {
598 dev_err(&pdev->dev, "failed to map PCI bar 0\n");
599 return -EIO;
600 }
601 pci_len0 = mem_len;
602 } else {
603 return -EIO;
604 }
605
606 dev_info(&pdev->dev, "%dMB memory map\n", (int)(mem_len>>20));
607
608 adapter->ahw->pci_base0 = mem_ptr0;
609 adapter->ahw->pci_len0 = pci_len0;
610
611 qlcnic_check_vf(adapter);
612
613 adapter->ahw->ocm_win_crb = qlcnic_get_ioaddr(adapter,
614 QLCNIC_PCIX_PS_REG(PCIX_OCM_WINDOW_REG(
615 adapter->ahw->pci_func)));
616
617 return 0;
618 }
619
620 static void get_brd_name(struct qlcnic_adapter *adapter, char *name)
621 {
622 struct pci_dev *pdev = adapter->pdev;
623 int i, found = 0;
624
625 for (i = 0; i < NUM_SUPPORTED_BOARDS; ++i) {
626 if (qlcnic_boards[i].vendor == pdev->vendor &&
627 qlcnic_boards[i].device == pdev->device &&
628 qlcnic_boards[i].sub_vendor == pdev->subsystem_vendor &&
629 qlcnic_boards[i].sub_device == pdev->subsystem_device) {
630 sprintf(name, "%pM: %s",
631 adapter->mac_addr,
632 qlcnic_boards[i].short_name);
633 found = 1;
634 break;
635 }
636
637 }
638
639 if (!found)
640 sprintf(name, "%pM Gigabit Ethernet", adapter->mac_addr);
641 }
642
643 static void
644 qlcnic_check_options(struct qlcnic_adapter *adapter)
645 {
646 u32 fw_major, fw_minor, fw_build, prev_fw_version;
647 struct pci_dev *pdev = adapter->pdev;
648 struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
649
650 prev_fw_version = adapter->fw_version;
651
652 fw_major = QLCRD32(adapter, QLCNIC_FW_VERSION_MAJOR);
653 fw_minor = QLCRD32(adapter, QLCNIC_FW_VERSION_MINOR);
654 fw_build = QLCRD32(adapter, QLCNIC_FW_VERSION_SUB);
655
656 adapter->fw_version = QLCNIC_VERSION_CODE(fw_major, fw_minor, fw_build);
657
658 if (adapter->op_mode != QLCNIC_NON_PRIV_FUNC) {
659 if (fw_dump->tmpl_hdr == NULL ||
660 adapter->fw_version > prev_fw_version) {
661 if (fw_dump->tmpl_hdr)
662 vfree(fw_dump->tmpl_hdr);
663 if (!qlcnic_fw_cmd_get_minidump_temp(adapter))
664 dev_info(&pdev->dev,
665 "Supports FW dump capability\n");
666 }
667 }
668
669 dev_info(&pdev->dev, "firmware v%d.%d.%d\n",
670 fw_major, fw_minor, fw_build);
671 if (adapter->ahw->port_type == QLCNIC_XGBE) {
672 if (adapter->flags & QLCNIC_ESWITCH_ENABLED) {
673 adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_VF;
674 adapter->max_rxd = MAX_RCV_DESCRIPTORS_VF;
675 } else {
676 adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_10G;
677 adapter->max_rxd = MAX_RCV_DESCRIPTORS_10G;
678 }
679
680 adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G;
681 adapter->max_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G;
682
683 } else if (adapter->ahw->port_type == QLCNIC_GBE) {
684 adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_1G;
685 adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G;
686 adapter->max_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G;
687 adapter->max_rxd = MAX_RCV_DESCRIPTORS_1G;
688 }
689
690 adapter->msix_supported = !!use_msi_x;
691
692 adapter->num_txd = MAX_CMD_DESCRIPTORS;
693
694 adapter->max_rds_rings = MAX_RDS_RINGS;
695 }
696
697 static int
698 qlcnic_initialize_nic(struct qlcnic_adapter *adapter)
699 {
700 int err;
701 struct qlcnic_info nic_info;
702
703 err = qlcnic_get_nic_info(adapter, &nic_info, adapter->ahw->pci_func);
704 if (err)
705 return err;
706
707 adapter->physical_port = (u8)nic_info.phys_port;
708 adapter->switch_mode = nic_info.switch_mode;
709 adapter->max_tx_ques = nic_info.max_tx_ques;
710 adapter->max_rx_ques = nic_info.max_rx_ques;
711 adapter->capabilities = nic_info.capabilities;
712 adapter->max_mac_filters = nic_info.max_mac_filters;
713 adapter->max_mtu = nic_info.max_mtu;
714
715 if (adapter->capabilities & BIT_6)
716 adapter->flags |= QLCNIC_ESWITCH_ENABLED;
717 else
718 adapter->flags &= ~QLCNIC_ESWITCH_ENABLED;
719
720 return err;
721 }
722
723 static void
724 qlcnic_set_vlan_config(struct qlcnic_adapter *adapter,
725 struct qlcnic_esw_func_cfg *esw_cfg)
726 {
727 if (esw_cfg->discard_tagged)
728 adapter->flags &= ~QLCNIC_TAGGING_ENABLED;
729 else
730 adapter->flags |= QLCNIC_TAGGING_ENABLED;
731
732 if (esw_cfg->vlan_id)
733 adapter->pvid = esw_cfg->vlan_id;
734 else
735 adapter->pvid = 0;
736 }
737
738 static int
739 qlcnic_vlan_rx_add(struct net_device *netdev, u16 vid)
740 {
741 struct qlcnic_adapter *adapter = netdev_priv(netdev);
742 set_bit(vid, adapter->vlans);
743 return 0;
744 }
745
746 static int
747 qlcnic_vlan_rx_del(struct net_device *netdev, u16 vid)
748 {
749 struct qlcnic_adapter *adapter = netdev_priv(netdev);
750
751 qlcnic_restore_indev_addr(netdev, NETDEV_DOWN);
752 clear_bit(vid, adapter->vlans);
753 return 0;
754 }
755
756 static void
757 qlcnic_set_eswitch_port_features(struct qlcnic_adapter *adapter,
758 struct qlcnic_esw_func_cfg *esw_cfg)
759 {
760 adapter->flags &= ~(QLCNIC_MACSPOOF | QLCNIC_MAC_OVERRIDE_DISABLED |
761 QLCNIC_PROMISC_DISABLED);
762
763 if (esw_cfg->mac_anti_spoof)
764 adapter->flags |= QLCNIC_MACSPOOF;
765
766 if (!esw_cfg->mac_override)
767 adapter->flags |= QLCNIC_MAC_OVERRIDE_DISABLED;
768
769 if (!esw_cfg->promisc_mode)
770 adapter->flags |= QLCNIC_PROMISC_DISABLED;
771
772 qlcnic_set_netdev_features(adapter, esw_cfg);
773 }
774
775 static int
776 qlcnic_set_eswitch_port_config(struct qlcnic_adapter *adapter)
777 {
778 struct qlcnic_esw_func_cfg esw_cfg;
779
780 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
781 return 0;
782
783 esw_cfg.pci_func = adapter->ahw->pci_func;
784 if (qlcnic_get_eswitch_port_config(adapter, &esw_cfg))
785 return -EIO;
786 qlcnic_set_vlan_config(adapter, &esw_cfg);
787 qlcnic_set_eswitch_port_features(adapter, &esw_cfg);
788
789 return 0;
790 }
791
792 static void
793 qlcnic_set_netdev_features(struct qlcnic_adapter *adapter,
794 struct qlcnic_esw_func_cfg *esw_cfg)
795 {
796 struct net_device *netdev = adapter->netdev;
797 netdev_features_t features, vlan_features;
798
799 features = (NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
800 NETIF_F_IPV6_CSUM | NETIF_F_GRO);
801 vlan_features = (NETIF_F_SG | NETIF_F_IP_CSUM |
802 NETIF_F_IPV6_CSUM | NETIF_F_HW_VLAN_FILTER);
803
804 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_TSO) {
805 features |= (NETIF_F_TSO | NETIF_F_TSO6);
806 vlan_features |= (NETIF_F_TSO | NETIF_F_TSO6);
807 }
808
809 if (netdev->features & NETIF_F_LRO)
810 features |= NETIF_F_LRO;
811
812 if (esw_cfg->offload_flags & BIT_0) {
813 netdev->features |= features;
814 if (!(esw_cfg->offload_flags & BIT_1))
815 netdev->features &= ~NETIF_F_TSO;
816 if (!(esw_cfg->offload_flags & BIT_2))
817 netdev->features &= ~NETIF_F_TSO6;
818 } else {
819 netdev->features &= ~features;
820 }
821
822 netdev->vlan_features = (features & vlan_features);
823 }
824
825 static int
826 qlcnic_check_eswitch_mode(struct qlcnic_adapter *adapter)
827 {
828 void __iomem *priv_op;
829 u32 op_mode, priv_level;
830 int err = 0;
831
832 err = qlcnic_initialize_nic(adapter);
833 if (err)
834 return err;
835
836 if (adapter->flags & QLCNIC_ADAPTER_INITIALIZED)
837 return 0;
838
839 priv_op = adapter->ahw->pci_base0 + QLCNIC_DRV_OP_MODE;
840 op_mode = readl(priv_op);
841 priv_level = QLC_DEV_GET_DRV(op_mode, adapter->ahw->pci_func);
842
843 if (op_mode == QLC_DEV_DRV_DEFAULT)
844 priv_level = QLCNIC_MGMT_FUNC;
845 else
846 priv_level = QLC_DEV_GET_DRV(op_mode, adapter->ahw->pci_func);
847
848 if (adapter->flags & QLCNIC_ESWITCH_ENABLED) {
849 if (priv_level == QLCNIC_MGMT_FUNC) {
850 adapter->op_mode = QLCNIC_MGMT_FUNC;
851 err = qlcnic_init_pci_info(adapter);
852 if (err)
853 return err;
854 /* Set privilege level for other functions */
855 qlcnic_set_function_modes(adapter);
856 dev_info(&adapter->pdev->dev,
857 "HAL Version: %d, Management function\n",
858 adapter->fw_hal_version);
859 } else if (priv_level == QLCNIC_PRIV_FUNC) {
860 adapter->op_mode = QLCNIC_PRIV_FUNC;
861 dev_info(&adapter->pdev->dev,
862 "HAL Version: %d, Privileged function\n",
863 adapter->fw_hal_version);
864 }
865 }
866
867 adapter->flags |= QLCNIC_ADAPTER_INITIALIZED;
868
869 return err;
870 }
871
872 static int
873 qlcnic_set_default_offload_settings(struct qlcnic_adapter *adapter)
874 {
875 struct qlcnic_esw_func_cfg esw_cfg;
876 struct qlcnic_npar_info *npar;
877 u8 i;
878
879 if (adapter->need_fw_reset)
880 return 0;
881
882 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
883 if (adapter->npars[i].type != QLCNIC_TYPE_NIC)
884 continue;
885 memset(&esw_cfg, 0, sizeof(struct qlcnic_esw_func_cfg));
886 esw_cfg.pci_func = i;
887 esw_cfg.offload_flags = BIT_0;
888 esw_cfg.mac_override = BIT_0;
889 esw_cfg.promisc_mode = BIT_0;
890 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_TSO)
891 esw_cfg.offload_flags |= (BIT_1 | BIT_2);
892 if (qlcnic_config_switch_port(adapter, &esw_cfg))
893 return -EIO;
894 npar = &adapter->npars[i];
895 npar->pvid = esw_cfg.vlan_id;
896 npar->mac_override = esw_cfg.mac_override;
897 npar->mac_anti_spoof = esw_cfg.mac_anti_spoof;
898 npar->discard_tagged = esw_cfg.discard_tagged;
899 npar->promisc_mode = esw_cfg.promisc_mode;
900 npar->offload_flags = esw_cfg.offload_flags;
901 }
902
903 return 0;
904 }
905
906 static int
907 qlcnic_reset_eswitch_config(struct qlcnic_adapter *adapter,
908 struct qlcnic_npar_info *npar, int pci_func)
909 {
910 struct qlcnic_esw_func_cfg esw_cfg;
911 esw_cfg.op_mode = QLCNIC_PORT_DEFAULTS;
912 esw_cfg.pci_func = pci_func;
913 esw_cfg.vlan_id = npar->pvid;
914 esw_cfg.mac_override = npar->mac_override;
915 esw_cfg.discard_tagged = npar->discard_tagged;
916 esw_cfg.mac_anti_spoof = npar->mac_anti_spoof;
917 esw_cfg.offload_flags = npar->offload_flags;
918 esw_cfg.promisc_mode = npar->promisc_mode;
919 if (qlcnic_config_switch_port(adapter, &esw_cfg))
920 return -EIO;
921
922 esw_cfg.op_mode = QLCNIC_ADD_VLAN;
923 if (qlcnic_config_switch_port(adapter, &esw_cfg))
924 return -EIO;
925
926 return 0;
927 }
928
929 static int
930 qlcnic_reset_npar_config(struct qlcnic_adapter *adapter)
931 {
932 int i, err;
933 struct qlcnic_npar_info *npar;
934 struct qlcnic_info nic_info;
935
936 if (!adapter->need_fw_reset)
937 return 0;
938
939 /* Set the NPAR config data after FW reset */
940 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
941 npar = &adapter->npars[i];
942 if (npar->type != QLCNIC_TYPE_NIC)
943 continue;
944 err = qlcnic_get_nic_info(adapter, &nic_info, i);
945 if (err)
946 return err;
947 nic_info.min_tx_bw = npar->min_bw;
948 nic_info.max_tx_bw = npar->max_bw;
949 err = qlcnic_set_nic_info(adapter, &nic_info);
950 if (err)
951 return err;
952
953 if (npar->enable_pm) {
954 err = qlcnic_config_port_mirroring(adapter,
955 npar->dest_npar, 1, i);
956 if (err)
957 return err;
958 }
959 err = qlcnic_reset_eswitch_config(adapter, npar, i);
960 if (err)
961 return err;
962 }
963 return 0;
964 }
965
966 static int qlcnic_check_npar_opertional(struct qlcnic_adapter *adapter)
967 {
968 u8 npar_opt_timeo = QLCNIC_DEV_NPAR_OPER_TIMEO;
969 u32 npar_state;
970
971 if (adapter->op_mode == QLCNIC_MGMT_FUNC)
972 return 0;
973
974 npar_state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE);
975 while (npar_state != QLCNIC_DEV_NPAR_OPER && --npar_opt_timeo) {
976 msleep(1000);
977 npar_state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE);
978 }
979 if (!npar_opt_timeo) {
980 dev_err(&adapter->pdev->dev,
981 "Timed out waiting for NPAR state to become operational\n");
982 return -EIO;
983 }
984 return 0;
985 }
986
987 static int
988 qlcnic_set_mgmt_operations(struct qlcnic_adapter *adapter)
989 {
990 int err;
991
992 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED) ||
993 adapter->op_mode != QLCNIC_MGMT_FUNC)
994 return 0;
995
996 err = qlcnic_set_default_offload_settings(adapter);
997 if (err)
998 return err;
999
1000 err = qlcnic_reset_npar_config(adapter);
1001 if (err)
1002 return err;
1003
1004 qlcnic_dev_set_npar_ready(adapter);
1005
1006 return err;
1007 }
1008
1009 static int
1010 qlcnic_start_firmware(struct qlcnic_adapter *adapter)
1011 {
1012 int err;
1013
1014 err = qlcnic_can_start_firmware(adapter);
1015 if (err < 0)
1016 return err;
1017 else if (!err)
1018 goto check_fw_status;
1019
1020 if (load_fw_file)
1021 qlcnic_request_firmware(adapter);
1022 else {
1023 err = qlcnic_check_flash_fw_ver(adapter);
1024 if (err)
1025 goto err_out;
1026
1027 adapter->fw_type = QLCNIC_FLASH_ROMIMAGE;
1028 }
1029
1030 err = qlcnic_need_fw_reset(adapter);
1031 if (err == 0)
1032 goto check_fw_status;
1033
1034 err = qlcnic_pinit_from_rom(adapter);
1035 if (err)
1036 goto err_out;
1037
1038 err = qlcnic_load_firmware(adapter);
1039 if (err)
1040 goto err_out;
1041
1042 qlcnic_release_firmware(adapter);
1043 QLCWR32(adapter, CRB_DRIVER_VERSION, QLCNIC_DRIVER_VERSION);
1044
1045 check_fw_status:
1046 err = qlcnic_check_fw_status(adapter);
1047 if (err)
1048 goto err_out;
1049
1050 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_READY);
1051 qlcnic_idc_debug_info(adapter, 1);
1052
1053 err = qlcnic_check_eswitch_mode(adapter);
1054 if (err) {
1055 dev_err(&adapter->pdev->dev,
1056 "Memory allocation failed for eswitch\n");
1057 goto err_out;
1058 }
1059 err = qlcnic_set_mgmt_operations(adapter);
1060 if (err)
1061 goto err_out;
1062
1063 qlcnic_check_options(adapter);
1064 adapter->need_fw_reset = 0;
1065
1066 qlcnic_release_firmware(adapter);
1067 return 0;
1068
1069 err_out:
1070 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_FAILED);
1071 dev_err(&adapter->pdev->dev, "Device state set to failed\n");
1072
1073 qlcnic_release_firmware(adapter);
1074 return err;
1075 }
1076
1077 static int
1078 qlcnic_request_irq(struct qlcnic_adapter *adapter)
1079 {
1080 irq_handler_t handler;
1081 struct qlcnic_host_sds_ring *sds_ring;
1082 int err, ring;
1083
1084 unsigned long flags = 0;
1085 struct net_device *netdev = adapter->netdev;
1086 struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
1087
1088 if (adapter->diag_test == QLCNIC_INTERRUPT_TEST) {
1089 handler = qlcnic_tmp_intr;
1090 if (!QLCNIC_IS_MSI_FAMILY(adapter))
1091 flags |= IRQF_SHARED;
1092
1093 } else {
1094 if (adapter->flags & QLCNIC_MSIX_ENABLED)
1095 handler = qlcnic_msix_intr;
1096 else if (adapter->flags & QLCNIC_MSI_ENABLED)
1097 handler = qlcnic_msi_intr;
1098 else {
1099 flags |= IRQF_SHARED;
1100 handler = qlcnic_intr;
1101 }
1102 }
1103 adapter->irq = netdev->irq;
1104
1105 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
1106 sds_ring = &recv_ctx->sds_rings[ring];
1107 sprintf(sds_ring->name, "%s[%d]", netdev->name, ring);
1108 err = request_irq(sds_ring->irq, handler,
1109 flags, sds_ring->name, sds_ring);
1110 if (err)
1111 return err;
1112 }
1113
1114 return 0;
1115 }
1116
1117 static void
1118 qlcnic_free_irq(struct qlcnic_adapter *adapter)
1119 {
1120 int ring;
1121 struct qlcnic_host_sds_ring *sds_ring;
1122
1123 struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
1124
1125 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
1126 sds_ring = &recv_ctx->sds_rings[ring];
1127 free_irq(sds_ring->irq, sds_ring);
1128 }
1129 }
1130
1131 static int
1132 __qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
1133 {
1134 int ring;
1135 struct qlcnic_host_rds_ring *rds_ring;
1136
1137 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
1138 return -EIO;
1139
1140 if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
1141 return 0;
1142 if (qlcnic_set_eswitch_port_config(adapter))
1143 return -EIO;
1144
1145 if (qlcnic_fw_create_ctx(adapter))
1146 return -EIO;
1147
1148 for (ring = 0; ring < adapter->max_rds_rings; ring++) {
1149 rds_ring = &adapter->recv_ctx->rds_rings[ring];
1150 qlcnic_post_rx_buffers(adapter, rds_ring);
1151 }
1152
1153 qlcnic_set_multi(netdev);
1154 qlcnic_fw_cmd_set_mtu(adapter, netdev->mtu);
1155
1156 adapter->ahw->linkup = 0;
1157
1158 if (adapter->max_sds_rings > 1)
1159 qlcnic_config_rss(adapter, 1);
1160
1161 qlcnic_config_intr_coalesce(adapter);
1162
1163 if (netdev->features & NETIF_F_LRO)
1164 qlcnic_config_hw_lro(adapter, QLCNIC_LRO_ENABLED);
1165
1166 qlcnic_napi_enable(adapter);
1167
1168 qlcnic_linkevent_request(adapter, 1);
1169
1170 adapter->reset_context = 0;
1171 set_bit(__QLCNIC_DEV_UP, &adapter->state);
1172 return 0;
1173 }
1174
1175 /* Used during resume and firmware recovery. */
1176
1177 static int
1178 qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
1179 {
1180 int err = 0;
1181
1182 rtnl_lock();
1183 if (netif_running(netdev))
1184 err = __qlcnic_up(adapter, netdev);
1185 rtnl_unlock();
1186
1187 return err;
1188 }
1189
1190 static void
1191 __qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
1192 {
1193 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
1194 return;
1195
1196 if (!test_and_clear_bit(__QLCNIC_DEV_UP, &adapter->state))
1197 return;
1198
1199 smp_mb();
1200 spin_lock(&adapter->tx_clean_lock);
1201 netif_carrier_off(netdev);
1202 netif_tx_disable(netdev);
1203
1204 qlcnic_free_mac_list(adapter);
1205
1206 if (adapter->fhash.fnum)
1207 qlcnic_delete_lb_filters(adapter);
1208
1209 qlcnic_nic_set_promisc(adapter, QLCNIC_NIU_NON_PROMISC_MODE);
1210
1211 qlcnic_napi_disable(adapter);
1212
1213 qlcnic_fw_destroy_ctx(adapter);
1214
1215 qlcnic_reset_rx_buffers_list(adapter);
1216 qlcnic_release_tx_buffers(adapter);
1217 spin_unlock(&adapter->tx_clean_lock);
1218 }
1219
1220 /* Used during suspend and firmware recovery. */
1221
1222 static void
1223 qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
1224 {
1225 rtnl_lock();
1226 if (netif_running(netdev))
1227 __qlcnic_down(adapter, netdev);
1228 rtnl_unlock();
1229
1230 }
1231
1232 static int
1233 qlcnic_attach(struct qlcnic_adapter *adapter)
1234 {
1235 struct net_device *netdev = adapter->netdev;
1236 struct pci_dev *pdev = adapter->pdev;
1237 int err;
1238
1239 if (adapter->is_up == QLCNIC_ADAPTER_UP_MAGIC)
1240 return 0;
1241
1242 err = qlcnic_napi_add(adapter, netdev);
1243 if (err)
1244 return err;
1245
1246 err = qlcnic_alloc_sw_resources(adapter);
1247 if (err) {
1248 dev_err(&pdev->dev, "Error in setting sw resources\n");
1249 goto err_out_napi_del;
1250 }
1251
1252 err = qlcnic_alloc_hw_resources(adapter);
1253 if (err) {
1254 dev_err(&pdev->dev, "Error in setting hw resources\n");
1255 goto err_out_free_sw;
1256 }
1257
1258 err = qlcnic_request_irq(adapter);
1259 if (err) {
1260 dev_err(&pdev->dev, "failed to setup interrupt\n");
1261 goto err_out_free_hw;
1262 }
1263
1264 qlcnic_create_sysfs_entries(adapter);
1265
1266 adapter->is_up = QLCNIC_ADAPTER_UP_MAGIC;
1267 return 0;
1268
1269 err_out_free_hw:
1270 qlcnic_free_hw_resources(adapter);
1271 err_out_free_sw:
1272 qlcnic_free_sw_resources(adapter);
1273 err_out_napi_del:
1274 qlcnic_napi_del(adapter);
1275 return err;
1276 }
1277
1278 static void
1279 qlcnic_detach(struct qlcnic_adapter *adapter)
1280 {
1281 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
1282 return;
1283
1284 qlcnic_remove_sysfs_entries(adapter);
1285
1286 qlcnic_free_hw_resources(adapter);
1287 qlcnic_release_rx_buffers(adapter);
1288 qlcnic_free_irq(adapter);
1289 qlcnic_napi_del(adapter);
1290 qlcnic_free_sw_resources(adapter);
1291
1292 adapter->is_up = 0;
1293 }
1294
1295 void qlcnic_diag_free_res(struct net_device *netdev, int max_sds_rings)
1296 {
1297 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1298 struct qlcnic_host_sds_ring *sds_ring;
1299 int ring;
1300
1301 clear_bit(__QLCNIC_DEV_UP, &adapter->state);
1302 if (adapter->diag_test == QLCNIC_INTERRUPT_TEST) {
1303 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
1304 sds_ring = &adapter->recv_ctx->sds_rings[ring];
1305 qlcnic_disable_int(sds_ring);
1306 }
1307 }
1308
1309 qlcnic_fw_destroy_ctx(adapter);
1310
1311 qlcnic_detach(adapter);
1312
1313 adapter->diag_test = 0;
1314 adapter->max_sds_rings = max_sds_rings;
1315
1316 if (qlcnic_attach(adapter))
1317 goto out;
1318
1319 if (netif_running(netdev))
1320 __qlcnic_up(adapter, netdev);
1321 out:
1322 netif_device_attach(netdev);
1323 }
1324
1325 static int qlcnic_alloc_adapter_resources(struct qlcnic_adapter *adapter)
1326 {
1327 int err = 0;
1328 adapter->ahw = kzalloc(sizeof(struct qlcnic_hardware_context),
1329 GFP_KERNEL);
1330 if (!adapter->ahw) {
1331 dev_err(&adapter->pdev->dev,
1332 "Failed to allocate recv ctx resources for adapter\n");
1333 err = -ENOMEM;
1334 goto err_out;
1335 }
1336 adapter->recv_ctx = kzalloc(sizeof(struct qlcnic_recv_context),
1337 GFP_KERNEL);
1338 if (!adapter->recv_ctx) {
1339 dev_err(&adapter->pdev->dev,
1340 "Failed to allocate recv ctx resources for adapter\n");
1341 kfree(adapter->ahw);
1342 adapter->ahw = NULL;
1343 err = -ENOMEM;
1344 goto err_out;
1345 }
1346 /* Initialize interrupt coalesce parameters */
1347 adapter->ahw->coal.flag = QLCNIC_INTR_DEFAULT;
1348 adapter->ahw->coal.rx_time_us = QLCNIC_DEFAULT_INTR_COALESCE_RX_TIME_US;
1349 adapter->ahw->coal.rx_packets = QLCNIC_DEFAULT_INTR_COALESCE_RX_PACKETS;
1350 err_out:
1351 return err;
1352 }
1353
1354 static void qlcnic_free_adapter_resources(struct qlcnic_adapter *adapter)
1355 {
1356 kfree(adapter->recv_ctx);
1357 adapter->recv_ctx = NULL;
1358
1359 if (adapter->ahw->fw_dump.tmpl_hdr) {
1360 vfree(adapter->ahw->fw_dump.tmpl_hdr);
1361 adapter->ahw->fw_dump.tmpl_hdr = NULL;
1362 }
1363 kfree(adapter->ahw);
1364 adapter->ahw = NULL;
1365 }
1366
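/*
 * Tear down the normal configuration and re-attach with a single SDS
 * ring so the requested diagnostic (interrupt or loopback test) can run.
 */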
1367 int qlcnic_diag_alloc_res(struct net_device *netdev, int test)
1368 {
1369 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1370 struct qlcnic_host_sds_ring *sds_ring;
1371 struct qlcnic_host_rds_ring *rds_ring;
1372 int ring;
1373 int ret;
1374
1375 netif_device_detach(netdev);
1376
1377 if (netif_running(netdev))
1378 __qlcnic_down(adapter, netdev);
1379
1380 qlcnic_detach(adapter);
1381
1382 adapter->max_sds_rings = 1;
1383 adapter->diag_test = test;
1384
1385 ret = qlcnic_attach(adapter);
1386 if (ret) {
1387 netif_device_attach(netdev);
1388 return ret;
1389 }
1390
1391 ret = qlcnic_fw_create_ctx(adapter);
1392 if (ret) {
1393 qlcnic_detach(adapter);
1394 netif_device_attach(netdev);
1395 return ret;
1396 }
1397
1398 for (ring = 0; ring < adapter->max_rds_rings; ring++) {
1399 rds_ring = &adapter->recv_ctx->rds_rings[ring];
1400 qlcnic_post_rx_buffers(adapter, rds_ring);
1401 }
1402
1403 if (adapter->diag_test == QLCNIC_INTERRUPT_TEST) {
1404 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
1405 sds_ring = &adapter->recv_ctx->sds_rings[ring];
1406 qlcnic_enable_int(sds_ring);
1407 }
1408 }
1409
1410 if (adapter->diag_test == QLCNIC_LOOPBACK_TEST) {
1411 adapter->ahw->loopback_state = 0;
1412 qlcnic_linkevent_request(adapter, 1);
1413 }
1414
1415 set_bit(__QLCNIC_DEV_UP, &adapter->state);
1416
1417 return 0;
1418 }
1419
1420 /* Reset context in hardware only */
1421 static int
1422 qlcnic_reset_hw_context(struct qlcnic_adapter *adapter)
1423 {
1424 struct net_device *netdev = adapter->netdev;
1425
1426 if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
1427 return -EBUSY;
1428
1429 netif_device_detach(netdev);
1430
1431 qlcnic_down(adapter, netdev);
1432
1433 qlcnic_up(adapter, netdev);
1434
1435 netif_device_attach(netdev);
1436
1437 clear_bit(__QLCNIC_RESETTING, &adapter->state);
1438 return 0;
1439 }
1440
1441 int
1442 qlcnic_reset_context(struct qlcnic_adapter *adapter)
1443 {
1444 int err = 0;
1445 struct net_device *netdev = adapter->netdev;
1446
1447 if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
1448 return -EBUSY;
1449
1450 if (adapter->is_up == QLCNIC_ADAPTER_UP_MAGIC) {
1451
1452 netif_device_detach(netdev);
1453
1454 if (netif_running(netdev))
1455 __qlcnic_down(adapter, netdev);
1456
1457 qlcnic_detach(adapter);
1458
1459 if (netif_running(netdev)) {
1460 err = qlcnic_attach(adapter);
1461 if (!err)
1462 __qlcnic_up(adapter, netdev);
1463 }
1464
1465 netif_device_attach(netdev);
1466 }
1467
1468 clear_bit(__QLCNIC_RESETTING, &adapter->state);
1469 return err;
1470 }
1471
1472 static int
1473 qlcnic_setup_netdev(struct qlcnic_adapter *adapter,
1474 struct net_device *netdev, u8 pci_using_dac)
1475 {
1476 int err;
1477 struct pci_dev *pdev = adapter->pdev;
1478
1479 adapter->mc_enabled = 0;
1480 adapter->max_mc_count = 38;
1481
1482 netdev->netdev_ops = &qlcnic_netdev_ops;
1483 netdev->watchdog_timeo = 5*HZ;
1484
1485 qlcnic_change_mtu(netdev, netdev->mtu);
1486
1487 SET_ETHTOOL_OPS(netdev, &qlcnic_ethtool_ops);
1488
1489 netdev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
1490 NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM;
1491
1492 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_TSO)
1493 netdev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
1494 if (pci_using_dac)
1495 netdev->hw_features |= NETIF_F_HIGHDMA;
1496
1497 netdev->vlan_features = netdev->hw_features;
1498
1499 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_FVLANTX)
1500 netdev->hw_features |= NETIF_F_HW_VLAN_TX;
1501 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO)
1502 netdev->hw_features |= NETIF_F_LRO;
1503
1504 netdev->features |= netdev->hw_features |
1505 NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
1506
1507 netdev->irq = adapter->msix_entries[0].vector;
1508
1509 err = register_netdev(netdev);
1510 if (err) {
1511 dev_err(&pdev->dev, "failed to register net device\n");
1512 return err;
1513 }
1514
1515 return 0;
1516 }
1517
1518 static int qlcnic_set_dma_mask(struct pci_dev *pdev, u8 *pci_using_dac)
1519 {
1520 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
1521 !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
1522 *pci_using_dac = 1;
1523 else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) &&
1524 !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))
1525 *pci_using_dac = 0;
1526 else {
1527 dev_err(&pdev->dev, "Unable to set DMA mask, aborting\n");
1528 return -EIO;
1529 }
1530
1531 return 0;
1532 }
1533
1534 static int
1535 qlcnic_alloc_msix_entries(struct qlcnic_adapter *adapter, u16 count)
1536 {
1537 adapter->msix_entries = kcalloc(count, sizeof(struct msix_entry),
1538 GFP_KERNEL);
1539
1540 if (adapter->msix_entries)
1541 return 0;
1542
1543 dev_err(&adapter->pdev->dev, "failed allocating msix_entries\n");
1544 return -ENOMEM;
1545 }
1546
1547 static int __devinit
1548 qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1549 {
1550 struct net_device *netdev = NULL;
1551 struct qlcnic_adapter *adapter = NULL;
1552 int err;
1553 uint8_t revision_id;
1554 uint8_t pci_using_dac;
1555 char brd_name[QLCNIC_MAX_BOARD_NAME_LEN];
1556
1557 err = pci_enable_device(pdev);
1558 if (err)
1559 return err;
1560
1561 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
1562 err = -ENODEV;
1563 goto err_out_disable_pdev;
1564 }
1565
1566 err = qlcnic_set_dma_mask(pdev, &pci_using_dac);
1567 if (err)
1568 goto err_out_disable_pdev;
1569
1570 err = pci_request_regions(pdev, qlcnic_driver_name);
1571 if (err)
1572 goto err_out_disable_pdev;
1573
1574 pci_set_master(pdev);
1575 pci_enable_pcie_error_reporting(pdev);
1576
1577 netdev = alloc_etherdev(sizeof(struct qlcnic_adapter));
1578 if (!netdev) {
1579 dev_err(&pdev->dev, "failed to allocate net_device\n");
1580 err = -ENOMEM;
1581 goto err_out_free_res;
1582 }
1583
1584 SET_NETDEV_DEV(netdev, &pdev->dev);
1585
1586 adapter = netdev_priv(netdev);
1587 adapter->netdev = netdev;
1588 adapter->pdev = pdev;
1589
1590 if (qlcnic_alloc_adapter_resources(adapter))
1591 goto err_out_free_netdev;
1592
1593 adapter->dev_rst_time = jiffies;
1594 revision_id = pdev->revision;
1595 adapter->ahw->revision_id = revision_id;
1596 adapter->mac_learn = qlcnic_mac_learn;
1597
1598 rwlock_init(&adapter->ahw->crb_lock);
1599 mutex_init(&adapter->ahw->mem_lock);
1600
1601 spin_lock_init(&adapter->tx_clean_lock);
1602 INIT_LIST_HEAD(&adapter->mac_list);
1603
1604 err = qlcnic_setup_pci_map(adapter);
1605 if (err)
1606 goto err_out_free_hw;
1607
1608 /* This will be reset for mezz cards */
1609 adapter->portnum = adapter->ahw->pci_func;
1610
1611 err = qlcnic_get_board_info(adapter);
1612 if (err) {
1613 dev_err(&pdev->dev, "Error getting board config info.\n");
1614 goto err_out_iounmap;
1615 }
1616
1617 err = qlcnic_setup_idc_param(adapter);
1618 if (err)
1619 goto err_out_iounmap;
1620
1621 adapter->flags |= QLCNIC_NEED_FLR;
1622
1623 err = adapter->nic_ops->start_firmware(adapter);
1624 if (err) {
1625 dev_err(&pdev->dev, "Loading fw failed. Please reboot\n");
1626 goto err_out_decr_ref;
1627 }
1628
1629 if (qlcnic_read_mac_addr(adapter))
1630 dev_warn(&pdev->dev, "failed to read mac addr\n");
1631
1632 if (adapter->portnum == 0) {
1633 get_brd_name(adapter, brd_name);
1634
1635 pr_info("%s: %s Board Chip rev 0x%x\n",
1636 module_name(THIS_MODULE),
1637 brd_name, adapter->ahw->revision_id);
1638 }
1639
1640 qlcnic_clear_stats(adapter);
1641
1642 err = qlcnic_alloc_msix_entries(adapter, adapter->max_rx_ques);
1643 if (err)
1644 goto err_out_decr_ref;
1645
1646 qlcnic_setup_intr(adapter);
1647
1648 err = qlcnic_setup_netdev(adapter, netdev, pci_using_dac);
1649 if (err)
1650 goto err_out_disable_msi;
1651
1652 pci_set_drvdata(pdev, adapter);
1653
1654 qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY);
1655
1656 switch (adapter->ahw->port_type) {
1657 case QLCNIC_GBE:
1658 dev_info(&adapter->pdev->dev, "%s: GbE port initialized\n",
1659 adapter->netdev->name);
1660 break;
1661 case QLCNIC_XGBE:
1662 dev_info(&adapter->pdev->dev, "%s: XGbE port initialized\n",
1663 adapter->netdev->name);
1664 break;
1665 }
1666
1667 if (adapter->mac_learn)
1668 qlcnic_alloc_lb_filters_mem(adapter);
1669
1670 qlcnic_create_diag_entries(adapter);
1671
1672 return 0;
1673
1674 err_out_disable_msi:
1675 qlcnic_teardown_intr(adapter);
1676 kfree(adapter->msix_entries);
1677
1678 err_out_decr_ref:
1679 qlcnic_clr_all_drv_state(adapter, 0);
1680
1681 err_out_iounmap:
1682 qlcnic_cleanup_pci_map(adapter);
1683
1684 err_out_free_hw:
1685 qlcnic_free_adapter_resources(adapter);
1686
1687 err_out_free_netdev:
1688 free_netdev(netdev);
1689
1690 err_out_free_res:
1691 pci_release_regions(pdev);
1692
1693 err_out_disable_pdev:
1694 pci_set_drvdata(pdev, NULL);
1695 pci_disable_device(pdev);
1696 return err;
1697 }
1698
1699 static void __devexit qlcnic_remove(struct pci_dev *pdev)
1700 {
1701 struct qlcnic_adapter *adapter;
1702 struct net_device *netdev;
1703
1704 adapter = pci_get_drvdata(pdev);
1705 if (adapter == NULL)
1706 return;
1707
1708 netdev = adapter->netdev;
1709
1710 qlcnic_cancel_fw_work(adapter);
1711
1712 unregister_netdev(netdev);
1713
1714 qlcnic_detach(adapter);
1715
1716 if (adapter->npars != NULL)
1717 kfree(adapter->npars);
1718 if (adapter->eswitch != NULL)
1719 kfree(adapter->eswitch);
1720
1721 qlcnic_clr_all_drv_state(adapter, 0);
1722
1723 clear_bit(__QLCNIC_RESETTING, &adapter->state);
1724
1725 qlcnic_free_lb_filters_mem(adapter);
1726
1727 qlcnic_teardown_intr(adapter);
1728 kfree(adapter->msix_entries);
1729
1730 qlcnic_remove_diag_entries(adapter);
1731
1732 qlcnic_cleanup_pci_map(adapter);
1733
1734 qlcnic_release_firmware(adapter);
1735
1736 pci_disable_pcie_error_reporting(pdev);
1737 pci_release_regions(pdev);
1738 pci_disable_device(pdev);
1739 pci_set_drvdata(pdev, NULL);
1740
1741 qlcnic_free_adapter_resources(adapter);
1742 free_netdev(netdev);
1743 }
1744 static int __qlcnic_shutdown(struct pci_dev *pdev)
1745 {
1746 struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
1747 struct net_device *netdev = adapter->netdev;
1748 int retval;
1749
1750 netif_device_detach(netdev);
1751
1752 qlcnic_cancel_fw_work(adapter);
1753
1754 if (netif_running(netdev))
1755 qlcnic_down(adapter, netdev);
1756
1757 qlcnic_clr_all_drv_state(adapter, 0);
1758
1759 clear_bit(__QLCNIC_RESETTING, &adapter->state);
1760
1761 retval = pci_save_state(pdev);
1762 if (retval)
1763 return retval;
1764
1765 if (qlcnic_wol_supported(adapter)) {
1766 pci_enable_wake(pdev, PCI_D3cold, 1);
1767 pci_enable_wake(pdev, PCI_D3hot, 1);
1768 }
1769
1770 return 0;
1771 }
1772
1773 static void qlcnic_shutdown(struct pci_dev *pdev)
1774 {
1775 if (__qlcnic_shutdown(pdev))
1776 return;
1777
1778 pci_disable_device(pdev);
1779 }
1780
1781 #ifdef CONFIG_PM
1782 static int
1783 qlcnic_suspend(struct pci_dev *pdev, pm_message_t state)
1784 {
1785 int retval;
1786
1787 retval = __qlcnic_shutdown(pdev);
1788 if (retval)
1789 return retval;
1790
1791 pci_set_power_state(pdev, pci_choose_state(pdev, state));
1792 return 0;
1793 }
1794
1795 static int
1796 qlcnic_resume(struct pci_dev *pdev)
1797 {
1798 struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
1799 struct net_device *netdev = adapter->netdev;
1800 int err;
1801
1802 err = pci_enable_device(pdev);
1803 if (err)
1804 return err;
1805
1806 pci_set_power_state(pdev, PCI_D0);
1807 pci_set_master(pdev);
1808 pci_restore_state(pdev);
1809
1810 err = adapter->nic_ops->start_firmware(adapter);
1811 if (err) {
1812 dev_err(&pdev->dev, "failed to start firmware\n");
1813 return err;
1814 }
1815
1816 if (netif_running(netdev)) {
1817 err = qlcnic_up(adapter, netdev);
1818 if (err)
1819 goto done;
1820
1821 qlcnic_restore_indev_addr(netdev, NETDEV_UP);
1822 }
1823 done:
1824 netif_device_attach(netdev);
1825 qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY);
1826 return 0;
1827 }
1828 #endif
1829
1830 static int qlcnic_open(struct net_device *netdev)
1831 {
1832 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1833 int err;
1834
1835 netif_carrier_off(netdev);
1836
1837 err = qlcnic_attach(adapter);
1838 if (err)
1839 return err;
1840
1841 err = __qlcnic_up(adapter, netdev);
1842 if (err)
1843 goto err_out;
1844
1845 netif_start_queue(netdev);
1846
1847 return 0;
1848
1849 err_out:
1850 qlcnic_detach(adapter);
1851 return err;
1852 }
1853
1854 /*
1855 * qlcnic_close - Disables a network interface entry point
1856 */
1857 static int qlcnic_close(struct net_device *netdev)
1858 {
1859 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1860
1861 __qlcnic_down(adapter, netdev);
1862 return 0;
1863 }
1864
1865 void qlcnic_alloc_lb_filters_mem(struct qlcnic_adapter *adapter)
1866 {
1867 void *head;
1868 int i;
1869
1870 if (adapter->fhash.fmax && adapter->fhash.fhead)
1871 return;
1872
1873 spin_lock_init(&adapter->mac_learn_lock);
1874
1875 head = kcalloc(QLCNIC_LB_MAX_FILTERS, sizeof(struct hlist_head),
1876 GFP_KERNEL);
1877 if (!head)
1878 return;
1879
1880 adapter->fhash.fmax = QLCNIC_LB_MAX_FILTERS;
1881 adapter->fhash.fhead = head;
1882
1883 for (i = 0; i < adapter->fhash.fmax; i++)
1884 INIT_HLIST_HEAD(&adapter->fhash.fhead[i]);
1885 }
1886
1887 static void qlcnic_free_lb_filters_mem(struct qlcnic_adapter *adapter)
1888 {
1889 if (adapter->fhash.fmax && adapter->fhash.fhead)
1890 kfree(adapter->fhash.fhead);
1891
1892 adapter->fhash.fhead = NULL;
1893 adapter->fhash.fmax = 0;
1894 }
1895
1896 static void qlcnic_change_filter(struct qlcnic_adapter *adapter,
1897 u64 uaddr, __le16 vlan_id, struct qlcnic_host_tx_ring *tx_ring)
1898 {
1899 struct cmd_desc_type0 *hwdesc;
1900 struct qlcnic_nic_req *req;
1901 struct qlcnic_mac_req *mac_req;
1902 struct qlcnic_vlan_req *vlan_req;
1903 u32 producer;
1904 u64 word;
1905
1906 producer = tx_ring->producer;
1907 hwdesc = &tx_ring->desc_head[tx_ring->producer];
1908
1909 req = (struct qlcnic_nic_req *)hwdesc;
1910 memset(req, 0, sizeof(struct qlcnic_nic_req));
1911 req->qhdr = cpu_to_le64(QLCNIC_REQUEST << 23);
1912
1913 word = QLCNIC_MAC_EVENT | ((u64)(adapter->portnum) << 16);
1914 req->req_hdr = cpu_to_le64(word);
1915
1916 mac_req = (struct qlcnic_mac_req *)&(req->words[0]);
1917 mac_req->op = vlan_id ? QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_ADD;
1918 memcpy(mac_req->mac_addr, &uaddr, ETH_ALEN);
1919
1920 vlan_req = (struct qlcnic_vlan_req *)&req->words[1];
1921 vlan_req->vlan_id = vlan_id;
1922
1923 tx_ring->producer = get_next_index(producer, tx_ring->num_desc);
1924 smp_mb();
1925 }
1926
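/* Fold selected bits of the (u64) MAC address into a small hash used to
 * index the loopback filter table. */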
1927 #define QLCNIC_MAC_HASH(MAC)\
1928 ((((MAC) & 0x70000) >> 0x10) | (((MAC) & 0x70000000000ULL) >> 0x25))
1929
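/* Learn the source MAC/VLAN of an outgoing frame: refresh an existing
 * filter entry, or allocate one and program it through the Tx ring. */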
1930 static void
1931 qlcnic_send_filter(struct qlcnic_adapter *adapter,
1932 struct qlcnic_host_tx_ring *tx_ring,
1933 struct cmd_desc_type0 *first_desc,
1934 struct sk_buff *skb)
1935 {
1936 struct ethhdr *phdr = (struct ethhdr *)(skb->data);
1937 struct qlcnic_filter *fil, *tmp_fil;
1938 struct hlist_node *tmp_hnode, *n;
1939 struct hlist_head *head;
1940 u64 src_addr = 0;
1941 __le16 vlan_id = 0;
1942 u8 hindex;
1943
1944 if (!compare_ether_addr(phdr->h_source, adapter->mac_addr))
1945 return;
1946
1947 if (adapter->fhash.fnum >= adapter->fhash.fmax)
1948 return;
1949
1950 /* Only NPAR capable devices support VLAN based learning */
1951 if (adapter->flags & QLCNIC_ESWITCH_ENABLED)
1952 vlan_id = first_desc->vlan_TCI;
1953 memcpy(&src_addr, phdr->h_source, ETH_ALEN);
1954 hindex = QLCNIC_MAC_HASH(src_addr) & (QLCNIC_LB_MAX_FILTERS - 1);
1955 head = &(adapter->fhash.fhead[hindex]);
1956
1957 hlist_for_each_entry_safe(tmp_fil, tmp_hnode, n, head, fnode) {
1958 if (!memcmp(tmp_fil->faddr, &src_addr, ETH_ALEN) &&
1959 tmp_fil->vlan_id == vlan_id) {
1960
1961 if (jiffies >
1962 (QLCNIC_READD_AGE * HZ + tmp_fil->ftime))
1963 qlcnic_change_filter(adapter, src_addr, vlan_id,
1964 tx_ring);
1965 tmp_fil->ftime = jiffies;
1966 return;
1967 }
1968 }
1969
1970 fil = kzalloc(sizeof(struct qlcnic_filter), GFP_ATOMIC);
1971 if (!fil)
1972 return;
1973
1974 qlcnic_change_filter(adapter, src_addr, vlan_id, tx_ring);
1975
1976 fil->ftime = jiffies;
1977 fil->vlan_id = vlan_id;
1978 memcpy(fil->faddr, &src_addr, ETH_ALEN);
1979 spin_lock(&adapter->mac_learn_lock);
1980 hlist_add_head(&(fil->fnode), head);
1981 adapter->fhash.fnum++;
1982 spin_unlock(&adapter->mac_learn_lock);
1983 }
1984
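/* Fill the protocol-dependent fields of the first Tx descriptor: VLAN
 * tagging, checksum-offload opcode and, for LSO, the copied headers. */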
1985 static int
1986 qlcnic_tx_pkt(struct qlcnic_adapter *adapter,
1987 struct cmd_desc_type0 *first_desc,
1988 struct sk_buff *skb)
1989 {
1990 u8 opcode = 0, hdr_len = 0;
1991 u16 flags = 0, vlan_tci = 0;
1992 int copied, offset, copy_len;
1993 struct cmd_desc_type0 *hwdesc;
1994 struct vlan_ethhdr *vh;
1995 struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
1996 u16 protocol = ntohs(skb->protocol);
1997 u32 producer = tx_ring->producer;
1998
1999 if (protocol == ETH_P_8021Q) {
2000 vh = (struct vlan_ethhdr *)skb->data;
2001 flags = FLAGS_VLAN_TAGGED;
2002 vlan_tci = vh->h_vlan_TCI;
2003 } else if (vlan_tx_tag_present(skb)) {
2004 flags = FLAGS_VLAN_OOB;
2005 vlan_tci = vlan_tx_tag_get(skb);
2006 }
2007 if (unlikely(adapter->pvid)) {
2008 if (vlan_tci && !(adapter->flags & QLCNIC_TAGGING_ENABLED))
2009 return -EIO;
2010 if (vlan_tci && (adapter->flags & QLCNIC_TAGGING_ENABLED))
2011 goto set_flags;
2012
2013 flags = FLAGS_VLAN_OOB;
2014 vlan_tci = adapter->pvid;
2015 }
2016 set_flags:
2017 qlcnic_set_tx_vlan_tci(first_desc, vlan_tci);
2018 qlcnic_set_tx_flags_opcode(first_desc, flags, opcode);
2019
2020 if (*(skb->data) & BIT_0) {
2021 flags |= BIT_0;
2022 memcpy(&first_desc->eth_addr, skb->data, ETH_ALEN);
2023 }
2024 opcode = TX_ETHER_PKT;
2025 if ((adapter->netdev->features & (NETIF_F_TSO | NETIF_F_TSO6)) &&
2026 skb_shinfo(skb)->gso_size > 0) {
2027
2028 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
2029
2030 first_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
2031 first_desc->total_hdr_length = hdr_len;
2032
2033 opcode = (protocol == ETH_P_IPV6) ? TX_TCP_LSO6 : TX_TCP_LSO;
2034
2035 /* For LSO, we need to copy the MAC/IP/TCP headers into
2036 * the descriptor ring */
2037 copied = 0;
2038 offset = 2;
2039
2040 if (flags & FLAGS_VLAN_OOB) {
2041 first_desc->total_hdr_length += VLAN_HLEN;
2042 first_desc->tcp_hdr_offset = VLAN_HLEN;
2043 first_desc->ip_hdr_offset = VLAN_HLEN;
2044 /* Only in case of TSO on vlan device */
2045 flags |= FLAGS_VLAN_TAGGED;
2046
2047 /* Create a TSO vlan header template for firmware */
2048
2049 hwdesc = &tx_ring->desc_head[producer];
2050 tx_ring->cmd_buf_arr[producer].skb = NULL;
2051
2052 copy_len = min((int)sizeof(struct cmd_desc_type0) -
2053 offset, hdr_len + VLAN_HLEN);
2054
2055 vh = (struct vlan_ethhdr *)((char *) hwdesc + 2);
2056 skb_copy_from_linear_data(skb, vh, 12);
2057 vh->h_vlan_proto = htons(ETH_P_8021Q);
2058 vh->h_vlan_TCI = htons(vlan_tci);
2059
2060 skb_copy_from_linear_data_offset(skb, 12,
2061 (char *)vh + 16, copy_len - 16);
2062
2063 copied = copy_len - VLAN_HLEN;
2064 offset = 0;
2065
2066 producer = get_next_index(producer, tx_ring->num_desc);
2067 }
2068
2069 while (copied < hdr_len) {
2070
2071 copy_len = min((int)sizeof(struct cmd_desc_type0) -
2072 offset, (hdr_len - copied));
2073
2074 hwdesc = &tx_ring->desc_head[producer];
2075 tx_ring->cmd_buf_arr[producer].skb = NULL;
2076
2077 skb_copy_from_linear_data_offset(skb, copied,
2078 (char *) hwdesc + offset, copy_len);
2079
2080 copied += copy_len;
2081 offset = 0;
2082
2083 producer = get_next_index(producer, tx_ring->num_desc);
2084 }
2085
2086 tx_ring->producer = producer;
2087 smp_mb();
2088 adapter->stats.lso_frames++;
2089
2090 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
2091 u8 l4proto;
2092
2093 if (protocol == ETH_P_IP) {
2094 l4proto = ip_hdr(skb)->protocol;
2095
2096 if (l4proto == IPPROTO_TCP)
2097 opcode = TX_TCP_PKT;
2098 else if (l4proto == IPPROTO_UDP)
2099 opcode = TX_UDP_PKT;
2100 } else if (protocol == ETH_P_IPV6) {
2101 l4proto = ipv6_hdr(skb)->nexthdr;
2102
2103 if (l4proto == IPPROTO_TCP)
2104 opcode = TX_TCPV6_PKT;
2105 else if (l4proto == IPPROTO_UDP)
2106 opcode = TX_UDPV6_PKT;
2107 }
2108 }
2109 first_desc->tcp_hdr_offset += skb_transport_offset(skb);
2110 first_desc->ip_hdr_offset += skb_network_offset(skb);
2111 qlcnic_set_tx_flags_opcode(first_desc, flags, opcode);
2112
2113 return 0;
2114 }
2115
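/* DMA-map the skb head and every page fragment; unwind all mappings if
 * any single mapping fails. */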
2116 static int
2117 qlcnic_map_tx_skb(struct pci_dev *pdev,
2118 struct sk_buff *skb, struct qlcnic_cmd_buffer *pbuf)
2119 {
2120 struct qlcnic_skb_frag *nf;
2121 struct skb_frag_struct *frag;
2122 int i, nr_frags;
2123 dma_addr_t map;
2124
2125 nr_frags = skb_shinfo(skb)->nr_frags;
2126 nf = &pbuf->frag_array[0];
2127
2128 map = pci_map_single(pdev, skb->data,
2129 skb_headlen(skb), PCI_DMA_TODEVICE);
2130 if (pci_dma_mapping_error(pdev, map))
2131 goto out_err;
2132
2133 nf->dma = map;
2134 nf->length = skb_headlen(skb);
2135
2136 for (i = 0; i < nr_frags; i++) {
2137 frag = &skb_shinfo(skb)->frags[i];
2138 nf = &pbuf->frag_array[i+1];
2139
2140 map = skb_frag_dma_map(&pdev->dev, frag, 0, skb_frag_size(frag),
2141 DMA_TO_DEVICE);
2142 if (dma_mapping_error(&pdev->dev, map))
2143 goto unwind;
2144
2145 nf->dma = map;
2146 nf->length = skb_frag_size(frag);
2147 }
2148
2149 return 0;
2150
2151 unwind:
2152 while (--i >= 0) {
2153 nf = &pbuf->frag_array[i+1];
2154 pci_unmap_page(pdev, nf->dma, nf->length, PCI_DMA_TODEVICE);
2155 }
2156
2157 nf = &pbuf->frag_array[0];
2158 pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE);
2159
2160 out_err:
2161 return -ENOMEM;
2162 }
2163
2164 static void
2165 qlcnic_unmap_buffers(struct pci_dev *pdev, struct sk_buff *skb,
2166 struct qlcnic_cmd_buffer *pbuf)
2167 {
2168 struct qlcnic_skb_frag *nf = &pbuf->frag_array[0];
2169 int nr_frags = skb_shinfo(skb)->nr_frags;
2170 int i;
2171
2172 for (i = 0; i < nr_frags; i++) {
2173 nf = &pbuf->frag_array[i+1];
2174 pci_unmap_page(pdev, nf->dma, nf->length, PCI_DMA_TODEVICE);
2175 }
2176
2177 nf = &pbuf->frag_array[0];
2178 pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE);
2179 pbuf->skb = NULL;
2180 }
2181
2182 static inline void
2183 qlcnic_clear_cmddesc(u64 *desc)
2184 {
2185 desc[0] = 0ULL;
2186 desc[2] = 0ULL;
2187 desc[7] = 0ULL;
2188 }
2189
2190 netdev_tx_t
2191 qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
2192 {
2193 struct qlcnic_adapter *adapter = netdev_priv(netdev);
2194 struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
2195 struct qlcnic_cmd_buffer *pbuf;
2196 struct qlcnic_skb_frag *buffrag;
2197 struct cmd_desc_type0 *hwdesc, *first_desc;
2198 struct pci_dev *pdev;
2199 struct ethhdr *phdr;
2200 int delta = 0;
2201 int i, k;
2202
2203 u32 producer;
2204 int frag_count;
2205 u32 num_txd = tx_ring->num_desc;
2206
2207 if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
2208 netif_stop_queue(netdev);
2209 return NETDEV_TX_BUSY;
2210 }
2211
2212 if (adapter->flags & QLCNIC_MACSPOOF) {
2213 phdr = (struct ethhdr *)skb->data;
2214 if (compare_ether_addr(phdr->h_source,
2215 adapter->mac_addr))
2216 goto drop_packet;
2217 }
2218
2219 frag_count = skb_shinfo(skb)->nr_frags + 1;
2220 /* 14 frags supported for normal packet and
2221 * 32 frags supported for TSO packet
2222 */
2223 if (!skb_is_gso(skb) && frag_count > QLCNIC_MAX_FRAGS_PER_TX) {
2224
2225 for (i = 0; i < (frag_count - QLCNIC_MAX_FRAGS_PER_TX); i++)
2226 delta += skb_frag_size(&skb_shinfo(skb)->frags[i]);
2227
2228 if (!__pskb_pull_tail(skb, delta))
2229 goto drop_packet;
2230
2231 frag_count = 1 + skb_shinfo(skb)->nr_frags;
2232 }
2233
2234 if (unlikely(qlcnic_tx_avail(tx_ring) <= TX_STOP_THRESH)) {
2235 netif_stop_queue(netdev);
2236 if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH)
2237 netif_start_queue(netdev);
2238 else {
2239 adapter->stats.xmit_off++;
2240 return NETDEV_TX_BUSY;
2241 }
2242 }
2243
2244 producer = tx_ring->producer;
2245 pbuf = &tx_ring->cmd_buf_arr[producer];
2246
2247 pdev = adapter->pdev;
2248
2249 first_desc = hwdesc = &tx_ring->desc_head[producer];
2250 qlcnic_clear_cmddesc((u64 *)hwdesc);
2251
2252 if (qlcnic_map_tx_skb(pdev, skb, pbuf)) {
2253 adapter->stats.tx_dma_map_error++;
2254 goto drop_packet;
2255 }
2256
2257 pbuf->skb = skb;
2258 pbuf->frag_count = frag_count;
2259
2260 qlcnic_set_tx_frags_len(first_desc, frag_count, skb->len);
2261 qlcnic_set_tx_port(first_desc, adapter->portnum);
2262
2263 for (i = 0; i < frag_count; i++) {
2264
2265 k = i % 4;
2266
2267 if ((k == 0) && (i > 0)) {
2268 /* move to the next descriptor */
2269 producer = get_next_index(producer, num_txd);
2270 hwdesc = &tx_ring->desc_head[producer];
2271 qlcnic_clear_cmddesc((u64 *)hwdesc);
2272 tx_ring->cmd_buf_arr[producer].skb = NULL;
2273 }
2274
2275 buffrag = &pbuf->frag_array[i];
2276
2277 hwdesc->buffer_length[k] = cpu_to_le16(buffrag->length);
2278 switch (k) {
2279 case 0:
2280 hwdesc->addr_buffer1 = cpu_to_le64(buffrag->dma);
2281 break;
2282 case 1:
2283 hwdesc->addr_buffer2 = cpu_to_le64(buffrag->dma);
2284 break;
2285 case 2:
2286 hwdesc->addr_buffer3 = cpu_to_le64(buffrag->dma);
2287 break;
2288 case 3:
2289 hwdesc->addr_buffer4 = cpu_to_le64(buffrag->dma);
2290 break;
2291 }
2292 }
2293
2294 tx_ring->producer = get_next_index(producer, num_txd);
2295 smp_mb();
2296
2297 if (unlikely(qlcnic_tx_pkt(adapter, first_desc, skb)))
2298 goto unwind_buff;
2299
2300 if (adapter->mac_learn)
2301 qlcnic_send_filter(adapter, tx_ring, first_desc, skb);
2302
2303 adapter->stats.txbytes += skb->len;
2304 adapter->stats.xmitcalled++;
2305
2306 qlcnic_update_cmd_producer(adapter, tx_ring);
2307
2308 return NETDEV_TX_OK;
2309
2310 unwind_buff:
2311 qlcnic_unmap_buffers(pdev, skb, pbuf);
2312 drop_packet:
2313 adapter->stats.txdropped++;
2314 dev_kfree_skb_any(skb);
2315 return NETDEV_TX_OK;
2316 }
2317
2318 static int qlcnic_check_temp(struct qlcnic_adapter *adapter)
2319 {
2320 struct net_device *netdev = adapter->netdev;
2321 u32 temp, temp_state, temp_val;
2322 int rv = 0;
2323
2324 temp = QLCRD32(adapter, CRB_TEMP_STATE);
2325
2326 temp_state = qlcnic_get_temp_state(temp);
2327 temp_val = qlcnic_get_temp_val(temp);
2328
2329 if (temp_state == QLCNIC_TEMP_PANIC) {
2330 dev_err(&netdev->dev,
2331 "Device temperature %d degrees C exceeds"
2332 " maximum allowed. Hardware has been shut down.\n",
2333 temp_val);
2334 rv = 1;
2335 } else if (temp_state == QLCNIC_TEMP_WARN) {
2336 if (adapter->temp == QLCNIC_TEMP_NORMAL) {
2337 dev_err(&netdev->dev,
2338 "Device temperature %d degrees C "
2339 "exceeds operating range."
2340 " Immediate action needed.\n",
2341 temp_val);
2342 }
2343 } else {
2344 if (adapter->temp == QLCNIC_TEMP_WARN) {
2345 dev_info(&netdev->dev,
2346 "Device temperature is now %d degrees C"
2347 " in normal range.\n", temp_val);
2348 }
2349 }
2350 adapter->temp = temp_state;
2351 return rv;
2352 }
2353
2354 void qlcnic_advert_link_change(struct qlcnic_adapter *adapter, int linkup)
2355 {
2356 struct net_device *netdev = adapter->netdev;
2357
2358 if (adapter->ahw->linkup && !linkup) {
2359 netdev_info(netdev, "NIC Link is down\n");
2360 adapter->ahw->linkup = 0;
2361 if (netif_running(netdev)) {
2362 netif_carrier_off(netdev);
2363 netif_stop_queue(netdev);
2364 }
2365 } else if (!adapter->ahw->linkup && linkup) {
2366 netdev_info(netdev, "NIC Link is up\n");
2367 adapter->ahw->linkup = 1;
2368 if (netif_running(netdev)) {
2369 netif_carrier_on(netdev);
2370 netif_wake_queue(netdev);
2371 }
2372 }
2373 }
2374
2375 static void qlcnic_tx_timeout(struct net_device *netdev)
2376 {
2377 struct qlcnic_adapter *adapter = netdev_priv(netdev);
2378
2379 if (test_bit(__QLCNIC_RESETTING, &adapter->state))
2380 return;
2381
2382 dev_err(&netdev->dev, "transmit timeout, resetting.\n");
2383
2384 if (++adapter->tx_timeo_cnt >= QLCNIC_MAX_TX_TIMEOUTS)
2385 adapter->need_fw_reset = 1;
2386 else
2387 adapter->reset_context = 1;
2388 }
2389
2390 static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev)
2391 {
2392 struct qlcnic_adapter *adapter = netdev_priv(netdev);
2393 struct net_device_stats *stats = &netdev->stats;
2394
2395 stats->rx_packets = adapter->stats.rx_pkts + adapter->stats.lro_pkts;
2396 stats->tx_packets = adapter->stats.xmitfinished;
2397 stats->rx_bytes = adapter->stats.rxbytes + adapter->stats.lrobytes;
2398 stats->tx_bytes = adapter->stats.txbytes;
2399 stats->rx_dropped = adapter->stats.rxdropped;
2400 stats->tx_dropped = adapter->stats.txdropped;
2401
2402 return stats;
2403 }
2404
2405 static irqreturn_t qlcnic_clear_legacy_intr(struct qlcnic_adapter *adapter)
2406 {
2407 u32 status;
2408
2409 status = readl(adapter->isr_int_vec);
2410
2411 if (!(status & adapter->int_vec_bit))
2412 return IRQ_NONE;
2413
2414 /* check interrupt state machine, to be sure */
2415 status = readl(adapter->crb_int_state_reg);
2416 if (!ISR_LEGACY_INT_TRIGGERED(status))
2417 return IRQ_NONE;
2418
2419 writel(0xffffffff, adapter->tgt_status_reg);
2420 /* read twice to ensure write is flushed */
2421 readl(adapter->isr_int_vec);
2422 readl(adapter->isr_int_vec);
2423
2424 return IRQ_HANDLED;
2425 }
2426
2427 static irqreturn_t qlcnic_tmp_intr(int irq, void *data)
2428 {
2429 struct qlcnic_host_sds_ring *sds_ring = data;
2430 struct qlcnic_adapter *adapter = sds_ring->adapter;
2431
2432 if (adapter->flags & QLCNIC_MSIX_ENABLED)
2433 goto done;
2434 else if (adapter->flags & QLCNIC_MSI_ENABLED) {
2435 writel(0xffffffff, adapter->tgt_status_reg);
2436 goto done;
2437 }
2438
2439 if (qlcnic_clear_legacy_intr(adapter) == IRQ_NONE)
2440 return IRQ_NONE;
2441
2442 done:
2443 adapter->diag_cnt++;
2444 qlcnic_enable_int(sds_ring);
2445 return IRQ_HANDLED;
2446 }
2447
2448 static irqreturn_t qlcnic_intr(int irq, void *data)
2449 {
2450 struct qlcnic_host_sds_ring *sds_ring = data;
2451 struct qlcnic_adapter *adapter = sds_ring->adapter;
2452
2453 if (qlcnic_clear_legacy_intr(adapter) == IRQ_NONE)
2454 return IRQ_NONE;
2455
2456 napi_schedule(&sds_ring->napi);
2457
2458 return IRQ_HANDLED;
2459 }
2460
2461 static irqreturn_t qlcnic_msi_intr(int irq, void *data)
2462 {
2463 struct qlcnic_host_sds_ring *sds_ring = data;
2464 struct qlcnic_adapter *adapter = sds_ring->adapter;
2465
2466 /* clear interrupt */
2467 writel(0xffffffff, adapter->tgt_status_reg);
2468
2469 napi_schedule(&sds_ring->napi);
2470 return IRQ_HANDLED;
2471 }
2472
2473 static irqreturn_t qlcnic_msix_intr(int irq, void *data)
2474 {
2475 struct qlcnic_host_sds_ring *sds_ring = data;
2476
2477 napi_schedule(&sds_ring->napi);
2478 return IRQ_HANDLED;
2479 }
2480
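/* Reclaim Tx buffers for descriptors the firmware has consumed. Returns
 * non-zero when the ring is fully drained (or the clean lock is busy). */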
2481 static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter)
2482 {
2483 u32 sw_consumer, hw_consumer;
2484 int count = 0, i;
2485 struct qlcnic_cmd_buffer *buffer;
2486 struct pci_dev *pdev = adapter->pdev;
2487 struct net_device *netdev = adapter->netdev;
2488 struct qlcnic_skb_frag *frag;
2489 int done;
2490 struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
2491
2492 if (!spin_trylock(&adapter->tx_clean_lock))
2493 return 1;
2494
2495 sw_consumer = tx_ring->sw_consumer;
2496 hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
2497
2498 while (sw_consumer != hw_consumer) {
2499 buffer = &tx_ring->cmd_buf_arr[sw_consumer];
2500 if (buffer->skb) {
2501 frag = &buffer->frag_array[0];
2502 pci_unmap_single(pdev, frag->dma, frag->length,
2503 PCI_DMA_TODEVICE);
2504 frag->dma = 0ULL;
2505 for (i = 1; i < buffer->frag_count; i++) {
2506 frag++;
2507 pci_unmap_page(pdev, frag->dma, frag->length,
2508 PCI_DMA_TODEVICE);
2509 frag->dma = 0ULL;
2510 }
2511
2512 adapter->stats.xmitfinished++;
2513 dev_kfree_skb_any(buffer->skb);
2514 buffer->skb = NULL;
2515 }
2516
2517 sw_consumer = get_next_index(sw_consumer, tx_ring->num_desc);
2518 if (++count >= MAX_STATUS_HANDLE)
2519 break;
2520 }
2521
2522 if (count && netif_running(netdev)) {
2523 tx_ring->sw_consumer = sw_consumer;
2524
2525 smp_mb();
2526
2527 if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev)) {
2528 if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) {
2529 netif_wake_queue(netdev);
2530 adapter->stats.xmit_on++;
2531 }
2532 }
2533 adapter->tx_timeo_cnt = 0;
2534 }
2535 /*
2536 * If everything is freed up to the consumer then check if the ring is full.
2537 * If the ring is full then check if more needs to be freed and
2538 * schedule the callback again.
2539 *
2540 * This happens when there are 2 CPUs. One could be freeing and the
2541 * other filling it. If the ring is full when we get out of here and
2542 * the card has already interrupted the host then the host can miss the
2543 * interrupt.
2544 *
2545 * There is still a possible race condition and the host could miss an
2546 * interrupt. The card has to take care of this.
2547 */
2548 hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
2549 done = (sw_consumer == hw_consumer);
2550 spin_unlock(&adapter->tx_clean_lock);
2551
2552 return done;
2553 }
2554
2555 static int qlcnic_poll(struct napi_struct *napi, int budget)
2556 {
2557 struct qlcnic_host_sds_ring *sds_ring =
2558 container_of(napi, struct qlcnic_host_sds_ring, napi);
2559
2560 struct qlcnic_adapter *adapter = sds_ring->adapter;
2561
2562 int tx_complete;
2563 int work_done;
2564
2565 tx_complete = qlcnic_process_cmd_ring(adapter);
2566
2567 work_done = qlcnic_process_rcv_ring(sds_ring, budget);
2568
2569 if ((work_done < budget) && tx_complete) {
2570 napi_complete(&sds_ring->napi);
2571 if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
2572 qlcnic_enable_int(sds_ring);
2573 }
2574
2575 return work_done;
2576 }
2577
2578 static int qlcnic_rx_poll(struct napi_struct *napi, int budget)
2579 {
2580 struct qlcnic_host_sds_ring *sds_ring =
2581 container_of(napi, struct qlcnic_host_sds_ring, napi);
2582
2583 struct qlcnic_adapter *adapter = sds_ring->adapter;
2584 int work_done;
2585
2586 work_done = qlcnic_process_rcv_ring(sds_ring, budget);
2587
2588 if (work_done < budget) {
2589 napi_complete(&sds_ring->napi);
2590 if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
2591 qlcnic_enable_int(sds_ring);
2592 }
2593
2594 return work_done;
2595 }
2596
2597 #ifdef CONFIG_NET_POLL_CONTROLLER
2598 static void qlcnic_poll_controller(struct net_device *netdev)
2599 {
2600 int ring;
2601 struct qlcnic_host_sds_ring *sds_ring;
2602 struct qlcnic_adapter *adapter = netdev_priv(netdev);
2603 struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
2604
2605 disable_irq(adapter->irq);
2606 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
2607 sds_ring = &recv_ctx->sds_rings[ring];
2608 qlcnic_intr(adapter->irq, sds_ring);
2609 }
2610 enable_irq(adapter->irq);
2611 }
2612 #endif
2613
2614 static void
2615 qlcnic_idc_debug_info(struct qlcnic_adapter *adapter, u8 encoding)
2616 {
2617 u32 val;
2618
2619 val = adapter->portnum & 0xf;
2620 val |= encoding << 7;
2621 val |= (jiffies - adapter->dev_rst_time) << 8;
2622
2623 QLCWR32(adapter, QLCNIC_CRB_DRV_SCRATCH, val);
2624 adapter->dev_rst_time = jiffies;
2625 }
2626
2627 static int
2628 qlcnic_set_drv_state(struct qlcnic_adapter *adapter, u8 state)
2629 {
2630 u32 val;
2631
2632 WARN_ON(state != QLCNIC_DEV_NEED_RESET &&
2633 state != QLCNIC_DEV_NEED_QUISCENT);
2634
2635 if (qlcnic_api_lock(adapter))
2636 return -EIO;
2637
2638 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
2639
2640 if (state == QLCNIC_DEV_NEED_RESET)
2641 QLC_DEV_SET_RST_RDY(val, adapter->portnum);
2642 else if (state == QLCNIC_DEV_NEED_QUISCENT)
2643 QLC_DEV_SET_QSCNT_RDY(val, adapter->portnum);
2644
2645 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2646
2647 qlcnic_api_unlock(adapter);
2648
2649 return 0;
2650 }
2651
2652 static int
2653 qlcnic_clr_drv_state(struct qlcnic_adapter *adapter)
2654 {
2655 u32 val;
2656
2657 if (qlcnic_api_lock(adapter))
2658 return -EBUSY;
2659
2660 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
2661 QLC_DEV_CLR_RST_QSCNT(val, adapter->portnum);
2662 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2663
2664 qlcnic_api_unlock(adapter);
2665
2666 return 0;
2667 }
2668
2669 static void
2670 qlcnic_clr_all_drv_state(struct qlcnic_adapter *adapter, u8 failed)
2671 {
2672 u32 val;
2673
2674 if (qlcnic_api_lock(adapter))
2675 goto err;
2676
2677 val = QLCRD32(adapter, QLCNIC_CRB_DRV_ACTIVE);
2678 QLC_DEV_CLR_REF_CNT(val, adapter->portnum);
2679 QLCWR32(adapter, QLCNIC_CRB_DRV_ACTIVE, val);
2680
2681 if (failed) {
2682 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_FAILED);
2683 dev_info(&adapter->pdev->dev,
2684 "Device state set to Failed. Please Reboot\n");
2685 } else if (!(val & 0x11111111))
2686 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_COLD);
2687
2688 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
2689 QLC_DEV_CLR_RST_QSCNT(val, adapter->portnum);
2690 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2691
2692 qlcnic_api_unlock(adapter);
2693 err:
2694 adapter->fw_fail_cnt = 0;
2695 adapter->flags &= ~QLCNIC_FW_HANG;
2696 clear_bit(__QLCNIC_START_FW, &adapter->state);
2697 clear_bit(__QLCNIC_RESETTING, &adapter->state);
2698 }
2699
2700 /* Grab the api lock before checking the state */
2701 static int
2702 qlcnic_check_drv_state(struct qlcnic_adapter *adapter)
2703 {
2704 int act, state, active_mask;
2705
2706 state = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
2707 act = QLCRD32(adapter, QLCNIC_CRB_DRV_ACTIVE);
2708
2709 if (adapter->flags & QLCNIC_FW_RESET_OWNER) {
2710 active_mask = (~(1 << (adapter->ahw->pci_func * 4)));
2711 act = act & active_mask;
2712 }
2713
2714 if (((state & 0x11111111) == (act & 0x11111111)) ||
2715 ((act & 0x11111111) == ((state >> 1) & 0x11111111)))
2716 return 0;
2717 else
2718 return 1;
2719 }
2720
2721 static int qlcnic_check_idc_ver(struct qlcnic_adapter *adapter)
2722 {
2723 u32 val = QLCRD32(adapter, QLCNIC_CRB_DRV_IDC_VER);
2724
2725 if (val != QLCNIC_DRV_IDC_VER) {
2726 dev_warn(&adapter->pdev->dev, "IDC Version mismatch, driver's"
2727 " idc ver = %x; reqd = %x\n", QLCNIC_DRV_IDC_VER, val);
2728 }
2729
2730 return 0;
2731 }
2732
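/* Decide whether this function should load the firmware: returns 1 to
 * load, 0 when another function has already brought the device to READY,
 * and a negative value on failure or timeout. */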
2733 static int
2734 qlcnic_can_start_firmware(struct qlcnic_adapter *adapter)
2735 {
2736 u32 val, prev_state;
2737 u8 dev_init_timeo = adapter->dev_init_timeo;
2738 u8 portnum = adapter->portnum;
2739 u8 ret;
2740
2741 if (test_and_clear_bit(__QLCNIC_START_FW, &adapter->state))
2742 return 1;
2743
2744 if (qlcnic_api_lock(adapter))
2745 return -1;
2746
2747 val = QLCRD32(adapter, QLCNIC_CRB_DRV_ACTIVE);
2748 if (!(val & (1 << (portnum * 4)))) {
2749 QLC_DEV_SET_REF_CNT(val, portnum);
2750 QLCWR32(adapter, QLCNIC_CRB_DRV_ACTIVE, val);
2751 }
2752
2753 prev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
2754 QLCDB(adapter, HW, "Device state = %u\n", prev_state);
2755
2756 switch (prev_state) {
2757 case QLCNIC_DEV_COLD:
2758 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_INITIALIZING);
2759 QLCWR32(adapter, QLCNIC_CRB_DRV_IDC_VER, QLCNIC_DRV_IDC_VER);
2760 qlcnic_idc_debug_info(adapter, 0);
2761 qlcnic_api_unlock(adapter);
2762 return 1;
2763
2764 case QLCNIC_DEV_READY:
2765 ret = qlcnic_check_idc_ver(adapter);
2766 qlcnic_api_unlock(adapter);
2767 return ret;
2768
2769 case QLCNIC_DEV_NEED_RESET:
2770 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
2771 QLC_DEV_SET_RST_RDY(val, portnum);
2772 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2773 break;
2774
2775 case QLCNIC_DEV_NEED_QUISCENT:
2776 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
2777 QLC_DEV_SET_QSCNT_RDY(val, portnum);
2778 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2779 break;
2780
2781 case QLCNIC_DEV_FAILED:
2782 dev_err(&adapter->pdev->dev, "Device in failed state.\n");
2783 qlcnic_api_unlock(adapter);
2784 return -1;
2785
2786 case QLCNIC_DEV_INITIALIZING:
2787 case QLCNIC_DEV_QUISCENT:
2788 break;
2789 }
2790
2791 qlcnic_api_unlock(adapter);
2792
2793 do {
2794 msleep(1000);
2795 prev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
2796
2797 if (prev_state == QLCNIC_DEV_QUISCENT)
2798 continue;
2799 } while ((prev_state != QLCNIC_DEV_READY) && --dev_init_timeo);
2800
2801 if (!dev_init_timeo) {
2802 dev_err(&adapter->pdev->dev,
2803 "Waiting for device to initialize timeout\n");
2804 return -1;
2805 }
2806
2807 if (qlcnic_api_lock(adapter))
2808 return -1;
2809
2810 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
2811 QLC_DEV_CLR_RST_QSCNT(val, portnum);
2812 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2813
2814 ret = qlcnic_check_idc_ver(adapter);
2815 qlcnic_api_unlock(adapter);
2816
2817 return ret;
2818 }
2819
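/* Delayed work: run the IDC reset handshake. Privileged functions restart
 * the firmware once all functions have acked (or the ack timeout expires);
 * non-privileged functions wait for the device to become READY. */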
2820 static void
2821 qlcnic_fwinit_work(struct work_struct *work)
2822 {
2823 struct qlcnic_adapter *adapter = container_of(work,
2824 struct qlcnic_adapter, fw_work.work);
2825 u32 dev_state = 0xf;
2826 u32 val;
2827
2828 if (qlcnic_api_lock(adapter))
2829 goto err_ret;
2830
2831 dev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
2832 if (dev_state == QLCNIC_DEV_QUISCENT ||
2833 dev_state == QLCNIC_DEV_NEED_QUISCENT) {
2834 qlcnic_api_unlock(adapter);
2835 qlcnic_schedule_work(adapter, qlcnic_fwinit_work,
2836 FW_POLL_DELAY * 2);
2837 return;
2838 }
2839
2840 if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC) {
2841 qlcnic_api_unlock(adapter);
2842 goto wait_npar;
2843 }
2844
2845 if (dev_state == QLCNIC_DEV_INITIALIZING ||
2846 dev_state == QLCNIC_DEV_READY) {
2847 dev_info(&adapter->pdev->dev, "Detected state change from "
2848 "DEV_NEED_RESET, skipping ack check\n");
2849 goto skip_ack_check;
2850 }
2851
2852 if (adapter->fw_wait_cnt++ > adapter->reset_ack_timeo) {
2853 dev_info(&adapter->pdev->dev, "Reset:Failed to get ack %d sec\n",
2854 adapter->reset_ack_timeo);
2855 goto skip_ack_check;
2856 }
2857
2858 if (!qlcnic_check_drv_state(adapter)) {
2859 skip_ack_check:
2860 dev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
2861
2862 if (dev_state == QLCNIC_DEV_NEED_RESET) {
2863 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE,
2864 QLCNIC_DEV_INITIALIZING);
2865 set_bit(__QLCNIC_START_FW, &adapter->state);
2866 QLCDB(adapter, DRV, "Restarting fw\n");
2867 qlcnic_idc_debug_info(adapter, 0);
2868 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
2869 QLC_DEV_SET_RST_RDY(val, adapter->portnum);
2870 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2871 }
2872
2873 qlcnic_api_unlock(adapter);
2874
2875 rtnl_lock();
2876 if (adapter->ahw->fw_dump.enable &&
2877 (adapter->flags & QLCNIC_FW_RESET_OWNER)) {
2878 QLCDB(adapter, DRV, "Take FW dump\n");
2879 qlcnic_dump_fw(adapter);
2880 adapter->flags |= QLCNIC_FW_HANG;
2881 }
2882 rtnl_unlock();
2883
2884 adapter->flags &= ~QLCNIC_FW_RESET_OWNER;
2885 if (!adapter->nic_ops->start_firmware(adapter)) {
2886 qlcnic_schedule_work(adapter, qlcnic_attach_work, 0);
2887 adapter->fw_wait_cnt = 0;
2888 return;
2889 }
2890 goto err_ret;
2891 }
2892
2893 qlcnic_api_unlock(adapter);
2894
2895 wait_npar:
2896 dev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
2897 QLCDB(adapter, HW, "Func waiting: Device state=%u\n", dev_state);
2898
2899 switch (dev_state) {
2900 case QLCNIC_DEV_READY:
2901 if (!adapter->nic_ops->start_firmware(adapter)) {
2902 qlcnic_schedule_work(adapter, qlcnic_attach_work, 0);
2903 adapter->fw_wait_cnt = 0;
2904 return;
2905 }
2906 case QLCNIC_DEV_FAILED:
2907 break;
2908 default:
2909 qlcnic_schedule_work(adapter,
2910 qlcnic_fwinit_work, FW_POLL_DELAY);
2911 return;
2912 }
2913
2914 err_ret:
2915 dev_err(&adapter->pdev->dev, "Fwinit work failed state=%u "
2916 "fw_wait_cnt=%u\n", dev_state, adapter->fw_wait_cnt);
2917 netif_device_attach(adapter->netdev);
2918 qlcnic_clr_all_drv_state(adapter, 0);
2919 }
2920
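/* Delayed work: detach the interface, check for fatal peg-halt or
 * temperature conditions, ack the reset request and hand off to
 * qlcnic_fwinit_work. */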
2921 static void
2922 qlcnic_detach_work(struct work_struct *work)
2923 {
2924 struct qlcnic_adapter *adapter = container_of(work,
2925 struct qlcnic_adapter, fw_work.work);
2926 struct net_device *netdev = adapter->netdev;
2927 u32 status;
2928
2929 netif_device_detach(netdev);
2930
2931 /* Don't grab the rtnl lock during quiescent mode */
2932 if (adapter->dev_state == QLCNIC_DEV_NEED_QUISCENT) {
2933 if (netif_running(netdev))
2934 __qlcnic_down(adapter, netdev);
2935 } else
2936 qlcnic_down(adapter, netdev);
2937
2938 status = QLCRD32(adapter, QLCNIC_PEG_HALT_STATUS1);
2939
2940 if (status & QLCNIC_RCODE_FATAL_ERROR) {
2941 dev_err(&adapter->pdev->dev,
2942 "Detaching the device: peg halt status1=0x%x\n",
2943 status);
2944
2945 if (QLCNIC_FWERROR_CODE(status) == QLCNIC_FWERROR_FAN_FAILURE) {
2946 dev_err(&adapter->pdev->dev,
2947 "On board active cooling fan failed. "
2948 "Device has been halted.\n");
2949 dev_err(&adapter->pdev->dev,
2950 "Replace the adapter.\n");
2951 }
2952
2953 goto err_ret;
2954 }
2955
2956 if (adapter->temp == QLCNIC_TEMP_PANIC) {
2957 dev_err(&adapter->pdev->dev, "Detaching the device: temp=%d\n",
2958 adapter->temp);
2959 goto err_ret;
2960 }
2961
2962 /* Don't ack if this instance is the reset owner */
2963 if (!(adapter->flags & QLCNIC_FW_RESET_OWNER)) {
2964 if (qlcnic_set_drv_state(adapter, adapter->dev_state)) {
2965 dev_err(&adapter->pdev->dev,
2966 "Failed to set driver state,"
2967 "detaching the device.\n");
2968 goto err_ret;
2969 }
2970 }
2971
2972 adapter->fw_wait_cnt = 0;
2973
2974 qlcnic_schedule_work(adapter, qlcnic_fwinit_work, FW_POLL_DELAY);
2975
2976 return;
2977
2978 err_ret:
2979 netif_device_attach(netdev);
2980 qlcnic_clr_all_drv_state(adapter, 1);
2981 }
2982
2983 /* Transition the NPAR state to NON operational */
2984 static void
2985 qlcnic_set_npar_non_operational(struct qlcnic_adapter *adapter)
2986 {
2987 u32 state;
2988
2989 state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE);
2990 if (state == QLCNIC_DEV_NPAR_NON_OPER)
2991 return;
2992
2993 if (qlcnic_api_lock(adapter))
2994 return;
2995 QLCWR32(adapter, QLCNIC_CRB_DEV_NPAR_STATE, QLCNIC_DEV_NPAR_NON_OPER);
2996 qlcnic_api_unlock(adapter);
2997 }
2998
2999 /* Transition to RESET state from READY state only */
3000 void
3001 qlcnic_dev_request_reset(struct qlcnic_adapter *adapter)
3002 {
3003 u32 state;
3004
3005 adapter->need_fw_reset = 1;
3006 if (qlcnic_api_lock(adapter))
3007 return;
3008
3009 state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
3010
3011 if (state == QLCNIC_DEV_READY) {
3012 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_NEED_RESET);
3013 adapter->flags |= QLCNIC_FW_RESET_OWNER;
3014 QLCDB(adapter, DRV, "NEED_RESET state set\n");
3015 qlcnic_idc_debug_info(adapter, 0);
3016 }
3017
3018 QLCWR32(adapter, QLCNIC_CRB_DEV_NPAR_STATE, QLCNIC_DEV_NPAR_NON_OPER);
3019 qlcnic_api_unlock(adapter);
3020 }
3021
3022 /* Transition to NPAR READY state from NPAR NOT READY state */
3023 static void
3024 qlcnic_dev_set_npar_ready(struct qlcnic_adapter *adapter)
3025 {
3026 if (qlcnic_api_lock(adapter))
3027 return;
3028
3029 QLCWR32(adapter, QLCNIC_CRB_DEV_NPAR_STATE, QLCNIC_DEV_NPAR_OPER);
3030 QLCDB(adapter, DRV, "NPAR operational state set\n");
3031
3032 qlcnic_api_unlock(adapter);
3033 }
3034
3035 static void
3036 qlcnic_schedule_work(struct qlcnic_adapter *adapter,
3037 work_func_t func, int delay)
3038 {
3039 if (test_bit(__QLCNIC_AER, &adapter->state))
3040 return;
3041
3042 INIT_DELAYED_WORK(&adapter->fw_work, func);
3043 queue_delayed_work(qlcnic_wq, &adapter->fw_work,
3044 round_jiffies_relative(delay));
3045 }
3046
3047 static void
3048 qlcnic_cancel_fw_work(struct qlcnic_adapter *adapter)
3049 {
3050 while (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
3051 msleep(10);
3052
3053 cancel_delayed_work_sync(&adapter->fw_work);
3054 }
3055
3056 static void
3057 qlcnic_attach_work(struct work_struct *work)
3058 {
3059 struct qlcnic_adapter *adapter = container_of(work,
3060 struct qlcnic_adapter, fw_work.work);
3061 struct net_device *netdev = adapter->netdev;
3062 u32 npar_state;
3063
3064 if (adapter->op_mode != QLCNIC_MGMT_FUNC) {
3065 npar_state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE);
3066 if (adapter->fw_wait_cnt++ > QLCNIC_DEV_NPAR_OPER_TIMEO)
3067 qlcnic_clr_all_drv_state(adapter, 0);
3068 else if (npar_state != QLCNIC_DEV_NPAR_OPER)
3069 qlcnic_schedule_work(adapter, qlcnic_attach_work,
3070 FW_POLL_DELAY);
3071 else
3072 goto attach;
3073 QLCDB(adapter, DRV, "Waiting for NPAR state to operational\n");
3074 return;
3075 }
3076 attach:
3077 if (netif_running(netdev)) {
3078 if (qlcnic_up(adapter, netdev))
3079 goto done;
3080
3081 qlcnic_restore_indev_addr(netdev, NETDEV_UP);
3082 }
3083
3084 done:
3085 netif_device_attach(netdev);
3086 adapter->fw_fail_cnt = 0;
3087 adapter->flags &= ~QLCNIC_FW_HANG;
3088 clear_bit(__QLCNIC_RESETTING, &adapter->state);
3089
3090 if (!qlcnic_clr_drv_state(adapter))
3091 qlcnic_schedule_work(adapter, qlcnic_fw_poll_work,
3092 FW_POLL_DELAY);
3093 }
3094
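/* Periodic health check: monitor temperature, IDC device state and the
 * firmware heartbeat; returns 1 and schedules recovery when the device
 * needs to be detached. */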
3095 static int
3096 qlcnic_check_health(struct qlcnic_adapter *adapter)
3097 {
3098 u32 state = 0, heartbeat;
3099 u32 peg_status;
3100
3101 if (qlcnic_check_temp(adapter))
3102 goto detach;
3103
3104 if (adapter->need_fw_reset)
3105 qlcnic_dev_request_reset(adapter);
3106
3107 state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
3108 if (state == QLCNIC_DEV_NEED_RESET) {
3109 qlcnic_set_npar_non_operational(adapter);
3110 adapter->need_fw_reset = 1;
3111 } else if (state == QLCNIC_DEV_NEED_QUISCENT)
3112 goto detach;
3113
3114 heartbeat = QLCRD32(adapter, QLCNIC_PEG_ALIVE_COUNTER);
3115 if (heartbeat != adapter->heartbeat) {
3116 adapter->heartbeat = heartbeat;
3117 adapter->fw_fail_cnt = 0;
3118 if (adapter->need_fw_reset)
3119 goto detach;
3120
3121 if (adapter->reset_context && auto_fw_reset) {
3122 qlcnic_reset_hw_context(adapter);
3123 adapter->netdev->trans_start = jiffies;
3124 }
3125
3126 return 0;
3127 }
3128
3129 if (++adapter->fw_fail_cnt < FW_FAIL_THRESH)
3130 return 0;
3131
3132 adapter->flags |= QLCNIC_FW_HANG;
3133
3134 qlcnic_dev_request_reset(adapter);
3135
3136 if (auto_fw_reset)
3137 clear_bit(__QLCNIC_FW_ATTACHED, &adapter->state);
3138
3139 dev_err(&adapter->pdev->dev, "firmware hang detected\n");
3140 dev_err(&adapter->pdev->dev, "Dumping hw/fw registers\n"
3141 "PEG_HALT_STATUS1: 0x%x, PEG_HALT_STATUS2: 0x%x,\n"
3142 "PEG_NET_0_PC: 0x%x, PEG_NET_1_PC: 0x%x,\n"
3143 "PEG_NET_2_PC: 0x%x, PEG_NET_3_PC: 0x%x,\n"
3144 "PEG_NET_4_PC: 0x%x\n",
3145 QLCRD32(adapter, QLCNIC_PEG_HALT_STATUS1),
3146 QLCRD32(adapter, QLCNIC_PEG_HALT_STATUS2),
3147 QLCRD32(adapter, QLCNIC_CRB_PEG_NET_0 + 0x3c),
3148 QLCRD32(adapter, QLCNIC_CRB_PEG_NET_1 + 0x3c),
3149 QLCRD32(adapter, QLCNIC_CRB_PEG_NET_2 + 0x3c),
3150 QLCRD32(adapter, QLCNIC_CRB_PEG_NET_3 + 0x3c),
3151 QLCRD32(adapter, QLCNIC_CRB_PEG_NET_4 + 0x3c));
3152 peg_status = QLCRD32(adapter, QLCNIC_PEG_HALT_STATUS1);
3153 if (LSW(MSB(peg_status)) == 0x67)
3154 dev_err(&adapter->pdev->dev,
3155 "Firmware aborted with error code 0x00006700. "
3156 "Device is being reset.\n");
3157 detach:
3158 adapter->dev_state = (state == QLCNIC_DEV_NEED_QUISCENT) ? state :
3159 QLCNIC_DEV_NEED_RESET;
3160
3161 if (auto_fw_reset &&
3162 !test_and_set_bit(__QLCNIC_RESETTING, &adapter->state)) {
3163
3164 qlcnic_schedule_work(adapter, qlcnic_detach_work, 0);
3165 QLCDB(adapter, DRV, "fw recovery scheduled.\n");
3166 }
3167
3168 return 1;
3169 }
3170
3171 static void
3172 qlcnic_fw_poll_work(struct work_struct *work)
3173 {
3174 struct qlcnic_adapter *adapter = container_of(work,
3175 struct qlcnic_adapter, fw_work.work);
3176
3177 if (test_bit(__QLCNIC_RESETTING, &adapter->state))
3178 goto reschedule;
3179
3180
3181 if (qlcnic_check_health(adapter))
3182 return;
3183
3184 if (adapter->fhash.fnum)
3185 qlcnic_prune_lb_filters(adapter);
3186
3187 reschedule:
3188 qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY);
3189 }
3190
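/* Returns 1 when every lower-numbered PCI function in this slot is in
 * D3cold, i.e. this function is the first one being brought back up. */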
3191 static int qlcnic_is_first_func(struct pci_dev *pdev)
3192 {
3193 struct pci_dev *oth_pdev;
3194 int val = pdev->devfn;
3195
3196 while (val-- > 0) {
3197 oth_pdev = pci_get_domain_bus_and_slot(pci_domain_nr
3198 (pdev->bus), pdev->bus->number,
3199 PCI_DEVFN(PCI_SLOT(pdev->devfn), val));
3200 if (!oth_pdev)
3201 continue;
3202
3203 if (oth_pdev->current_state != PCI_D3cold) {
3204 pci_dev_put(oth_pdev);
3205 return 0;
3206 }
3207 pci_dev_put(oth_pdev);
3208 }
3209 return 1;
3210 }
3211
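/* Bring a PCI function back after an AER slot reset: re-enable the
 * device, restart the firmware when needed and re-attach the netdev. */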
3212 static int qlcnic_attach_func(struct pci_dev *pdev)
3213 {
3214 int err, first_func;
3215 struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
3216 struct net_device *netdev = adapter->netdev;
3217
3218 pdev->error_state = pci_channel_io_normal;
3219
3220 err = pci_enable_device(pdev);
3221 if (err)
3222 return err;
3223
3224 pci_set_power_state(pdev, PCI_D0);
3225 pci_set_master(pdev);
3226 pci_restore_state(pdev);
3227
3228 first_func = qlcnic_is_first_func(pdev);
3229
3230 if (qlcnic_api_lock(adapter))
3231 return -EINVAL;
3232
3233 if (adapter->op_mode != QLCNIC_NON_PRIV_FUNC && first_func) {
3234 adapter->need_fw_reset = 1;
3235 set_bit(__QLCNIC_START_FW, &adapter->state);
3236 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_INITIALIZING);
3237 QLCDB(adapter, DRV, "Restarting fw\n");
3238 }
3239 qlcnic_api_unlock(adapter);
3240
3241 err = adapter->nic_ops->start_firmware(adapter);
3242 if (err)
3243 return err;
3244
3245 qlcnic_clr_drv_state(adapter);
3246 qlcnic_setup_intr(adapter);
3247
3248 if (netif_running(netdev)) {
3249 err = qlcnic_attach(adapter);
3250 if (err) {
3251 qlcnic_clr_all_drv_state(adapter, 1);
3252 clear_bit(__QLCNIC_AER, &adapter->state);
3253 netif_device_attach(netdev);
3254 return err;
3255 }
3256
3257 err = qlcnic_up(adapter, netdev);
3258 if (err)
3259 goto done;
3260
3261 qlcnic_restore_indev_addr(netdev, NETDEV_UP);
3262 }
3263 done:
3264 netif_device_attach(netdev);
3265 return err;
3266 }
3267
3268 static pci_ers_result_t qlcnic_io_error_detected(struct pci_dev *pdev,
3269 pci_channel_state_t state)
3270 {
3271 struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
3272 struct net_device *netdev = adapter->netdev;
3273
3274 if (state == pci_channel_io_perm_failure)
3275 return PCI_ERS_RESULT_DISCONNECT;
3276
3277 if (state == pci_channel_io_normal)
3278 return PCI_ERS_RESULT_RECOVERED;
3279
3280 set_bit(__QLCNIC_AER, &adapter->state);
3281 netif_device_detach(netdev);
3282
3283 cancel_delayed_work_sync(&adapter->fw_work);
3284
3285 if (netif_running(netdev))
3286 qlcnic_down(adapter, netdev);
3287
3288 qlcnic_detach(adapter);
3289 qlcnic_teardown_intr(adapter);
3290
3291 clear_bit(__QLCNIC_RESETTING, &adapter->state);
3292
3293 pci_save_state(pdev);
3294 pci_disable_device(pdev);
3295
3296 return PCI_ERS_RESULT_NEED_RESET;
3297 }
3298
3299 static pci_ers_result_t qlcnic_io_slot_reset(struct pci_dev *pdev)
3300 {
3301 return qlcnic_attach_func(pdev) ? PCI_ERS_RESULT_DISCONNECT :
3302 PCI_ERS_RESULT_RECOVERED;
3303 }
3304
3305 static void qlcnic_io_resume(struct pci_dev *pdev)
3306 {
3307 struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
3308
3309 pci_cleanup_aer_uncorrect_error_status(pdev);
3310
3311 if (QLCRD32(adapter, QLCNIC_CRB_DEV_STATE) == QLCNIC_DEV_READY &&
3312 test_and_clear_bit(__QLCNIC_AER, &adapter->state))
3313 qlcnic_schedule_work(adapter, qlcnic_fw_poll_work,
3314 FW_POLL_DELAY);
3315 }
3316
3317 static int
3318 qlcnicvf_start_firmware(struct qlcnic_adapter *adapter)
3319 {
3320 int err;
3321
3322 err = qlcnic_can_start_firmware(adapter);
3323 if (err)
3324 return err;
3325
3326 err = qlcnic_check_npar_opertional(adapter);
3327 if (err)
3328 return err;
3329
3330 err = qlcnic_initialize_nic(adapter);
3331 if (err)
3332 return err;
3333
3334 qlcnic_check_options(adapter);
3335
3336 err = qlcnic_set_eswitch_port_config(adapter);
3337 if (err)
3338 return err;
3339
3340 adapter->need_fw_reset = 0;
3341
3342 return err;
3343 }
3344
3345 static int
3346 qlcnicvf_config_bridged_mode(struct qlcnic_adapter *adapter, u32 enable)
3347 {
3348 return -EOPNOTSUPP;
3349 }
3350
3351 static int
3352 qlcnicvf_config_led(struct qlcnic_adapter *adapter, u32 state, u32 rate)
3353 {
3354 return -EOPNOTSUPP;
3355 }
3356
3357 static ssize_t
3358 qlcnic_store_bridged_mode(struct device *dev,
3359 struct device_attribute *attr, const char *buf, size_t len)
3360 {
3361 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3362 unsigned long new;
3363 int ret = -EINVAL;
3364
3365 if (!(adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG))
3366 goto err_out;
3367
3368 if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
3369 goto err_out;
3370
3371 if (strict_strtoul(buf, 2, &new))
3372 goto err_out;
3373
3374 if (!adapter->nic_ops->config_bridged_mode(adapter, !!new))
3375 ret = len;
3376
3377 err_out:
3378 return ret;
3379 }
3380
3381 static ssize_t
3382 qlcnic_show_bridged_mode(struct device *dev,
3383 struct device_attribute *attr, char *buf)
3384 {
3385 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3386 int bridged_mode = 0;
3387
3388 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG)
3389 bridged_mode = !!(adapter->flags & QLCNIC_BRIDGE_ENABLED);
3390
3391 return sprintf(buf, "%d\n", bridged_mode);
3392 }
3393
3394 static struct device_attribute dev_attr_bridged_mode = {
3395 .attr = {.name = "bridged_mode", .mode = (S_IRUGO | S_IWUSR)},
3396 .show = qlcnic_show_bridged_mode,
3397 .store = qlcnic_store_bridged_mode,
3398 };
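/*
 * Usage sketch (assuming, as the drvdata lookup above suggests, that the
 * attribute is created on the PCI device's sysfs directory):
 *   echo 1 > /sys/bus/pci/devices/<B:D.F>/bridged_mode
 *   cat /sys/bus/pci/devices/<B:D.F>/bridged_mode
 */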
3399
3400 static ssize_t
3401 qlcnic_store_diag_mode(struct device *dev,
3402 struct device_attribute *attr, const char *buf, size_t len)
3403 {
3404 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3405 unsigned long new;
3406
3407 if (strict_strtoul(buf, 2, &new))
3408 return -EINVAL;
3409
3410 if (!!new != !!(adapter->flags & QLCNIC_DIAG_ENABLED))
3411 adapter->flags ^= QLCNIC_DIAG_ENABLED;
3412
3413 return len;
3414 }
3415
3416 static ssize_t
3417 qlcnic_show_diag_mode(struct device *dev,
3418 struct device_attribute *attr, char *buf)
3419 {
3420 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3421
3422 return sprintf(buf, "%d\n",
3423 !!(adapter->flags & QLCNIC_DIAG_ENABLED));
3424 }
3425
3426 static struct device_attribute dev_attr_diag_mode = {
3427 .attr = {.name = "diag_mode", .mode = (S_IRUGO | S_IWUSR)},
3428 .show = qlcnic_show_diag_mode,
3429 .store = qlcnic_store_diag_mode,
3430 };
3431
3432 int qlcnic_validate_max_rss(struct net_device *netdev, u8 max_hw, u8 val)
3433 {
3434 if (!use_msi_x && !use_msi) {
3435 netdev_info(netdev, "no msix or msi support, hence no rss\n");
3436 return -EINVAL;
3437 }
3438
3439 if ((val > max_hw) || (val < 2) || !is_power_of_2(val)) {
3440 netdev_info(netdev, "rss_ring valid range [2 - %x] in "
3441 "powers of 2\n", max_hw);
3442 return -EINVAL;
3443 }
3444 return 0;
3445
3446 }
3447
3448 int qlcnic_set_max_rss(struct qlcnic_adapter *adapter, u8 data)
3449 {
3450 struct net_device *netdev = adapter->netdev;
3451 int err = 0;
3452
3453 if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
3454 return -EBUSY;
3455
3456 netif_device_detach(netdev);
3457 if (netif_running(netdev))
3458 __qlcnic_down(adapter, netdev);
3459 qlcnic_detach(adapter);
3460 qlcnic_teardown_intr(adapter);
3461
3462 if (qlcnic_enable_msix(adapter, data)) {
3463 netdev_info(netdev, "failed setting max_rss; rss disabled\n");
3464 qlcnic_enable_msi_legacy(adapter);
3465 }
3466
3467 if (netif_running(netdev)) {
3468 err = qlcnic_attach(adapter);
3469 if (err)
3470 goto done;
3471 err = __qlcnic_up(adapter, netdev);
3472 if (err)
3473 goto done;
3474 qlcnic_restore_indev_addr(netdev, NETDEV_UP);
3475 }
3476 done:
3477 netif_device_attach(netdev);
3478 clear_bit(__QLCNIC_RESETTING, &adapter->state);
3479 return err;
3480 }
3481
3482 static int
3483 qlcnic_validate_beacon(struct qlcnic_adapter *adapter, u16 beacon, u8 *state,
3484 u8 *rate)
3485 {
3486 *rate = LSB(beacon);
3487 *state = MSB(beacon);
3488
3489 QLCDB(adapter, DRV, "rate %x state %x\n", *rate, *state);
3490
3491 if (!*state) {
3492 *rate = __QLCNIC_MAX_LED_RATE;
3493 return 0;
3494 } else if (*state > __QLCNIC_MAX_LED_STATE)
3495 return -EINVAL;
3496
3497 if ((!*rate) || (*rate > __QLCNIC_MAX_LED_RATE))
3498 return -EINVAL;
3499
3500 return 0;
3501 }
3502
3503 static ssize_t
3504 qlcnic_store_beacon(struct device *dev,
3505 struct device_attribute *attr, const char *buf, size_t len)
3506 {
3507 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3508 int max_sds_rings = adapter->max_sds_rings;
3509 u16 beacon;
3510 u8 b_state, b_rate;
3511 int err;
3512
3513 if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC) {
3514 dev_warn(dev, "LED test not supported for non "
3515 "privileged function\n");
3516 return -EOPNOTSUPP;
3517 }
3518
3519 if (len != sizeof(u16))
3520 return QL_STATUS_INVALID_PARAM;
3521
3522 memcpy(&beacon, buf, sizeof(u16));
3523 err = qlcnic_validate_beacon(adapter, beacon, &b_state, &b_rate);
3524 if (err)
3525 return err;
3526
3527 if (adapter->ahw->beacon_state == b_state)
3528 return len;
3529
3530 rtnl_lock();
3531
3532 if (!adapter->ahw->beacon_state)
3533 if (test_and_set_bit(__QLCNIC_LED_ENABLE, &adapter->state)) {
3534 rtnl_unlock();
3535 return -EBUSY;
3536 }
3537
3538 if (test_bit(__QLCNIC_RESETTING, &adapter->state)) {
3539 err = -EIO;
3540 goto out;
3541 }
3542
3543 if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
3544 err = qlcnic_diag_alloc_res(adapter->netdev, QLCNIC_LED_TEST);
3545 if (err)
3546 goto out;
3547 set_bit(__QLCNIC_DIAG_RES_ALLOC, &adapter->state);
3548 }
3549
3550 err = qlcnic_config_led(adapter, b_state, b_rate);
3551
3552 if (!err) {
3553 err = len;
3554 adapter->ahw->beacon_state = b_state;
3555 }
3556
3557 if (test_and_clear_bit(__QLCNIC_DIAG_RES_ALLOC, &adapter->state))
3558 qlcnic_diag_free_res(adapter->netdev, max_sds_rings);
3559
3560 out:
3561 if (!adapter->ahw->beacon_state)
3562 clear_bit(__QLCNIC_LED_ENABLE, &adapter->state);
3563 rtnl_unlock();
3564
3565 return err;
3566 }
3567
3568 static ssize_t
3569 qlcnic_show_beacon(struct device *dev,
3570 struct device_attribute *attr, char *buf)
3571 {
3572 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3573
3574 return sprintf(buf, "%d\n", adapter->ahw->beacon_state);
3575 }
3576
3577 static struct device_attribute dev_attr_beacon = {
3578 .attr = {.name = "beacon", .mode = (S_IRUGO | S_IWUSR)},
3579 .show = qlcnic_show_beacon,
3580 .store = qlcnic_store_beacon,
3581 };
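/*
 * Note: the beacon attribute takes a raw two-byte value, not ASCII text:
 * LSB = blink rate, MSB = on/off state. On a little-endian host a
 * diagnostic tool could, for example, write "\x01\x01" to turn the LED on.
 */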
3582
3583 static int
3584 qlcnic_sysfs_validate_crb(struct qlcnic_adapter *adapter,
3585 loff_t offset, size_t size)
3586 {
3587 size_t crb_size = 4;
3588
3589 if (!(adapter->flags & QLCNIC_DIAG_ENABLED))
3590 return -EIO;
3591
3592 if (offset < QLCNIC_PCI_CRBSPACE) {
3593 if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM,
3594 QLCNIC_PCI_CAMQM_END))
3595 crb_size = 8;
3596 else
3597 return -EINVAL;
3598 }
3599
3600 if ((size != crb_size) || (offset & (crb_size-1)))
3601 return -EINVAL;
3602
3603 return 0;
3604 }
3605
3606 static ssize_t
3607 qlcnic_sysfs_read_crb(struct file *filp, struct kobject *kobj,
3608 struct bin_attribute *attr,
3609 char *buf, loff_t offset, size_t size)
3610 {
3611 struct device *dev = container_of(kobj, struct device, kobj);
3612 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3613 u32 data;
3614 u64 qmdata;
3615 int ret;
3616
3617 ret = qlcnic_sysfs_validate_crb(adapter, offset, size);
3618 if (ret != 0)
3619 return ret;
3620
3621 if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM, QLCNIC_PCI_CAMQM_END)) {
3622 qlcnic_pci_camqm_read_2M(adapter, offset, &qmdata);
3623 memcpy(buf, &qmdata, size);
3624 } else {
3625 data = QLCRD32(adapter, offset);
3626 memcpy(buf, &data, size);
3627 }
3628 return size;
3629 }
3630
3631 static ssize_t
3632 qlcnic_sysfs_write_crb(struct file *filp, struct kobject *kobj,
3633 struct bin_attribute *attr,
3634 char *buf, loff_t offset, size_t size)
3635 {
3636 struct device *dev = container_of(kobj, struct device, kobj);
3637 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3638 u32 data;
3639 u64 qmdata;
3640 int ret;
3641
3642 ret = qlcnic_sysfs_validate_crb(adapter, offset, size);
3643 if (ret != 0)
3644 return ret;
3645
3646 if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM, QLCNIC_PCI_CAMQM_END)) {
3647 memcpy(&qmdata, buf, size);
3648 qlcnic_pci_camqm_write_2M(adapter, offset, qmdata);
3649 } else {
3650 memcpy(&data, buf, size);
3651 QLCWR32(adapter, offset, data);
3652 }
3653 return size;
3654 }
3655
3656 static int
3657 qlcnic_sysfs_validate_mem(struct qlcnic_adapter *adapter,
3658 loff_t offset, size_t size)
3659 {
3660 if (!(adapter->flags & QLCNIC_DIAG_ENABLED))
3661 return -EIO;
3662
3663 if ((size != 8) || (offset & 0x7))
3664 return -EIO;
3665
3666 return 0;
3667 }
3668
3669 static ssize_t
3670 qlcnic_sysfs_read_mem(struct file *filp, struct kobject *kobj,
3671 struct bin_attribute *attr,
3672 char *buf, loff_t offset, size_t size)
3673 {
3674 struct device *dev = container_of(kobj, struct device, kobj);
3675 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3676 u64 data;
3677 int ret;
3678
3679 ret = qlcnic_sysfs_validate_mem(adapter, offset, size);
3680 if (ret != 0)
3681 return ret;
3682
3683 if (qlcnic_pci_mem_read_2M(adapter, offset, &data))
3684 return -EIO;
3685
3686 memcpy(buf, &data, size);
3687
3688 return size;
3689 }
3690
3691 static ssize_t
3692 qlcnic_sysfs_write_mem(struct file *filp, struct kobject *kobj,
3693 struct bin_attribute *attr,
3694 char *buf, loff_t offset, size_t size)
3695 {
3696 struct device *dev = container_of(kobj, struct device, kobj);
3697 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3698 u64 data;
3699 int ret;
3700
3701 ret = qlcnic_sysfs_validate_mem(adapter, offset, size);
3702 if (ret != 0)
3703 return ret;
3704
3705 memcpy(&data, buf, size);
3706
3707 if (qlcnic_pci_mem_write_2M(adapter, offset, data))
3708 return -EIO;
3709
3710 return size;
3711 }
3712
3713 static struct bin_attribute bin_attr_crb = {
3714 .attr = {.name = "crb", .mode = (S_IRUGO | S_IWUSR)},
3715 .size = 0,
3716 .read = qlcnic_sysfs_read_crb,
3717 .write = qlcnic_sysfs_write_crb,
3718 };
3719
3720 static struct bin_attribute bin_attr_mem = {
3721 .attr = {.name = "mem", .mode = (S_IRUGO | S_IWUSR)},
3722 .size = 0,
3723 .read = qlcnic_sysfs_read_mem,
3724 .write = qlcnic_sysfs_write_mem,
3725 };
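/* The "crb" and "mem" binary attributes give diagnostic tools raw access
 * to CRB registers and adapter memory; accesses must be naturally aligned
 * and are only honoured while diag mode is enabled. */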
3726
3727 static int
3728 validate_pm_config(struct qlcnic_adapter *adapter,
3729 struct qlcnic_pm_func_cfg *pm_cfg, int count)
3730 {
3731
3732 u8 src_pci_func, s_esw_id, d_esw_id;
3733 u8 dest_pci_func;
3734 int i;
3735
3736 for (i = 0; i < count; i++) {
3737 src_pci_func = pm_cfg[i].pci_func;
3738 dest_pci_func = pm_cfg[i].dest_npar;
3739 if (src_pci_func >= QLCNIC_MAX_PCI_FUNC
3740 || dest_pci_func >= QLCNIC_MAX_PCI_FUNC)
3741 return QL_STATUS_INVALID_PARAM;
3742
3743 if (adapter->npars[src_pci_func].type != QLCNIC_TYPE_NIC)
3744 return QL_STATUS_INVALID_PARAM;
3745
3746 if (adapter->npars[dest_pci_func].type != QLCNIC_TYPE_NIC)
3747 return QL_STATUS_INVALID_PARAM;
3748
3749 s_esw_id = adapter->npars[src_pci_func].phy_port;
3750 d_esw_id = adapter->npars[dest_pci_func].phy_port;
3751
3752 if (s_esw_id != d_esw_id)
3753 return QL_STATUS_INVALID_PARAM;
3754
3755 }
3756 return 0;
3757
3758 }
3759
3760 static ssize_t
3761 qlcnic_sysfs_write_pm_config(struct file *filp, struct kobject *kobj,
3762 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3763 {
3764 struct device *dev = container_of(kobj, struct device, kobj);
3765 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3766 struct qlcnic_pm_func_cfg *pm_cfg;
3767 u32 id, action, pci_func;
3768 int count, rem, i, ret;
3769
3770 count = size / sizeof(struct qlcnic_pm_func_cfg);
3771 rem = size % sizeof(struct qlcnic_pm_func_cfg);
3772 if (rem)
3773 return QL_STATUS_INVALID_PARAM;
3774
3775 pm_cfg = (struct qlcnic_pm_func_cfg *) buf;
3776
3777 ret = validate_pm_config(adapter, pm_cfg, count);
3778 if (ret)
3779 return ret;
3780 for (i = 0; i < count; i++) {
3781 pci_func = pm_cfg[i].pci_func;
3782 action = !!pm_cfg[i].action;
3783 id = adapter->npars[pci_func].phy_port;
3784 ret = qlcnic_config_port_mirroring(adapter, id,
3785 action, pci_func);
3786 if (ret)
3787 return ret;
3788 }
3789
3790 for (i = 0; i < count; i++) {
3791 pci_func = pm_cfg[i].pci_func;
3792 id = adapter->npars[pci_func].phy_port;
3793 adapter->npars[pci_func].enable_pm = !!pm_cfg[i].action;
3794 adapter->npars[pci_func].dest_npar = id;
3795 }
3796 return size;
3797 }
3798
3799 static ssize_t
3800 qlcnic_sysfs_read_pm_config(struct file *filp, struct kobject *kobj,
3801 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3802 {
3803 struct device *dev = container_of(kobj, struct device, kobj);
3804 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3805 struct qlcnic_pm_func_cfg pm_cfg[QLCNIC_MAX_PCI_FUNC];
3806 int i;
3807
3808 if (size != sizeof(pm_cfg))
3809 return QL_STATUS_INVALID_PARAM;
3810
3811 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
3812 if (adapter->npars[i].type != QLCNIC_TYPE_NIC)
3813 continue;
3814 pm_cfg[i].action = adapter->npars[i].enable_pm;
3815 pm_cfg[i].dest_npar = 0;
3816 pm_cfg[i].pci_func = i;
3817 }
3818 memcpy(buf, &pm_cfg, size);
3819
3820 return size;
3821 }
3822
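/*
 * Validate eswitch configuration requests against the current driver
 * operating mode before they are sent to firmware.
 */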
3823 static int
3824 validate_esw_config(struct qlcnic_adapter *adapter,
3825 struct qlcnic_esw_func_cfg *esw_cfg, int count)
3826 {
3827 u32 op_mode;
3828 u8 pci_func;
3829 int i;
3830
3831 op_mode = readl(adapter->ahw->pci_base0 + QLCNIC_DRV_OP_MODE);
3832
3833 for (i = 0; i < count; i++) {
3834 pci_func = esw_cfg[i].pci_func;
3835 if (pci_func >= QLCNIC_MAX_PCI_FUNC)
3836 return QL_STATUS_INVALID_PARAM;
3837
3838 if (adapter->op_mode == QLCNIC_MGMT_FUNC)
3839 if (adapter->npars[pci_func].type != QLCNIC_TYPE_NIC)
3840 return QL_STATUS_INVALID_PARAM;
3841
3842 switch (esw_cfg[i].op_mode) {
3843 case QLCNIC_PORT_DEFAULTS:
3844 if (QLC_DEV_GET_DRV(op_mode, pci_func) !=
3845 QLCNIC_NON_PRIV_FUNC) {
3846 if (esw_cfg[i].mac_anti_spoof != 0)
3847 return QL_STATUS_INVALID_PARAM;
3848 if (esw_cfg[i].mac_override != 1)
3849 return QL_STATUS_INVALID_PARAM;
3850 if (esw_cfg[i].promisc_mode != 1)
3851 return QL_STATUS_INVALID_PARAM;
3852 }
3853 break;
3854 case QLCNIC_ADD_VLAN:
3855 if (!IS_VALID_VLAN(esw_cfg[i].vlan_id))
3856 return QL_STATUS_INVALID_PARAM;
3857 if (!esw_cfg[i].op_type)
3858 return QL_STATUS_INVALID_PARAM;
3859 break;
3860 case QLCNIC_DEL_VLAN:
3861 if (!esw_cfg[i].op_type)
3862 return QL_STATUS_INVALID_PARAM;
3863 break;
3864 default:
3865 return QL_STATUS_INVALID_PARAM;
3866 }
3867 }
3868 return 0;
3869 }
3870
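/*
 * Push validated eswitch settings to firmware; the management function
 * also mirrors the result into its per-NPAR bookkeeping.
 */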
3871 static ssize_t
3872 qlcnic_sysfs_write_esw_config(struct file *file, struct kobject *kobj,
3873 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3874 {
3875 struct device *dev = container_of(kobj, struct device, kobj);
3876 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3877 struct qlcnic_esw_func_cfg *esw_cfg;
3878 struct qlcnic_npar_info *npar;
3879 int count, rem, i, ret;
3880 u8 pci_func, op_mode = 0;
3881
3882 count = size / sizeof(struct qlcnic_esw_func_cfg);
3883 rem = size % sizeof(struct qlcnic_esw_func_cfg);
3884 if (rem)
3885 return QL_STATUS_INVALID_PARAM;
3886
3887 esw_cfg = (struct qlcnic_esw_func_cfg *) buf;
3888 ret = validate_esw_config(adapter, esw_cfg, count);
3889 if (ret)
3890 return ret;
3891
3892 for (i = 0; i < count; i++) {
3893 if (adapter->op_mode == QLCNIC_MGMT_FUNC)
3894 if (qlcnic_config_switch_port(adapter, &esw_cfg[i]))
3895 return QL_STATUS_INVALID_PARAM;
3896
3897 if (adapter->ahw->pci_func != esw_cfg[i].pci_func)
3898 continue;
3899
3900 op_mode = esw_cfg[i].op_mode;
3901 qlcnic_get_eswitch_port_config(adapter, &esw_cfg[i]);
3902 esw_cfg[i].op_mode = op_mode;
3903 esw_cfg[i].pci_func = adapter->ahw->pci_func;
3904
3905 switch (esw_cfg[i].op_mode) {
3906 case QLCNIC_PORT_DEFAULTS:
3907 qlcnic_set_eswitch_port_features(adapter, &esw_cfg[i]);
3908 break;
3909 case QLCNIC_ADD_VLAN:
3910 qlcnic_set_vlan_config(adapter, &esw_cfg[i]);
3911 break;
3912 case QLCNIC_DEL_VLAN:
3913 esw_cfg[i].vlan_id = 0;
3914 qlcnic_set_vlan_config(adapter, &esw_cfg[i]);
3915 break;
3916 }
3917 }
3918
3919 if (adapter->op_mode != QLCNIC_MGMT_FUNC)
3920 goto out;
3921
3922 for (i = 0; i < count; i++) {
3923 pci_func = esw_cfg[i].pci_func;
3924 npar = &adapter->npars[pci_func];
3925 switch (esw_cfg[i].op_mode) {
3926 case QLCNIC_PORT_DEFAULTS:
3927 npar->promisc_mode = esw_cfg[i].promisc_mode;
3928 npar->mac_override = esw_cfg[i].mac_override;
3929 npar->offload_flags = esw_cfg[i].offload_flags;
3930 npar->mac_anti_spoof = esw_cfg[i].mac_anti_spoof;
3931 npar->discard_tagged = esw_cfg[i].discard_tagged;
3932 break;
3933 case QLCNIC_ADD_VLAN:
3934 npar->pvid = esw_cfg[i].vlan_id;
3935 break;
3936 case QLCNIC_DEL_VLAN:
3937 npar->pvid = 0;
3938 break;
3939 }
3940 }
3941 out:
3942 return size;
3943 }
3944
3945 static ssize_t
3946 qlcnic_sysfs_read_esw_config(struct file *file, struct kobject *kobj,
3947 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3948 {
3949 struct device *dev = container_of(kobj, struct device, kobj);
3950 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3951 struct qlcnic_esw_func_cfg esw_cfg[QLCNIC_MAX_PCI_FUNC];
3952 u8 i;
3953
3954 if (size != sizeof(esw_cfg))
3955 return QL_STATUS_INVALID_PARAM;
3956
3957 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
3958 if (adapter->npars[i].type != QLCNIC_TYPE_NIC)
3959 continue;
3960 esw_cfg[i].pci_func = i;
3961 if (qlcnic_get_eswitch_port_config(adapter, &esw_cfg[i]))
3962 return QL_STATUS_INVALID_PARAM;
3963 }
3964 memcpy(buf, &esw_cfg, size);
3965
3966 return size;
3967 }
3968
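/* NPAR updates must target NIC functions with valid min/max bandwidth. */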
3969 static int
3970 validate_npar_config(struct qlcnic_adapter *adapter,
3971 struct qlcnic_npar_func_cfg *np_cfg, int count)
3972 {
3973 u8 pci_func, i;
3974
3975 for (i = 0; i < count; i++) {
3976 pci_func = np_cfg[i].pci_func;
3977 if (pci_func >= QLCNIC_MAX_PCI_FUNC)
3978 return QL_STATUS_INVALID_PARAM;
3979
3980 if (adapter->npars[pci_func].type != QLCNIC_TYPE_NIC)
3981 return QL_STATUS_INVALID_PARAM;
3982
3983 if (!IS_VALID_BW(np_cfg[i].min_bw) ||
3984 !IS_VALID_BW(np_cfg[i].max_bw))
3985 return QL_STATUS_INVALID_PARAM;
3986 }
3987 return 0;
3988 }
3989
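/* Update per-function TX bandwidth limits in firmware and cache the values. */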
3990 static ssize_t
3991 qlcnic_sysfs_write_npar_config(struct file *file, struct kobject *kobj,
3992 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3993 {
3994 struct device *dev = container_of(kobj, struct device, kobj);
3995 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3996 struct qlcnic_info nic_info;
3997 struct qlcnic_npar_func_cfg *np_cfg;
3998 int i, count, rem, ret;
3999 u8 pci_func;
4000
4001 count = size / sizeof(struct qlcnic_npar_func_cfg);
4002 rem = size % sizeof(struct qlcnic_npar_func_cfg);
4003 if (rem)
4004 return QL_STATUS_INVALID_PARAM;
4005
4006 np_cfg = (struct qlcnic_npar_func_cfg *) buf;
4007 ret = validate_npar_config(adapter, np_cfg, count);
4008 if (ret)
4009 return ret;
4010
4011 for (i = 0; i < count; i++) {
4012 pci_func = np_cfg[i].pci_func;
4013 ret = qlcnic_get_nic_info(adapter, &nic_info, pci_func);
4014 if (ret)
4015 return ret;
4016 nic_info.pci_func = pci_func;
4017 nic_info.min_tx_bw = np_cfg[i].min_bw;
4018 nic_info.max_tx_bw = np_cfg[i].max_bw;
4019 ret = qlcnic_set_nic_info(adapter, &nic_info);
4020 if (ret)
4021 return ret;
4022 adapter->npars[i].min_bw = nic_info.min_tx_bw;
4023 adapter->npars[i].max_bw = nic_info.max_tx_bw;
4024 }
4025
4026 return size;
4027
4028 }
4029 static ssize_t
4030 qlcnic_sysfs_read_npar_config(struct file *file, struct kobject *kobj,
4031 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
4032 {
4033 struct device *dev = container_of(kobj, struct device, kobj);
4034 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
4035 struct qlcnic_info nic_info;
4036 struct qlcnic_npar_func_cfg np_cfg[QLCNIC_MAX_PCI_FUNC];
4037 int i, ret;
4038
4039 if (size != sizeof(np_cfg))
4040 return QL_STATUS_INVALID_PARAM;
4041
4042 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
4043 if (adapter->npars[i].type != QLCNIC_TYPE_NIC)
4044 continue;
4045 ret = qlcnic_get_nic_info(adapter, &nic_info, i);
4046 if (ret)
4047 return ret;
4048
4049 np_cfg[i].pci_func = i;
4050 np_cfg[i].op_mode = (u8)nic_info.op_mode;
4051 np_cfg[i].port_num = nic_info.phys_port;
4052 np_cfg[i].fw_capab = nic_info.capabilities;
4053 np_cfg[i].min_bw = nic_info.min_tx_bw;
4054 np_cfg[i].max_bw = nic_info.max_tx_bw;
4055 np_cfg[i].max_tx_queues = nic_info.max_tx_ques;
4056 np_cfg[i].max_rx_queues = nic_info.max_rx_ques;
4057 }
4058 memcpy(buf, &np_cfg, size);
4059 return size;
4060 }
4061
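/* Return RX and TX eswitch statistics for the PCI function given by 'offset'. */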
4062 static ssize_t
4063 qlcnic_sysfs_get_port_stats(struct file *file, struct kobject *kobj,
4064 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
4065 {
4066 struct device *dev = container_of(kobj, struct device, kobj);
4067 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
4068 struct qlcnic_esw_statistics port_stats;
4069 int ret;
4070
4071 if (size != sizeof(struct qlcnic_esw_statistics))
4072 return QL_STATUS_INVALID_PARAM;
4073
4074 if (offset >= QLCNIC_MAX_PCI_FUNC)
4075 return QL_STATUS_INVALID_PARAM;
4076
4077 memset(&port_stats, 0, size);
4078 ret = qlcnic_get_port_stats(adapter, offset, QLCNIC_QUERY_RX_COUNTER,
4079 &port_stats.rx);
4080 if (ret)
4081 return ret;
4082
4083 ret = qlcnic_get_port_stats(adapter, offset, QLCNIC_QUERY_TX_COUNTER,
4084 &port_stats.tx);
4085 if (ret)
4086 return ret;
4087
4088 memcpy(buf, &port_stats, size);
4089 return size;
4090 }
4091
4092 static ssize_t
4093 qlcnic_sysfs_get_esw_stats(struct file *file, struct kobject *kobj,
4094 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
4095 {
4096 struct device *dev = container_of(kobj, struct device, kobj);
4097 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
4098 struct qlcnic_esw_statistics esw_stats;
4099 int ret;
4100
4101 if (size != sizeof(struct qlcnic_esw_statistics))
4102 return QL_STATUS_INVALID_PARAM;
4103
4104 if (offset >= QLCNIC_NIU_MAX_XG_PORTS)
4105 return QL_STATUS_INVALID_PARAM;
4106
4107 memset(&esw_stats, 0, size);
4108 ret = qlcnic_get_eswitch_stats(adapter, offset, QLCNIC_QUERY_RX_COUNTER,
4109 &esw_stats.rx);
4110 if (ret)
4111 return ret;
4112
4113 ret = qlcnic_get_eswitch_stats(adapter, offset, QLCNIC_QUERY_TX_COUNTER,
4114 &esw_stats.tx);
4115 if (ret)
4116 return ret;
4117
4118 memcpy(buf, &esw_stats, size);
4119 return size;
4120 }
4121
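/* Writing to "esw_stats" clears the RX and TX counters of eswitch 'offset'. */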
4122 static ssize_t
4123 qlcnic_sysfs_clear_esw_stats(struct file *file, struct kobject *kobj,
4124 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
4125 {
4126 struct device *dev = container_of(kobj, struct device, kobj);
4127 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
4128 int ret;
4129
4130 if (offset >= QLCNIC_NIU_MAX_XG_PORTS)
4131 return QL_STATUS_INVALID_PARAM;
4132
4133 ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_ESWITCH, offset,
4134 QLCNIC_QUERY_RX_COUNTER);
4135 if (ret)
4136 return ret;
4137
4138 ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_ESWITCH, offset,
4139 QLCNIC_QUERY_TX_COUNTER);
4140 if (ret)
4141 return ret;
4142
4143 return size;
4144 }
4145
4146 static ssize_t
4147 qlcnic_sysfs_clear_port_stats(struct file *file, struct kobject *kobj,
4148 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
4149 {
4150
4151 struct device *dev = container_of(kobj, struct device, kobj);
4152 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
4153 int ret;
4154
4155 if (offset >= QLCNIC_MAX_PCI_FUNC)
4156 return QL_STATUS_INVALID_PARAM;
4157
4158 ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_PORT, offset,
4159 QLCNIC_QUERY_RX_COUNTER);
4160 if (ret)
4161 return ret;
4162
4163 ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_PORT, offset,
4164 QLCNIC_QUERY_TX_COUNTER);
4165 if (ret)
4166 return ret;
4167
4168 return size;
4169 }
4170
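/* Report the PCI function layout (type, port, bandwidth, MAC) from firmware. */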
4171 static ssize_t
4172 qlcnic_sysfs_read_pci_config(struct file *file, struct kobject *kobj,
4173 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
4174 {
4175 struct device *dev = container_of(kobj, struct device, kobj);
4176 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
4177 struct qlcnic_pci_func_cfg pci_cfg[QLCNIC_MAX_PCI_FUNC];
4178 struct qlcnic_pci_info *pci_info;
4179 int i, ret;
4180
4181 if (size != sizeof(pci_cfg))
4182 return QL_STATUS_INVALID_PARAM;
4183
4184 pci_info = kcalloc(QLCNIC_MAX_PCI_FUNC, sizeof(*pci_info), GFP_KERNEL);
4185 if (!pci_info)
4186 return -ENOMEM;
4187
4188 ret = qlcnic_get_pci_info(adapter, pci_info);
4189 if (ret) {
4190 kfree(pci_info);
4191 return ret;
4192 }
4193
4194 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
4195 pci_cfg[i].pci_func = pci_info[i].id;
4196 pci_cfg[i].func_type = pci_info[i].type;
4197 pci_cfg[i].port_num = pci_info[i].default_port;
4198 pci_cfg[i].min_bw = pci_info[i].tx_min_bw;
4199 pci_cfg[i].max_bw = pci_info[i].tx_max_bw;
4200 memcpy(&pci_cfg[i].def_mac_addr, &pci_info[i].mac, ETH_ALEN);
4201 }
4202 memcpy(buf, &pci_cfg, size);
4203 kfree(pci_info);
4204 return size;
4205 }
4206 static struct bin_attribute bin_attr_npar_config = {
4207 .attr = {.name = "npar_config", .mode = (S_IRUGO | S_IWUSR)},
4208 .size = 0,
4209 .read = qlcnic_sysfs_read_npar_config,
4210 .write = qlcnic_sysfs_write_npar_config,
4211 };
4212
4213 static struct bin_attribute bin_attr_pci_config = {
4214 .attr = {.name = "pci_config", .mode = (S_IRUGO | S_IWUSR)},
4215 .size = 0,
4216 .read = qlcnic_sysfs_read_pci_config,
4217 .write = NULL,
4218 };
4219
4220 static struct bin_attribute bin_attr_port_stats = {
4221 .attr = {.name = "port_stats", .mode = (S_IRUGO | S_IWUSR)},
4222 .size = 0,
4223 .read = qlcnic_sysfs_get_port_stats,
4224 .write = qlcnic_sysfs_clear_port_stats,
4225 };
4226
4227 static struct bin_attribute bin_attr_esw_stats = {
4228 .attr = {.name = "esw_stats", .mode = (S_IRUGO | S_IWUSR)},
4229 .size = 0,
4230 .read = qlcnic_sysfs_get_esw_stats,
4231 .write = qlcnic_sysfs_clear_esw_stats,
4232 };
4233
4234 static struct bin_attribute bin_attr_esw_config = {
4235 .attr = {.name = "esw_config", .mode = (S_IRUGO | S_IWUSR)},
4236 .size = 0,
4237 .read = qlcnic_sysfs_read_esw_config,
4238 .write = qlcnic_sysfs_write_esw_config,
4239 };
4240
4241 static struct bin_attribute bin_attr_pm_config = {
4242 .attr = {.name = "pm_config", .mode = (S_IRUGO | S_IWUSR)},
4243 .size = 0,
4244 .read = qlcnic_sysfs_read_pm_config,
4245 .write = qlcnic_sysfs_write_pm_config,
4246 };
4247
4248 static void
4249 qlcnic_create_sysfs_entries(struct qlcnic_adapter *adapter)
4250 {
4251 struct device *dev = &adapter->pdev->dev;
4252
4253 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG)
4254 if (device_create_file(dev, &dev_attr_bridged_mode))
4255 dev_warn(dev,
4256 "failed to create bridged_mode sysfs entry\n");
4257 }
4258
4259 static void
4260 qlcnic_remove_sysfs_entries(struct qlcnic_adapter *adapter)
4261 {
4262 struct device *dev = &adapter->pdev->dev;
4263
4264 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG)
4265 device_remove_file(dev, &dev_attr_bridged_mode);
4266 }
4267
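/*
 * Create the diagnostic sysfs nodes; the set exposed depends on the
 * function's privilege level and whether the eswitch is enabled.
 */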
4268 static void
4269 qlcnic_create_diag_entries(struct qlcnic_adapter *adapter)
4270 {
4271 struct device *dev = &adapter->pdev->dev;
4272
4273 if (device_create_bin_file(dev, &bin_attr_port_stats))
4274 dev_info(dev, "failed to create port stats sysfs entry\n");
4275
4276 if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC)
4277 return;
4278 if (device_create_file(dev, &dev_attr_diag_mode))
4279 dev_info(dev, "failed to create diag_mode sysfs entry\n");
4280 if (device_create_file(dev, &dev_attr_beacon))
4281 dev_info(dev, "failed to create beacon sysfs entry\n");
4282 if (device_create_bin_file(dev, &bin_attr_crb))
4283 dev_info(dev, "failed to create crb sysfs entry\n");
4284 if (device_create_bin_file(dev, &bin_attr_mem))
4285 dev_info(dev, "failed to create mem sysfs entry\n");
4286 if (device_create_bin_file(dev, &bin_attr_pci_config))
4287 dev_info(dev, "failed to create pci config sysfs entry\n");
4288 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
4289 return;
4290 if (device_create_bin_file(dev, &bin_attr_esw_config))
4291 dev_info(dev, "failed to create esw config sysfs entry\n");
4292 if (adapter->op_mode != QLCNIC_MGMT_FUNC)
4293 return;
4294 if (device_create_bin_file(dev, &bin_attr_npar_config))
4295 dev_info(dev, "failed to create npar config sysfs entry\n");
4296 if (device_create_bin_file(dev, &bin_attr_pm_config))
4297 dev_info(dev, "failed to create pm config sysfs entry\n");
4298 if (device_create_bin_file(dev, &bin_attr_esw_stats))
4299 dev_info(dev, "failed to create eswitch stats sysfs entry\n");
4300 }
4301
4302 static void
4303 qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter)
4304 {
4305 struct device *dev = &adapter->pdev->dev;
4306
4307 device_remove_bin_file(dev, &bin_attr_port_stats);
4308
4309 if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC)
4310 return;
4311 device_remove_file(dev, &dev_attr_diag_mode);
4312 device_remove_file(dev, &dev_attr_beacon);
4313 device_remove_bin_file(dev, &bin_attr_crb);
4314 device_remove_bin_file(dev, &bin_attr_mem);
4315 device_remove_bin_file(dev, &bin_attr_pci_config);
4316 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
4317 return;
4318 device_remove_bin_file(dev, &bin_attr_esw_config);
4319 if (adapter->op_mode != QLCNIC_MGMT_FUNC)
4320 return;
4321 device_remove_bin_file(dev, &bin_attr_npar_config);
4322 device_remove_bin_file(dev, &bin_attr_pm_config);
4323 device_remove_bin_file(dev, &bin_attr_esw_stats);
4324 }
4325
4326 #ifdef CONFIG_INET
4327
4328 #define is_qlcnic_netdev(dev) (dev->netdev_ops == &qlcnic_netdev_ops)
4329
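/* Program firmware with the IPv4 addresses configured on a qlcnic netdev. */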
4330 static void
4331 qlcnic_config_indev_addr(struct qlcnic_adapter *adapter,
4332 struct net_device *dev, unsigned long event)
4333 {
4334 struct in_device *indev;
4335
4336 indev = in_dev_get(dev);
4337 if (!indev)
4338 return;
4339
4340 for_ifa(indev) {
4341 switch (event) {
4342 case NETDEV_UP:
4343 qlcnic_config_ipaddr(adapter,
4344 ifa->ifa_address, QLCNIC_IP_UP);
4345 break;
4346 case NETDEV_DOWN:
4347 qlcnic_config_ipaddr(adapter,
4348 ifa->ifa_address, QLCNIC_IP_DOWN);
4349 break;
4350 default:
4351 break;
4352 }
4353 } endfor_ifa(indev);
4354
4355 in_dev_put(indev);
4356 }
4357
4358 static void
4359 qlcnic_restore_indev_addr(struct net_device *netdev, unsigned long event)
4360 {
4361 struct qlcnic_adapter *adapter = netdev_priv(netdev);
4362 struct net_device *dev;
4363 u16 vid;
4364
4365 qlcnic_config_indev_addr(adapter, netdev, event);
4366
4367 for_each_set_bit(vid, adapter->vlans, VLAN_N_VID) {
4368 dev = __vlan_find_dev_deep(netdev, vid);
4369 if (!dev)
4370 continue;
4371 qlcnic_config_indev_addr(adapter, dev, event);
4372 }
4373 }
4374
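/*
 * Netdevice notifier: resolve VLAN devices to the underlying qlcnic netdev
 * and re-program its IPv4 addresses on state changes.
 */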
4375 static int qlcnic_netdev_event(struct notifier_block *this,
4376 unsigned long event, void *ptr)
4377 {
4378 struct qlcnic_adapter *adapter;
4379 struct net_device *dev = (struct net_device *)ptr;
4380
4381 recheck:
4382 if (dev == NULL)
4383 goto done;
4384
4385 if (dev->priv_flags & IFF_802_1Q_VLAN) {
4386 dev = vlan_dev_real_dev(dev);
4387 goto recheck;
4388 }
4389
4390 if (!is_qlcnic_netdev(dev))
4391 goto done;
4392
4393 adapter = netdev_priv(dev);
4394
4395 if (!adapter)
4396 goto done;
4397
4398 if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
4399 goto done;
4400
4401 qlcnic_config_indev_addr(adapter, dev, event);
4402 done:
4403 return NOTIFY_DONE;
4404 }
4405
4406 static int
4407 qlcnic_inetaddr_event(struct notifier_block *this,
4408 unsigned long event, void *ptr)
4409 {
4410 struct qlcnic_adapter *adapter;
4411 struct net_device *dev;
4412
4413 struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
4414
4415 dev = ifa->ifa_dev ? ifa->ifa_dev->dev : NULL;
4416
4417 recheck:
4418 if (dev == NULL)
4419 goto done;
4420
4421 if (dev->priv_flags & IFF_802_1Q_VLAN) {
4422 dev = vlan_dev_real_dev(dev);
4423 goto recheck;
4424 }
4425
4426 if (!is_qlcnic_netdev(dev))
4427 goto done;
4428
4429 adapter = netdev_priv(dev);
4430
4431 if (!adapter)
4432 goto done;
4433
4434 if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
4435 goto done;
4436
4437 switch (event) {
4438 case NETDEV_UP:
4439 qlcnic_config_ipaddr(adapter, ifa->ifa_address, QLCNIC_IP_UP);
4440 break;
4441 case NETDEV_DOWN:
4442 qlcnic_config_ipaddr(adapter, ifa->ifa_address, QLCNIC_IP_DOWN);
4443 break;
4444 default:
4445 break;
4446 }
4447
4448 done:
4449 return NOTIFY_DONE;
4450 }
4451
4452 static struct notifier_block qlcnic_netdev_cb = {
4453 .notifier_call = qlcnic_netdev_event,
4454 };
4455
4456 static struct notifier_block qlcnic_inetaddr_cb = {
4457 .notifier_call = qlcnic_inetaddr_event,
4458 };
4459 #else
4460 static void
4461 qlcnic_restore_indev_addr(struct net_device *dev, unsigned long event)
4462 { }
4463 #endif
4464 static struct pci_error_handlers qlcnic_err_handler = {
4465 .error_detected = qlcnic_io_error_detected,
4466 .slot_reset = qlcnic_io_slot_reset,
4467 .resume = qlcnic_io_resume,
4468 };
4469
4470 static struct pci_driver qlcnic_driver = {
4471 .name = qlcnic_driver_name,
4472 .id_table = qlcnic_pci_tbl,
4473 .probe = qlcnic_probe,
4474 .remove = __devexit_p(qlcnic_remove),
4475 #ifdef CONFIG_PM
4476 .suspend = qlcnic_suspend,
4477 .resume = qlcnic_resume,
4478 #endif
4479 .shutdown = qlcnic_shutdown,
4480 .err_handler = &qlcnic_err_handler
4481
4482 };
4483
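/* Module entry point: set up the workqueue, notifiers and PCI driver. */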
4484 static int __init qlcnic_init_module(void)
4485 {
4486 int ret;
4487
4488 printk(KERN_INFO "%s\n", qlcnic_driver_string);
4489
4490 qlcnic_wq = create_singlethread_workqueue("qlcnic");
4491 if (qlcnic_wq == NULL) {
4492 printk(KERN_ERR "qlcnic: cannot create workqueue\n");
4493 return -ENOMEM;
4494 }
4495
4496 #ifdef CONFIG_INET
4497 register_netdevice_notifier(&qlcnic_netdev_cb);
4498 register_inetaddr_notifier(&qlcnic_inetaddr_cb);
4499 #endif
4500
4501 ret = pci_register_driver(&qlcnic_driver);
4502 if (ret) {
4503 #ifdef CONFIG_INET
4504 unregister_inetaddr_notifier(&qlcnic_inetaddr_cb);
4505 unregister_netdevice_notifier(&qlcnic_netdev_cb);
4506 #endif
4507 destroy_workqueue(qlcnic_wq);
4508 }
4509
4510 return ret;
4511 }
4512
4513 module_init(qlcnic_init_module);
4514
4515 static void __exit qlcnic_exit_module(void)
4516 {
4517
4518 pci_unregister_driver(&qlcnic_driver);
4519
4520 #ifdef CONFIG_INET
4521 unregister_inetaddr_notifier(&qlcnic_inetaddr_cb);
4522 unregister_netdevice_notifier(&qlcnic_netdev_cb);
4523 #endif
4524 destroy_workqueue(qlcnic_wq);
4525 }
4526
4527 module_exit(qlcnic_exit_module);
4528