1 /*
2 * Copyright (c) 2017-2018 Cavium, Inc.
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
19 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
20 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
21 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
22 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
23 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
24 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
25 * POSSIBILITY OF SUCH DAMAGE.
26 */
27
28 /*
29 * File: qlnx_os.c
30 * Author : David C Somayajulu, Cavium, Inc., San Jose, CA 95131.
31 */
32
33 #include <sys/cdefs.h>
34 #include "qlnx_os.h"
35 #include "bcm_osal.h"
36 #include "reg_addr.h"
37 #include "ecore_gtt_reg_addr.h"
38 #include "ecore.h"
39 #include "ecore_chain.h"
40 #include "ecore_status.h"
41 #include "ecore_hw.h"
42 #include "ecore_rt_defs.h"
43 #include "ecore_init_ops.h"
44 #include "ecore_int.h"
45 #include "ecore_cxt.h"
46 #include "ecore_spq.h"
47 #include "ecore_init_fw_funcs.h"
48 #include "ecore_sp_commands.h"
49 #include "ecore_dev_api.h"
50 #include "ecore_l2_api.h"
51 #include "ecore_l2.h"
52 #include "ecore_mcp.h"
53 #include "ecore_hw_defs.h"
54 #include "mcp_public.h"
55 #include "ecore_iro.h"
56 #include "nvm_cfg.h"
57 #include "ecore_dbg_fw_funcs.h"
58 #include "ecore_iov_api.h"
59 #include "ecore_vf_api.h"
60
61 #include "qlnx_ioctl.h"
62 #include "qlnx_def.h"
63 #include "qlnx_ver.h"
64
65 #ifdef QLNX_ENABLE_IWARP
66 #include "qlnx_rdma.h"
67 #endif /* #ifdef QLNX_ENABLE_IWARP */
68
69 #ifdef CONFIG_ECORE_SRIOV
70 #include <sys/nv.h>
71 #include <sys/iov_schema.h>
72 #include <dev/pci/pci_iov.h>
73 #endif /* #ifdef CONFIG_ECORE_SRIOV */
74
75 #include <sys/smp.h>
76
77 /*
78 * static functions
79 */
80 /*
81 * ioctl related functions
82 */
83 static void qlnx_add_sysctls(qlnx_host_t *ha);
84
85 /*
86 * main driver
87 */
88 static void qlnx_release(qlnx_host_t *ha);
89 static void qlnx_fp_isr(void *arg);
90 static void qlnx_init_ifnet(device_t dev, qlnx_host_t *ha);
91 static void qlnx_init(void *arg);
92 static void qlnx_init_locked(qlnx_host_t *ha);
93 static int qlnx_set_multi(qlnx_host_t *ha);
94 static int qlnx_set_promisc_allmulti(qlnx_host_t *ha, int flags);
95 static int _qlnx_set_promisc_allmulti(qlnx_host_t *ha, bool promisc, bool allmulti);
96 static int qlnx_ioctl(if_t ifp, u_long cmd, caddr_t data);
97 static int qlnx_media_change(if_t ifp);
98 static void qlnx_media_status(if_t ifp, struct ifmediareq *ifmr);
99 static void qlnx_stop(qlnx_host_t *ha);
100 static int qlnx_send(qlnx_host_t *ha, struct qlnx_fastpath *fp,
101 struct mbuf **m_headp);
102 static int qlnx_get_ifq_snd_maxlen(qlnx_host_t *ha);
103 static void qlnx_get_mac_addr(qlnx_host_t *ha);
104 static uint32_t qlnx_get_optics(qlnx_host_t *ha,
105 struct qlnx_link_output *if_link);
106 static int qlnx_transmit(if_t ifp, struct mbuf *mp);
107 static int qlnx_transmit_locked(if_t ifp, struct qlnx_fastpath *fp,
108 struct mbuf *mp);
109 static void qlnx_qflush(if_t ifp);
110
111 static int qlnx_alloc_parent_dma_tag(qlnx_host_t *ha);
112 static void qlnx_free_parent_dma_tag(qlnx_host_t *ha);
113 static int qlnx_alloc_tx_dma_tag(qlnx_host_t *ha);
114 static void qlnx_free_tx_dma_tag(qlnx_host_t *ha);
115 static int qlnx_alloc_rx_dma_tag(qlnx_host_t *ha);
116 static void qlnx_free_rx_dma_tag(qlnx_host_t *ha);
117
118 static int qlnx_get_mfw_version(qlnx_host_t *ha, uint32_t *mfw_ver);
119 static int qlnx_get_flash_size(qlnx_host_t *ha, uint32_t *flash_size);
120
121 static int qlnx_nic_setup(struct ecore_dev *cdev,
122 struct ecore_pf_params *func_params);
123 static int qlnx_nic_start(struct ecore_dev *cdev);
124 static int qlnx_slowpath_start(qlnx_host_t *ha);
125 static int qlnx_slowpath_stop(qlnx_host_t *ha);
126 static int qlnx_init_hw(qlnx_host_t *ha);
127 static void qlnx_set_id(struct ecore_dev *cdev, char name[NAME_SIZE],
128 char ver_str[VER_SIZE]);
129 static void qlnx_unload(qlnx_host_t *ha);
130 static int qlnx_load(qlnx_host_t *ha);
131 static void qlnx_dump_buf8(qlnx_host_t *ha, const char *msg, void *dbuf,
132 uint32_t len);
133 static int qlnx_alloc_rx_buffer(qlnx_host_t *ha, struct qlnx_rx_queue *rxq);
134 static void qlnx_reuse_rx_data(struct qlnx_rx_queue *rxq);
135 static void qlnx_update_rx_prod(struct ecore_hwfn *p_hwfn,
136 struct qlnx_rx_queue *rxq);
137 static int qlnx_remove_all_mcast_mac(qlnx_host_t *ha);
138 static int qlnx_set_rx_accept_filter(qlnx_host_t *ha, uint8_t filter);
139 static int qlnx_grc_dumpsize(qlnx_host_t *ha, uint32_t *num_dwords,
140 int hwfn_index);
141 static int qlnx_idle_chk_size(qlnx_host_t *ha, uint32_t *num_dwords,
142 int hwfn_index);
143 static void qlnx_timer(void *arg);
144 static int qlnx_alloc_tx_br(qlnx_host_t *ha, struct qlnx_fastpath *fp);
145 static void qlnx_free_tx_br(qlnx_host_t *ha, struct qlnx_fastpath *fp);
146 static void qlnx_trigger_dump(qlnx_host_t *ha);
147 static uint16_t qlnx_num_tx_compl(qlnx_host_t *ha, struct qlnx_fastpath *fp,
148 struct qlnx_tx_queue *txq);
149 static void qlnx_tx_int(qlnx_host_t *ha, struct qlnx_fastpath *fp,
150 struct qlnx_tx_queue *txq);
151 static int qlnx_rx_int(qlnx_host_t *ha, struct qlnx_fastpath *fp, int budget,
152 int lro_enable);
153 static void qlnx_fp_taskqueue(void *context, int pending);
154 static void qlnx_sample_storm_stats(qlnx_host_t *ha);
155 static int qlnx_alloc_tpa_mbuf(qlnx_host_t *ha, uint16_t rx_buf_size,
156 struct qlnx_agg_info *tpa);
157 static void qlnx_free_tpa_mbuf(qlnx_host_t *ha, struct qlnx_agg_info *tpa);
158
159 static uint64_t qlnx_get_counter(if_t ifp, ift_counter cnt);
160
161 /*
162 * Hooks to the Operating Systems
163 */
164 static int qlnx_pci_probe (device_t);
165 static int qlnx_pci_attach (device_t);
166 static int qlnx_pci_detach (device_t);
167
168 #ifndef QLNX_VF
169
170 #ifdef CONFIG_ECORE_SRIOV
171
172 static int qlnx_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t *params);
173 static void qlnx_iov_uninit(device_t dev);
174 static int qlnx_iov_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *params);
175 static void qlnx_initialize_sriov(qlnx_host_t *ha);
176 static void qlnx_pf_taskqueue(void *context, int pending);
177 static int qlnx_create_pf_taskqueues(qlnx_host_t *ha);
178 static void qlnx_destroy_pf_taskqueues(qlnx_host_t *ha);
179 static void qlnx_inform_vf_link_state(struct ecore_hwfn *p_hwfn, qlnx_host_t *ha);
180
181 #endif /* #ifdef CONFIG_ECORE_SRIOV */
182
183 static device_method_t qlnx_pci_methods[] = {
184 /* Device interface */
185 DEVMETHOD(device_probe, qlnx_pci_probe),
186 DEVMETHOD(device_attach, qlnx_pci_attach),
187 DEVMETHOD(device_detach, qlnx_pci_detach),
188
189 #ifdef CONFIG_ECORE_SRIOV
190 DEVMETHOD(pci_iov_init, qlnx_iov_init),
191 DEVMETHOD(pci_iov_uninit, qlnx_iov_uninit),
192 DEVMETHOD(pci_iov_add_vf, qlnx_iov_add_vf),
193 #endif /* #ifdef CONFIG_ECORE_SRIOV */
194 DEVMETHOD_END
195 };
196
197 static driver_t qlnx_pci_driver = {
198 "ql", qlnx_pci_methods, sizeof (qlnx_host_t),
199 };
200
201 MODULE_VERSION(if_qlnxe,1);
202 DRIVER_MODULE(if_qlnxe, pci, qlnx_pci_driver, 0, 0);
203
204 MODULE_DEPEND(if_qlnxe, pci, 1, 1, 1);
205 MODULE_DEPEND(if_qlnxe, ether, 1, 1, 1);
206
207 #else
208
209 static device_method_t qlnxv_pci_methods[] = {
210 /* Device interface */
211 DEVMETHOD(device_probe, qlnx_pci_probe),
212 DEVMETHOD(device_attach, qlnx_pci_attach),
213 DEVMETHOD(device_detach, qlnx_pci_detach),
214 DEVMETHOD_END
215 };
216
217 static driver_t qlnxv_pci_driver = {
218 "ql", qlnxv_pci_methods, sizeof (qlnx_host_t),
219 };
220
221 MODULE_VERSION(if_qlnxev,1);
222 DRIVER_MODULE(if_qlnxev, pci, qlnxv_pci_driver, 0, 0);
223
224 MODULE_DEPEND(if_qlnxev, pci, 1, 1, 1);
225 MODULE_DEPEND(if_qlnxev, ether, 1, 1, 1);
226
227 #endif /* #ifdef QLNX_VF */
228
229 MALLOC_DEFINE(M_QLNXBUF, "qlnxbuf", "Buffers for qlnx driver");
230
231 char qlnx_ver_str[VER_SIZE];
232 char qlnx_name_str[NAME_SIZE];
233
234 /*
235 * Some PCI Configuration Space Related Defines
236 */
237
238 #ifndef PCI_VENDOR_QLOGIC
239 #define PCI_VENDOR_QLOGIC 0x1077
240 #endif
241
242 /* 40G Adapter QLE45xxx*/
243 #ifndef QLOGIC_PCI_DEVICE_ID_1634
244 #define QLOGIC_PCI_DEVICE_ID_1634 0x1634
245 #endif
246
247 /* 100G Adapter QLE45xxx*/
248 #ifndef QLOGIC_PCI_DEVICE_ID_1644
249 #define QLOGIC_PCI_DEVICE_ID_1644 0x1644
250 #endif
251
252 /* 25G Adapter QLE45xxx*/
253 #ifndef QLOGIC_PCI_DEVICE_ID_1656
254 #define QLOGIC_PCI_DEVICE_ID_1656 0x1656
255 #endif
256
257 /* 50G Adapter QLE45xxx*/
258 #ifndef QLOGIC_PCI_DEVICE_ID_1654
259 #define QLOGIC_PCI_DEVICE_ID_1654 0x1654
260 #endif
261
262 /* 10G/25G/40G Adapter QLE41xxx*/
263 #ifndef QLOGIC_PCI_DEVICE_ID_8070
264 #define QLOGIC_PCI_DEVICE_ID_8070 0x8070
265 #endif
266
267 /* SRIOV Device (All Speeds) Adapter QLE41xxx*/
268 #ifndef QLOGIC_PCI_DEVICE_ID_8090
269 #define QLOGIC_PCI_DEVICE_ID_8090 0x8090
270 #endif
271
272 SYSCTL_NODE(_hw, OID_AUTO, qlnxe, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
273 "qlnxe driver parameters");
274
275 /* Number of Queues: 0 (Auto) or 1 to 32 (fixed queue number) */
276 static int qlnxe_queue_count = QLNX_DEFAULT_RSS;
277
278 SYSCTL_INT(_hw_qlnxe, OID_AUTO, queue_count, CTLFLAG_RDTUN,
279 &qlnxe_queue_count, 0, "Multi-Queue queue count");
280
281 /*
282 * Note on RDMA personality setting
283 *
284 * Read the personality configured in NVRAM
285 * If the personality is ETH_ONLY, ETH_IWARP or ETH_ROCE and
286 * the configured personality in sysctl is QLNX_PERSONALITY_DEFAULT
287 * use the personality in NVRAM.
288
 * Otherwise use the personality configured in sysctl.
290 *
291 */
292 #define QLNX_PERSONALITY_DEFAULT 0x0 /* use personality in NVRAM */
293 #define QLNX_PERSONALITY_ETH_ONLY 0x1 /* Override with ETH_ONLY */
294 #define QLNX_PERSONALITY_ETH_IWARP 0x2 /* Override with ETH_IWARP */
295 #define QLNX_PERSONALITY_ETH_ROCE 0x3 /* Override with ETH_ROCE */
296 #define QLNX_PERSONALITY_BITS_PER_FUNC 4
297 #define QLNX_PERSONALIY_MASK 0xF
298
299 /* RDMA configuration; 64bit field allows setting for 16 physical functions*/
300 static uint64_t qlnxe_rdma_configuration = 0x22222222;
301
302 SYSCTL_U64(_hw_qlnxe, OID_AUTO, rdma_configuration, CTLFLAG_RDTUN,
303 &qlnxe_rdma_configuration, 0, "RDMA Configuration");
304
305 int
qlnx_vf_device(qlnx_host_t * ha)306 qlnx_vf_device(qlnx_host_t *ha)
307 {
308 uint16_t device_id;
309
310 device_id = ha->device_id;
311
312 if (device_id == QLOGIC_PCI_DEVICE_ID_8090)
313 return 0;
314
315 return -1;
316 }
317
318 static int
qlnx_valid_device(qlnx_host_t * ha)319 qlnx_valid_device(qlnx_host_t *ha)
320 {
321 uint16_t device_id;
322
323 device_id = ha->device_id;
324
325 #ifndef QLNX_VF
326 if ((device_id == QLOGIC_PCI_DEVICE_ID_1634) ||
327 (device_id == QLOGIC_PCI_DEVICE_ID_1644) ||
328 (device_id == QLOGIC_PCI_DEVICE_ID_1656) ||
329 (device_id == QLOGIC_PCI_DEVICE_ID_1654) ||
330 (device_id == QLOGIC_PCI_DEVICE_ID_8070))
331 return 0;
332 #else
333 if (device_id == QLOGIC_PCI_DEVICE_ID_8090)
334 return 0;
335
336 #endif /* #ifndef QLNX_VF */
337 return -1;
338 }
339
#ifdef QLNX_ENABLE_IWARP
/*
 * Name: qlnx_rdma_supported
 * Function: Returns 0 when the PCI device id is one of the adapters on
 *           which RDMA is supported, -1 otherwise.  Note the 100GbE
 *           device (0x1644) is intentionally absent from this list.
 */
static int
qlnx_rdma_supported(struct qlnx_host *ha)
{
	switch (pci_get_device(ha->pci_dev)) {
	case QLOGIC_PCI_DEVICE_ID_1634:
	case QLOGIC_PCI_DEVICE_ID_1656:
	case QLOGIC_PCI_DEVICE_ID_1654:
	case QLOGIC_PCI_DEVICE_ID_8070:
		return (0);
	default:
		return (-1);
	}
}
#endif /* #ifdef QLNX_ENABLE_IWARP */
357
358 /*
359 * Name: qlnx_pci_probe
360 * Function: Validate the PCI device to be a QLA80XX device
361 */
362 static int
qlnx_pci_probe(device_t dev)363 qlnx_pci_probe(device_t dev)
364 {
365 snprintf(qlnx_ver_str, sizeof(qlnx_ver_str), "v%d.%d.%d",
366 QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR, QLNX_VERSION_BUILD);
367 snprintf(qlnx_name_str, sizeof(qlnx_name_str), "qlnx");
368
369 if (pci_get_vendor(dev) != PCI_VENDOR_QLOGIC) {
370 return (ENXIO);
371 }
372
373 switch (pci_get_device(dev)) {
374 #ifndef QLNX_VF
375
376 case QLOGIC_PCI_DEVICE_ID_1644:
377 device_set_descf(dev, "%s v%d.%d.%d",
378 "Qlogic 100GbE PCI CNA Adapter-Ethernet Function",
379 QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR,
380 QLNX_VERSION_BUILD);
381 break;
382
383 case QLOGIC_PCI_DEVICE_ID_1634:
384 device_set_descf(dev, "%s v%d.%d.%d",
385 "Qlogic 40GbE PCI CNA Adapter-Ethernet Function",
386 QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR,
387 QLNX_VERSION_BUILD);
388 break;
389
390 case QLOGIC_PCI_DEVICE_ID_1656:
391 device_set_descf(dev, "%s v%d.%d.%d",
392 "Qlogic 25GbE PCI CNA Adapter-Ethernet Function",
393 QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR,
394 QLNX_VERSION_BUILD);
395 break;
396
397 case QLOGIC_PCI_DEVICE_ID_1654:
398 device_set_descf(dev, "%s v%d.%d.%d",
399 "Qlogic 50GbE PCI CNA Adapter-Ethernet Function",
400 QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR,
401 QLNX_VERSION_BUILD);
402 break;
403
404 case QLOGIC_PCI_DEVICE_ID_8070:
405 device_set_descf(dev, "%s v%d.%d.%d",
406 "Qlogic 10GbE/25GbE/40GbE PCI CNA (AH)"
407 " Adapter-Ethernet Function",
408 QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR,
409 QLNX_VERSION_BUILD);
410 break;
411
412 #else
413 case QLOGIC_PCI_DEVICE_ID_8090:
414 device_set_descf(dev, "%s v%d.%d.%d",
415 "Qlogic SRIOV PCI CNA (AH) "
416 "Adapter-Ethernet Function",
417 QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR,
418 QLNX_VERSION_BUILD);
419 break;
420
421 #endif /* #ifndef QLNX_VF */
422
423 default:
424 return (ENXIO);
425 }
426
427 #ifdef QLNX_ENABLE_IWARP
428 qlnx_rdma_init();
429 #endif /* #ifdef QLNX_ENABLE_IWARP */
430
431 return (BUS_PROBE_DEFAULT);
432 }
433
434 static uint16_t
qlnx_num_tx_compl(qlnx_host_t * ha,struct qlnx_fastpath * fp,struct qlnx_tx_queue * txq)435 qlnx_num_tx_compl(qlnx_host_t *ha, struct qlnx_fastpath *fp,
436 struct qlnx_tx_queue *txq)
437 {
438 u16 hw_bd_cons;
439 u16 ecore_cons_idx;
440
441 hw_bd_cons = le16toh(*txq->hw_cons_ptr);
442
443 ecore_cons_idx = ecore_chain_get_cons_idx(&txq->tx_pbl);
444
445 return (hw_bd_cons - ecore_cons_idx);
446 }
447
448 static void
qlnx_sp_intr(void * arg)449 qlnx_sp_intr(void *arg)
450 {
451 struct ecore_hwfn *p_hwfn;
452 qlnx_host_t *ha;
453 int i;
454
455 p_hwfn = arg;
456
457 if (p_hwfn == NULL) {
458 printf("%s: spurious slowpath intr\n", __func__);
459 return;
460 }
461
462 ha = (qlnx_host_t *)p_hwfn->p_dev;
463
464 QL_DPRINT2(ha, "enter\n");
465
466 for (i = 0; i < ha->cdev.num_hwfns; i++) {
467 if (&ha->cdev.hwfns[i] == p_hwfn) {
468 taskqueue_enqueue(ha->sp_taskqueue[i], &ha->sp_task[i]);
469 break;
470 }
471 }
472 QL_DPRINT2(ha, "exit\n");
473
474 return;
475 }
476
477 static void
qlnx_sp_taskqueue(void * context,int pending)478 qlnx_sp_taskqueue(void *context, int pending)
479 {
480 struct ecore_hwfn *p_hwfn;
481
482 p_hwfn = context;
483
484 if (p_hwfn != NULL) {
485 qlnx_sp_isr(p_hwfn);
486 }
487 return;
488 }
489
490 static int
qlnx_create_sp_taskqueues(qlnx_host_t * ha)491 qlnx_create_sp_taskqueues(qlnx_host_t *ha)
492 {
493 int i;
494 uint8_t tq_name[32];
495
496 for (i = 0; i < ha->cdev.num_hwfns; i++) {
497 struct ecore_hwfn *p_hwfn = &ha->cdev.hwfns[i];
498
499 bzero(tq_name, sizeof (tq_name));
500 snprintf(tq_name, sizeof (tq_name), "ql_sp_tq_%d", i);
501
502 TASK_INIT(&ha->sp_task[i], 0, qlnx_sp_taskqueue, p_hwfn);
503
504 ha->sp_taskqueue[i] = taskqueue_create(tq_name, M_NOWAIT,
505 taskqueue_thread_enqueue, &ha->sp_taskqueue[i]);
506
507 if (ha->sp_taskqueue[i] == NULL)
508 return (-1);
509
510 taskqueue_start_threads(&ha->sp_taskqueue[i], 1, PI_NET, "%s",
511 tq_name);
512
513 QL_DPRINT1(ha, "%p\n", ha->sp_taskqueue[i]);
514 }
515
516 return (0);
517 }
518
519 static void
qlnx_destroy_sp_taskqueues(qlnx_host_t * ha)520 qlnx_destroy_sp_taskqueues(qlnx_host_t *ha)
521 {
522 int i;
523
524 for (i = 0; i < ha->cdev.num_hwfns; i++) {
525 if (ha->sp_taskqueue[i] != NULL) {
526 taskqueue_drain(ha->sp_taskqueue[i], &ha->sp_task[i]);
527 taskqueue_free(ha->sp_taskqueue[i]);
528 }
529 }
530 return;
531 }
532
533 static void
qlnx_fp_taskqueue(void * context,int pending)534 qlnx_fp_taskqueue(void *context, int pending)
535 {
536 struct qlnx_fastpath *fp;
537 qlnx_host_t *ha;
538 if_t ifp;
539
540 fp = context;
541
542 if (fp == NULL)
543 return;
544
545 ha = (qlnx_host_t *)fp->edev;
546
547 ifp = ha->ifp;
548
549 if(if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
550 if (!drbr_empty(ifp, fp->tx_br)) {
551 if(mtx_trylock(&fp->tx_mtx)) {
552 #ifdef QLNX_TRACE_PERF_DATA
553 tx_pkts = fp->tx_pkts_transmitted;
554 tx_compl = fp->tx_pkts_completed;
555 #endif
556
557 qlnx_transmit_locked(ifp, fp, NULL);
558
559 #ifdef QLNX_TRACE_PERF_DATA
560 fp->tx_pkts_trans_fp +=
561 (fp->tx_pkts_transmitted - tx_pkts);
562 fp->tx_pkts_compl_fp +=
563 (fp->tx_pkts_completed - tx_compl);
564 #endif
565 mtx_unlock(&fp->tx_mtx);
566 }
567 }
568 }
569
570 QL_DPRINT2(ha, "exit \n");
571 return;
572 }
573
574 static int
qlnx_create_fp_taskqueues(qlnx_host_t * ha)575 qlnx_create_fp_taskqueues(qlnx_host_t *ha)
576 {
577 int i;
578 uint8_t tq_name[32];
579 struct qlnx_fastpath *fp;
580
581 for (i = 0; i < ha->num_rss; i++) {
582 fp = &ha->fp_array[i];
583
584 bzero(tq_name, sizeof (tq_name));
585 snprintf(tq_name, sizeof (tq_name), "ql_fp_tq_%d", i);
586
587 TASK_INIT(&fp->fp_task, 0, qlnx_fp_taskqueue, fp);
588
589 fp->fp_taskqueue = taskqueue_create(tq_name, M_NOWAIT,
590 taskqueue_thread_enqueue,
591 &fp->fp_taskqueue);
592
593 if (fp->fp_taskqueue == NULL)
594 return (-1);
595
596 taskqueue_start_threads(&fp->fp_taskqueue, 1, PI_NET, "%s",
597 tq_name);
598
599 QL_DPRINT1(ha, "%p\n",fp->fp_taskqueue);
600 }
601
602 return (0);
603 }
604
605 static void
qlnx_destroy_fp_taskqueues(qlnx_host_t * ha)606 qlnx_destroy_fp_taskqueues(qlnx_host_t *ha)
607 {
608 int i;
609 struct qlnx_fastpath *fp;
610
611 for (i = 0; i < ha->num_rss; i++) {
612 fp = &ha->fp_array[i];
613
614 if (fp->fp_taskqueue != NULL) {
615 taskqueue_drain(fp->fp_taskqueue, &fp->fp_task);
616 taskqueue_free(fp->fp_taskqueue);
617 fp->fp_taskqueue = NULL;
618 }
619 }
620 return;
621 }
622
/*
 * Name: qlnx_drain_fp_taskqueues
 * Function: Waits for every pending/running fastpath task to finish.
 *           Called with the QLNX lock held; the lock is dropped around
 *           each taskqueue_drain() because the fastpath task itself may
 *           need the lock — draining while holding it could deadlock.
 *           NOTE(review): dropping the lock opens a window in which ha
 *           state may change between iterations — callers must tolerate
 *           that; confirm against call sites.
 */
static void
qlnx_drain_fp_taskqueues(qlnx_host_t *ha)
{
	int i;
	struct qlnx_fastpath *fp;

	for (i = 0; i < ha->num_rss; i++) {
		fp = &ha->fp_array[i];

		if (fp->fp_taskqueue != NULL) {
			QLNX_UNLOCK(ha);
			taskqueue_drain(fp->fp_taskqueue, &fp->fp_task);
			QLNX_LOCK(ha);
		}
	}
	return;
}
640
641 static void
qlnx_get_params(qlnx_host_t * ha)642 qlnx_get_params(qlnx_host_t *ha)
643 {
644 if ((qlnxe_queue_count < 0) || (qlnxe_queue_count > QLNX_MAX_RSS)) {
645 device_printf(ha->pci_dev, "invalid queue_count value (%d)\n",
646 qlnxe_queue_count);
647 qlnxe_queue_count = 0;
648 }
649 return;
650 }
651
/*
 * Name: qlnx_error_recovery_taskqueue
 * Function: Error-recovery task body.  Performs a full restart of the
 *           device: stop the interface, tear down and restart the
 *           slowpath, re-init the interface and re-arm the periodic
 *           timer.  The RDMA device is removed before the slowpath is
 *           stopped and re-added after it is back up, so the RDMA side
 *           never sees a dead slowpath.  The sequence is strictly
 *           ordered; do not reorder these calls.
 */
static void
qlnx_error_recovery_taskqueue(void *context, int pending)
{
	qlnx_host_t *ha;

	ha = context;

	QL_DPRINT2(ha, "enter\n");

	/* Quiesce the interface under the driver lock. */
	QLNX_LOCK(ha);
	qlnx_stop(ha);
	QLNX_UNLOCK(ha);

#ifdef QLNX_ENABLE_IWARP
	qlnx_rdma_dev_remove(ha);
#endif /* #ifdef QLNX_ENABLE_IWARP */

	/* Full slowpath bounce; return values are intentionally ignored
	 * here — recovery is best-effort. */
	qlnx_slowpath_stop(ha);
	qlnx_slowpath_start(ha);

#ifdef QLNX_ENABLE_IWARP
	qlnx_rdma_dev_add(ha);
#endif /* #ifdef QLNX_ENABLE_IWARP */

	qlnx_init(ha);

	/* Restart the 1-second housekeeping timer. */
	callout_reset(&ha->qlnx_callout, hz, qlnx_timer, ha);

	QL_DPRINT2(ha, "exit\n");

	return;
}
684
685 static int
qlnx_create_error_recovery_taskqueue(qlnx_host_t * ha)686 qlnx_create_error_recovery_taskqueue(qlnx_host_t *ha)
687 {
688 uint8_t tq_name[32];
689
690 bzero(tq_name, sizeof (tq_name));
691 snprintf(tq_name, sizeof (tq_name), "ql_err_tq");
692
693 TASK_INIT(&ha->err_task, 0, qlnx_error_recovery_taskqueue, ha);
694
695 ha->err_taskqueue = taskqueue_create(tq_name, M_NOWAIT,
696 taskqueue_thread_enqueue, &ha->err_taskqueue);
697
698 if (ha->err_taskqueue == NULL)
699 return (-1);
700
701 taskqueue_start_threads(&ha->err_taskqueue, 1, PI_NET, "%s", tq_name);
702
703 QL_DPRINT1(ha, "%p\n",ha->err_taskqueue);
704
705 return (0);
706 }
707
708 static void
qlnx_destroy_error_recovery_taskqueue(qlnx_host_t * ha)709 qlnx_destroy_error_recovery_taskqueue(qlnx_host_t *ha)
710 {
711 if (ha->err_taskqueue != NULL) {
712 taskqueue_drain(ha->err_taskqueue, &ha->err_task);
713 taskqueue_free(ha->err_taskqueue);
714 }
715
716 ha->err_taskqueue = NULL;
717
718 return;
719 }
720
721 /*
722 * Name: qlnx_pci_attach
723 * Function: attaches the device to the operating system
724 */
static int
qlnx_pci_attach(device_t dev)
{
	qlnx_host_t *ha = NULL;
	uint32_t rsrc_len_reg __unused = 0;
	uint32_t rsrc_len_dbells = 0;
	uint32_t rsrc_len_msix __unused = 0;
	int i;
	uint32_t mfw_ver;
	uint32_t num_sp_msix = 0;	/* one slowpath vector per hwfn (PF only) */
	uint32_t num_rdma_irqs = 0;

	if ((ha = device_get_softc(dev)) == NULL) {
		device_printf(dev, "cannot get softc\n");
		return (ENOMEM);
	}

	memset(ha, 0, sizeof (qlnx_host_t));

	ha->device_id = pci_get_device(dev);

	if (qlnx_valid_device(ha) != 0) {
		device_printf(dev, "device is not valid device\n");
		return (ENXIO);
	}
	ha->pci_func = pci_get_function(dev);

	ha->pci_dev = dev;

	sx_init(&ha->hw_lock, "qlnx_hw_lock");

	/* Remember that hw_lock exists so qlnx_release() can destroy it. */
	ha->flags.lock_init = 1;

	pci_enable_busmaster(dev);

	/*
	 * map the PCI BARs
	 */

	/* BAR 0: device register space. */
	ha->reg_rid = PCIR_BAR(0);
	ha->pci_reg = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &ha->reg_rid,
				RF_ACTIVE);

	if (ha->pci_reg == NULL) {
		device_printf(dev, "unable to map BAR0\n");
		goto qlnx_pci_attach_err;
	}

	rsrc_len_reg = (uint32_t) bus_get_resource_count(dev, SYS_RES_MEMORY,
					ha->reg_rid);

	/*
	 * PCIR_BAR(2): doorbell space.  A zero-sized resource is only
	 * tolerated on the VF device (qlnx_vf_device() == 0).
	 * NOTE(review): the error messages below say "BAR1" for this
	 * PCIR_BAR(2) resource (and "BAR2" for PCIR_BAR(4) further down) —
	 * presumably counting mapped BARs, not BAR indices; confirm.
	 */
	ha->dbells_rid = PCIR_BAR(2);
	rsrc_len_dbells = (uint32_t) bus_get_resource_count(dev,
					SYS_RES_MEMORY,
					ha->dbells_rid);
	if (rsrc_len_dbells) {
		ha->pci_dbells = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
					&ha->dbells_rid, RF_ACTIVE);

		if (ha->pci_dbells == NULL) {
			device_printf(dev, "unable to map BAR1\n");
			goto qlnx_pci_attach_err;
		}
		ha->dbells_phys_addr = (uint64_t)
			bus_get_resource_start(dev, SYS_RES_MEMORY, ha->dbells_rid);

		ha->dbells_size = rsrc_len_dbells;
	} else {
		if (qlnx_vf_device(ha) != 0) {
			device_printf(dev, " BAR1 size is zero\n");
			goto qlnx_pci_attach_err;
		}
	}

	/* PCIR_BAR(4): MSI-X table space. */
	ha->msix_rid = PCIR_BAR(4);
	ha->msix_bar = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
			&ha->msix_rid, RF_ACTIVE);

	if (ha->msix_bar == NULL) {
		device_printf(dev, "unable to map BAR2\n");
		goto qlnx_pci_attach_err;
	}

	rsrc_len_msix = (uint32_t) bus_get_resource_count(dev, SYS_RES_MEMORY,
					ha->msix_rid);

	ha->dbg_level = 0x0000;

	QL_DPRINT1(ha, "\n\t\t\t"
		"pci_dev = %p pci_reg = %p, reg_len = 0x%08x reg_rid = 0x%08x"
		"\n\t\t\tdbells = %p, dbells_len = 0x%08x dbells_rid = 0x%08x"
		"\n\t\t\tmsix = %p, msix_len = 0x%08x msix_rid = 0x%08x"
		" msix_avail = 0x%x "
		"\n\t\t\t[ncpus = %d]\n",
		ha->pci_dev, ha->pci_reg, rsrc_len_reg,
		ha->reg_rid, ha->pci_dbells, rsrc_len_dbells, ha->dbells_rid,
		ha->msix_bar, rsrc_len_msix, ha->msix_rid, pci_msix_count(dev),
		mp_ncpus);
	/*
	 * allocate dma tags
	 */

	if (qlnx_alloc_parent_dma_tag(ha))
		goto qlnx_pci_attach_err;

	if (qlnx_alloc_tx_dma_tag(ha))
		goto qlnx_pci_attach_err;

	if (qlnx_alloc_rx_dma_tag(ha))
		goto qlnx_pci_attach_err;


	if (qlnx_init_hw(ha) != 0)
		goto qlnx_pci_attach_err;

	ha->flags.hw_init = 1;

	/* Validate the queue_count tunable. */
	qlnx_get_params(ha);

	/* 100GbE (0x1644): default to the maximum RSS queue count. */
	if((pci_get_device(dev) == QLOGIC_PCI_DEVICE_ID_1644) &&
		(qlnxe_queue_count == QLNX_DEFAULT_RSS)) {
		qlnxe_queue_count = QLNX_MAX_RSS;
	}

	/*
	 * Allocate MSI-x vectors
	 */
	/* PF (qlnx_vf_device() != 0): queue count from the tunable plus one
	 * slowpath vector per hwfn.  VF: query the queue limits from ecore. */
	if (qlnx_vf_device(ha) != 0) {
		if (qlnxe_queue_count == 0)
			ha->num_rss = QLNX_DEFAULT_RSS;
		else
			ha->num_rss = qlnxe_queue_count;

		num_sp_msix = ha->cdev.num_hwfns;
	} else {
		uint8_t max_rxq;
		uint8_t max_txq;

		ecore_vf_get_num_rxqs(&ha->cdev.hwfns[0], &max_rxq);
		/* NOTE(review): this second call also uses
		 * ecore_vf_get_num_rxqs to fill max_txq — looks like it
		 * should be ecore_vf_get_num_txqs; confirm against the
		 * ecore VF API. */
		ecore_vf_get_num_rxqs(&ha->cdev.hwfns[0], &max_txq);

		if (max_rxq < max_txq)
			ha->num_rss = max_rxq;
		else
			ha->num_rss = max_txq;

		if (ha->num_rss > QLNX_MAX_VF_RSS)
			ha->num_rss = QLNX_MAX_VF_RSS;

		num_sp_msix = 0;
	}

	/* No point in more RSS queues than CPUs. */
	if (ha->num_rss > mp_ncpus)
		ha->num_rss = mp_ncpus;

	ha->num_tc = QLNX_MAX_TC;

	ha->msix_count = pci_msix_count(dev);

#ifdef QLNX_ENABLE_IWARP

	num_rdma_irqs = qlnx_rdma_get_num_irqs(ha);

#endif /* #ifdef QLNX_ENABLE_IWARP */

	/* Need at least one fastpath vector on top of slowpath + RDMA. */
	if (!ha->msix_count ||
		(ha->msix_count < (num_sp_msix + 1 + num_rdma_irqs))) {
		device_printf(dev, "%s: msix_count[%d] not enough\n", __func__,
			ha->msix_count);
		goto qlnx_pci_attach_err;
	}

	/* Either cap the vector request or shrink num_rss to what fits. */
	if (ha->msix_count > (ha->num_rss + num_sp_msix + num_rdma_irqs))
		ha->msix_count = ha->num_rss + num_sp_msix + num_rdma_irqs;
	else
		ha->num_rss = ha->msix_count - (num_sp_msix + num_rdma_irqs);

	QL_DPRINT1(ha, "\n\t\t\t"
		"pci_reg = %p, reg_len = 0x%08x reg_rid = 0x%08x"
		"\n\t\t\tdbells = %p, dbells_len = 0x%08x dbells_rid = 0x%08x"
		"\n\t\t\tmsix = %p, msix_len = 0x%08x msix_rid = 0x%08x"
		" msix_avail = 0x%x msix_alloc = 0x%x"
		"\n\t\t\t[ncpus = %d][num_rss = 0x%x] [num_tc = 0x%x]\n",
		 ha->pci_reg, rsrc_len_reg,
		ha->reg_rid, ha->pci_dbells, rsrc_len_dbells, ha->dbells_rid,
		ha->msix_bar, rsrc_len_msix, ha->msix_rid, pci_msix_count(dev),
		ha->msix_count, mp_ncpus, ha->num_rss, ha->num_tc);

	if (pci_alloc_msix(dev, &ha->msix_count)) {
		device_printf(dev, "%s: pci_alloc_msix[%d] failed\n", __func__,
			ha->msix_count);
		ha->msix_count = 0;
		goto qlnx_pci_attach_err;
	}

	/*
	 * Initialize slow path interrupt and task queue
	 */

	/* PF only (num_sp_msix is 0 on VF): MSI-X rids 1..num_hwfns. */
	if (num_sp_msix) {
		if (qlnx_create_sp_taskqueues(ha) != 0)
			goto qlnx_pci_attach_err;

		for (i = 0; i < ha->cdev.num_hwfns; i++) {
			struct ecore_hwfn *p_hwfn = &ha->cdev.hwfns[i];

			ha->sp_irq_rid[i] = i + 1;
			ha->sp_irq[i] = bus_alloc_resource_any(dev, SYS_RES_IRQ,
						&ha->sp_irq_rid[i],
						(RF_ACTIVE | RF_SHAREABLE));
			if (ha->sp_irq[i] == NULL) {
				device_printf(dev,
					"could not allocate mbx interrupt\n");
				goto qlnx_pci_attach_err;
			}

			if (bus_setup_intr(dev, ha->sp_irq[i],
				(INTR_TYPE_NET | INTR_MPSAFE), NULL,
				qlnx_sp_intr, p_hwfn, &ha->sp_handle[i])) {
				device_printf(dev,
					"could not setup slow path interrupt\n");
				goto qlnx_pci_attach_err;
			}

			QL_DPRINT1(ha, "p_hwfn [%p] sp_irq_rid %d"
				" sp_irq %p sp_handle %p\n", p_hwfn,
				ha->sp_irq_rid[i], ha->sp_irq[i], ha->sp_handle[i]);
		}
	}

	/*
	 * initialize fast path interrupt
	 */
	if (qlnx_create_fp_taskqueues(ha) != 0)
		goto qlnx_pci_attach_err;

	/* Fastpath MSI-X rids follow the slowpath ones. */
	for (i = 0; i < ha->num_rss; i++) {
		ha->irq_vec[i].rss_idx = i;
		ha->irq_vec[i].ha = ha;
		ha->irq_vec[i].irq_rid = (1 + num_sp_msix) + i;

		ha->irq_vec[i].irq = bus_alloc_resource_any(dev, SYS_RES_IRQ,
					&ha->irq_vec[i].irq_rid,
					(RF_ACTIVE | RF_SHAREABLE));

		if (ha->irq_vec[i].irq == NULL) {
			device_printf(dev,
				"could not allocate interrupt[%d] irq_rid = %d\n",
				i, ha->irq_vec[i].irq_rid);
			goto qlnx_pci_attach_err;
		}

		if (qlnx_alloc_tx_br(ha, &ha->fp_array[i])) {
			device_printf(dev, "could not allocate tx_br[%d]\n", i);
			goto qlnx_pci_attach_err;
		}
	}

	/* PF only: timer, grcdump/idle-check buffers and error recovery. */
	if (qlnx_vf_device(ha) != 0) {
		callout_init(&ha->qlnx_callout, 1);
		ha->flags.callout_init = 1;

		for (i = 0; i < ha->cdev.num_hwfns; i++) {
			if (qlnx_grc_dumpsize(ha, &ha->grcdump_size[i], i) != 0)
				goto qlnx_pci_attach_err;
			if (ha->grcdump_size[i] == 0)
				goto qlnx_pci_attach_err;

			/* Size is returned in dwords; convert to bytes. */
			ha->grcdump_size[i] = ha->grcdump_size[i] << 2;
			QL_DPRINT1(ha, "grcdump_size[%d] = 0x%08x\n",
				i, ha->grcdump_size[i]);

			ha->grcdump[i] = qlnx_zalloc(ha->grcdump_size[i]);
			if (ha->grcdump[i] == NULL) {
				device_printf(dev, "grcdump alloc[%d] failed\n", i);
				goto qlnx_pci_attach_err;
			}

			if (qlnx_idle_chk_size(ha, &ha->idle_chk_size[i], i) != 0)
				goto qlnx_pci_attach_err;
			if (ha->idle_chk_size[i] == 0)
				goto qlnx_pci_attach_err;

			/* Dwords to bytes, as above. */
			ha->idle_chk_size[i] = ha->idle_chk_size[i] << 2;
			QL_DPRINT1(ha, "idle_chk_size[%d] = 0x%08x\n",
				i, ha->idle_chk_size[i]);

			ha->idle_chk[i] = qlnx_zalloc(ha->idle_chk_size[i]);

			if (ha->idle_chk[i] == NULL) {
				device_printf(dev, "idle_chk alloc failed\n");
				goto qlnx_pci_attach_err;
			}
		}

		if (qlnx_create_error_recovery_taskqueue(ha) != 0)
			goto qlnx_pci_attach_err;
	}

	if (qlnx_slowpath_start(ha) != 0)
		goto qlnx_pci_attach_err;
	else
		ha->flags.slowpath_start = 1;

	/* PF: read flash size and MFW version from the device, triggering a
	 * firmware dump on failure.  VF: get the MFW version via the MCP. */
	if (qlnx_vf_device(ha) != 0) {
		if (qlnx_get_flash_size(ha, &ha->flash_size) != 0) {
			qlnx_mdelay(__func__, 1000);
			qlnx_trigger_dump(ha);

			goto qlnx_pci_attach_err0;
		}

		if (qlnx_get_mfw_version(ha, &mfw_ver) != 0) {
			qlnx_mdelay(__func__, 1000);
			qlnx_trigger_dump(ha);

			goto qlnx_pci_attach_err0;
		}
	} else {
		struct ecore_hwfn *p_hwfn = &ha->cdev.hwfns[0];
		ecore_mcp_get_mfw_ver(p_hwfn, NULL, &mfw_ver, NULL);
	}

	snprintf(ha->mfw_ver, sizeof(ha->mfw_ver), "%d.%d.%d.%d",
		((mfw_ver >> 24) & 0xFF), ((mfw_ver >> 16) & 0xFF),
		((mfw_ver >> 8) & 0xFF), (mfw_ver & 0xFF));
	snprintf(ha->stormfw_ver, sizeof(ha->stormfw_ver), "%d.%d.%d.%d",
		FW_MAJOR_VERSION, FW_MINOR_VERSION, FW_REVISION_VERSION,
		FW_ENGINEERING_VERSION);

	QL_DPRINT1(ha, "STORM_FW version %s MFW version %s\n",
		 ha->stormfw_ver, ha->mfw_ver);

	qlnx_init_ifnet(dev, ha);

	/*
	 * add sysctls
	 */
	qlnx_add_sysctls(ha);

	/* Success path falls through this label intentionally: the firmware
	 * probe failures above still create the cdev and return success. */
qlnx_pci_attach_err0:
	/*
	 * create ioctl device interface
	 */
	if (qlnx_vf_device(ha) != 0) {
		if (qlnx_make_cdev(ha)) {
			device_printf(dev, "%s: ql_make_cdev failed\n", __func__);
			goto qlnx_pci_attach_err;
		}

#ifdef QLNX_ENABLE_IWARP
		qlnx_rdma_dev_add(ha);
#endif /* #ifdef QLNX_ENABLE_IWARP */
	}

#ifndef QLNX_VF
#ifdef CONFIG_ECORE_SRIOV

	if (qlnx_vf_device(ha) != 0)
		qlnx_initialize_sriov(ha);

#endif /* #ifdef CONFIG_ECORE_SRIOV */
#endif /* #ifdef QLNX_VF */

	QL_DPRINT2(ha, "success\n");

	return (0);

qlnx_pci_attach_err:

	/* qlnx_release() frees whatever the ha->flags bits say was set up. */
	qlnx_release(ha);

	return (ENXIO);
}
1099
1100 /*
1101 * Name: qlnx_pci_detach
1102 * Function: Unhooks the device from the operating system
1103 */
static int
qlnx_pci_detach(device_t dev)
{
	qlnx_host_t *ha = NULL;

	if ((ha = device_get_softc(dev)) == NULL) {
		device_printf(dev, "%s: cannot get softc\n", __func__);
		return (ENOMEM);
	}

	/* PF only (qlnx_vf_device() returns non-zero for non-VF ids):
	 * detach SR-IOV and the RDMA device first; either may veto the
	 * detach, in which case nothing has been torn down yet. */
	if (qlnx_vf_device(ha) != 0) {
#ifdef CONFIG_ECORE_SRIOV
		int ret;

		ret = pci_iov_detach(dev);
		if (ret) {
			device_printf(dev, "%s: SRIOV in use\n", __func__);
			return (ret);
		}

#endif /* #ifdef CONFIG_ECORE_SRIOV */

#ifdef QLNX_ENABLE_IWARP
		if (qlnx_rdma_dev_remove(ha) != 0)
			return (EBUSY);
#endif /* #ifdef QLNX_ENABLE_IWARP */
	}

	/* Quiesce the interface under the driver lock, then free all
	 * resources recorded in ha->flags. */
	QLNX_LOCK(ha);
	qlnx_stop(ha);
	QLNX_UNLOCK(ha);

	qlnx_release(ha);

	return (0);
}
1140
1141 #ifdef QLNX_ENABLE_IWARP
1142
1143 static uint8_t
qlnx_get_personality(uint8_t pci_func)1144 qlnx_get_personality(uint8_t pci_func)
1145 {
1146 uint8_t personality;
1147
1148 personality = (qlnxe_rdma_configuration >>
1149 (pci_func * QLNX_PERSONALITY_BITS_PER_FUNC)) &
1150 QLNX_PERSONALIY_MASK;
1151 return (personality);
1152 }
1153
1154 static void
qlnx_set_personality(qlnx_host_t * ha)1155 qlnx_set_personality(qlnx_host_t *ha)
1156 {
1157 uint8_t personality;
1158
1159 personality = qlnx_get_personality(ha->pci_func);
1160
1161 switch (personality) {
1162 case QLNX_PERSONALITY_DEFAULT:
1163 device_printf(ha->pci_dev, "%s: DEFAULT\n",
1164 __func__);
1165 ha->personality = ECORE_PCI_DEFAULT;
1166 break;
1167
1168 case QLNX_PERSONALITY_ETH_ONLY:
1169 device_printf(ha->pci_dev, "%s: ETH_ONLY\n",
1170 __func__);
1171 ha->personality = ECORE_PCI_ETH;
1172 break;
1173
1174 case QLNX_PERSONALITY_ETH_IWARP:
1175 device_printf(ha->pci_dev, "%s: ETH_IWARP\n",
1176 __func__);
1177 ha->personality = ECORE_PCI_ETH_IWARP;
1178 break;
1179
1180 case QLNX_PERSONALITY_ETH_ROCE:
1181 device_printf(ha->pci_dev, "%s: ETH_ROCE\n",
1182 __func__);
1183 ha->personality = ECORE_PCI_ETH_ROCE;
1184 break;
1185 }
1186
1187 return;
1188 }
1189
1190 #endif /* #ifdef QLNX_ENABLE_IWARP */
1191
1192 static int
qlnx_init_hw(qlnx_host_t * ha)1193 qlnx_init_hw(qlnx_host_t *ha)
1194 {
1195 int rval = 0;
1196 struct ecore_hw_prepare_params params;
1197
1198 ha->cdev.ha = ha;
1199 ecore_init_struct(&ha->cdev);
1200
1201 /* ha->dp_module = ECORE_MSG_PROBE |
1202 ECORE_MSG_INTR |
1203 ECORE_MSG_SP |
1204 ECORE_MSG_LINK |
1205 ECORE_MSG_SPQ |
1206 ECORE_MSG_RDMA;
1207 ha->dp_level = ECORE_LEVEL_VERBOSE;*/
1208 //ha->dp_module = ECORE_MSG_RDMA | ECORE_MSG_INTR | ECORE_MSG_LL2;
1209 ha->dp_level = ECORE_LEVEL_NOTICE;
1210 //ha->dp_level = ECORE_LEVEL_VERBOSE;
1211
1212 ecore_init_dp(&ha->cdev, ha->dp_module, ha->dp_level, ha->pci_dev);
1213
1214 ha->cdev.regview = ha->pci_reg;
1215
1216 ha->personality = ECORE_PCI_DEFAULT;
1217
1218 if (qlnx_vf_device(ha) == 0) {
1219 ha->cdev.b_is_vf = true;
1220
1221 if (ha->pci_dbells != NULL) {
1222 ha->cdev.doorbells = ha->pci_dbells;
1223 ha->cdev.db_phys_addr = ha->dbells_phys_addr;
1224 ha->cdev.db_size = ha->dbells_size;
1225 } else {
1226 ha->pci_dbells = ha->pci_reg;
1227 }
1228 } else {
1229 ha->cdev.doorbells = ha->pci_dbells;
1230 ha->cdev.db_phys_addr = ha->dbells_phys_addr;
1231 ha->cdev.db_size = ha->dbells_size;
1232
1233 #ifdef QLNX_ENABLE_IWARP
1234
1235 if (qlnx_rdma_supported(ha) == 0)
1236 qlnx_set_personality(ha);
1237
1238 #endif /* #ifdef QLNX_ENABLE_IWARP */
1239 }
1240 QL_DPRINT2(ha, "%s: %s\n", __func__,
1241 (ha->personality == ECORE_PCI_ETH_IWARP ? "iwarp": "ethernet"));
1242
1243 bzero(¶ms, sizeof (struct ecore_hw_prepare_params));
1244
1245 params.personality = ha->personality;
1246
1247 params.drv_resc_alloc = false;
1248 params.chk_reg_fifo = false;
1249 params.initiate_pf_flr = true;
1250 params.epoch = 0;
1251
1252 ecore_hw_prepare(&ha->cdev, ¶ms);
1253
1254 qlnx_set_id(&ha->cdev, qlnx_name_str, qlnx_ver_str);
1255
1256 QL_DPRINT1(ha, "ha = %p cdev = %p p_hwfn = %p\n",
1257 ha, &ha->cdev, &ha->cdev.hwfns[0]);
1258
1259 return (rval);
1260 }
1261
/*
 * Release every resource acquired during attach, in reverse acquisition
 * order: dump buffers, callout, slowpath, ecore HW state, cdev, ifnet,
 * DMA tags, per-queue interrupts/taskqueues, slowpath interrupts, MSI-X
 * vectors, locks, and finally the PCI BAR mappings.  Safe to call from a
 * partially-completed attach: every step is guarded by a flag or NULL check.
 */
static void
qlnx_release(qlnx_host_t *ha)
{
	device_t dev;
	int i;

	dev = ha->pci_dev;

	QL_DPRINT2(ha, "enter\n");

	/* Free any captured idle-check / GRC dump buffers. */
	for (i = 0; i < QLNX_MAX_HW_FUNCS; i++) {
		if (ha->idle_chk[i] != NULL) {
			free(ha->idle_chk[i], M_QLNXBUF);
			ha->idle_chk[i] = NULL;
		}

		if (ha->grcdump[i] != NULL) {
			free(ha->grcdump[i], M_QLNXBUF);
			ha->grcdump[i] = NULL;
		}
	}

	if (ha->flags.callout_init)
		callout_drain(&ha->qlnx_callout);

	if (ha->flags.slowpath_start) {
		qlnx_slowpath_stop(ha);
	}

	if (ha->flags.hw_init)
		ecore_hw_remove(&ha->cdev);

	qlnx_del_cdev(ha);

	if (ha->ifp != NULL)
		ether_ifdetach(ha->ifp);

	qlnx_free_tx_dma_tag(ha);

	qlnx_free_rx_dma_tag(ha);

	qlnx_free_parent_dma_tag(ha);

	/* Error-recovery taskqueue exists only on physical functions. */
	if (qlnx_vf_device(ha) != 0) {
		qlnx_destroy_error_recovery_taskqueue(ha);
	}

	/* Tear down per-RSS-queue interrupts and transmit buf_rings. */
	for (i = 0; i < ha->num_rss; i++) {
		struct qlnx_fastpath *fp = &ha->fp_array[i];

		if (ha->irq_vec[i].handle) {
			(void)bus_teardown_intr(dev, ha->irq_vec[i].irq,
				ha->irq_vec[i].handle);
		}

		if (ha->irq_vec[i].irq) {
			(void)bus_release_resource(dev, SYS_RES_IRQ,
				ha->irq_vec[i].irq_rid,
				ha->irq_vec[i].irq);
		}

		qlnx_free_tx_br(ha, fp);
	}
	qlnx_destroy_fp_taskqueues(ha);

	/* Tear down the per-hwfn slowpath interrupts. */
	for (i = 0; i < ha->cdev.num_hwfns; i++) {
		if (ha->sp_handle[i])
			(void)bus_teardown_intr(dev, ha->sp_irq[i],
				ha->sp_handle[i]);

		if (ha->sp_irq[i])
			(void) bus_release_resource(dev, SYS_RES_IRQ,
				ha->sp_irq_rid[i], ha->sp_irq[i]);
	}

	qlnx_destroy_sp_taskqueues(ha);

	if (ha->msix_count)
		pci_release_msi(dev);

	if (ha->flags.lock_init) {
		sx_destroy(&ha->hw_lock);
	}

	/* Unmap the register, doorbell and MSI-X BARs last. */
	if (ha->pci_reg)
		(void) bus_release_resource(dev, SYS_RES_MEMORY, ha->reg_rid,
				ha->pci_reg);

	if (ha->dbells_size && ha->pci_dbells)
		(void) bus_release_resource(dev, SYS_RES_MEMORY, ha->dbells_rid,
				ha->pci_dbells);

	if (ha->msix_bar)
		(void) bus_release_resource(dev, SYS_RES_MEMORY, ha->msix_rid,
				ha->msix_bar);

	QL_DPRINT2(ha, "exit\n");
	return;
}
1361
1362 static void
qlnx_trigger_dump(qlnx_host_t * ha)1363 qlnx_trigger_dump(qlnx_host_t *ha)
1364 {
1365 int i;
1366
1367 if (ha->ifp != NULL)
1368 if_setdrvflagbits(ha->ifp, 0, (IFF_DRV_OACTIVE | IFF_DRV_RUNNING));
1369
1370 QL_DPRINT2(ha, "enter\n");
1371
1372 if (qlnx_vf_device(ha) == 0)
1373 return;
1374
1375 ha->error_recovery = 1;
1376
1377 for (i = 0; i < ha->cdev.num_hwfns; i++) {
1378 qlnx_grc_dump(ha, &ha->grcdump_dwords[i], i);
1379 qlnx_idle_chk(ha, &ha->idle_chk_dwords[i], i);
1380 }
1381
1382 QL_DPRINT2(ha, "exit\n");
1383
1384 return;
1385 }
1386
1387 static int
qlnx_trigger_dump_sysctl(SYSCTL_HANDLER_ARGS)1388 qlnx_trigger_dump_sysctl(SYSCTL_HANDLER_ARGS)
1389 {
1390 int err, ret = 0;
1391 qlnx_host_t *ha;
1392
1393 err = sysctl_handle_int(oidp, &ret, 0, req);
1394
1395 if (err || !req->newptr)
1396 return (err);
1397
1398 if (ret == 1) {
1399 ha = (qlnx_host_t *)arg1;
1400 qlnx_trigger_dump(ha);
1401 }
1402 return (err);
1403 }
1404
1405 static int
qlnx_set_tx_coalesce(SYSCTL_HANDLER_ARGS)1406 qlnx_set_tx_coalesce(SYSCTL_HANDLER_ARGS)
1407 {
1408 int err, i, ret = 0, usecs = 0;
1409 qlnx_host_t *ha;
1410 struct ecore_hwfn *p_hwfn;
1411 struct qlnx_fastpath *fp;
1412
1413 err = sysctl_handle_int(oidp, &usecs, 0, req);
1414
1415 if (err || !req->newptr || !usecs || (usecs > 255))
1416 return (err);
1417
1418 ha = (qlnx_host_t *)arg1;
1419
1420 if (qlnx_vf_device(ha) == 0)
1421 return (-1);
1422
1423 for (i = 0; i < ha->num_rss; i++) {
1424 p_hwfn = &ha->cdev.hwfns[(i % ha->cdev.num_hwfns)];
1425
1426 fp = &ha->fp_array[i];
1427
1428 if (fp->txq[0]->handle != NULL) {
1429 ret = ecore_set_queue_coalesce(p_hwfn, 0,
1430 (uint16_t)usecs, fp->txq[0]->handle);
1431 }
1432 }
1433
1434 if (!ret)
1435 ha->tx_coalesce_usecs = (uint8_t)usecs;
1436
1437 return (err);
1438 }
1439
1440 static int
qlnx_set_rx_coalesce(SYSCTL_HANDLER_ARGS)1441 qlnx_set_rx_coalesce(SYSCTL_HANDLER_ARGS)
1442 {
1443 int err, i, ret = 0, usecs = 0;
1444 qlnx_host_t *ha;
1445 struct ecore_hwfn *p_hwfn;
1446 struct qlnx_fastpath *fp;
1447
1448 err = sysctl_handle_int(oidp, &usecs, 0, req);
1449
1450 if (err || !req->newptr || !usecs || (usecs > 255))
1451 return (err);
1452
1453 ha = (qlnx_host_t *)arg1;
1454
1455 if (qlnx_vf_device(ha) == 0)
1456 return (-1);
1457
1458 for (i = 0; i < ha->num_rss; i++) {
1459 p_hwfn = &ha->cdev.hwfns[(i % ha->cdev.num_hwfns)];
1460
1461 fp = &ha->fp_array[i];
1462
1463 if (fp->rxq->handle != NULL) {
1464 ret = ecore_set_queue_coalesce(p_hwfn, (uint16_t)usecs,
1465 0, fp->rxq->handle);
1466 }
1467 }
1468
1469 if (!ret)
1470 ha->rx_coalesce_usecs = (uint8_t)usecs;
1471
1472 return (err);
1473 }
1474
1475 static void
qlnx_add_sp_stats_sysctls(qlnx_host_t * ha)1476 qlnx_add_sp_stats_sysctls(qlnx_host_t *ha)
1477 {
1478 struct sysctl_ctx_list *ctx;
1479 struct sysctl_oid_list *children;
1480 struct sysctl_oid *ctx_oid;
1481
1482 ctx = device_get_sysctl_ctx(ha->pci_dev);
1483 children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev));
1484
1485 ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "spstat",
1486 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "spstat");
1487 children = SYSCTL_CHILDREN(ctx_oid);
1488
1489 SYSCTL_ADD_QUAD(ctx, children,
1490 OID_AUTO, "sp_interrupts",
1491 CTLFLAG_RD, &ha->sp_interrupts,
1492 "No. of slowpath interrupts");
1493
1494 return;
1495 }
1496
1497 static void
qlnx_add_fp_stats_sysctls(qlnx_host_t * ha)1498 qlnx_add_fp_stats_sysctls(qlnx_host_t *ha)
1499 {
1500 struct sysctl_ctx_list *ctx;
1501 struct sysctl_oid_list *children;
1502 struct sysctl_oid_list *node_children;
1503 struct sysctl_oid *ctx_oid;
1504 int i, j;
1505 uint8_t name_str[16];
1506
1507 ctx = device_get_sysctl_ctx(ha->pci_dev);
1508 children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev));
1509
1510 ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "fpstat",
1511 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "fpstat");
1512 children = SYSCTL_CHILDREN(ctx_oid);
1513
1514 for (i = 0; i < ha->num_rss; i++) {
1515 bzero(name_str, (sizeof(uint8_t) * sizeof(name_str)));
1516 snprintf(name_str, sizeof(name_str), "%d", i);
1517
1518 ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, name_str,
1519 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, name_str);
1520 node_children = SYSCTL_CHILDREN(ctx_oid);
1521
1522 /* Tx Related */
1523
1524 SYSCTL_ADD_QUAD(ctx, node_children,
1525 OID_AUTO, "tx_pkts_processed",
1526 CTLFLAG_RD, &ha->fp_array[i].tx_pkts_processed,
1527 "No. of packets processed for transmission");
1528
1529 SYSCTL_ADD_QUAD(ctx, node_children,
1530 OID_AUTO, "tx_pkts_freed",
1531 CTLFLAG_RD, &ha->fp_array[i].tx_pkts_freed,
1532 "No. of freed packets");
1533
1534 SYSCTL_ADD_QUAD(ctx, node_children,
1535 OID_AUTO, "tx_pkts_transmitted",
1536 CTLFLAG_RD, &ha->fp_array[i].tx_pkts_transmitted,
1537 "No. of transmitted packets");
1538
1539 SYSCTL_ADD_QUAD(ctx, node_children,
1540 OID_AUTO, "tx_pkts_completed",
1541 CTLFLAG_RD, &ha->fp_array[i].tx_pkts_completed,
1542 "No. of transmit completions");
1543
1544 SYSCTL_ADD_QUAD(ctx, node_children,
1545 OID_AUTO, "tx_non_tso_pkts",
1546 CTLFLAG_RD, &ha->fp_array[i].tx_non_tso_pkts,
1547 "No. of non LSO transmited packets");
1548
1549 #ifdef QLNX_TRACE_PERF_DATA
1550
1551 SYSCTL_ADD_QUAD(ctx, node_children,
1552 OID_AUTO, "tx_pkts_trans_ctx",
1553 CTLFLAG_RD, &ha->fp_array[i].tx_pkts_trans_ctx,
1554 "No. of transmitted packets in transmit context");
1555
1556 SYSCTL_ADD_QUAD(ctx, node_children,
1557 OID_AUTO, "tx_pkts_compl_ctx",
1558 CTLFLAG_RD, &ha->fp_array[i].tx_pkts_compl_ctx,
1559 "No. of transmit completions in transmit context");
1560
1561 SYSCTL_ADD_QUAD(ctx, node_children,
1562 OID_AUTO, "tx_pkts_trans_fp",
1563 CTLFLAG_RD, &ha->fp_array[i].tx_pkts_trans_fp,
1564 "No. of transmitted packets in taskqueue");
1565
1566 SYSCTL_ADD_QUAD(ctx, node_children,
1567 OID_AUTO, "tx_pkts_compl_fp",
1568 CTLFLAG_RD, &ha->fp_array[i].tx_pkts_compl_fp,
1569 "No. of transmit completions in taskqueue");
1570
1571 SYSCTL_ADD_QUAD(ctx, node_children,
1572 OID_AUTO, "tx_pkts_compl_intr",
1573 CTLFLAG_RD, &ha->fp_array[i].tx_pkts_compl_intr,
1574 "No. of transmit completions in interrupt ctx");
1575 #endif
1576
1577 SYSCTL_ADD_QUAD(ctx, node_children,
1578 OID_AUTO, "tx_tso_pkts",
1579 CTLFLAG_RD, &ha->fp_array[i].tx_tso_pkts,
1580 "No. of LSO transmited packets");
1581
1582 SYSCTL_ADD_QUAD(ctx, node_children,
1583 OID_AUTO, "tx_lso_wnd_min_len",
1584 CTLFLAG_RD, &ha->fp_array[i].tx_lso_wnd_min_len,
1585 "tx_lso_wnd_min_len");
1586
1587 SYSCTL_ADD_QUAD(ctx, node_children,
1588 OID_AUTO, "tx_defrag",
1589 CTLFLAG_RD, &ha->fp_array[i].tx_defrag,
1590 "tx_defrag");
1591
1592 SYSCTL_ADD_QUAD(ctx, node_children,
1593 OID_AUTO, "tx_nsegs_gt_elem_left",
1594 CTLFLAG_RD, &ha->fp_array[i].tx_nsegs_gt_elem_left,
1595 "tx_nsegs_gt_elem_left");
1596
1597 SYSCTL_ADD_UINT(ctx, node_children,
1598 OID_AUTO, "tx_tso_max_nsegs",
1599 CTLFLAG_RD, &ha->fp_array[i].tx_tso_max_nsegs,
1600 ha->fp_array[i].tx_tso_max_nsegs, "tx_tso_max_nsegs");
1601
1602 SYSCTL_ADD_UINT(ctx, node_children,
1603 OID_AUTO, "tx_tso_min_nsegs",
1604 CTLFLAG_RD, &ha->fp_array[i].tx_tso_min_nsegs,
1605 ha->fp_array[i].tx_tso_min_nsegs, "tx_tso_min_nsegs");
1606
1607 SYSCTL_ADD_UINT(ctx, node_children,
1608 OID_AUTO, "tx_tso_max_pkt_len",
1609 CTLFLAG_RD, &ha->fp_array[i].tx_tso_max_pkt_len,
1610 ha->fp_array[i].tx_tso_max_pkt_len,
1611 "tx_tso_max_pkt_len");
1612
1613 SYSCTL_ADD_UINT(ctx, node_children,
1614 OID_AUTO, "tx_tso_min_pkt_len",
1615 CTLFLAG_RD, &ha->fp_array[i].tx_tso_min_pkt_len,
1616 ha->fp_array[i].tx_tso_min_pkt_len,
1617 "tx_tso_min_pkt_len");
1618
1619 for (j = 0; j < QLNX_FP_MAX_SEGS; j++) {
1620 bzero(name_str, (sizeof(uint8_t) * sizeof(name_str)));
1621 snprintf(name_str, sizeof(name_str),
1622 "tx_pkts_nseg_%02d", (j+1));
1623
1624 SYSCTL_ADD_QUAD(ctx, node_children,
1625 OID_AUTO, name_str, CTLFLAG_RD,
1626 &ha->fp_array[i].tx_pkts[j], name_str);
1627 }
1628
1629 #ifdef QLNX_TRACE_PERF_DATA
1630 for (j = 0; j < 18; j++) {
1631 bzero(name_str, (sizeof(uint8_t) * sizeof(name_str)));
1632 snprintf(name_str, sizeof(name_str),
1633 "tx_pkts_hist_%02d", (j+1));
1634
1635 SYSCTL_ADD_QUAD(ctx, node_children,
1636 OID_AUTO, name_str, CTLFLAG_RD,
1637 &ha->fp_array[i].tx_pkts_hist[j], name_str);
1638 }
1639 for (j = 0; j < 5; j++) {
1640 bzero(name_str, (sizeof(uint8_t) * sizeof(name_str)));
1641 snprintf(name_str, sizeof(name_str),
1642 "tx_comInt_%02d", (j+1));
1643
1644 SYSCTL_ADD_QUAD(ctx, node_children,
1645 OID_AUTO, name_str, CTLFLAG_RD,
1646 &ha->fp_array[i].tx_comInt[j], name_str);
1647 }
1648 for (j = 0; j < 18; j++) {
1649 bzero(name_str, (sizeof(uint8_t) * sizeof(name_str)));
1650 snprintf(name_str, sizeof(name_str),
1651 "tx_pkts_q_%02d", (j+1));
1652
1653 SYSCTL_ADD_QUAD(ctx, node_children,
1654 OID_AUTO, name_str, CTLFLAG_RD,
1655 &ha->fp_array[i].tx_pkts_q[j], name_str);
1656 }
1657 #endif
1658
1659 SYSCTL_ADD_QUAD(ctx, node_children,
1660 OID_AUTO, "err_tx_nsegs_gt_elem_left",
1661 CTLFLAG_RD, &ha->fp_array[i].err_tx_nsegs_gt_elem_left,
1662 "err_tx_nsegs_gt_elem_left");
1663
1664 SYSCTL_ADD_QUAD(ctx, node_children,
1665 OID_AUTO, "err_tx_dmamap_create",
1666 CTLFLAG_RD, &ha->fp_array[i].err_tx_dmamap_create,
1667 "err_tx_dmamap_create");
1668
1669 SYSCTL_ADD_QUAD(ctx, node_children,
1670 OID_AUTO, "err_tx_defrag_dmamap_load",
1671 CTLFLAG_RD, &ha->fp_array[i].err_tx_defrag_dmamap_load,
1672 "err_tx_defrag_dmamap_load");
1673
1674 SYSCTL_ADD_QUAD(ctx, node_children,
1675 OID_AUTO, "err_tx_non_tso_max_seg",
1676 CTLFLAG_RD, &ha->fp_array[i].err_tx_non_tso_max_seg,
1677 "err_tx_non_tso_max_seg");
1678
1679 SYSCTL_ADD_QUAD(ctx, node_children,
1680 OID_AUTO, "err_tx_dmamap_load",
1681 CTLFLAG_RD, &ha->fp_array[i].err_tx_dmamap_load,
1682 "err_tx_dmamap_load");
1683
1684 SYSCTL_ADD_QUAD(ctx, node_children,
1685 OID_AUTO, "err_tx_defrag",
1686 CTLFLAG_RD, &ha->fp_array[i].err_tx_defrag,
1687 "err_tx_defrag");
1688
1689 SYSCTL_ADD_QUAD(ctx, node_children,
1690 OID_AUTO, "err_tx_free_pkt_null",
1691 CTLFLAG_RD, &ha->fp_array[i].err_tx_free_pkt_null,
1692 "err_tx_free_pkt_null");
1693
1694 SYSCTL_ADD_QUAD(ctx, node_children,
1695 OID_AUTO, "err_tx_cons_idx_conflict",
1696 CTLFLAG_RD, &ha->fp_array[i].err_tx_cons_idx_conflict,
1697 "err_tx_cons_idx_conflict");
1698
1699 SYSCTL_ADD_QUAD(ctx, node_children,
1700 OID_AUTO, "lro_cnt_64",
1701 CTLFLAG_RD, &ha->fp_array[i].lro_cnt_64,
1702 "lro_cnt_64");
1703
1704 SYSCTL_ADD_QUAD(ctx, node_children,
1705 OID_AUTO, "lro_cnt_128",
1706 CTLFLAG_RD, &ha->fp_array[i].lro_cnt_128,
1707 "lro_cnt_128");
1708
1709 SYSCTL_ADD_QUAD(ctx, node_children,
1710 OID_AUTO, "lro_cnt_256",
1711 CTLFLAG_RD, &ha->fp_array[i].lro_cnt_256,
1712 "lro_cnt_256");
1713
1714 SYSCTL_ADD_QUAD(ctx, node_children,
1715 OID_AUTO, "lro_cnt_512",
1716 CTLFLAG_RD, &ha->fp_array[i].lro_cnt_512,
1717 "lro_cnt_512");
1718
1719 SYSCTL_ADD_QUAD(ctx, node_children,
1720 OID_AUTO, "lro_cnt_1024",
1721 CTLFLAG_RD, &ha->fp_array[i].lro_cnt_1024,
1722 "lro_cnt_1024");
1723
1724 /* Rx Related */
1725
1726 SYSCTL_ADD_QUAD(ctx, node_children,
1727 OID_AUTO, "rx_pkts",
1728 CTLFLAG_RD, &ha->fp_array[i].rx_pkts,
1729 "No. of received packets");
1730
1731 SYSCTL_ADD_QUAD(ctx, node_children,
1732 OID_AUTO, "tpa_start",
1733 CTLFLAG_RD, &ha->fp_array[i].tpa_start,
1734 "No. of tpa_start packets");
1735
1736 SYSCTL_ADD_QUAD(ctx, node_children,
1737 OID_AUTO, "tpa_cont",
1738 CTLFLAG_RD, &ha->fp_array[i].tpa_cont,
1739 "No. of tpa_cont packets");
1740
1741 SYSCTL_ADD_QUAD(ctx, node_children,
1742 OID_AUTO, "tpa_end",
1743 CTLFLAG_RD, &ha->fp_array[i].tpa_end,
1744 "No. of tpa_end packets");
1745
1746 SYSCTL_ADD_QUAD(ctx, node_children,
1747 OID_AUTO, "err_m_getcl",
1748 CTLFLAG_RD, &ha->fp_array[i].err_m_getcl,
1749 "err_m_getcl");
1750
1751 SYSCTL_ADD_QUAD(ctx, node_children,
1752 OID_AUTO, "err_m_getjcl",
1753 CTLFLAG_RD, &ha->fp_array[i].err_m_getjcl,
1754 "err_m_getjcl");
1755
1756 SYSCTL_ADD_QUAD(ctx, node_children,
1757 OID_AUTO, "err_rx_hw_errors",
1758 CTLFLAG_RD, &ha->fp_array[i].err_rx_hw_errors,
1759 "err_rx_hw_errors");
1760
1761 SYSCTL_ADD_QUAD(ctx, node_children,
1762 OID_AUTO, "err_rx_alloc_errors",
1763 CTLFLAG_RD, &ha->fp_array[i].err_rx_alloc_errors,
1764 "err_rx_alloc_errors");
1765 }
1766
1767 return;
1768 }
1769
/*
 * Create the "hwstat" sysctl node and publish the device-wide hardware
 * statistics gathered into ha->hw_stats.  Counters in hw_stats.common are
 * shared across chip families; those in hw_stats.bb exist only on BB-class
 * parts.  All nodes are read-only views of the softc counters.
 */
static void
qlnx_add_hw_stats_sysctls(qlnx_host_t *ha)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid_list *children;
	struct sysctl_oid *ctx_oid;

	ctx = device_get_sysctl_ctx(ha->pci_dev);
	children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev));

	ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "hwstat",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "hwstat");
	children = SYSCTL_CHILDREN(ctx_oid);

	/* Receive discard / classification counters. */
	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "no_buff_discards",
		CTLFLAG_RD, &ha->hw_stats.common.no_buff_discards,
		"No. of packets discarded due to lack of buffer");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "packet_too_big_discard",
		CTLFLAG_RD, &ha->hw_stats.common.packet_too_big_discard,
		"No. of packets discarded because packet was too big");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "ttl0_discard",
		CTLFLAG_RD, &ha->hw_stats.common.ttl0_discard,
		"ttl0_discard");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "rx_ucast_bytes",
		CTLFLAG_RD, &ha->hw_stats.common.rx_ucast_bytes,
		"rx_ucast_bytes");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "rx_mcast_bytes",
		CTLFLAG_RD, &ha->hw_stats.common.rx_mcast_bytes,
		"rx_mcast_bytes");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "rx_bcast_bytes",
		CTLFLAG_RD, &ha->hw_stats.common.rx_bcast_bytes,
		"rx_bcast_bytes");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "rx_ucast_pkts",
		CTLFLAG_RD, &ha->hw_stats.common.rx_ucast_pkts,
		"rx_ucast_pkts");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "rx_mcast_pkts",
		CTLFLAG_RD, &ha->hw_stats.common.rx_mcast_pkts,
		"rx_mcast_pkts");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "rx_bcast_pkts",
		CTLFLAG_RD, &ha->hw_stats.common.rx_bcast_pkts,
		"rx_bcast_pkts");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "mftag_filter_discards",
		CTLFLAG_RD, &ha->hw_stats.common.mftag_filter_discards,
		"mftag_filter_discards");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "mac_filter_discards",
		CTLFLAG_RD, &ha->hw_stats.common.mac_filter_discards,
		"mac_filter_discards");

	/* Transmit unicast/multicast/broadcast counters. */
	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "tx_ucast_bytes",
		CTLFLAG_RD, &ha->hw_stats.common.tx_ucast_bytes,
		"tx_ucast_bytes");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "tx_mcast_bytes",
		CTLFLAG_RD, &ha->hw_stats.common.tx_mcast_bytes,
		"tx_mcast_bytes");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "tx_bcast_bytes",
		CTLFLAG_RD, &ha->hw_stats.common.tx_bcast_bytes,
		"tx_bcast_bytes");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "tx_ucast_pkts",
		CTLFLAG_RD, &ha->hw_stats.common.tx_ucast_pkts,
		"tx_ucast_pkts");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "tx_mcast_pkts",
		CTLFLAG_RD, &ha->hw_stats.common.tx_mcast_pkts,
		"tx_mcast_pkts");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "tx_bcast_pkts",
		CTLFLAG_RD, &ha->hw_stats.common.tx_bcast_pkts,
		"tx_bcast_pkts");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "tx_err_drop_pkts",
		CTLFLAG_RD, &ha->hw_stats.common.tx_err_drop_pkts,
		"tx_err_drop_pkts");

	/* TPA (hardware LRO aggregation) counters. */
	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "tpa_coalesced_pkts",
		CTLFLAG_RD, &ha->hw_stats.common.tpa_coalesced_pkts,
		"tpa_coalesced_pkts");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "tpa_coalesced_events",
		CTLFLAG_RD, &ha->hw_stats.common.tpa_coalesced_events,
		"tpa_coalesced_events");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "tpa_aborts_num",
		CTLFLAG_RD, &ha->hw_stats.common.tpa_aborts_num,
		"tpa_aborts_num");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "tpa_not_coalesced_pkts",
		CTLFLAG_RD, &ha->hw_stats.common.tpa_not_coalesced_pkts,
		"tpa_not_coalesced_pkts");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "tpa_coalesced_bytes",
		CTLFLAG_RD, &ha->hw_stats.common.tpa_coalesced_bytes,
		"tpa_coalesced_bytes");

	/* Receive frame-size histogram. */
	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "rx_64_byte_packets",
		CTLFLAG_RD, &ha->hw_stats.common.rx_64_byte_packets,
		"rx_64_byte_packets");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "rx_65_to_127_byte_packets",
		CTLFLAG_RD, &ha->hw_stats.common.rx_65_to_127_byte_packets,
		"rx_65_to_127_byte_packets");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "rx_128_to_255_byte_packets",
		CTLFLAG_RD, &ha->hw_stats.common.rx_128_to_255_byte_packets,
		"rx_128_to_255_byte_packets");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "rx_256_to_511_byte_packets",
		CTLFLAG_RD, &ha->hw_stats.common.rx_256_to_511_byte_packets,
		"rx_256_to_511_byte_packets");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "rx_512_to_1023_byte_packets",
		CTLFLAG_RD, &ha->hw_stats.common.rx_512_to_1023_byte_packets,
		"rx_512_to_1023_byte_packets");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "rx_1024_to_1518_byte_packets",
		CTLFLAG_RD, &ha->hw_stats.common.rx_1024_to_1518_byte_packets,
		"rx_1024_to_1518_byte_packets");

	/* BB-only jumbo buckets (note: 1523-2047 maps to the 1519-2047 HW counter). */
	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "rx_1519_to_1522_byte_packets",
		CTLFLAG_RD, &ha->hw_stats.bb.rx_1519_to_1522_byte_packets,
		"rx_1519_to_1522_byte_packets");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "rx_1523_to_2047_byte_packets",
		CTLFLAG_RD, &ha->hw_stats.bb.rx_1519_to_2047_byte_packets,
		"rx_1523_to_2047_byte_packets");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "rx_2048_to_4095_byte_packets",
		CTLFLAG_RD, &ha->hw_stats.bb.rx_2048_to_4095_byte_packets,
		"rx_2048_to_4095_byte_packets");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "rx_4096_to_9216_byte_packets",
		CTLFLAG_RD, &ha->hw_stats.bb.rx_4096_to_9216_byte_packets,
		"rx_4096_to_9216_byte_packets");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "rx_9217_to_16383_byte_packets",
		CTLFLAG_RD, &ha->hw_stats.bb.rx_9217_to_16383_byte_packets,
		"rx_9217_to_16383_byte_packets");

	/* Receive error counters. */
	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "rx_crc_errors",
		CTLFLAG_RD, &ha->hw_stats.common.rx_crc_errors,
		"rx_crc_errors");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "rx_mac_crtl_frames",
		CTLFLAG_RD, &ha->hw_stats.common.rx_mac_crtl_frames,
		"rx_mac_crtl_frames");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "rx_pause_frames",
		CTLFLAG_RD, &ha->hw_stats.common.rx_pause_frames,
		"rx_pause_frames");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "rx_pfc_frames",
		CTLFLAG_RD, &ha->hw_stats.common.rx_pfc_frames,
		"rx_pfc_frames");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "rx_align_errors",
		CTLFLAG_RD, &ha->hw_stats.common.rx_align_errors,
		"rx_align_errors");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "rx_carrier_errors",
		CTLFLAG_RD, &ha->hw_stats.common.rx_carrier_errors,
		"rx_carrier_errors");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "rx_oversize_packets",
		CTLFLAG_RD, &ha->hw_stats.common.rx_oversize_packets,
		"rx_oversize_packets");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "rx_jabbers",
		CTLFLAG_RD, &ha->hw_stats.common.rx_jabbers,
		"rx_jabbers");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "rx_undersize_packets",
		CTLFLAG_RD, &ha->hw_stats.common.rx_undersize_packets,
		"rx_undersize_packets");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "rx_fragments",
		CTLFLAG_RD, &ha->hw_stats.common.rx_fragments,
		"rx_fragments");

	/* Transmit frame-size histogram. */
	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "tx_64_byte_packets",
		CTLFLAG_RD, &ha->hw_stats.common.tx_64_byte_packets,
		"tx_64_byte_packets");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "tx_65_to_127_byte_packets",
		CTLFLAG_RD, &ha->hw_stats.common.tx_65_to_127_byte_packets,
		"tx_65_to_127_byte_packets");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "tx_128_to_255_byte_packets",
		CTLFLAG_RD, &ha->hw_stats.common.tx_128_to_255_byte_packets,
		"tx_128_to_255_byte_packets");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "tx_256_to_511_byte_packets",
		CTLFLAG_RD, &ha->hw_stats.common.tx_256_to_511_byte_packets,
		"tx_256_to_511_byte_packets");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "tx_512_to_1023_byte_packets",
		CTLFLAG_RD, &ha->hw_stats.common.tx_512_to_1023_byte_packets,
		"tx_512_to_1023_byte_packets");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "tx_1024_to_1518_byte_packets",
		CTLFLAG_RD, &ha->hw_stats.common.tx_1024_to_1518_byte_packets,
		"tx_1024_to_1518_byte_packets");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "tx_1519_to_2047_byte_packets",
		CTLFLAG_RD, &ha->hw_stats.bb.tx_1519_to_2047_byte_packets,
		"tx_1519_to_2047_byte_packets");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "tx_2048_to_4095_byte_packets",
		CTLFLAG_RD, &ha->hw_stats.bb.tx_2048_to_4095_byte_packets,
		"tx_2048_to_4095_byte_packets");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "tx_4096_to_9216_byte_packets",
		CTLFLAG_RD, &ha->hw_stats.bb.tx_4096_to_9216_byte_packets,
		"tx_4096_to_9216_byte_packets");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "tx_9217_to_16383_byte_packets",
		CTLFLAG_RD, &ha->hw_stats.bb.tx_9217_to_16383_byte_packets,
		"tx_9217_to_16383_byte_packets");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "tx_pause_frames",
		CTLFLAG_RD, &ha->hw_stats.common.tx_pause_frames,
		"tx_pause_frames");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "tx_pfc_frames",
		CTLFLAG_RD, &ha->hw_stats.common.tx_pfc_frames,
		"tx_pfc_frames");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "tx_lpi_entry_count",
		CTLFLAG_RD, &ha->hw_stats.bb.tx_lpi_entry_count,
		"tx_lpi_entry_count");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "tx_total_collisions",
		CTLFLAG_RD, &ha->hw_stats.bb.tx_total_collisions,
		"tx_total_collisions");

	/* Buffer-block (BRB) counters. */
	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "brb_truncates",
		CTLFLAG_RD, &ha->hw_stats.common.brb_truncates,
		"brb_truncates");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "brb_discards",
		CTLFLAG_RD, &ha->hw_stats.common.brb_discards,
		"brb_discards");

	/* MAC-level aggregate counters. */
	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "rx_mac_bytes",
		CTLFLAG_RD, &ha->hw_stats.common.rx_mac_bytes,
		"rx_mac_bytes");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "rx_mac_uc_packets",
		CTLFLAG_RD, &ha->hw_stats.common.rx_mac_uc_packets,
		"rx_mac_uc_packets");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "rx_mac_mc_packets",
		CTLFLAG_RD, &ha->hw_stats.common.rx_mac_mc_packets,
		"rx_mac_mc_packets");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "rx_mac_bc_packets",
		CTLFLAG_RD, &ha->hw_stats.common.rx_mac_bc_packets,
		"rx_mac_bc_packets");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "rx_mac_frames_ok",
		CTLFLAG_RD, &ha->hw_stats.common.rx_mac_frames_ok,
		"rx_mac_frames_ok");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "tx_mac_bytes",
		CTLFLAG_RD, &ha->hw_stats.common.tx_mac_bytes,
		"tx_mac_bytes");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "tx_mac_uc_packets",
		CTLFLAG_RD, &ha->hw_stats.common.tx_mac_uc_packets,
		"tx_mac_uc_packets");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "tx_mac_mc_packets",
		CTLFLAG_RD, &ha->hw_stats.common.tx_mac_mc_packets,
		"tx_mac_mc_packets");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "tx_mac_bc_packets",
		CTLFLAG_RD, &ha->hw_stats.common.tx_mac_bc_packets,
		"tx_mac_bc_packets");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "tx_mac_ctrl_frames",
		CTLFLAG_RD, &ha->hw_stats.common.tx_mac_ctrl_frames,
		"tx_mac_ctrl_frames");
	return;
}
2135
/*
 * qlnx_add_sysctls
 *	Register all per-device sysctl nodes under this device's sysctl
 *	tree: fastpath/slowpath statistics, firmware/driver version
 *	strings, debug knobs, interrupt-coalescing controls and error
 *	counters.  Also seeds the default values of the tunables held in
 *	the softc (dbg_level, dp_level, rx_pkt_threshold, ...).
 */
static void
qlnx_add_sysctls(qlnx_host_t *ha)
{
	device_t dev = ha->pci_dev;
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid_list *children;

	ctx = device_get_sysctl_ctx(dev);
	children = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));

	qlnx_add_fp_stats_sysctls(ha);
	qlnx_add_sp_stats_sysctls(ha);

	/* Hardware stats are exposed only when this is not a VF. */
	if (qlnx_vf_device(ha) != 0)
		qlnx_add_hw_stats_sysctls(ha);

	SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "Driver_Version",
		CTLFLAG_RD, qlnx_ver_str, 0,
		"Driver Version");

	SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "STORMFW_Version",
		CTLFLAG_RD, ha->stormfw_ver, 0,
		"STORM Firmware Version");

	SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "MFW_Version",
		CTLFLAG_RD, ha->mfw_ver, 0,
		"Management Firmware Version");

	SYSCTL_ADD_UINT(ctx, children,
		OID_AUTO, "personality", CTLFLAG_RD,
		&ha->personality, ha->personality,
		"\tpersonality = 0 => Ethernet Only\n"
		"\tpersonality = 3 => Ethernet and RoCE\n"
		"\tpersonality = 4 => Ethernet and iWARP\n"
		"\tpersonality = 6 => Default in Shared Memory\n");

	/* Debug/trace knobs: writable at runtime, all default to quiet. */
	ha->dbg_level = 0;
	SYSCTL_ADD_UINT(ctx, children,
		OID_AUTO, "debug", CTLFLAG_RW,
		&ha->dbg_level, ha->dbg_level, "Debug Level");

	ha->dp_level = 0x01;
	SYSCTL_ADD_UINT(ctx, children,
		OID_AUTO, "dp_level", CTLFLAG_RW,
		&ha->dp_level, ha->dp_level, "DP Level");

	ha->dbg_trace_lro_cnt = 0;
	SYSCTL_ADD_UINT(ctx, children,
		OID_AUTO, "dbg_trace_lro_cnt", CTLFLAG_RW,
		&ha->dbg_trace_lro_cnt, ha->dbg_trace_lro_cnt,
		"Trace LRO Counts");

	ha->dbg_trace_tso_pkt_len = 0;
	SYSCTL_ADD_UINT(ctx, children,
		OID_AUTO, "dbg_trace_tso_pkt_len", CTLFLAG_RW,
		&ha->dbg_trace_tso_pkt_len, ha->dbg_trace_tso_pkt_len,
		"Trace TSO packet lengths");

	ha->dp_module = 0;
	SYSCTL_ADD_UINT(ctx, children,
		OID_AUTO, "dp_module", CTLFLAG_RW,
		&ha->dp_module, ha->dp_module, "DP Module");

	/* Error-injection control used by the QL_ERR_INJECT() test hooks. */
	ha->err_inject = 0;

	SYSCTL_ADD_UINT(ctx, children,
		OID_AUTO, "err_inject", CTLFLAG_RW,
		&ha->err_inject, ha->err_inject, "Error Inject");

	ha->storm_stats_enable = 0;

	SYSCTL_ADD_UINT(ctx, children,
		OID_AUTO, "storm_stats_enable", CTLFLAG_RW,
		&ha->storm_stats_enable, ha->storm_stats_enable,
		"Enable Storm Statistics Gathering");

	ha->storm_stats_index = 0;

	SYSCTL_ADD_UINT(ctx, children,
		OID_AUTO, "storm_stats_index", CTLFLAG_RD,
		&ha->storm_stats_index, ha->storm_stats_index,
		"Enable Storm Statistics Gathering Current Index");

	/* Read-only flags indicating whether dumps were already captured. */
	ha->grcdump_taken = 0;
	SYSCTL_ADD_UINT(ctx, children,
		OID_AUTO, "grcdump_taken", CTLFLAG_RD,
		&ha->grcdump_taken, ha->grcdump_taken,
		"grcdump_taken");

	ha->idle_chk_taken = 0;
	SYSCTL_ADD_UINT(ctx, children,
		OID_AUTO, "idle_chk_taken", CTLFLAG_RD,
		&ha->idle_chk_taken, ha->idle_chk_taken,
		"idle_chk_taken");

	/*
	 * Current coalesce values are read-only here; they are changed
	 * via the set_{rx,tx}_coalesce_usecs proc handlers below.
	 */
	SYSCTL_ADD_UINT(ctx, children,
		OID_AUTO, "rx_coalesce_usecs", CTLFLAG_RD,
		&ha->rx_coalesce_usecs, ha->rx_coalesce_usecs,
		"rx_coalesce_usecs");

	SYSCTL_ADD_UINT(ctx, children,
		OID_AUTO, "tx_coalesce_usecs", CTLFLAG_RD,
		&ha->tx_coalesce_usecs, ha->tx_coalesce_usecs,
		"tx_coalesce_usecs");

	SYSCTL_ADD_PROC(ctx, children,
		OID_AUTO, "trigger_dump",
		CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
		(void *)ha, 0, qlnx_trigger_dump_sysctl, "I", "trigger_dump");

	SYSCTL_ADD_PROC(ctx, children,
		OID_AUTO, "set_rx_coalesce_usecs",
		CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
		(void *)ha, 0, qlnx_set_rx_coalesce, "I",
		"rx interrupt coalesce period microseconds");

	SYSCTL_ADD_PROC(ctx, children,
		OID_AUTO, "set_tx_coalesce_usecs",
		CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
		(void *)ha, 0, qlnx_set_tx_coalesce, "I",
		"tx interrupt coalesce period microseconds");

	/* Rx processing budget per interrupt pass. */
	ha->rx_pkt_threshold = 128;
	SYSCTL_ADD_UINT(ctx, children,
		OID_AUTO, "rx_pkt_threshold", CTLFLAG_RW,
		&ha->rx_pkt_threshold, ha->rx_pkt_threshold,
		"No. of Rx Pkts to process at a time");

	ha->rx_jumbo_buf_eq_mtu = 0;
	SYSCTL_ADD_UINT(ctx, children,
		OID_AUTO, "rx_jumbo_buf_eq_mtu", CTLFLAG_RW,
		&ha->rx_jumbo_buf_eq_mtu, ha->rx_jumbo_buf_eq_mtu,
		"== 0 => Rx Jumbo buffers are capped to 4Kbytes\n"
		"otherwise Rx Jumbo buffers are set to >= MTU size\n");

	/* Driver-level error counters (64-bit). */
	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "err_illegal_intr", CTLFLAG_RD,
		&ha->err_illegal_intr, "err_illegal_intr");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "err_fp_null", CTLFLAG_RD,
		&ha->err_fp_null, "err_fp_null");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "err_get_proto_invalid_type", CTLFLAG_RD,
		&ha->err_get_proto_invalid_type, "err_get_proto_invalid_type");
	return;
}
2284
2285 /*****************************************************************************
2286 * Operating System Network Interface Functions
2287 *****************************************************************************/
2288
/*
 * qlnx_init_ifnet
 *	Allocate and configure the network interface (ifnet) for this
 *	adapter: baudrate by PCI device id, driver entry points,
 *	capabilities/hwassist flags, TSO limits, supported media types,
 *	and finally attach it to the ethernet layer.
 *	NOTE: ether_ifattach() must stay last — after it the interface
 *	is visible to the stack.
 */
static void
qlnx_init_ifnet(device_t dev, qlnx_host_t *ha)
{
	uint16_t device_id;
	if_t ifp;

	ifp = ha->ifp = if_alloc(IFT_ETHER);
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));

	device_id = pci_get_device(ha->pci_dev);

	/* Advertised link speed is fixed per adapter model. */
	if (device_id == QLOGIC_PCI_DEVICE_ID_1634)
		if_setbaudrate(ifp, IF_Gbps(40));
	else if ((device_id == QLOGIC_PCI_DEVICE_ID_1656) ||
		(device_id == QLOGIC_PCI_DEVICE_ID_8070))
		if_setbaudrate(ifp, IF_Gbps(25));
	else if (device_id == QLOGIC_PCI_DEVICE_ID_1654)
		if_setbaudrate(ifp, IF_Gbps(50));
	else if (device_id == QLOGIC_PCI_DEVICE_ID_1644)
		if_setbaudrate(ifp, IF_Gbps(100));

	if_setinitfn(ifp, qlnx_init);
	if_setsoftc(ifp, ha);
	if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
	if_setioctlfn(ifp, qlnx_ioctl);
	if_settransmitfn(ifp, qlnx_transmit);
	if_setqflushfn(ifp, qlnx_qflush);

	if_setsendqlen(ifp, qlnx_get_ifq_snd_maxlen(ha));
	if_setsendqready(ifp);

	if_setgetcounterfn(ifp, qlnx_get_counter);

	ha->max_frame_size = if_getmtu(ifp) + ETHER_HDR_LEN + ETHER_CRC_LEN;

	qlnx_get_mac_addr(ha);

	/*
	 * If the hardware reported an all-zero MAC, synthesize one from
	 * the 00:0e:1e OUI plus three random bytes.
	 */
	if (!ha->primary_mac[0] && !ha->primary_mac[1] &&
		!ha->primary_mac[2] && !ha->primary_mac[3] &&
		!ha->primary_mac[4] && !ha->primary_mac[5]) {
		uint32_t rnd;

		rnd = arc4random();

		ha->primary_mac[0] = 0x00;
		ha->primary_mac[1] = 0x0e;
		ha->primary_mac[2] = 0x1e;
		ha->primary_mac[3] = rnd & 0xFF;
		ha->primary_mac[4] = (rnd >> 8) & 0xFF;
		ha->primary_mac[5] = (rnd >> 16) & 0xFF;
	}

	/* Checksum offload, VLAN offloads, TSO (v4/v6), LRO and stats. */
	if_setcapabilities(ifp, IFCAP_HWCSUM);
	if_setcapabilitiesbit(ifp, IFCAP_JUMBO_MTU, 0);
	if_setcapabilitiesbit(ifp, IFCAP_VLAN_MTU, 0);
	if_setcapabilitiesbit(ifp, IFCAP_VLAN_HWTAGGING, 0);
	if_setcapabilitiesbit(ifp, IFCAP_VLAN_HWFILTER, 0);
	if_setcapabilitiesbit(ifp, IFCAP_VLAN_HWCSUM, 0);
	if_setcapabilitiesbit(ifp, IFCAP_VLAN_HWTSO, 0);
	if_setcapabilitiesbit(ifp, IFCAP_TSO4, 0);
	if_setcapabilitiesbit(ifp, IFCAP_TSO6, 0);
	if_setcapabilitiesbit(ifp, IFCAP_LRO, 0);
	if_setcapabilitiesbit(ifp, IFCAP_LINKSTATE, 0);
	if_setcapabilitiesbit(ifp, IFCAP_HWSTATS, 0);

	if_sethwtsomax(ifp, QLNX_MAX_TSO_FRAME_SIZE -
		(ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN));
	if_sethwtsomaxsegcount(ifp, QLNX_MAX_SEGMENTS - 1); /* hdr */
	if_sethwtsomaxsegsize(ifp, QLNX_MAX_TX_MBUF_SIZE);

	/* Everything the hardware can do is enabled by default. */
	if_setcapenable(ifp, if_getcapabilities(ifp));

	if_sethwassist(ifp, CSUM_IP);
	if_sethwassistbits(ifp, CSUM_TCP | CSUM_UDP, 0);
	if_sethwassistbits(ifp, CSUM_TCP_IPV6 | CSUM_UDP_IPV6, 0);
	if_sethwassistbits(ifp, CSUM_TSO, 0);

	if_setifheaderlen(ifp, sizeof(struct ether_vlan_header));

	ifmedia_init(&ha->media, IFM_IMASK, qlnx_media_change,
		qlnx_media_status);

	/* Media options mirror the per-model baudrate selection above. */
	if (device_id == QLOGIC_PCI_DEVICE_ID_1634) {
		ifmedia_add(&ha->media, (IFM_ETHER | IFM_40G_LR4), 0, NULL);
		ifmedia_add(&ha->media, (IFM_ETHER | IFM_40G_SR4), 0, NULL);
		ifmedia_add(&ha->media, (IFM_ETHER | IFM_40G_CR4), 0, NULL);
	} else if ((device_id == QLOGIC_PCI_DEVICE_ID_1656) ||
		(device_id == QLOGIC_PCI_DEVICE_ID_8070)) {
		ifmedia_add(&ha->media, (IFM_ETHER | IFM_25G_SR), 0, NULL);
		ifmedia_add(&ha->media, (IFM_ETHER | IFM_25G_CR), 0, NULL);
	} else if (device_id == QLOGIC_PCI_DEVICE_ID_1654) {
		ifmedia_add(&ha->media, (IFM_ETHER | IFM_50G_KR2), 0, NULL);
		ifmedia_add(&ha->media, (IFM_ETHER | IFM_50G_CR2), 0, NULL);
	} else if (device_id == QLOGIC_PCI_DEVICE_ID_1644) {
		ifmedia_add(&ha->media, (IFM_ETHER | IFM_100G_LR4), 0, NULL);
		ifmedia_add(&ha->media, (IFM_ETHER | IFM_100G_SR4), 0, NULL);
		ifmedia_add(&ha->media, (IFM_ETHER | IFM_100G_CR4), 0, NULL);
	}

	ifmedia_add(&ha->media, (IFM_ETHER | IFM_FDX), 0, NULL);
	ifmedia_add(&ha->media, (IFM_ETHER | IFM_AUTO), 0, NULL);

	ifmedia_set(&ha->media, (IFM_ETHER | IFM_AUTO));

	ether_ifattach(ifp, ha->primary_mac);
	QL_DPRINT2(ha, "exit\n");

	return;
}
2398
2399 static void
qlnx_init_locked(qlnx_host_t * ha)2400 qlnx_init_locked(qlnx_host_t *ha)
2401 {
2402 if_t ifp = ha->ifp;
2403
2404 QL_DPRINT1(ha, "Driver Initialization start \n");
2405
2406 qlnx_stop(ha);
2407
2408 if (qlnx_load(ha) == 0) {
2409 if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);
2410 if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
2411
2412 #ifdef QLNX_ENABLE_IWARP
2413 if (qlnx_vf_device(ha) != 0) {
2414 qlnx_rdma_dev_open(ha);
2415 }
2416 #endif /* #ifdef QLNX_ENABLE_IWARP */
2417 }
2418
2419 return;
2420 }
2421
2422 static void
qlnx_init(void * arg)2423 qlnx_init(void *arg)
2424 {
2425 qlnx_host_t *ha;
2426
2427 ha = (qlnx_host_t *)arg;
2428
2429 QL_DPRINT2(ha, "enter\n");
2430
2431 QLNX_LOCK(ha);
2432 if ((if_getdrvflags(ha->ifp) & IFF_DRV_RUNNING) == 0)
2433 qlnx_init_locked(ha);
2434 QLNX_UNLOCK(ha);
2435
2436 QL_DPRINT2(ha, "exit\n");
2437
2438 return;
2439 }
2440
2441 static u_int
qlnx_mcast_bins_from_maddr(void * arg,struct sockaddr_dl * sdl,u_int mcnt)2442 qlnx_mcast_bins_from_maddr(void *arg, struct sockaddr_dl *sdl, u_int mcnt)
2443 {
2444 uint8_t bit;
2445 uint32_t *bins = arg;
2446
2447 bit = ecore_mcast_bin_from_mac(LLADDR(sdl));
2448 bins[bit / 32] |= 1 << (bit % 32);
2449
2450 return (1);
2451 }
2452
2453 static int
qlnx_set_multi(qlnx_host_t * ha)2454 qlnx_set_multi(qlnx_host_t *ha)
2455 {
2456 struct ecore_filter_mcast mcast;
2457 struct ecore_dev *cdev;
2458 if_t ifp = ha->ifp;
2459 u_int mcnt __unused;
2460 int rc;
2461
2462 if (qlnx_vf_device(ha) == 0)
2463 return (0);
2464
2465 bzero(&mcast, sizeof(struct ecore_filter_mcast));
2466 mcnt = if_foreach_llmaddr(ifp, qlnx_mcast_bins_from_maddr, mcast.bins);
2467 QL_DPRINT1(ha, "total %d multicast MACs found\n", mcnt);
2468
2469 if (memcmp(ha->ecore_mcast_bins, mcast.bins, sizeof(mcast.bins)) == 0)
2470 return (0);
2471
2472 cdev = &ha->cdev;
2473 mcast.opcode = ECORE_FILTER_REPLACE;
2474 rc = ecore_filter_mcast_cmd(cdev, &mcast, ECORE_SPQ_MODE_CB, NULL);
2475 if (rc == 0)
2476 memcpy(ha->ecore_mcast_bins, mcast.bins, sizeof(mcast.bins));
2477
2478 QL_DPRINT1(ha, "ecore_filter_mcast_cmd: end(%d)\n", rc);
2479 return (rc);
2480 }
2481
2482 static int
qlnx_set_promisc_allmulti(qlnx_host_t * ha,int flags)2483 qlnx_set_promisc_allmulti(qlnx_host_t *ha, int flags)
2484 {
2485 int rc = 0;
2486
2487 if (qlnx_vf_device(ha) == 0)
2488 return (0);
2489
2490 rc = _qlnx_set_promisc_allmulti(ha, flags & IFF_PROMISC,
2491 flags & IFF_ALLMULTI);
2492 return (rc);
2493 }
2494
2495 static int
_qlnx_set_promisc_allmulti(qlnx_host_t * ha,bool promisc,bool allmulti)2496 _qlnx_set_promisc_allmulti(qlnx_host_t *ha, bool promisc, bool allmulti)
2497 {
2498 int rc = 0;
2499 uint8_t filter;
2500 bool mcast, ucast;
2501
2502 filter = ha->filter;
2503 filter |= ECORE_ACCEPT_UCAST_MATCHED;
2504 filter |= ECORE_ACCEPT_MCAST_MATCHED;
2505 filter |= ECORE_ACCEPT_BCAST;
2506
2507 mcast = promisc || allmulti;
2508 ucast = promisc;
2509
2510 if (mcast)
2511 filter |= ECORE_ACCEPT_MCAST_UNMATCHED;
2512 else
2513 filter &= ~ECORE_ACCEPT_MCAST_UNMATCHED;
2514
2515 if (ucast)
2516 filter |= ECORE_ACCEPT_UCAST_UNMATCHED;
2517 else
2518 filter &= ~ECORE_ACCEPT_UCAST_UNMATCHED;
2519
2520 if (filter == ha->filter)
2521 return (0);
2522
2523 rc = qlnx_set_rx_accept_filter(ha, filter);
2524 if (rc == 0)
2525 ha->filter = filter;
2526 return (rc);
2527 }
2528
/*
 * qlnx_ioctl
 *	Interface ioctl entry point.  Handles MTU changes, interface
 *	flag updates (promisc/allmulti, up/down), multicast list changes,
 *	media queries, capability toggles and SFP module I2C reads; all
 *	other commands are delegated to ether_ioctl().
 */
static int
qlnx_ioctl(if_t ifp, u_long cmd, caddr_t data)
{
	int ret = 0, mask;
	int flags;
	struct ifreq *ifr = (struct ifreq *)data;
	qlnx_host_t *ha;

	ha = (qlnx_host_t *)if_getsoftc(ifp);

	switch (cmd) {
	case SIOCSIFMTU:
		QL_DPRINT4(ha, "SIOCSIFMTU (0x%lx)\n", cmd);

		if (ifr->ifr_mtu > QLNX_MAX_MTU) {
			ret = EINVAL;
		} else {
			QLNX_LOCK(ha);
			if_setmtu(ifp, ifr->ifr_mtu);
			ha->max_frame_size =
				if_getmtu(ifp) + ETHER_HDR_LEN + ETHER_CRC_LEN;
			/* Restart so the new frame size takes effect. */
			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
				qlnx_init_locked(ha);
			}

			QLNX_UNLOCK(ha);
		}

		break;

	case SIOCSIFFLAGS:
		QL_DPRINT4(ha, "SIOCSIFFLAGS (0x%lx)\n", cmd);

		QLNX_LOCK(ha);
		flags = if_getflags(ifp);

		if (flags & IFF_UP) {
			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
				/* Already running: only update Rx filters. */
				if (qlnx_set_promisc_allmulti(ha, flags) != 0)
					ret = EINVAL;
			} else {
				ha->max_frame_size = if_getmtu(ifp) +
					ETHER_HDR_LEN + ETHER_CRC_LEN;
				qlnx_init_locked(ha);
			}
		} else {
			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
				qlnx_stop(ha);
		}

		QLNX_UNLOCK(ha);
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		QL_DPRINT4(ha, "%s (0x%lx)\n", "SIOCADDMULTI/SIOCDELMULTI", cmd);

		QLNX_LOCK(ha);
		if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
			if (qlnx_set_multi(ha) != 0)
				ret = EINVAL;
		}
		QLNX_UNLOCK(ha);
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
	case SIOCGIFXMEDIA:
		QL_DPRINT4(ha,
			"SIOCSIFMEDIA/SIOCGIFMEDIA/SIOCGIFXMEDIA (0x%lx)\n", cmd);

		ret = ifmedia_ioctl(ifp, ifr, &ha->media, cmd);
		break;

	case SIOCSIFCAP:

		mask = ifr->ifr_reqcap ^ if_getcapenable(ifp);

		QL_DPRINT4(ha, "SIOCSIFCAP (0x%lx)\n", cmd);

		/* Toggle only the capabilities that actually changed. */
		if (mask & IFCAP_HWCSUM)
			if_togglecapenable(ifp, IFCAP_HWCSUM);
		if (mask & IFCAP_TSO4)
			if_togglecapenable(ifp, IFCAP_TSO4);
		if (mask & IFCAP_TSO6)
			if_togglecapenable(ifp, IFCAP_TSO6);
		if (mask & IFCAP_VLAN_HWTAGGING)
			if_togglecapenable(ifp, IFCAP_VLAN_HWTAGGING);
		if (mask & IFCAP_VLAN_HWTSO)
			if_togglecapenable(ifp, IFCAP_VLAN_HWTSO);
		if (mask & IFCAP_LRO)
			if_togglecapenable(ifp, IFCAP_LRO);

		QLNX_LOCK(ha);

		/* Reinitialize so the hardware picks up the new offloads. */
		if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
			qlnx_init_locked(ha);

		QLNX_UNLOCK(ha);

		VLAN_CAPABILITIES(ifp);
		break;

	case SIOCGI2C:
	{
		/* Read SFP/QSFP module EEPROM over I2C for userland tools. */
		struct ifi2creq i2c;
		struct ecore_hwfn *p_hwfn = &ha->cdev.hwfns[0];
		struct ecore_ptt *p_ptt;

		ret = copyin(ifr_data_get_ptr(ifr), &i2c, sizeof(i2c));

		if (ret)
			break;

		/* Only the standard SFP addresses 0xA0/0xA2 are allowed. */
		if ((i2c.len > sizeof (i2c.data)) ||
			(i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2)) {
			ret = EINVAL;
			break;
		}

		p_ptt = ecore_ptt_acquire(p_hwfn);

		if (!p_ptt) {
			QL_DPRINT1(ha, "ecore_ptt_acquire failed\n");
			ret = ERESTART;
			break;
		}

		ret = ecore_mcp_phy_sfp_read(p_hwfn, p_ptt,
			(ha->pci_func & 0x1), i2c.dev_addr, i2c.offset,
			i2c.len, &i2c.data[0]);

		ecore_ptt_release(p_hwfn, p_ptt);

		if (ret) {
			ret = ENODEV;
			break;
		}

		ret = copyout(&i2c, ifr_data_get_ptr(ifr), sizeof(i2c));

		QL_DPRINT8(ha, "SIOCGI2C copyout ret = %d \
			len = %d addr = 0x%02x offset = 0x%04x \
			data[0..7]=0x%02x 0x%02x 0x%02x 0x%02x 0x%02x \
			0x%02x 0x%02x 0x%02x\n",
			ret, i2c.len, i2c.dev_addr, i2c.offset,
			i2c.data[0], i2c.data[1], i2c.data[2], i2c.data[3],
			i2c.data[4], i2c.data[5], i2c.data[6], i2c.data[7]);
		break;
	}

	default:
		QL_DPRINT4(ha, "default (0x%lx)\n", cmd);
		ret = ether_ioctl(ifp, cmd, data);
		break;
	}

	return (ret);
}
2688
2689 static int
qlnx_media_change(if_t ifp)2690 qlnx_media_change(if_t ifp)
2691 {
2692 qlnx_host_t *ha;
2693 struct ifmedia *ifm;
2694 int ret = 0;
2695
2696 ha = (qlnx_host_t *)if_getsoftc(ifp);
2697
2698 QL_DPRINT2(ha, "enter\n");
2699
2700 ifm = &ha->media;
2701
2702 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
2703 ret = EINVAL;
2704
2705 QL_DPRINT2(ha, "exit\n");
2706
2707 return (ret);
2708 }
2709
2710 static void
qlnx_media_status(if_t ifp,struct ifmediareq * ifmr)2711 qlnx_media_status(if_t ifp, struct ifmediareq *ifmr)
2712 {
2713 qlnx_host_t *ha;
2714
2715 ha = (qlnx_host_t *)if_getsoftc(ifp);
2716
2717 QL_DPRINT2(ha, "enter\n");
2718
2719 ifmr->ifm_status = IFM_AVALID;
2720 ifmr->ifm_active = IFM_ETHER;
2721
2722 if (ha->link_up) {
2723 ifmr->ifm_status |= IFM_ACTIVE;
2724 ifmr->ifm_active |=
2725 (IFM_FDX | qlnx_get_optics(ha, &ha->if_link));
2726
2727 if (ha->if_link.link_partner_caps &
2728 (QLNX_LINK_CAP_Pause | QLNX_LINK_CAP_Asym_Pause))
2729 ifmr->ifm_active |=
2730 (IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE);
2731 }
2732
2733 QL_DPRINT2(ha, "exit (%s)\n", (ha->link_up ? "link_up" : "link_down"));
2734
2735 return;
2736 }
2737
/*
 * qlnx_free_tx_pkt
 *	Reclaim the completed Tx packet at the software consumer index:
 *	unmap its DMA map, free the mbuf, consume all of its buffer
 *	descriptors from the Tx PBL chain and clear the sw_tx_ring slot.
 *	A NULL mbuf here indicates a driver/firmware inconsistency and
 *	triggers a debug dump (also reachable via error injection).
 */
static void
qlnx_free_tx_pkt(qlnx_host_t *ha, struct qlnx_fastpath *fp,
	struct qlnx_tx_queue *txq)
{
	u16 idx;
	struct mbuf *mp;
	bus_dmamap_t map;
	int i;
	// struct eth_tx_bd *tx_data_bd;
	struct eth_tx_1st_bd *first_bd;
	int nbds = 0;

	idx = txq->sw_tx_cons;
	mp = txq->sw_tx_ring[idx].mp;
	map = txq->sw_tx_ring[idx].map;

	if ((mp == NULL) || QL_ERR_INJECT(ha, QL_ERR_INJCT_TX_INT_MBUF_NULL)){
		/* One-shot injection: clear the flag so it fires once. */
		QL_RESET_ERR_INJECT(ha, QL_ERR_INJCT_TX_INT_MBUF_NULL);

		QL_DPRINT1(ha, "(mp == NULL) "
			" tx_idx = 0x%x"
			" ecore_prod_idx = 0x%x"
			" ecore_cons_idx = 0x%x"
			" hw_bd_cons = 0x%x"
			" txq_db_last = 0x%x"
			" elem_left = 0x%x\n",
			fp->rss_id,
			ecore_chain_get_prod_idx(&txq->tx_pbl),
			ecore_chain_get_cons_idx(&txq->tx_pbl),
			le16toh(*txq->hw_cons_ptr),
			txq->tx_db.raw,
			ecore_chain_get_elem_left(&txq->tx_pbl));

		fp->err_tx_free_pkt_null++;

		//DEBUG
		qlnx_trigger_dump(ha);

		/*
		 * NOTE(review): returns without consuming the BDs for this
		 * slot — the queue is expected to be reset by the dump path.
		 */
		return;
	} else {
		QLNX_INC_OPACKETS((ha->ifp));
		QLNX_INC_OBYTES((ha->ifp), (mp->m_pkthdr.len));

		bus_dmamap_sync(ha->tx_tag, map, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(ha->tx_tag, map);

		fp->tx_pkts_freed++;
		fp->tx_pkts_completed++;

		m_freem(mp);
	}

	/* The first BD records how many BDs the packet used in total. */
	first_bd = (struct eth_tx_1st_bd *)ecore_chain_consume(&txq->tx_pbl);
	nbds = first_bd->data.nbds;

	// BD_SET_UNMAP_ADDR_LEN(first_bd, 0, 0);

	/* Consume the remaining data BDs of this packet. */
	for (i = 1; i < nbds; i++) {
		/* tx_data_bd = */ ecore_chain_consume(&txq->tx_pbl);
		// BD_SET_UNMAP_ADDR_LEN(tx_data_bd, 0, 0);
	}
	txq->sw_tx_ring[idx].flags = 0;
	txq->sw_tx_ring[idx].mp = NULL;
	txq->sw_tx_ring[idx].map = (bus_dmamap_t)0;

	return;
}
2805
/*
 * qlnx_tx_int
 *	Tx completion processing: walk the Tx PBL chain, freeing one
 *	completed packet per iteration, until the driver's consumer index
 *	catches up with the hardware's consumer index.  A gap larger than
 *	the ring size indicates index corruption and triggers a debug
 *	dump (also reachable via error injection).
 */
static void
qlnx_tx_int(qlnx_host_t *ha, struct qlnx_fastpath *fp,
	struct qlnx_tx_queue *txq)
{
	u16 hw_bd_cons;
	u16 ecore_cons_idx;
	uint16_t diff;
	uint16_t idx, idx2;

	hw_bd_cons = le16toh(*txq->hw_cons_ptr);

	while (hw_bd_cons !=
		(ecore_cons_idx = ecore_chain_get_cons_idx(&txq->tx_pbl))) {
		/* Sanity-check the distance between HW and driver indices. */
		diff = hw_bd_cons - ecore_cons_idx;
		if ((diff > TX_RING_SIZE) ||
			QL_ERR_INJECT(ha, QL_ERR_INJCT_TX_INT_DIFF)){
			QL_RESET_ERR_INJECT(ha, QL_ERR_INJCT_TX_INT_DIFF);

			QL_DPRINT1(ha, "(diff = 0x%x) "
				" tx_idx = 0x%x"
				" ecore_prod_idx = 0x%x"
				" ecore_cons_idx = 0x%x"
				" hw_bd_cons = 0x%x"
				" txq_db_last = 0x%x"
				" elem_left = 0x%x\n",
				diff,
				fp->rss_id,
				ecore_chain_get_prod_idx(&txq->tx_pbl),
				ecore_chain_get_cons_idx(&txq->tx_pbl),
				le16toh(*txq->hw_cons_ptr),
				txq->tx_db.raw,
				ecore_chain_get_elem_left(&txq->tx_pbl));

			fp->err_tx_cons_idx_conflict++;

			//DEBUG
			qlnx_trigger_dump(ha);
		}

		/* Prefetch the next two ring entries to hide memory latency. */
		idx = (txq->sw_tx_cons + 1) & (TX_RING_SIZE - 1);
		idx2 = (txq->sw_tx_cons + 2) & (TX_RING_SIZE - 1);
		prefetch(txq->sw_tx_ring[idx].mp);
		prefetch(txq->sw_tx_ring[idx2].mp);

		qlnx_free_tx_pkt(ha, fp, txq);

		txq->sw_tx_cons = (txq->sw_tx_cons + 1) & (TX_RING_SIZE - 1);
	}
	return;
}
2856
/*
 * qlnx_transmit_locked
 *	Transmit with the fastpath Tx lock held.  Enqueues 'mp' (if any)
 *	on the buf_ring, then drains the ring through qlnx_send() using
 *	the peek/advance/putback protocol: on a send failure the mbuf is
 *	either put back (still valid) or the slot advanced (consumed by
 *	qlnx_send).  On exit, completions are reaped if the queue is
 *	getting full.  Returns 0 or a drbr_enqueue() errno.
 */
static int
qlnx_transmit_locked(if_t ifp, struct qlnx_fastpath *fp, struct mbuf *mp)
{
	int ret = 0;
	struct qlnx_tx_queue *txq;
	qlnx_host_t * ha;
	uint16_t elem_left;

	txq = fp->txq[0];
	ha = (qlnx_host_t *)fp->edev;

	/* Interface down or link down: just queue the mbuf for later. */
	if ((!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) || (!ha->link_up)) {
		if(mp != NULL)
			ret = drbr_enqueue(ifp, fp->tx_br, mp);
		return (ret);
	}

	if(mp != NULL)
		ret = drbr_enqueue(ifp, fp->tx_br, mp);

	mp = drbr_peek(ifp, fp->tx_br);

	while (mp != NULL) {
		if (qlnx_send(ha, fp, &mp)) {
			if (mp != NULL) {
				/* mbuf survived: retry it later. */
				drbr_putback(ifp, fp->tx_br, mp);
			} else {
				/* qlnx_send consumed/freed the mbuf. */
				fp->tx_pkts_processed++;
				drbr_advance(ifp, fp->tx_br);
			}
			goto qlnx_transmit_locked_exit;

		} else {
			drbr_advance(ifp, fp->tx_br);
			fp->tx_pkts_transmitted++;
			fp->tx_pkts_processed++;
			ETHER_BPF_MTAP(ifp, mp);
		}

		mp = drbr_peek(ifp, fp->tx_br);
	}

qlnx_transmit_locked_exit:
	/* Reap Tx completions when the descriptor chain runs low. */
	if((qlnx_num_tx_compl(ha,fp, fp->txq[0]) > QLNX_TX_COMPL_THRESH) ||
		((int)(elem_left = ecore_chain_get_elem_left(&txq->tx_pbl))
			< QLNX_TX_ELEM_MAX_THRESH))
		(void)qlnx_tx_int(ha, fp, fp->txq[0]);

	QL_DPRINT2(ha, "%s: exit ret = %d\n", __func__, ret);
	return ret;
}
2908
/*
 * qlnx_transmit
 *	if_transmit entry point.  Selects a fastpath queue from the
 *	mbuf's RSS flowid (queue 0 when there is no hash), then either
 *	transmits directly if the Tx lock can be taken without blocking,
 *	or enqueues the mbuf and defers to the fastpath taskqueue.
 */
static int
qlnx_transmit(if_t ifp, struct mbuf *mp)
{
	qlnx_host_t *ha = (qlnx_host_t *)if_getsoftc(ifp);
	struct qlnx_fastpath *fp;
	int rss_id = 0, ret = 0;

#ifdef QLNX_TRACEPERF_DATA
	uint64_t tx_pkts = 0, tx_compl = 0;
#endif

	QL_DPRINT2(ha, "enter\n");

	/* Map the flow hash onto one of the active RSS queues. */
	if (M_HASHTYPE_GET(mp) != M_HASHTYPE_NONE)
		rss_id = (mp->m_pkthdr.flowid % ECORE_RSS_IND_TABLE_SIZE) %
			ha->num_rss;

	fp = &ha->fp_array[rss_id];

	if (fp->tx_br == NULL) {
		ret = EINVAL;
		goto qlnx_transmit_exit;
	}

	/* Avoid lock contention: fall back to the taskqueue if busy. */
	if (mtx_trylock(&fp->tx_mtx)) {
#ifdef QLNX_TRACEPERF_DATA
		tx_pkts = fp->tx_pkts_transmitted;
		tx_compl = fp->tx_pkts_completed;
#endif

		ret = qlnx_transmit_locked(ifp, fp, mp);

#ifdef QLNX_TRACEPERF_DATA
		fp->tx_pkts_trans_ctx += (fp->tx_pkts_transmitted - tx_pkts);
		fp->tx_pkts_compl_ctx += (fp->tx_pkts_completed - tx_compl);
#endif
		mtx_unlock(&fp->tx_mtx);
	} else {
		if (mp != NULL && (fp->fp_taskqueue != NULL)) {
			ret = drbr_enqueue(ifp, fp->tx_br, mp);
			taskqueue_enqueue(fp->fp_taskqueue, &fp->fp_task);
		}
	}

qlnx_transmit_exit:

	QL_DPRINT2(ha, "exit ret = %d\n", ret);
	return ret;
}
2958
2959 static void
qlnx_qflush(if_t ifp)2960 qlnx_qflush(if_t ifp)
2961 {
2962 int rss_id;
2963 struct qlnx_fastpath *fp;
2964 struct mbuf *mp;
2965 qlnx_host_t *ha;
2966
2967 ha = (qlnx_host_t *)if_getsoftc(ifp);
2968
2969 QL_DPRINT2(ha, "enter\n");
2970
2971 for (rss_id = 0; rss_id < ha->num_rss; rss_id++) {
2972 fp = &ha->fp_array[rss_id];
2973
2974 if (fp == NULL)
2975 continue;
2976
2977 if (fp->tx_br) {
2978 mtx_lock(&fp->tx_mtx);
2979
2980 while ((mp = drbr_dequeue(ifp, fp->tx_br)) != NULL) {
2981 fp->tx_pkts_freed++;
2982 m_freem(mp);
2983 }
2984 mtx_unlock(&fp->tx_mtx);
2985 }
2986 }
2987 QL_DPRINT2(ha, "exit\n");
2988
2989 return;
2990 }
2991
/*
 * qlnx_txq_doorbell_wr32
 *	Write a 32-bit value to a Tx doorbell register.  'reg_addr' is a
 *	pointer into the mapped doorbell BAR; it is converted to an
 *	offset for bus_write_4().  The trailing read barriers on both
 *	the register and doorbell BARs force the write to post to the
 *	device before returning.
 */
static void
qlnx_txq_doorbell_wr32(qlnx_host_t *ha, void *reg_addr, uint32_t value)
{
	uint32_t offset;

	offset = (uint32_t)((uint8_t *)reg_addr - (uint8_t *)ha->pci_dbells);

	bus_write_4(ha->pci_dbells, offset, value);
	bus_barrier(ha->pci_reg, 0, 0, BUS_SPACE_BARRIER_READ);
	bus_barrier(ha->pci_dbells, 0, 0, BUS_SPACE_BARRIER_READ);

	return;
}
3005
3006 static uint32_t
qlnx_tcp_offset(qlnx_host_t * ha,struct mbuf * mp)3007 qlnx_tcp_offset(qlnx_host_t *ha, struct mbuf *mp)
3008 {
3009 struct ether_vlan_header *eh = NULL;
3010 struct ip *ip = NULL;
3011 struct ip6_hdr *ip6 = NULL;
3012 struct tcphdr *th = NULL;
3013 uint32_t ehdrlen = 0, ip_hlen = 0, offset = 0;
3014 uint16_t etype = 0;
3015 uint8_t buf[sizeof(struct ip6_hdr)];
3016
3017 eh = mtod(mp, struct ether_vlan_header *);
3018
3019 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
3020 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
3021 etype = ntohs(eh->evl_proto);
3022 } else {
3023 ehdrlen = ETHER_HDR_LEN;
3024 etype = ntohs(eh->evl_encap_proto);
3025 }
3026
3027 switch (etype) {
3028 case ETHERTYPE_IP:
3029 ip = (struct ip *)(mp->m_data + ehdrlen);
3030
3031 ip_hlen = sizeof (struct ip);
3032
3033 if (mp->m_len < (ehdrlen + ip_hlen)) {
3034 m_copydata(mp, ehdrlen, sizeof(struct ip), buf);
3035 ip = (struct ip *)buf;
3036 }
3037
3038 th = (struct tcphdr *)(ip + 1);
3039 offset = ip_hlen + ehdrlen + (th->th_off << 2);
3040 break;
3041
3042 case ETHERTYPE_IPV6:
3043 ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
3044
3045 ip_hlen = sizeof(struct ip6_hdr);
3046
3047 if (mp->m_len < (ehdrlen + ip_hlen)) {
3048 m_copydata(mp, ehdrlen, sizeof (struct ip6_hdr),
3049 buf);
3050 ip6 = (struct ip6_hdr *)buf;
3051 }
3052 th = (struct tcphdr *)(ip6 + 1);
3053 offset = ip_hlen + ehdrlen + (th->th_off << 2);
3054 break;
3055
3056 default:
3057 break;
3058 }
3059
3060 return (offset);
3061 }
3062
/*
 * qlnx_tso_check
 *	Validate a TSO packet's DMA segment list against the hardware's
 *	LSO window constraint: every sliding window of
 *	ETH_TX_LSO_WINDOW_BDS_NUM payload BDs (after the BDs carrying the
 *	header, located via 'offset') must span at least
 *	ETH_TX_LSO_WINDOW_MIN_LEN bytes.  Returns 0 when the packet is
 *	acceptable, -1 when a window is too short (caller must defrag).
 */
static __inline int
qlnx_tso_check(struct qlnx_fastpath *fp, bus_dma_segment_t *segs, int nsegs,
	uint32_t offset)
{
	int i;
	uint32_t sum, nbds_in_hdr = 1;
	uint32_t window;
	bus_dma_segment_t *s_seg;

	/* If the header spans multiple segments, skip those segments */

	/* Fewer segments than a window can never violate the constraint. */
	if (nsegs < ETH_TX_LSO_WINDOW_BDS_NUM)
		return (0);

	i = 0;

	/* Walk past the segments wholly consumed by the packet header. */
	while ((i < nsegs) && (offset >= segs->ds_len)) {
		offset = offset - segs->ds_len;
		segs++;
		i++;
		nbds_in_hdr++;
	}

	/* Remaining window size available for payload BDs. */
	window = ETH_TX_LSO_WINDOW_BDS_NUM - nbds_in_hdr;

	nsegs = nsegs - i;

	/* Slide the window over the payload segments one BD at a time. */
	while (nsegs >= window) {
		sum = 0;
		s_seg = segs;

		for (i = 0; i < window; i++){
			sum += s_seg->ds_len;
			s_seg++;
		}

		if (sum < ETH_TX_LSO_WINDOW_MIN_LEN) {
			fp->tx_lso_wnd_min_len++;
			return (-1);
		}

		nsegs = nsegs - 1;
		segs++;
	}

	return (0);
}
3110
3111 static int
qlnx_send(qlnx_host_t * ha,struct qlnx_fastpath * fp,struct mbuf ** m_headp)3112 qlnx_send(qlnx_host_t *ha, struct qlnx_fastpath *fp, struct mbuf **m_headp)
3113 {
3114 bus_dma_segment_t *segs;
3115 bus_dmamap_t map = 0;
3116 uint32_t nsegs = 0;
3117 int ret = -1;
3118 struct mbuf *m_head = *m_headp;
3119 uint16_t idx = 0;
3120 uint16_t elem_left;
3121
3122 uint8_t nbd = 0;
3123 struct qlnx_tx_queue *txq;
3124
3125 struct eth_tx_1st_bd *first_bd;
3126 struct eth_tx_2nd_bd *second_bd;
3127 struct eth_tx_3rd_bd *third_bd;
3128 struct eth_tx_bd *tx_data_bd;
3129
3130 int seg_idx = 0;
3131 uint32_t nbds_in_hdr = 0;
3132 uint32_t offset = 0;
3133
3134 #ifdef QLNX_TRACE_PERF_DATA
3135 uint16_t bd_used;
3136 #endif
3137
3138 QL_DPRINT8(ha, "enter[%d]\n", fp->rss_id);
3139
3140 if (!ha->link_up)
3141 return (-1);
3142
3143 first_bd = NULL;
3144 second_bd = NULL;
3145 third_bd = NULL;
3146 tx_data_bd = NULL;
3147
3148 txq = fp->txq[0];
3149
3150 if ((int)(elem_left = ecore_chain_get_elem_left(&txq->tx_pbl)) <
3151 QLNX_TX_ELEM_MIN_THRESH) {
3152 fp->tx_nsegs_gt_elem_left++;
3153 fp->err_tx_nsegs_gt_elem_left++;
3154
3155 return (ENOBUFS);
3156 }
3157
3158 idx = txq->sw_tx_prod;
3159
3160 map = txq->sw_tx_ring[idx].map;
3161 segs = txq->segs;
3162
3163 ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head, segs, &nsegs,
3164 BUS_DMA_NOWAIT);
3165
3166 if (ha->dbg_trace_tso_pkt_len) {
3167 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
3168 if (!fp->tx_tso_min_pkt_len) {
3169 fp->tx_tso_min_pkt_len = m_head->m_pkthdr.len;
3170 fp->tx_tso_min_pkt_len = m_head->m_pkthdr.len;
3171 } else {
3172 if (fp->tx_tso_min_pkt_len > m_head->m_pkthdr.len)
3173 fp->tx_tso_min_pkt_len =
3174 m_head->m_pkthdr.len;
3175 if (fp->tx_tso_max_pkt_len < m_head->m_pkthdr.len)
3176 fp->tx_tso_max_pkt_len =
3177 m_head->m_pkthdr.len;
3178 }
3179 }
3180 }
3181
3182 if (m_head->m_pkthdr.csum_flags & CSUM_TSO)
3183 offset = qlnx_tcp_offset(ha, m_head);
3184
3185 if ((ret == EFBIG) ||
3186 ((nsegs > QLNX_MAX_SEGMENTS_NON_TSO) && (
3187 (!(m_head->m_pkthdr.csum_flags & CSUM_TSO)) ||
3188 ((m_head->m_pkthdr.csum_flags & CSUM_TSO) &&
3189 qlnx_tso_check(fp, segs, nsegs, offset))))) {
3190 struct mbuf *m;
3191
3192 QL_DPRINT8(ha, "EFBIG [%d]\n", m_head->m_pkthdr.len);
3193
3194 fp->tx_defrag++;
3195
3196 m = m_defrag(m_head, M_NOWAIT);
3197 if (m == NULL) {
3198 fp->err_tx_defrag++;
3199 fp->tx_pkts_freed++;
3200 m_freem(m_head);
3201 *m_headp = NULL;
3202 QL_DPRINT1(ha, "m_defrag() = NULL [%d]\n", ret);
3203 return (ENOBUFS);
3204 }
3205
3206 m_head = m;
3207 *m_headp = m_head;
3208
3209 if ((ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head,
3210 segs, &nsegs, BUS_DMA_NOWAIT))) {
3211 fp->err_tx_defrag_dmamap_load++;
3212
3213 QL_DPRINT1(ha,
3214 "bus_dmamap_load_mbuf_sg failed0 [%d, %d]\n",
3215 ret, m_head->m_pkthdr.len);
3216
3217 fp->tx_pkts_freed++;
3218 m_freem(m_head);
3219 *m_headp = NULL;
3220
3221 return (ret);
3222 }
3223
3224 if ((nsegs > QLNX_MAX_SEGMENTS_NON_TSO) &&
3225 !(m_head->m_pkthdr.csum_flags & CSUM_TSO)) {
3226 fp->err_tx_non_tso_max_seg++;
3227
3228 QL_DPRINT1(ha,
3229 "(%d) nsegs too many for non-TSO [%d, %d]\n",
3230 ret, nsegs, m_head->m_pkthdr.len);
3231
3232 fp->tx_pkts_freed++;
3233 m_freem(m_head);
3234 *m_headp = NULL;
3235
3236 return (ret);
3237 }
3238 if (m_head->m_pkthdr.csum_flags & CSUM_TSO)
3239 offset = qlnx_tcp_offset(ha, m_head);
3240
3241 } else if (ret) {
3242 fp->err_tx_dmamap_load++;
3243
3244 QL_DPRINT1(ha, "bus_dmamap_load_mbuf_sg failed1 [%d, %d]\n",
3245 ret, m_head->m_pkthdr.len);
3246 fp->tx_pkts_freed++;
3247 m_freem(m_head);
3248 *m_headp = NULL;
3249 return (ret);
3250 }
3251
3252 QL_ASSERT(ha, (nsegs != 0), ("qlnx_send: empty packet"));
3253
3254 if (ha->dbg_trace_tso_pkt_len) {
3255 if (nsegs < QLNX_FP_MAX_SEGS)
3256 fp->tx_pkts[(nsegs - 1)]++;
3257 else
3258 fp->tx_pkts[(QLNX_FP_MAX_SEGS - 1)]++;
3259 }
3260
3261 #ifdef QLNX_TRACE_PERF_DATA
3262 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
3263 if(m_head->m_pkthdr.len <= 2048)
3264 fp->tx_pkts_hist[0]++;
3265 else if((m_head->m_pkthdr.len > 2048) &&
3266 (m_head->m_pkthdr.len <= 4096))
3267 fp->tx_pkts_hist[1]++;
3268 else if((m_head->m_pkthdr.len > 4096) &&
3269 (m_head->m_pkthdr.len <= 8192))
3270 fp->tx_pkts_hist[2]++;
3271 else if((m_head->m_pkthdr.len > 8192) &&
3272 (m_head->m_pkthdr.len <= 12288 ))
3273 fp->tx_pkts_hist[3]++;
3274 else if((m_head->m_pkthdr.len > 11288) &&
3275 (m_head->m_pkthdr.len <= 16394))
3276 fp->tx_pkts_hist[4]++;
3277 else if((m_head->m_pkthdr.len > 16384) &&
3278 (m_head->m_pkthdr.len <= 20480))
3279 fp->tx_pkts_hist[5]++;
3280 else if((m_head->m_pkthdr.len > 20480) &&
3281 (m_head->m_pkthdr.len <= 24576))
3282 fp->tx_pkts_hist[6]++;
3283 else if((m_head->m_pkthdr.len > 24576) &&
3284 (m_head->m_pkthdr.len <= 28672))
3285 fp->tx_pkts_hist[7]++;
3286 else if((m_head->m_pkthdr.len > 28762) &&
3287 (m_head->m_pkthdr.len <= 32768))
3288 fp->tx_pkts_hist[8]++;
3289 else if((m_head->m_pkthdr.len > 32768) &&
3290 (m_head->m_pkthdr.len <= 36864))
3291 fp->tx_pkts_hist[9]++;
3292 else if((m_head->m_pkthdr.len > 36864) &&
3293 (m_head->m_pkthdr.len <= 40960))
3294 fp->tx_pkts_hist[10]++;
3295 else if((m_head->m_pkthdr.len > 40960) &&
3296 (m_head->m_pkthdr.len <= 45056))
3297 fp->tx_pkts_hist[11]++;
3298 else if((m_head->m_pkthdr.len > 45056) &&
3299 (m_head->m_pkthdr.len <= 49152))
3300 fp->tx_pkts_hist[12]++;
3301 else if((m_head->m_pkthdr.len > 49512) &&
3302 m_head->m_pkthdr.len <= 53248))
3303 fp->tx_pkts_hist[13]++;
3304 else if((m_head->m_pkthdr.len > 53248) &&
3305 (m_head->m_pkthdr.len <= 57344))
3306 fp->tx_pkts_hist[14]++;
3307 else if((m_head->m_pkthdr.len > 53248) &&
3308 (m_head->m_pkthdr.len <= 57344))
3309 fp->tx_pkts_hist[15]++;
3310 else if((m_head->m_pkthdr.len > 57344) &&
3311 (m_head->m_pkthdr.len <= 61440))
3312 fp->tx_pkts_hist[16]++;
3313 else
3314 fp->tx_pkts_hist[17]++;
3315 }
3316
3317 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
3318 elem_left = ecore_chain_get_elem_left(&txq->tx_pbl);
3319 bd_used = TX_RING_SIZE - elem_left;
3320
3321 if(bd_used <= 100)
3322 fp->tx_pkts_q[0]++;
3323 else if((bd_used > 100) && (bd_used <= 500))
3324 fp->tx_pkts_q[1]++;
3325 else if((bd_used > 500) && (bd_used <= 1000))
3326 fp->tx_pkts_q[2]++;
3327 else if((bd_used > 1000) && (bd_used <= 2000))
3328 fp->tx_pkts_q[3]++;
3329 else if((bd_used > 3000) && (bd_used <= 4000))
3330 fp->tx_pkts_q[4]++;
3331 else if((bd_used > 4000) && (bd_used <= 5000))
3332 fp->tx_pkts_q[5]++;
3333 else if((bd_used > 6000) && (bd_used <= 7000))
3334 fp->tx_pkts_q[6]++;
3335 else if((bd_used > 7000) && (bd_used <= 8000))
3336 fp->tx_pkts_q[7]++;
3337 else if((bd_used > 8000) && (bd_used <= 9000))
3338 fp->tx_pkts_q[8]++;
3339 else if((bd_used > 9000) && (bd_used <= 10000))
3340 fp->tx_pkts_q[9]++;
3341 else if((bd_used > 10000) && (bd_used <= 11000))
3342 fp->tx_pkts_q[10]++;
3343 else if((bd_used > 11000) && (bd_used <= 12000))
3344 fp->tx_pkts_q[11]++;
3345 else if((bd_used > 12000) && (bd_used <= 13000))
3346 fp->tx_pkts_q[12]++;
3347 else if((bd_used > 13000) && (bd_used <= 14000))
3348 fp->tx_pkts_q[13]++;
3349 else if((bd_used > 14000) && (bd_used <= 15000))
3350 fp->tx_pkts_q[14]++;
3351 else if((bd_used > 15000) && (bd_used <= 16000))
3352 fp->tx_pkts_q[15]++;
3353 else
3354 fp->tx_pkts_q[16]++;
3355 }
3356
3357 #endif /* end of QLNX_TRACE_PERF_DATA */
3358
3359 if ((nsegs + QLNX_TX_ELEM_RESERVE) >
3360 (int)(elem_left = ecore_chain_get_elem_left(&txq->tx_pbl))) {
3361 QL_DPRINT1(ha, "(%d, 0x%x) insuffient BDs"
3362 " in chain[%d] trying to free packets\n",
3363 nsegs, elem_left, fp->rss_id);
3364
3365 fp->tx_nsegs_gt_elem_left++;
3366
3367 (void)qlnx_tx_int(ha, fp, txq);
3368
3369 if ((nsegs + QLNX_TX_ELEM_RESERVE) > (int)(elem_left =
3370 ecore_chain_get_elem_left(&txq->tx_pbl))) {
3371 QL_DPRINT1(ha,
3372 "(%d, 0x%x) insuffient BDs in chain[%d]\n",
3373 nsegs, elem_left, fp->rss_id);
3374
3375 fp->err_tx_nsegs_gt_elem_left++;
3376 fp->tx_ring_full = 1;
3377 if (ha->storm_stats_enable)
3378 ha->storm_stats_gather = 1;
3379 return (ENOBUFS);
3380 }
3381 }
3382
3383 bus_dmamap_sync(ha->tx_tag, map, BUS_DMASYNC_PREWRITE);
3384
3385 txq->sw_tx_ring[idx].mp = m_head;
3386
3387 first_bd = (struct eth_tx_1st_bd *)ecore_chain_produce(&txq->tx_pbl);
3388
3389 memset(first_bd, 0, sizeof(*first_bd));
3390
3391 first_bd->data.bd_flags.bitfields =
3392 1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT;
3393
3394 BD_SET_UNMAP_ADDR_LEN(first_bd, segs->ds_addr, segs->ds_len);
3395
3396 nbd++;
3397
3398 if (m_head->m_pkthdr.csum_flags & CSUM_IP) {
3399 first_bd->data.bd_flags.bitfields |=
3400 (1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT);
3401 }
3402
3403 if (m_head->m_pkthdr.csum_flags &
3404 (CSUM_UDP | CSUM_TCP | CSUM_TCP_IPV6 | CSUM_UDP_IPV6)) {
3405 first_bd->data.bd_flags.bitfields |=
3406 (1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT);
3407 }
3408
3409 if (m_head->m_flags & M_VLANTAG) {
3410 first_bd->data.vlan = m_head->m_pkthdr.ether_vtag;
3411 first_bd->data.bd_flags.bitfields |=
3412 (1 << ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT);
3413 }
3414
3415 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
3416 first_bd->data.bd_flags.bitfields |=
3417 (1 << ETH_TX_1ST_BD_FLAGS_LSO_SHIFT);
3418 first_bd->data.bd_flags.bitfields |=
3419 (1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT);
3420
3421 nbds_in_hdr = 1;
3422
3423 if (offset == segs->ds_len) {
3424 BD_SET_UNMAP_ADDR_LEN(first_bd, segs->ds_addr, offset);
3425 segs++;
3426 seg_idx++;
3427
3428 second_bd = (struct eth_tx_2nd_bd *)
3429 ecore_chain_produce(&txq->tx_pbl);
3430 memset(second_bd, 0, sizeof(*second_bd));
3431 nbd++;
3432
3433 if (seg_idx < nsegs) {
3434 BD_SET_UNMAP_ADDR_LEN(second_bd, \
3435 (segs->ds_addr), (segs->ds_len));
3436 segs++;
3437 seg_idx++;
3438 }
3439
3440 third_bd = (struct eth_tx_3rd_bd *)
3441 ecore_chain_produce(&txq->tx_pbl);
3442 memset(third_bd, 0, sizeof(*third_bd));
3443 third_bd->data.lso_mss = m_head->m_pkthdr.tso_segsz;
3444 third_bd->data.bitfields |=
3445 (nbds_in_hdr<<ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT);
3446 nbd++;
3447
3448 if (seg_idx < nsegs) {
3449 BD_SET_UNMAP_ADDR_LEN(third_bd, \
3450 (segs->ds_addr), (segs->ds_len));
3451 segs++;
3452 seg_idx++;
3453 }
3454
3455 for (; seg_idx < nsegs; seg_idx++) {
3456 tx_data_bd = (struct eth_tx_bd *)
3457 ecore_chain_produce(&txq->tx_pbl);
3458 memset(tx_data_bd, 0, sizeof(*tx_data_bd));
3459 BD_SET_UNMAP_ADDR_LEN(tx_data_bd, \
3460 segs->ds_addr,\
3461 segs->ds_len);
3462 segs++;
3463 nbd++;
3464 }
3465
3466 } else if (offset < segs->ds_len) {
3467 BD_SET_UNMAP_ADDR_LEN(first_bd, segs->ds_addr, offset);
3468
3469 second_bd = (struct eth_tx_2nd_bd *)
3470 ecore_chain_produce(&txq->tx_pbl);
3471 memset(second_bd, 0, sizeof(*second_bd));
3472 BD_SET_UNMAP_ADDR_LEN(second_bd, \
3473 (segs->ds_addr + offset),\
3474 (segs->ds_len - offset));
3475 nbd++;
3476 segs++;
3477
3478 third_bd = (struct eth_tx_3rd_bd *)
3479 ecore_chain_produce(&txq->tx_pbl);
3480 memset(third_bd, 0, sizeof(*third_bd));
3481
3482 BD_SET_UNMAP_ADDR_LEN(third_bd, \
3483 segs->ds_addr,\
3484 segs->ds_len);
3485 third_bd->data.lso_mss = m_head->m_pkthdr.tso_segsz;
3486 third_bd->data.bitfields |=
3487 (nbds_in_hdr<<ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT);
3488 segs++;
3489 nbd++;
3490
3491 for (seg_idx = 2; seg_idx < nsegs; seg_idx++) {
3492 tx_data_bd = (struct eth_tx_bd *)
3493 ecore_chain_produce(&txq->tx_pbl);
3494 memset(tx_data_bd, 0, sizeof(*tx_data_bd));
3495 BD_SET_UNMAP_ADDR_LEN(tx_data_bd, \
3496 segs->ds_addr,\
3497 segs->ds_len);
3498 segs++;
3499 nbd++;
3500 }
3501
3502 } else {
3503 offset = offset - segs->ds_len;
3504 segs++;
3505
3506 for (seg_idx = 1; seg_idx < nsegs; seg_idx++) {
3507 if (offset)
3508 nbds_in_hdr++;
3509
3510 tx_data_bd = (struct eth_tx_bd *)
3511 ecore_chain_produce(&txq->tx_pbl);
3512 memset(tx_data_bd, 0, sizeof(*tx_data_bd));
3513
3514 if (second_bd == NULL) {
3515 second_bd = (struct eth_tx_2nd_bd *)
3516 tx_data_bd;
3517 } else if (third_bd == NULL) {
3518 third_bd = (struct eth_tx_3rd_bd *)
3519 tx_data_bd;
3520 }
3521
3522 if (offset && (offset < segs->ds_len)) {
3523 BD_SET_UNMAP_ADDR_LEN(tx_data_bd,\
3524 segs->ds_addr, offset);
3525
3526 tx_data_bd = (struct eth_tx_bd *)
3527 ecore_chain_produce(&txq->tx_pbl);
3528
3529 memset(tx_data_bd, 0,
3530 sizeof(*tx_data_bd));
3531
3532 if (second_bd == NULL) {
3533 second_bd =
3534 (struct eth_tx_2nd_bd *)tx_data_bd;
3535 } else if (third_bd == NULL) {
3536 third_bd =
3537 (struct eth_tx_3rd_bd *)tx_data_bd;
3538 }
3539 BD_SET_UNMAP_ADDR_LEN(tx_data_bd,\
3540 (segs->ds_addr + offset), \
3541 (segs->ds_len - offset));
3542 nbd++;
3543 offset = 0;
3544 } else {
3545 if (offset)
3546 offset = offset - segs->ds_len;
3547 BD_SET_UNMAP_ADDR_LEN(tx_data_bd,\
3548 segs->ds_addr, segs->ds_len);
3549 }
3550 segs++;
3551 nbd++;
3552 }
3553
3554 if (third_bd == NULL) {
3555 third_bd = (struct eth_tx_3rd_bd *)
3556 ecore_chain_produce(&txq->tx_pbl);
3557 memset(third_bd, 0, sizeof(*third_bd));
3558 }
3559
3560 third_bd->data.lso_mss = m_head->m_pkthdr.tso_segsz;
3561 third_bd->data.bitfields |=
3562 (nbds_in_hdr<<ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT);
3563 }
3564 fp->tx_tso_pkts++;
3565 } else {
3566 segs++;
3567 for (seg_idx = 1; seg_idx < nsegs; seg_idx++) {
3568 tx_data_bd = (struct eth_tx_bd *)
3569 ecore_chain_produce(&txq->tx_pbl);
3570 memset(tx_data_bd, 0, sizeof(*tx_data_bd));
3571 BD_SET_UNMAP_ADDR_LEN(tx_data_bd, segs->ds_addr,\
3572 segs->ds_len);
3573 segs++;
3574 nbd++;
3575 }
3576 first_bd->data.bitfields =
3577 (m_head->m_pkthdr.len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK)
3578 << ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT;
3579 first_bd->data.bitfields =
3580 htole16(first_bd->data.bitfields);
3581 fp->tx_non_tso_pkts++;
3582 }
3583
3584 first_bd->data.nbds = nbd;
3585
3586 if (ha->dbg_trace_tso_pkt_len) {
3587 if (fp->tx_tso_max_nsegs < nsegs)
3588 fp->tx_tso_max_nsegs = nsegs;
3589
3590 if ((nsegs < fp->tx_tso_min_nsegs) || (!fp->tx_tso_min_nsegs))
3591 fp->tx_tso_min_nsegs = nsegs;
3592 }
3593
3594 txq->sw_tx_ring[idx].nsegs = nsegs;
3595 txq->sw_tx_prod = (txq->sw_tx_prod + 1) & (TX_RING_SIZE - 1);
3596
3597 txq->tx_db.data.bd_prod =
3598 htole16(ecore_chain_get_prod_idx(&txq->tx_pbl));
3599
3600 qlnx_txq_doorbell_wr32(ha, txq->doorbell_addr, txq->tx_db.raw);
3601
3602 QL_DPRINT8(ha, "exit[%d]\n", fp->rss_id);
3603 return (0);
3604 }
3605
3606 static void
qlnx_stop(qlnx_host_t * ha)3607 qlnx_stop(qlnx_host_t *ha)
3608 {
3609 if_t ifp = ha->ifp;
3610 int i;
3611
3612 if_setdrvflagbits(ifp, 0, (IFF_DRV_OACTIVE | IFF_DRV_RUNNING));
3613
3614 /*
3615 * We simply lock and unlock each fp->tx_mtx to
3616 * propagate the if_drv_flags
3617 * state to each tx thread
3618 */
3619 QL_DPRINT1(ha, "QLNX STATE = %d\n",ha->state);
3620
3621 if (ha->state == QLNX_STATE_OPEN) {
3622 for (i = 0; i < ha->num_rss; i++) {
3623 struct qlnx_fastpath *fp = &ha->fp_array[i];
3624
3625 mtx_lock(&fp->tx_mtx);
3626 mtx_unlock(&fp->tx_mtx);
3627
3628 if (fp->fp_taskqueue != NULL)
3629 taskqueue_enqueue(fp->fp_taskqueue,
3630 &fp->fp_task);
3631 }
3632 }
3633 #ifdef QLNX_ENABLE_IWARP
3634 if (qlnx_vf_device(ha) != 0) {
3635 qlnx_rdma_dev_close(ha);
3636 }
3637 #endif /* #ifdef QLNX_ENABLE_IWARP */
3638
3639 qlnx_unload(ha);
3640
3641 return;
3642 }
3643
3644 static int
qlnx_get_ifq_snd_maxlen(qlnx_host_t * ha)3645 qlnx_get_ifq_snd_maxlen(qlnx_host_t *ha)
3646 {
3647 return(TX_RING_SIZE - 1);
3648 }
3649
3650 static void
qlnx_get_mac_addr(qlnx_host_t * ha)3651 qlnx_get_mac_addr(qlnx_host_t *ha)
3652 {
3653 struct ecore_hwfn *p_hwfn;
3654 unsigned char mac[ETHER_ADDR_LEN];
3655 uint8_t p_is_forced;
3656
3657 p_hwfn = &ha->cdev.hwfns[0];
3658
3659 if (qlnx_vf_device(ha) != 0) {
3660 memcpy(ha->primary_mac, p_hwfn->hw_info.hw_mac_addr, ETH_ALEN);
3661 return;
3662 }
3663
3664 ecore_vf_read_bulletin(p_hwfn, &p_is_forced);
3665 if (ecore_vf_bulletin_get_forced_mac(p_hwfn, mac, &p_is_forced) ==
3666 true) {
3667 device_printf(ha->pci_dev, "%s: p_is_forced = %d"
3668 " mac_addr = %02x:%02x:%02x:%02x:%02x:%02x\n", __func__,
3669 p_is_forced, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
3670 memcpy(ha->primary_mac, mac, ETH_ALEN);
3671 }
3672 }
3673
3674 static uint32_t
qlnx_get_optics(qlnx_host_t * ha,struct qlnx_link_output * if_link)3675 qlnx_get_optics(qlnx_host_t *ha, struct qlnx_link_output *if_link)
3676 {
3677 uint32_t ifm_type = 0;
3678
3679 switch (if_link->media_type) {
3680 case MEDIA_MODULE_FIBER:
3681 case MEDIA_UNSPECIFIED:
3682 if (if_link->speed == (100 * 1000))
3683 ifm_type = IFM_100G_SR4;
3684 else if (if_link->speed == (40 * 1000))
3685 ifm_type = IFM_40G_SR4;
3686 else if (if_link->speed == (25 * 1000))
3687 ifm_type = IFM_25G_SR;
3688 else if (if_link->speed == (10 * 1000))
3689 ifm_type = (IFM_10G_LR | IFM_10G_SR);
3690 else if (if_link->speed == (1 * 1000))
3691 ifm_type = (IFM_1000_SX | IFM_1000_LX);
3692
3693 break;
3694
3695 case MEDIA_DA_TWINAX:
3696 if (if_link->speed == (100 * 1000))
3697 ifm_type = IFM_100G_CR4;
3698 else if (if_link->speed == (40 * 1000))
3699 ifm_type = IFM_40G_CR4;
3700 else if (if_link->speed == (25 * 1000))
3701 ifm_type = IFM_25G_CR;
3702 else if (if_link->speed == (10 * 1000))
3703 ifm_type = IFM_10G_TWINAX;
3704
3705 break;
3706
3707 default :
3708 ifm_type = IFM_UNKNOWN;
3709 break;
3710 }
3711 return (ifm_type);
3712 }
3713
3714 /*****************************************************************************
3715 * Interrupt Service Functions
3716 *****************************************************************************/
3717
/*
 * Pull the continuation buffers of a jumbo frame off the rx ring and
 * chain them onto mp_head as non-M_PKTHDR mbufs. 'len' is the number
 * of bytes still expected beyond what mp_head already carries; one rx
 * buffer is consumed per iteration until it is exhausted.
 *
 * Returns 0 on success, -1 on error (missing mbuf or buffer-refill
 * failure); on error any partially built chain is freed and the
 * caller is expected to drop the packet.
 */
static int
qlnx_rx_jumbo_chain(qlnx_host_t *ha, struct qlnx_fastpath *fp,
	struct mbuf *mp_head, uint16_t len)
{
	struct mbuf *mp, *mpf, *mpl;
	struct sw_rx_data *sw_rx_data;
	struct qlnx_rx_queue *rxq;
	uint16_t len_in_buffer;

	rxq = fp->rxq;
	mpf = mpl = mp = NULL;	/* mpf/mpl: head/tail of the chain built here */

	while (len) {
		/* Advance to the next sw ring entry before reading it. */
		rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);

		sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons];
		mp = sw_rx_data->data;

		if (mp == NULL) {
			QL_DPRINT1(ha, "mp = NULL\n");
			fp->err_rx_mp_null++;
			/*
			 * NOTE(review): this skips one more sw slot but does
			 * not consume an rx_bd_ring element — presumably the
			 * rings are resynchronized elsewhere; confirm.
			 */
			rxq->sw_rx_cons =
				(rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);

			if (mpf != NULL)
				m_freem(mpf);

			return (-1);
		}
		bus_dmamap_sync(ha->rx_tag, sw_rx_data->map,
			BUS_DMASYNC_POSTREAD);

		/* Post a replacement buffer; on failure recycle this one. */
		if (qlnx_alloc_rx_buffer(ha, rxq) != 0) {
			QL_DPRINT1(ha, "New buffer allocation failed, dropping"
				" incoming packet and reusing its buffer\n");

			qlnx_reuse_rx_data(rxq);
			fp->err_rx_alloc_errors++;

			if (mpf != NULL)
				m_freem(mpf);

			return (-1);
		}
		ecore_chain_consume(&rxq->rx_bd_ring);

		/* Each buffer holds at most rx_buf_size of the remainder. */
		if (len > rxq->rx_buf_size)
			len_in_buffer = rxq->rx_buf_size;
		else
			len_in_buffer = len;

		len = len - len_in_buffer;

		/* Continuation mbufs carry data only, no packet header. */
		mp->m_flags &= ~M_PKTHDR;
		mp->m_next = NULL;
		mp->m_len = len_in_buffer;

		if (mpf == NULL)
			mpf = mpl = mp;
		else {
			mpl->m_next = mp;
			mpl = mp;
		}
	}

	/* Attach the continuation chain to the head mbuf. */
	if (mpf != NULL)
		mp_head->m_next = mpf;

	return (0);
}
3788
/*
 * Handle an ETH_RX_CQE_TYPE_TPA_START completion: open a new hardware
 * LRO/TPA aggregation identified by cqe->tpa_agg_index.
 *
 * The current sw-ring mbuf becomes the aggregation head; any buffers
 * listed in cqe->ext_bd_len_list are consumed and chained behind it.
 * Packet metadata (rcvif, flowid/RSS hash type, checksum-valid flags,
 * VLAN tag) is set on the head now, so qlnx_tpa_end() only has to fix
 * up lengths before handing the packet to the stack.
 *
 * On any failure the aggregation is marked QLNX_AGG_STATE_ERROR so
 * the subsequent TPA_CONT/TPA_END completions drop their buffers.
 */
static void
qlnx_tpa_start(qlnx_host_t *ha,
	struct qlnx_fastpath *fp,
	struct qlnx_rx_queue *rxq,
	struct eth_fast_path_rx_tpa_start_cqe *cqe)
{
	uint32_t agg_index;
	if_t ifp = ha->ifp;
	struct mbuf *mp;
	struct mbuf *mpf = NULL, *mpl = NULL, *mpc = NULL;
	struct sw_rx_data *sw_rx_data;
	dma_addr_t addr;
	bus_dmamap_t map;
	struct eth_rx_bd *rx_bd;
	int i;
	uint8_t hash_type;

	agg_index = cqe->tpa_agg_index;

	QL_DPRINT7(ha, "[rss_id = %d]: enter\n \
		\t type = 0x%x\n \
		\t bitfields = 0x%x\n \
		\t seg_len = 0x%x\n \
		\t pars_flags = 0x%x\n \
		\t vlan_tag = 0x%x\n \
		\t rss_hash = 0x%x\n \
		\t len_on_first_bd = 0x%x\n \
		\t placement_offset = 0x%x\n \
		\t tpa_agg_index = 0x%x\n \
		\t header_len = 0x%x\n \
		\t ext_bd_len_list[0] = 0x%x\n \
		\t ext_bd_len_list[1] = 0x%x\n \
		\t ext_bd_len_list[2] = 0x%x\n \
		\t ext_bd_len_list[3] = 0x%x\n \
		\t ext_bd_len_list[4] = 0x%x\n",
		fp->rss_id, cqe->type, cqe->bitfields, cqe->seg_len,
		cqe->pars_flags.flags, cqe->vlan_tag,
		cqe->rss_hash, cqe->len_on_first_bd, cqe->placement_offset,
		cqe->tpa_agg_index, cqe->header_len,
		cqe->ext_bd_len_list[0], cqe->ext_bd_len_list[1],
		cqe->ext_bd_len_list[2], cqe->ext_bd_len_list[3],
		cqe->ext_bd_len_list[4]);

	/* Aggregation index must be within the per-queue TPA table. */
	if (agg_index >= ETH_TPA_MAX_AGGS_NUM) {
		fp->err_rx_tpa_invalid_agg_num++;
		return;
	}

	sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons];
	bus_dmamap_sync(ha->rx_tag, sw_rx_data->map, BUS_DMASYNC_POSTREAD);
	mp = sw_rx_data->data;

	QL_DPRINT7(ha, "[rss_id = %d]: mp = %p \n ", fp->rss_id, mp);

	if (mp == NULL) {
		QL_DPRINT7(ha, "[%d]: mp = NULL\n", fp->rss_id);
		fp->err_rx_mp_null++;
		rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);

		return;
	}

	/* Hardware flagged an error on this CQE: recycle and drop. */
	if ((le16toh(cqe->pars_flags.flags)) & CQE_FLAGS_ERR) {
		QL_DPRINT7(ha, "[%d]: CQE in CONS = %u has error,"
			" flags = %x, dropping incoming packet\n", fp->rss_id,
			rxq->sw_rx_cons, le16toh(cqe->pars_flags.flags));

		fp->err_rx_hw_errors++;

		qlnx_reuse_rx_data(rxq);

		QLNX_INC_IERRORS(ifp);

		return;
	}

	if (qlnx_alloc_rx_buffer(ha, rxq) != 0) {
		QL_DPRINT7(ha, "[%d]: New buffer allocation failed,"
			" dropping incoming packet and reusing its buffer\n",
			fp->rss_id);

		fp->err_rx_alloc_errors++;
		QLNX_INC_IQDROPS(ifp);

		/*
		 * Load the tpa mbuf into the rx ring and save the
		 * posted mbuf
		 */

		/*
		 * Refill failed: swap the per-aggregation spare buffer
		 * (tpa_info[].rx_buf) into the rx ring and stash the
		 * current mbuf as the new spare, then mark the
		 * aggregation errored.
		 */
		map = sw_rx_data->map;
		addr = sw_rx_data->dma_addr;

		sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_prod];

		sw_rx_data->data = rxq->tpa_info[agg_index].rx_buf.data;
		sw_rx_data->dma_addr = rxq->tpa_info[agg_index].rx_buf.dma_addr;
		sw_rx_data->map = rxq->tpa_info[agg_index].rx_buf.map;

		rxq->tpa_info[agg_index].rx_buf.data = mp;
		rxq->tpa_info[agg_index].rx_buf.dma_addr = addr;
		rxq->tpa_info[agg_index].rx_buf.map = map;

		rx_bd = (struct eth_rx_bd *)
				ecore_chain_produce(&rxq->rx_bd_ring);

		rx_bd->addr.hi = htole32(U64_HI(sw_rx_data->dma_addr));
		rx_bd->addr.lo = htole32(U64_LO(sw_rx_data->dma_addr));

		bus_dmamap_sync(ha->rx_tag, sw_rx_data->map,
			BUS_DMASYNC_PREREAD);

		rxq->sw_rx_prod = (rxq->sw_rx_prod + 1) & (RX_RING_SIZE - 1);
		rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);

		ecore_chain_consume(&rxq->rx_bd_ring);

		/* Now reuse any buffers posted in ext_bd_len_list */
		for (i = 0; i < ETH_TPA_CQE_START_LEN_LIST_SIZE; i++) {
			if (cqe->ext_bd_len_list[i] == 0)
				break;

			qlnx_reuse_rx_data(rxq);
		}

		rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_ERROR;
		return;
	}

	/* The slot must be idle; a stale state means we lost sync with fw. */
	if (rxq->tpa_info[agg_index].agg_state != QLNX_AGG_STATE_NONE) {
		QL_DPRINT7(ha, "[%d]: invalid aggregation state,"
			" dropping incoming packet and reusing its buffer\n",
			fp->rss_id);

		QLNX_INC_IQDROPS(ifp);

		/* if we already have mbuf head in aggregation free it */
		if (rxq->tpa_info[agg_index].mpf) {
			m_freem(rxq->tpa_info[agg_index].mpf);
			rxq->tpa_info[agg_index].mpl = NULL;
		}
		rxq->tpa_info[agg_index].mpf = mp;
		rxq->tpa_info[agg_index].mpl = NULL;

		rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
		ecore_chain_consume(&rxq->rx_bd_ring);

		/* Now reuse any buffers posted in ext_bd_len_list */
		for (i = 0; i < ETH_TPA_CQE_START_LEN_LIST_SIZE; i++) {
			if (cqe->ext_bd_len_list[i] == 0)
				break;

			qlnx_reuse_rx_data(rxq);
		}
		rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_ERROR;

		return;
	}

	/*
	 * first process the ext_bd_len_list
	 * if this fails then we simply drop the packet
	 */
	ecore_chain_consume(&rxq->rx_bd_ring);
	rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);

	for (i = 0; i < ETH_TPA_CQE_START_LEN_LIST_SIZE; i++) {
		QL_DPRINT7(ha, "[%d]: 4\n ", fp->rss_id);

		if (cqe->ext_bd_len_list[i] == 0)
			break;

		sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons];
		bus_dmamap_sync(ha->rx_tag, sw_rx_data->map,
			BUS_DMASYNC_POSTREAD);

		mpc = sw_rx_data->data;

		if (mpc == NULL) {
			QL_DPRINT7(ha, "[%d]: mpc = NULL\n", fp->rss_id);
			fp->err_rx_mp_null++;
			if (mpf != NULL)
				m_freem(mpf);
			mpf = mpl = NULL;
			rxq->tpa_info[agg_index].agg_state =
						QLNX_AGG_STATE_ERROR;
			ecore_chain_consume(&rxq->rx_bd_ring);
			rxq->sw_rx_cons =
				(rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
			continue;
		}

		if (qlnx_alloc_rx_buffer(ha, rxq) != 0) {
			QL_DPRINT7(ha, "[%d]: New buffer allocation failed,"
				" dropping incoming packet and reusing its"
				" buffer\n", fp->rss_id);

			qlnx_reuse_rx_data(rxq);

			if (mpf != NULL)
				m_freem(mpf);
			mpf = mpl = NULL;

			rxq->tpa_info[agg_index].agg_state =
						QLNX_AGG_STATE_ERROR;

			ecore_chain_consume(&rxq->rx_bd_ring);
			rxq->sw_rx_cons =
				(rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);

			continue;
		}

		/* Continuation buffers carry data only. */
		mpc->m_flags &= ~M_PKTHDR;
		mpc->m_next = NULL;
		mpc->m_len = cqe->ext_bd_len_list[i];

		if (mpf == NULL) {
			mpf = mpl = mpc;
		} else {
			/* Previous tail was fully used once it is chained. */
			mpl->m_len = ha->rx_buf_size;
			mpl->m_next = mpc;
			mpl = mpc;
		}

		ecore_chain_consume(&rxq->rx_bd_ring);
		rxq->sw_rx_cons =
			(rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
	}

	/* A failure above flipped the state; park the head mbuf and bail. */
	if (rxq->tpa_info[agg_index].agg_state != QLNX_AGG_STATE_NONE) {
		QL_DPRINT7(ha, "[%d]: invalid aggregation state, dropping"
			" incoming packet and reusing its buffer\n",
			fp->rss_id);

		QLNX_INC_IQDROPS(ifp);

		rxq->tpa_info[agg_index].mpf = mp;
		rxq->tpa_info[agg_index].mpl = NULL;

		return;
	}

	rxq->tpa_info[agg_index].placement_offset = cqe->placement_offset;

	if (mpf != NULL) {
		/* Head buffer is full when continuation buffers follow. */
		mp->m_len = ha->rx_buf_size;
		mp->m_next = mpf;
		rxq->tpa_info[agg_index].mpf = mp;
		rxq->tpa_info[agg_index].mpl = mpl;
	} else {
		mp->m_len = cqe->len_on_first_bd + cqe->placement_offset;
		rxq->tpa_info[agg_index].mpf = mp;
		rxq->tpa_info[agg_index].mpl = mp;
		mp->m_next = NULL;
	}

	mp->m_flags |= M_PKTHDR;

	/* assign packet to this interface */
	mp->m_pkthdr.rcvif = ifp;

	/* assume no hardware checksum has completed yet */
	mp->m_pkthdr.csum_flags = 0;

	//mp->m_pkthdr.flowid = fp->rss_id;
	mp->m_pkthdr.flowid = cqe->rss_hash;

	hash_type = cqe->bitfields &
			(ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_MASK <<
			ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_SHIFT);

	/* Translate the fw RSS hash type into the mbuf hash type. */
	switch (hash_type) {
	case RSS_HASH_TYPE_IPV4:
		M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_IPV4);
		break;

	case RSS_HASH_TYPE_TCP_IPV4:
		M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_TCP_IPV4);
		break;

	case RSS_HASH_TYPE_IPV6:
		M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_IPV6);
		break;

	case RSS_HASH_TYPE_TCP_IPV6:
		M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_TCP_IPV6);
		break;

	default:
		M_HASHTYPE_SET(mp, M_HASHTYPE_OPAQUE);
		break;
	}

	/* TPA frames are TCP: hardware verified both IP and L4 checksums. */
	mp->m_pkthdr.csum_flags |= (CSUM_IP_CHECKED | CSUM_IP_VALID |
			CSUM_DATA_VALID | CSUM_PSEUDO_HDR);

	mp->m_pkthdr.csum_data = 0xFFFF;

	if (CQE_HAS_VLAN(cqe->pars_flags.flags)) {
		mp->m_pkthdr.ether_vtag = le16toh(cqe->vlan_tag);
		mp->m_flags |= M_VLANTAG;
	}

	rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_START;

	QL_DPRINT7(ha, "[%d]: 5\n\tagg_state = %d\n\t mpf = %p mpl = %p\n",
		fp->rss_id, rxq->tpa_info[agg_index].agg_state,
		rxq->tpa_info[agg_index].mpf, rxq->tpa_info[agg_index].mpl);

	return;
}
4100
/*
 * Handle an ETH_RX_CQE_TYPE_TPA_CONT completion: append the buffers in
 * cqe->len_list to the in-flight aggregation started by
 * qlnx_tpa_start(). If the aggregation is not in QLNX_AGG_STATE_START
 * (e.g. the start already failed), the buffers are simply recycled.
 * Any chaining failure here flips the aggregation to
 * QLNX_AGG_STATE_ERROR so qlnx_tpa_end() drops the packet.
 */
static void
qlnx_tpa_cont(qlnx_host_t *ha, struct qlnx_fastpath *fp,
	struct qlnx_rx_queue *rxq,
	struct eth_fast_path_rx_tpa_cont_cqe *cqe)
{
	struct sw_rx_data *sw_rx_data;
	int i;
	struct mbuf *mpf = NULL, *mpl = NULL, *mpc = NULL;
	struct mbuf *mp;
	uint32_t agg_index;

	QL_DPRINT7(ha, "[%d]: enter\n \
		\t type = 0x%x\n \
		\t tpa_agg_index = 0x%x\n \
		\t len_list[0] = 0x%x\n \
		\t len_list[1] = 0x%x\n \
		\t len_list[2] = 0x%x\n \
		\t len_list[3] = 0x%x\n \
		\t len_list[4] = 0x%x\n \
		\t len_list[5] = 0x%x\n",
		fp->rss_id, cqe->type, cqe->tpa_agg_index,
		cqe->len_list[0], cqe->len_list[1], cqe->len_list[2],
		cqe->len_list[3], cqe->len_list[4], cqe->len_list[5]);

	agg_index = cqe->tpa_agg_index;

	if (agg_index >= ETH_TPA_MAX_AGGS_NUM) {
		QL_DPRINT7(ha, "[%d]: 0\n ", fp->rss_id);
		fp->err_rx_tpa_invalid_agg_num++;
		return;
	}

	for (i = 0; i < ETH_TPA_CQE_CONT_LEN_LIST_SIZE; i++) {
		QL_DPRINT7(ha, "[%d]: 1\n ", fp->rss_id);

		/* A zero length terminates the list. */
		if (cqe->len_list[i] == 0)
			break;

		/* Aggregation already errored: just recycle the buffer. */
		if (rxq->tpa_info[agg_index].agg_state !=
			QLNX_AGG_STATE_START) {
			qlnx_reuse_rx_data(rxq);
			continue;
		}

		sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons];
		bus_dmamap_sync(ha->rx_tag, sw_rx_data->map,
			BUS_DMASYNC_POSTREAD);

		mpc = sw_rx_data->data;

		if (mpc == NULL) {
			QL_DPRINT7(ha, "[%d]: mpc = NULL\n", fp->rss_id);

			fp->err_rx_mp_null++;
			if (mpf != NULL)
				m_freem(mpf);
			mpf = mpl = NULL;
			rxq->tpa_info[agg_index].agg_state =
						QLNX_AGG_STATE_ERROR;
			ecore_chain_consume(&rxq->rx_bd_ring);
			rxq->sw_rx_cons =
				(rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
			continue;
		}

		/* Post a replacement buffer; on failure recycle this one. */
		if (qlnx_alloc_rx_buffer(ha, rxq) != 0) {
			QL_DPRINT7(ha, "[%d]: New buffer allocation failed,"
				" dropping incoming packet and reusing its"
				" buffer\n", fp->rss_id);

			qlnx_reuse_rx_data(rxq);

			if (mpf != NULL)
				m_freem(mpf);
			mpf = mpl = NULL;

			rxq->tpa_info[agg_index].agg_state =
						QLNX_AGG_STATE_ERROR;

			ecore_chain_consume(&rxq->rx_bd_ring);
			rxq->sw_rx_cons =
				(rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);

			continue;
		}

		/* Continuation buffers carry data only, no packet header. */
		mpc->m_flags &= ~M_PKTHDR;
		mpc->m_next = NULL;
		mpc->m_len = cqe->len_list[i];

		if (mpf == NULL) {
			mpf = mpl = mpc;
		} else {
			/* Previous tail is full once another buffer follows. */
			mpl->m_len = ha->rx_buf_size;
			mpl->m_next = mpc;
			mpl = mpc;
		}

		ecore_chain_consume(&rxq->rx_bd_ring);
		rxq->sw_rx_cons =
			(rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
	}

	QL_DPRINT7(ha, "[%d]: 2\n" "\tmpf = %p mpl = %p\n",
		fp->rss_id, mpf, mpl);

	/* Splice the new chain onto the aggregation's current tail. */
	if (mpf != NULL) {
		mp = rxq->tpa_info[agg_index].mpl;
		mp->m_len = ha->rx_buf_size;
		mp->m_next = mpf;
		rxq->tpa_info[agg_index].mpl = mpl;
	}

	return;
}
4216
/*
 * Handle an ETH_RX_CQE_TYPE_TPA_END completion: append any trailing
 * buffers in cqe->len_list, fix up the chain's mbuf lengths against
 * cqe->total_packet_len, strip the placement offset, and hand the
 * completed aggregation to the network stack.
 *
 * Returns the number of coalesced segments (for the caller's rx packet
 * accounting), or 0 when the aggregation was invalid/errored and the
 * packet was dropped.
 */
static int
qlnx_tpa_end(qlnx_host_t *ha, struct qlnx_fastpath *fp,
	struct qlnx_rx_queue *rxq,
	struct eth_fast_path_rx_tpa_end_cqe *cqe)
{
	struct sw_rx_data *sw_rx_data;
	int i;
	struct mbuf *mpf = NULL, *mpl = NULL, *mpc = NULL;
	struct mbuf *mp;
	uint32_t agg_index;
	uint32_t len = 0;
	if_t ifp = ha->ifp;

	QL_DPRINT7(ha, "[%d]: enter\n \
		\t type = 0x%x\n \
		\t tpa_agg_index = 0x%x\n \
		\t total_packet_len = 0x%x\n \
		\t num_of_bds = 0x%x\n \
		\t end_reason = 0x%x\n \
		\t num_of_coalesced_segs = 0x%x\n \
		\t ts_delta = 0x%x\n \
		\t len_list[0] = 0x%x\n \
		\t len_list[1] = 0x%x\n \
		\t len_list[2] = 0x%x\n \
		\t len_list[3] = 0x%x\n",
		fp->rss_id, cqe->type, cqe->tpa_agg_index,
		cqe->total_packet_len, cqe->num_of_bds,
		cqe->end_reason, cqe->num_of_coalesced_segs, cqe->ts_delta,
		cqe->len_list[0], cqe->len_list[1], cqe->len_list[2],
		cqe->len_list[3]);

	agg_index = cqe->tpa_agg_index;

	if (agg_index >= ETH_TPA_MAX_AGGS_NUM) {
		QL_DPRINT7(ha, "[%d]: 0\n ", fp->rss_id);

		fp->err_rx_tpa_invalid_agg_num++;
		return (0);
	}

	for (i = 0; i < ETH_TPA_CQE_END_LEN_LIST_SIZE; i++) {
		QL_DPRINT7(ha, "[%d]: 1\n ", fp->rss_id);

		/* A zero length terminates the list. */
		if (cqe->len_list[i] == 0)
			break;

		/* Aggregation already errored: just recycle the buffer. */
		if (rxq->tpa_info[agg_index].agg_state !=
			QLNX_AGG_STATE_START) {
			QL_DPRINT7(ha, "[%d]: 2\n ", fp->rss_id);

			qlnx_reuse_rx_data(rxq);
			continue;
		}

		sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons];
		bus_dmamap_sync(ha->rx_tag, sw_rx_data->map,
			BUS_DMASYNC_POSTREAD);

		mpc = sw_rx_data->data;

		if (mpc == NULL) {
			QL_DPRINT7(ha, "[%d]: mpc = NULL\n", fp->rss_id);

			fp->err_rx_mp_null++;
			if (mpf != NULL)
				m_freem(mpf);
			mpf = mpl = NULL;
			rxq->tpa_info[agg_index].agg_state =
						QLNX_AGG_STATE_ERROR;
			ecore_chain_consume(&rxq->rx_bd_ring);
			rxq->sw_rx_cons =
				(rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
			continue;
		}

		/* Post a replacement buffer; on failure recycle this one. */
		if (qlnx_alloc_rx_buffer(ha, rxq) != 0) {
			QL_DPRINT7(ha, "[%d]: New buffer allocation failed,"
				" dropping incoming packet and reusing its"
				" buffer\n", fp->rss_id);

			qlnx_reuse_rx_data(rxq);

			if (mpf != NULL)
				m_freem(mpf);
			mpf = mpl = NULL;

			rxq->tpa_info[agg_index].agg_state =
						QLNX_AGG_STATE_ERROR;

			ecore_chain_consume(&rxq->rx_bd_ring);
			rxq->sw_rx_cons =
				(rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);

			continue;
		}

		/* Trailing buffers carry data only, no packet header. */
		mpc->m_flags &= ~M_PKTHDR;
		mpc->m_next = NULL;
		mpc->m_len = cqe->len_list[i];

		if (mpf == NULL) {
			mpf = mpl = mpc;
		} else {
			/* Previous tail is full once another buffer follows. */
			mpl->m_len = ha->rx_buf_size;
			mpl->m_next = mpc;
			mpl = mpc;
		}

		ecore_chain_consume(&rxq->rx_bd_ring);
		rxq->sw_rx_cons =
			(rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
	}

	QL_DPRINT7(ha, "[%d]: 5\n ", fp->rss_id);

	/* Splice any trailing chain onto the aggregation's tail. */
	if (mpf != NULL) {
		QL_DPRINT7(ha, "[%d]: 6\n ", fp->rss_id);

		mp = rxq->tpa_info[agg_index].mpl;
		mp->m_len = ha->rx_buf_size;
		mp->m_next = mpf;
	}

	/* Errored aggregation: free everything and reset the slot. */
	if (rxq->tpa_info[agg_index].agg_state != QLNX_AGG_STATE_START) {
		QL_DPRINT7(ha, "[%d]: 7\n ", fp->rss_id);

		if (rxq->tpa_info[agg_index].mpf != NULL)
			m_freem(rxq->tpa_info[agg_index].mpf);
		rxq->tpa_info[agg_index].mpf = NULL;
		rxq->tpa_info[agg_index].mpl = NULL;
		rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_NONE;
		return (0);
	}

	/* Strip the hw placement offset and set the final packet length. */
	mp = rxq->tpa_info[agg_index].mpf;
	m_adj(mp, rxq->tpa_info[agg_index].placement_offset);
	mp->m_pkthdr.len = cqe->total_packet_len;

	if (mp->m_next == NULL)
		mp->m_len = mp->m_pkthdr.len;
	else {
		/* compute the total packet length */
		mpf = mp;
		while (mpf != NULL) {
			len += mpf->m_len;
			mpf = mpf->m_next;
		}

		/* Account any residue in the last (partially filled) mbuf. */
		if (cqe->total_packet_len > len) {
			mpl = rxq->tpa_info[agg_index].mpl;
			mpl->m_len += (cqe->total_packet_len - len);
		}
	}

	QLNX_INC_IPACKETS(ifp);
	QLNX_INC_IBYTES(ifp, (cqe->total_packet_len));

	QL_DPRINT7(ha, "[%d]: 8 csum_data = 0x%x csum_flags = 0x%" PRIu64 "\n \
		m_len = 0x%x m_pkthdr_len = 0x%x\n",
		fp->rss_id, mp->m_pkthdr.csum_data,
		(uint64_t)mp->m_pkthdr.csum_flags, mp->m_len, mp->m_pkthdr.len);

	if_input(ifp, mp);

	/* Aggregation slot is free for reuse. */
	rxq->tpa_info[agg_index].mpf = NULL;
	rxq->tpa_info[agg_index].mpl = NULL;
	rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_NONE;

	return (cqe->num_of_coalesced_segs);
}
4387
/*
 * qlnx_rx_int
 *	Service the Rx completion ring of fastpath 'fp', handing up to
 *	'budget' packets to the stack (or queueing them on the soft-LRO
 *	engine when QLNX_SOFT_LRO is built in and 'lro_enable' is set).
 *
 *	Returns the number of packets processed, so the caller can keep
 *	polling while work remains.
 */
static int
qlnx_rx_int(qlnx_host_t *ha, struct qlnx_fastpath *fp, int budget,
	int lro_enable)
{
	uint16_t		hw_comp_cons, sw_comp_cons;
	int			rx_pkt = 0;
	struct qlnx_rx_queue	*rxq = fp->rxq;
	if_t			ifp = ha->ifp;
	struct ecore_dev	*cdev = &ha->cdev;
	struct ecore_hwfn	*p_hwfn;

#ifdef QLNX_SOFT_LRO
	struct lro_ctrl		*lro;

	lro = &rxq->lro;
#endif /* #ifdef QLNX_SOFT_LRO */

	hw_comp_cons = le16toh(*rxq->hw_cons_ptr);
	sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring);

	/* Rx queues are distributed across the hwfns by rss_id. */
	p_hwfn = &ha->cdev.hwfns[(fp->rss_id % cdev->num_hwfns)];

	/* Memory barrier to prevent the CPU from doing speculative reads of CQE
	 * / BD in the while-loop before reading hw_comp_cons. If the CQE is
	 * read before it is written by FW, then FW writes CQE and SB, and then
	 * the CPU reads the hw_comp_cons, it will use an old CQE.
	 */

	/* Loop to complete all indicated BDs */
	while (sw_comp_cons != hw_comp_cons) {
		union eth_rx_cqe		*cqe;
		struct eth_fast_path_rx_reg_cqe	*fp_cqe;
		struct sw_rx_data		*sw_rx_data;
		register struct mbuf		*mp;
		enum eth_rx_cqe_type		cqe_type;
		uint16_t			len, pad, len_on_first_bd;
		uint8_t				*data;
		uint8_t				hash_type;

		/* Get the CQE from the completion ring */
		cqe = (union eth_rx_cqe *)
			ecore_chain_consume(&rxq->rx_comp_ring);
		cqe_type = cqe->fast_path_regular.type;

		if (cqe_type == ETH_RX_CQE_TYPE_SLOW_PATH) {
			QL_DPRINT3(ha, "Got a slowath CQE\n");

			ecore_eth_cqe_completion(p_hwfn,
					(struct eth_slow_path_rx_cqe *)cqe);
			goto next_cqe;
		}

		/* TPA (LRO-in-hardware) CQEs are handled by their own paths */
		if (cqe_type != ETH_RX_CQE_TYPE_REGULAR) {
			switch (cqe_type) {
			case ETH_RX_CQE_TYPE_TPA_START:
				qlnx_tpa_start(ha, fp, rxq,
					&cqe->fast_path_tpa_start);
				fp->tpa_start++;
				break;

			case ETH_RX_CQE_TYPE_TPA_CONT:
				qlnx_tpa_cont(ha, fp, rxq,
					&cqe->fast_path_tpa_cont);
				fp->tpa_cont++;
				break;

			case ETH_RX_CQE_TYPE_TPA_END:
				rx_pkt += qlnx_tpa_end(ha, fp, rxq,
						&cqe->fast_path_tpa_end);
				fp->tpa_end++;
				break;

			default:
				break;
			}

			goto next_cqe;
		}

		/* Get the data from the SW ring */
		sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons];
		mp = sw_rx_data->data;

		if (mp == NULL) {
			QL_DPRINT1(ha, "mp = NULL\n");
			fp->err_rx_mp_null++;
			/* still advance past the empty slot */
			rxq->sw_rx_cons =
				(rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
			goto next_cqe;
		}
		bus_dmamap_sync(ha->rx_tag, sw_rx_data->map,
			BUS_DMASYNC_POSTREAD);

		/* non GRO */
		fp_cqe = &cqe->fast_path_regular;/* MK CR TPA check assembly */
		len =  le16toh(fp_cqe->pkt_len);
		pad = fp_cqe->placement_offset;
#if 0
		QL_DPRINT3(ha, "CQE type = %x, flags = %x, vlan = %x,"
			" len %u, parsing flags = %d pad  = %d\n",
			cqe_type, fp_cqe->bitfields,
			le16toh(fp_cqe->vlan_tag),
			len, le16toh(fp_cqe->pars_flags.flags), pad);
#endif
		data = mtod(mp, uint8_t *);
		data = data + pad;

		if (0)
			qlnx_dump_buf8(ha, __func__, data, len);

		/* For every Rx BD consumed, we allocate a new BD so the BD ring
		 * is always with a fixed size. If allocation fails, we take the
		 * consumed BD and return it to the ring in the PROD position.
		 * The packet that was received on that BD will be dropped (and
		 * not passed to the upper stack).
		 */
		/* If this is an error packet then drop it */
		if ((le16toh(cqe->fast_path_regular.pars_flags.flags)) &
			CQE_FLAGS_ERR) {
			QL_DPRINT1(ha, "CQE in CONS = %u has error, flags = %x,"
				" dropping incoming packet\n", sw_comp_cons,
			le16toh(cqe->fast_path_regular.pars_flags.flags));
			fp->err_rx_hw_errors++;

			qlnx_reuse_rx_data(rxq);

			QLNX_INC_IERRORS(ifp);

			goto next_cqe;
		}

		if (qlnx_alloc_rx_buffer(ha, rxq) != 0) {
			QL_DPRINT1(ha, "New buffer allocation failed, dropping"
				" incoming packet and reusing its buffer\n");
			qlnx_reuse_rx_data(rxq);

			fp->err_rx_alloc_errors++;

			QLNX_INC_IQDROPS(ifp);

			goto next_cqe;
		}

		ecore_chain_consume(&rxq->rx_bd_ring);

		len_on_first_bd = fp_cqe->len_on_first_bd;
		m_adj(mp, pad);
		mp->m_pkthdr.len = len;

		/* multi-BD frame: chain the remaining jumbo buffers on */
		if ((len > 60 ) && (len > len_on_first_bd)) {
			mp->m_len = len_on_first_bd;

			if (qlnx_rx_jumbo_chain(ha, fp, mp,
				(len - len_on_first_bd)) != 0) {
				m_freem(mp);

				QLNX_INC_IQDROPS(ifp);

				goto next_cqe;
			}

		} else if (len_on_first_bd < len) {
			fp->err_rx_jumbo_chain_pkts++;
		} else {
			mp->m_len = len;
		}

		mp->m_flags |= M_PKTHDR;

		/* assign packet to this interface interface */
		mp->m_pkthdr.rcvif = ifp;

		/* assume no hardware checksum has complated */
		mp->m_pkthdr.csum_flags = 0;

		mp->m_pkthdr.flowid = fp_cqe->rss_hash;

		/* map the HW RSS hash type onto the mbuf hash type */
		hash_type = fp_cqe->bitfields &
				(ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_MASK <<
				ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_SHIFT);

		switch (hash_type) {
		case RSS_HASH_TYPE_IPV4:
			M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_IPV4);
			break;

		case RSS_HASH_TYPE_TCP_IPV4:
			M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_TCP_IPV4);
			break;

		case RSS_HASH_TYPE_IPV6:
			M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_IPV6);
			break;

		case RSS_HASH_TYPE_TCP_IPV6:
			M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_TCP_IPV6);
			break;

		default:
			M_HASHTYPE_SET(mp, M_HASHTYPE_OPAQUE);
			break;
		}

		/* propagate HW checksum results into the mbuf */
		if (CQE_L3_PACKET(fp_cqe->pars_flags.flags)) {
			mp->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
		}

		if (!(CQE_IP_HDR_ERR(fp_cqe->pars_flags.flags))) {
			mp->m_pkthdr.csum_flags |= CSUM_IP_VALID;
		}

		if (CQE_L4_HAS_CSUM(fp_cqe->pars_flags.flags)) {
			mp->m_pkthdr.csum_data = 0xFFFF;
			mp->m_pkthdr.csum_flags |=
				(CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
		}

		if (CQE_HAS_VLAN(fp_cqe->pars_flags.flags)) {
			mp->m_pkthdr.ether_vtag = le16toh(fp_cqe->vlan_tag);
			mp->m_flags |= M_VLANTAG;
		}

		QLNX_INC_IPACKETS(ifp);
		QLNX_INC_IBYTES(ifp, len);

#ifdef QLNX_SOFT_LRO
		if (lro_enable)
			tcp_lro_queue_mbuf(lro, mp);
		else
			if_input(ifp, mp);
#else

		if_input(ifp, mp);

#endif /* #ifdef QLNX_SOFT_LRO */

		rx_pkt++;

		rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);

next_cqe:	/* don't consume bd rx buffer */
		ecore_chain_recycle_consumed(&rxq->rx_comp_ring);
		sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring);

		/* CR TPA - revisit how to handle budget in TPA perhaps
		   increase on "end" */
		if (rx_pkt == budget)
			break;
	} /* repeat while sw_comp_cons != hw_comp_cons... */

	/* Update producers */
	qlnx_update_rx_prod(p_hwfn, rxq);

	return rx_pkt;
}
4643
4644 /*
4645 * fast path interrupt
4646 */
4647
4648 static void
qlnx_fp_isr(void * arg)4649 qlnx_fp_isr(void *arg)
4650 {
4651 qlnx_ivec_t *ivec = arg;
4652 qlnx_host_t *ha;
4653 struct qlnx_fastpath *fp = NULL;
4654 int idx;
4655
4656 ha = ivec->ha;
4657
4658 if (ha->state != QLNX_STATE_OPEN) {
4659 return;
4660 }
4661
4662 idx = ivec->rss_idx;
4663
4664 if ((idx = ivec->rss_idx) >= ha->num_rss) {
4665 QL_DPRINT1(ha, "illegal interrupt[%d]\n", idx);
4666 ha->err_illegal_intr++;
4667 return;
4668 }
4669 fp = &ha->fp_array[idx];
4670
4671 if (fp == NULL) {
4672 ha->err_fp_null++;
4673 } else {
4674 int rx_int = 0;
4675 #ifdef QLNX_SOFT_LRO
4676 int total_rx_count = 0;
4677 #endif
4678 int lro_enable, tc;
4679 struct qlnx_tx_queue *txq;
4680 uint16_t elem_left;
4681
4682 lro_enable = if_getcapenable(ha->ifp) & IFCAP_LRO;
4683
4684 ecore_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0);
4685
4686 do {
4687 for (tc = 0; tc < ha->num_tc; tc++) {
4688 txq = fp->txq[tc];
4689
4690 if((int)(elem_left =
4691 ecore_chain_get_elem_left(&txq->tx_pbl)) <
4692 QLNX_TX_ELEM_THRESH) {
4693 if (mtx_trylock(&fp->tx_mtx)) {
4694 #ifdef QLNX_TRACE_PERF_DATA
4695 tx_compl = fp->tx_pkts_completed;
4696 #endif
4697
4698 qlnx_tx_int(ha, fp, fp->txq[tc]);
4699 #ifdef QLNX_TRACE_PERF_DATA
4700 fp->tx_pkts_compl_intr +=
4701 (fp->tx_pkts_completed - tx_compl);
4702 if ((fp->tx_pkts_completed - tx_compl) <= 32)
4703 fp->tx_comInt[0]++;
4704 else if (((fp->tx_pkts_completed - tx_compl) > 32) &&
4705 ((fp->tx_pkts_completed - tx_compl) <= 64))
4706 fp->tx_comInt[1]++;
4707 else if(((fp->tx_pkts_completed - tx_compl) > 64) &&
4708 ((fp->tx_pkts_completed - tx_compl) <= 128))
4709 fp->tx_comInt[2]++;
4710 else if(((fp->tx_pkts_completed - tx_compl) > 128))
4711 fp->tx_comInt[3]++;
4712 #endif
4713 mtx_unlock(&fp->tx_mtx);
4714 }
4715 }
4716 }
4717
4718 rx_int = qlnx_rx_int(ha, fp, ha->rx_pkt_threshold,
4719 lro_enable);
4720
4721 if (rx_int) {
4722 fp->rx_pkts += rx_int;
4723 #ifdef QLNX_SOFT_LRO
4724 total_rx_count += rx_int;
4725 #endif
4726 }
4727
4728 } while (rx_int);
4729
4730 #ifdef QLNX_SOFT_LRO
4731 {
4732 struct lro_ctrl *lro;
4733
4734 lro = &fp->rxq->lro;
4735
4736 if (lro_enable && total_rx_count) {
4737
4738 #ifdef QLNX_TRACE_LRO_CNT
4739 if (lro->lro_mbuf_count & ~1023)
4740 fp->lro_cnt_1024++;
4741 else if (lro->lro_mbuf_count & ~511)
4742 fp->lro_cnt_512++;
4743 else if (lro->lro_mbuf_count & ~255)
4744 fp->lro_cnt_256++;
4745 else if (lro->lro_mbuf_count & ~127)
4746 fp->lro_cnt_128++;
4747 else if (lro->lro_mbuf_count & ~63)
4748 fp->lro_cnt_64++;
4749 #endif /* #ifdef QLNX_TRACE_LRO_CNT */
4750
4751 tcp_lro_flush_all(lro);
4752 }
4753 }
4754 #endif /* #ifdef QLNX_SOFT_LRO */
4755
4756 ecore_sb_update_sb_idx(fp->sb_info);
4757 rmb();
4758 ecore_sb_ack(fp->sb_info, IGU_INT_ENABLE, 1);
4759 }
4760
4761 return;
4762 }
4763
4764 /*
4765 * slow path interrupt processing function
4766 * can be invoked in polled mode or in interrupt mode via taskqueue.
4767 */
4768 void
qlnx_sp_isr(void * arg)4769 qlnx_sp_isr(void *arg)
4770 {
4771 struct ecore_hwfn *p_hwfn;
4772 qlnx_host_t *ha;
4773
4774 p_hwfn = arg;
4775
4776 ha = (qlnx_host_t *)p_hwfn->p_dev;
4777
4778 ha->sp_interrupts++;
4779
4780 QL_DPRINT2(ha, "enter\n");
4781
4782 ecore_int_sp_dpc(p_hwfn);
4783
4784 QL_DPRINT2(ha, "exit\n");
4785
4786 return;
4787 }
4788
4789 /*****************************************************************************
4790 * Support Functions for DMA'able Memory
4791 *****************************************************************************/
4792
4793 static void
qlnx_dmamap_callback(void * arg,bus_dma_segment_t * segs,int nsegs,int error)4794 qlnx_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
4795 {
4796 *((bus_addr_t *)arg) = 0;
4797
4798 if (error) {
4799 printf("%s: bus_dmamap_load failed (%d)\n", __func__, error);
4800 return;
4801 }
4802
4803 *((bus_addr_t *)arg) = segs[0].ds_addr;
4804
4805 return;
4806 }
4807
/*
 * qlnx_alloc_dmabuf
 *	Create a DMA tag, allocate a zeroed coherent buffer of
 *	dma_buf->size bytes, and load its bus address into
 *	dma_buf->dma_addr.  Caller must have set dma_buf->size and
 *	dma_buf->alignment.  Returns 0 on success; on failure every
 *	intermediate resource is torn down before returning non-zero.
 */
static int
qlnx_alloc_dmabuf(qlnx_host_t *ha, qlnx_dma_t *dma_buf)
{
        int             ret = 0;
        bus_addr_t      b_addr;

        ret = bus_dma_tag_create(
                        ha->parent_tag,/* parent */
                        dma_buf->alignment,
                        ((bus_size_t)(1ULL << 32)),/* boundary */
                        BUS_SPACE_MAXADDR,      /* lowaddr */
                        BUS_SPACE_MAXADDR,      /* highaddr */
                        NULL, NULL,             /* filter, filterarg */
                        dma_buf->size,          /* maxsize */
                        1,                      /* nsegments */
                        dma_buf->size,          /* maxsegsize */
                        0,                      /* flags */
                        NULL, NULL,             /* lockfunc, lockarg */
                        &dma_buf->dma_tag);

        if (ret) {
                QL_DPRINT1(ha, "could not create dma tag\n");
                goto qlnx_alloc_dmabuf_exit;
        }
        ret = bus_dmamem_alloc(dma_buf->dma_tag,
                        (void **)&dma_buf->dma_b,
                        (BUS_DMA_ZERO | BUS_DMA_COHERENT | BUS_DMA_NOWAIT),
                        &dma_buf->dma_map);
        if (ret) {
                bus_dma_tag_destroy(dma_buf->dma_tag);
                QL_DPRINT1(ha, "bus_dmamem_alloc failed\n");
                goto qlnx_alloc_dmabuf_exit;
        }

        ret = bus_dmamap_load(dma_buf->dma_tag,
                        dma_buf->dma_map,
                        dma_buf->dma_b,
                        dma_buf->size,
                        qlnx_dmamap_callback,
                        &b_addr, BUS_DMA_NOWAIT);

	/* the callback stores 0 in b_addr on mapping failure */
        if (ret || !b_addr) {
                bus_dma_tag_destroy(dma_buf->dma_tag);
                bus_dmamem_free(dma_buf->dma_tag, dma_buf->dma_b,
                        dma_buf->dma_map);
                ret = -1;
                goto qlnx_alloc_dmabuf_exit;
        }

        dma_buf->dma_addr = b_addr;

qlnx_alloc_dmabuf_exit:

        return ret;
}
4863
4864 static void
qlnx_free_dmabuf(qlnx_host_t * ha,qlnx_dma_t * dma_buf)4865 qlnx_free_dmabuf(qlnx_host_t *ha, qlnx_dma_t *dma_buf)
4866 {
4867 bus_dmamap_unload(dma_buf->dma_tag, dma_buf->dma_map);
4868 bus_dmamem_free(dma_buf->dma_tag, dma_buf->dma_b, dma_buf->dma_map);
4869 bus_dma_tag_destroy(dma_buf->dma_tag);
4870 return;
4871 }
4872
/*
 * qlnx_dma_alloc_coherent
 *	OSAL hook: allocate 'size' bytes of coherent DMA memory, rounding
 *	the request up to a page multiple.  One extra page is allocated
 *	and a copy of the qlnx_dma_t bookkeeping struct is stashed at
 *	offset 'size' inside the buffer, so qlnx_dma_free_coherent() can
 *	recover the tag/map from just the virtual address.  Returns the
 *	KVA (or NULL) and stores the bus address through 'phys'.
 */
void *
qlnx_dma_alloc_coherent(void *ecore_dev, bus_addr_t *phys, uint32_t size)
{
        qlnx_dma_t      dma_buf;
        qlnx_dma_t      *dma_p;
        qlnx_host_t     *ha __unused;

        ha = (qlnx_host_t *)ecore_dev;

        size = (size + (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1);

        memset(&dma_buf, 0, sizeof (qlnx_dma_t));

	/* extra page holds the qlnx_dma_t trailer written below */
        dma_buf.size = size + PAGE_SIZE;
        dma_buf.alignment = 8;

        if (qlnx_alloc_dmabuf((qlnx_host_t *)ecore_dev, &dma_buf) != 0)
                return (NULL);
        bzero((uint8_t *)dma_buf.dma_b, dma_buf.size);

        *phys = dma_buf.dma_addr;

	/* stash the bookkeeping struct just past the caller-visible area */
        dma_p = (qlnx_dma_t *)((uint8_t *)dma_buf.dma_b + size);

        memcpy(dma_p, &dma_buf, sizeof(qlnx_dma_t));

        QL_DPRINT5(ha, "[%p %p %p %p 0x%08x ]\n",
                (void *)dma_buf.dma_map, (void *)dma_buf.dma_tag,
                dma_buf.dma_b, (void *)dma_buf.dma_addr, size);

        return (dma_buf.dma_b);
}
4905
/*
 * qlnx_dma_free_coherent
 *	OSAL hook: free memory obtained from qlnx_dma_alloc_coherent().
 *	'size' must match the original request; it is rounded up the
 *	same way so the hidden qlnx_dma_t trailer can be located at
 *	v_addr + size and used to tear the mapping down.
 */
void
qlnx_dma_free_coherent(void *ecore_dev, void *v_addr, bus_addr_t phys,
	uint32_t size)
{
        qlnx_dma_t dma_buf, *dma_p;
        qlnx_host_t     *ha;

        ha = (qlnx_host_t *)ecore_dev;

        if (v_addr == NULL)
                return;

        size = (size + (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1);

	/* recover the bookkeeping struct stashed by the allocator */
        dma_p = (qlnx_dma_t *)((uint8_t *)v_addr + size);

        QL_DPRINT5(ha, "[%p %p %p %p 0x%08x ]\n",
                (void *)dma_p->dma_map, (void *)dma_p->dma_tag,
                dma_p->dma_b, (void *)dma_p->dma_addr, size);

        dma_buf = *dma_p;

	/* NOTE(review): skipped when qlnxr_debug is set — presumably to
	 * keep buffers alive for RDMA debugging; confirm intent. */
        if (!ha->qlnxr_debug)
                qlnx_free_dmabuf((qlnx_host_t *)ecore_dev, &dma_buf);
        return;
}
4932
/*
 * qlnx_alloc_parent_dma_tag
 *	Create the top-level DMA tag all other driver tags inherit from
 *	(see qlnx_alloc_dmabuf()).  Sets ha->flags.parent_tag on success.
 *	Returns 0 on success, -1 on failure.
 */
static int
qlnx_alloc_parent_dma_tag(qlnx_host_t *ha)
{
        int             ret;
        device_t        dev;

        dev = ha->pci_dev;

        /*
         * Allocate parent DMA Tag
         */
        ret = bus_dma_tag_create(
                        bus_get_dma_tag(dev),   /* parent */
                        1,((bus_size_t)(1ULL << 32)),/* alignment, boundary */
                        BUS_SPACE_MAXADDR,      /* lowaddr */
                        BUS_SPACE_MAXADDR,      /* highaddr */
                        NULL, NULL,             /* filter, filterarg */
                        BUS_SPACE_MAXSIZE_32BIT,/* maxsize */
                        0,                      /* nsegments */
                        BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
                        0,                      /* flags */
                        NULL, NULL,             /* lockfunc, lockarg */
                        &ha->parent_tag);

        if (ret) {
                QL_DPRINT1(ha, "could not create parent dma tag\n");
                return (-1);
        }

        ha->flags.parent_tag = 1;

        return (0);
}
4966
4967 static void
qlnx_free_parent_dma_tag(qlnx_host_t * ha)4968 qlnx_free_parent_dma_tag(qlnx_host_t *ha)
4969 {
4970 if (ha->parent_tag != NULL) {
4971 bus_dma_tag_destroy(ha->parent_tag);
4972 ha->parent_tag = NULL;
4973 }
4974 return;
4975 }
4976
/*
 * qlnx_alloc_tx_dma_tag
 *	Create the tag used to map transmit mbuf chains: up to
 *	QLNX_MAX_SEGMENTS segments of at most QLNX_MAX_TX_MBUF_SIZE
 *	each, QLNX_MAX_TSO_FRAME_SIZE total.  Returns 0 or -1.
 *
 *	NOTE(review): parent is NULL (root tag) rather than
 *	ha->parent_tag, unlike qlnx_alloc_dmabuf() — presumably
 *	intentional; confirm.
 */
static int
qlnx_alloc_tx_dma_tag(qlnx_host_t *ha)
{
        if (bus_dma_tag_create(NULL,    /* parent */
                1, 0,    /* alignment, bounds */
                BUS_SPACE_MAXADDR,       /* lowaddr */
                BUS_SPACE_MAXADDR,       /* highaddr */
                NULL, NULL,      /* filter, filterarg */
                QLNX_MAX_TSO_FRAME_SIZE,     /* maxsize */
                QLNX_MAX_SEGMENTS,        /* nsegments */
                QLNX_MAX_TX_MBUF_SIZE,	  /* maxsegsize */
                0,        /* flags */
                NULL,    /* lockfunc */
                NULL,    /* lockfuncarg */
                &ha->tx_tag)) {
                QL_DPRINT1(ha, "tx_tag alloc failed\n");
                return (-1);
        }

        return (0);
}
4998
4999 static void
qlnx_free_tx_dma_tag(qlnx_host_t * ha)5000 qlnx_free_tx_dma_tag(qlnx_host_t *ha)
5001 {
5002 if (ha->tx_tag != NULL) {
5003 bus_dma_tag_destroy(ha->tx_tag);
5004 ha->tx_tag = NULL;
5005 }
5006 return;
5007 }
5008
/*
 * qlnx_alloc_rx_dma_tag
 *	Create the tag used to map receive buffers: a single segment of
 *	up to MJUM9BYTES (9K jumbo cluster).  Returns 0 or -1.
 *
 *	NOTE(review): parent is NULL (root tag) rather than
 *	ha->parent_tag — presumably intentional; confirm.
 */
static int
qlnx_alloc_rx_dma_tag(qlnx_host_t *ha)
{
        if (bus_dma_tag_create(NULL,    /* parent */
                        1, 0,    /* alignment, bounds */
                        BUS_SPACE_MAXADDR,       /* lowaddr */
                        BUS_SPACE_MAXADDR,       /* highaddr */
                        NULL, NULL,      /* filter, filterarg */
                        MJUM9BYTES,     /* maxsize */
                        1,        /* nsegments */
                        MJUM9BYTES,        /* maxsegsize */
                        0,        /* flags */
                        NULL,    /* lockfunc */
                        NULL,    /* lockfuncarg */
                        &ha->rx_tag)) {
                QL_DPRINT1(ha, " rx_tag alloc failed\n");

                return (-1);
        }
        return (0);
}
5030
5031 static void
qlnx_free_rx_dma_tag(qlnx_host_t * ha)5032 qlnx_free_rx_dma_tag(qlnx_host_t *ha)
5033 {
5034 if (ha->rx_tag != NULL) {
5035 bus_dma_tag_destroy(ha->rx_tag);
5036 ha->rx_tag = NULL;
5037 }
5038 return;
5039 }
5040
5041 /*********************************
5042 * Exported functions
5043 *********************************/
5044 uint32_t
qlnx_pci_bus_get_bar_size(void * ecore_dev,uint8_t bar_id)5045 qlnx_pci_bus_get_bar_size(void *ecore_dev, uint8_t bar_id)
5046 {
5047 uint32_t bar_size;
5048
5049 bar_id = bar_id * 2;
5050
5051 bar_size = bus_get_resource_count(((qlnx_host_t *)ecore_dev)->pci_dev,
5052 SYS_RES_MEMORY,
5053 PCIR_BAR(bar_id));
5054
5055 return (bar_size);
5056 }
5057
5058 uint32_t
qlnx_pci_read_config_byte(void * ecore_dev,uint32_t pci_reg,uint8_t * reg_value)5059 qlnx_pci_read_config_byte(void *ecore_dev, uint32_t pci_reg, uint8_t *reg_value)
5060 {
5061 *reg_value = pci_read_config(((qlnx_host_t *)ecore_dev)->pci_dev,
5062 pci_reg, 1);
5063 return 0;
5064 }
5065
5066 uint32_t
qlnx_pci_read_config_word(void * ecore_dev,uint32_t pci_reg,uint16_t * reg_value)5067 qlnx_pci_read_config_word(void *ecore_dev, uint32_t pci_reg,
5068 uint16_t *reg_value)
5069 {
5070 *reg_value = pci_read_config(((qlnx_host_t *)ecore_dev)->pci_dev,
5071 pci_reg, 2);
5072 return 0;
5073 }
5074
5075 uint32_t
qlnx_pci_read_config_dword(void * ecore_dev,uint32_t pci_reg,uint32_t * reg_value)5076 qlnx_pci_read_config_dword(void *ecore_dev, uint32_t pci_reg,
5077 uint32_t *reg_value)
5078 {
5079 *reg_value = pci_read_config(((qlnx_host_t *)ecore_dev)->pci_dev,
5080 pci_reg, 4);
5081 return 0;
5082 }
5083
5084 void
qlnx_pci_write_config_byte(void * ecore_dev,uint32_t pci_reg,uint8_t reg_value)5085 qlnx_pci_write_config_byte(void *ecore_dev, uint32_t pci_reg, uint8_t reg_value)
5086 {
5087 pci_write_config(((qlnx_host_t *)ecore_dev)->pci_dev,
5088 pci_reg, reg_value, 1);
5089 return;
5090 }
5091
5092 void
qlnx_pci_write_config_word(void * ecore_dev,uint32_t pci_reg,uint16_t reg_value)5093 qlnx_pci_write_config_word(void *ecore_dev, uint32_t pci_reg,
5094 uint16_t reg_value)
5095 {
5096 pci_write_config(((qlnx_host_t *)ecore_dev)->pci_dev,
5097 pci_reg, reg_value, 2);
5098 return;
5099 }
5100
5101 void
qlnx_pci_write_config_dword(void * ecore_dev,uint32_t pci_reg,uint32_t reg_value)5102 qlnx_pci_write_config_dword(void *ecore_dev, uint32_t pci_reg,
5103 uint32_t reg_value)
5104 {
5105 pci_write_config(((qlnx_host_t *)ecore_dev)->pci_dev,
5106 pci_reg, reg_value, 4);
5107 return;
5108 }
5109
5110 int
qlnx_pci_find_capability(void * ecore_dev,int cap)5111 qlnx_pci_find_capability(void *ecore_dev, int cap)
5112 {
5113 int reg;
5114 qlnx_host_t *ha;
5115
5116 ha = ecore_dev;
5117
5118 if (pci_find_cap(ha->pci_dev, PCIY_EXPRESS, ®) == 0)
5119 return reg;
5120 else {
5121 QL_DPRINT1(ha, "failed\n");
5122 return 0;
5123 }
5124 }
5125
5126 int
qlnx_pci_find_ext_capability(void * ecore_dev,int ext_cap)5127 qlnx_pci_find_ext_capability(void *ecore_dev, int ext_cap)
5128 {
5129 int reg;
5130 qlnx_host_t *ha;
5131
5132 ha = ecore_dev;
5133
5134 if (pci_find_extcap(ha->pci_dev, ext_cap, ®) == 0)
5135 return reg;
5136 else {
5137 QL_DPRINT1(ha, "failed\n");
5138 return 0;
5139 }
5140 }
5141
5142 uint32_t
qlnx_reg_rd32(void * hwfn,uint32_t reg_addr)5143 qlnx_reg_rd32(void *hwfn, uint32_t reg_addr)
5144 {
5145 uint32_t data32;
5146 struct ecore_hwfn *p_hwfn;
5147
5148 p_hwfn = hwfn;
5149
5150 data32 = bus_read_4(((qlnx_host_t *)p_hwfn->p_dev)->pci_reg, \
5151 (bus_size_t)(p_hwfn->reg_offset + reg_addr));
5152
5153 return (data32);
5154 }
5155
5156 void
qlnx_reg_wr32(void * hwfn,uint32_t reg_addr,uint32_t value)5157 qlnx_reg_wr32(void *hwfn, uint32_t reg_addr, uint32_t value)
5158 {
5159 struct ecore_hwfn *p_hwfn = hwfn;
5160
5161 bus_write_4(((qlnx_host_t *)p_hwfn->p_dev)->pci_reg, \
5162 (bus_size_t)(p_hwfn->reg_offset + reg_addr), value);
5163
5164 return;
5165 }
5166
5167 void
qlnx_reg_wr16(void * hwfn,uint32_t reg_addr,uint16_t value)5168 qlnx_reg_wr16(void *hwfn, uint32_t reg_addr, uint16_t value)
5169 {
5170 struct ecore_hwfn *p_hwfn = hwfn;
5171
5172 bus_write_2(((qlnx_host_t *)p_hwfn->p_dev)->pci_reg, \
5173 (bus_size_t)(p_hwfn->reg_offset + reg_addr), value);
5174 return;
5175 }
5176
5177 void
qlnx_dbell_wr32_db(void * hwfn,void * reg_addr,uint32_t value)5178 qlnx_dbell_wr32_db(void *hwfn, void *reg_addr, uint32_t value)
5179 {
5180 struct ecore_dev *cdev;
5181 struct ecore_hwfn *p_hwfn;
5182 uint32_t offset;
5183
5184 p_hwfn = hwfn;
5185
5186 cdev = p_hwfn->p_dev;
5187
5188 offset = (uint32_t)((uint8_t *)reg_addr - (uint8_t *)(p_hwfn->doorbells));
5189 bus_write_4(((qlnx_host_t *)cdev)->pci_dbells, offset, value);
5190
5191 return;
5192 }
5193
5194 void
qlnx_dbell_wr32(void * hwfn,uint32_t reg_addr,uint32_t value)5195 qlnx_dbell_wr32(void *hwfn, uint32_t reg_addr, uint32_t value)
5196 {
5197 struct ecore_hwfn *p_hwfn = hwfn;
5198
5199 bus_write_4(((qlnx_host_t *)p_hwfn->p_dev)->pci_dbells, \
5200 (bus_size_t)(p_hwfn->db_offset + reg_addr), value);
5201
5202 return;
5203 }
5204
5205 uint32_t
qlnx_direct_reg_rd32(void * p_hwfn,uint32_t * reg_addr)5206 qlnx_direct_reg_rd32(void *p_hwfn, uint32_t *reg_addr)
5207 {
5208 uint32_t data32;
5209 bus_size_t offset;
5210 struct ecore_dev *cdev;
5211
5212 cdev = ((struct ecore_hwfn *)p_hwfn)->p_dev;
5213 offset = (bus_size_t)((uint8_t *)reg_addr - (uint8_t *)(cdev->regview));
5214
5215 data32 = bus_read_4(((qlnx_host_t *)cdev)->pci_reg, offset);
5216
5217 return (data32);
5218 }
5219
5220 void
qlnx_direct_reg_wr32(void * p_hwfn,void * reg_addr,uint32_t value)5221 qlnx_direct_reg_wr32(void *p_hwfn, void *reg_addr, uint32_t value)
5222 {
5223 bus_size_t offset;
5224 struct ecore_dev *cdev;
5225
5226 cdev = ((struct ecore_hwfn *)p_hwfn)->p_dev;
5227 offset = (bus_size_t)((uint8_t *)reg_addr - (uint8_t *)(cdev->regview));
5228
5229 bus_write_4(((qlnx_host_t *)cdev)->pci_reg, offset, value);
5230
5231 return;
5232 }
5233
5234 void
qlnx_direct_reg_wr64(void * p_hwfn,void * reg_addr,uint64_t value)5235 qlnx_direct_reg_wr64(void *p_hwfn, void *reg_addr, uint64_t value)
5236 {
5237 bus_size_t offset;
5238 struct ecore_dev *cdev;
5239
5240 cdev = ((struct ecore_hwfn *)p_hwfn)->p_dev;
5241 offset = (bus_size_t)((uint8_t *)reg_addr - (uint8_t *)(cdev->regview));
5242
5243 bus_write_8(((qlnx_host_t *)cdev)->pci_reg, offset, value);
5244 return;
5245 }
5246
5247 void *
qlnx_zalloc(uint32_t size)5248 qlnx_zalloc(uint32_t size)
5249 {
5250 caddr_t va;
5251
5252 va = malloc((unsigned long)size, M_QLNXBUF, M_NOWAIT);
5253 bzero(va, size);
5254 return ((void *)va);
5255 }
5256
5257 void
qlnx_barrier(void * p_dev)5258 qlnx_barrier(void *p_dev)
5259 {
5260 qlnx_host_t *ha;
5261
5262 ha = ((struct ecore_dev *) p_dev)->ha;
5263 bus_barrier(ha->pci_reg, 0, 0, BUS_SPACE_BARRIER_WRITE);
5264 }
5265
/*
 * qlnx_link_update
 *	OSAL hook, called by ecore on a link change: refresh the cached
 *	link state, notify the network stack if the up/down state
 *	changed, and (when SR-IOV is compiled in) propagate the new
 *	state to the VFs.
 */
void
qlnx_link_update(void *p_hwfn)
{
	qlnx_host_t	*ha;
	int		prev_link_state;

	ha = (qlnx_host_t *)((struct ecore_hwfn *)p_hwfn)->p_dev;

	qlnx_fill_link(ha, p_hwfn, &ha->if_link);

	prev_link_state = ha->link_up;
	ha->link_up = ha->if_link.link_up;

	/* only bother the stack on an actual transition */
        if (prev_link_state !=  ha->link_up) {
                if (ha->link_up) {
                        if_link_state_change(ha->ifp, LINK_STATE_UP);
                } else {
                        if_link_state_change(ha->ifp, LINK_STATE_DOWN);
                }
        }
#ifndef QLNX_VF
#ifdef CONFIG_ECORE_SRIOV

	/* qlnx_vf_device() != 0 selects the PF path here */
	if (qlnx_vf_device(ha) != 0) {
		if (ha->sriov_initialized)
			qlnx_inform_vf_link_state(p_hwfn, ha);
	}

#endif /* #ifdef CONFIG_ECORE_SRIOV */
#endif /* #ifdef QLNX_VF */

	return;
}
5299
5300 static void
__qlnx_osal_vf_fill_acquire_resc_req(struct ecore_hwfn * p_hwfn,struct ecore_vf_acquire_sw_info * p_sw_info)5301 __qlnx_osal_vf_fill_acquire_resc_req(struct ecore_hwfn *p_hwfn,
5302 struct ecore_vf_acquire_sw_info *p_sw_info)
5303 {
5304 p_sw_info->driver_version = (QLNX_VERSION_MAJOR << 24) |
5305 (QLNX_VERSION_MINOR << 16) |
5306 QLNX_VERSION_BUILD;
5307 p_sw_info->os_type = VFPF_ACQUIRE_OS_FREEBSD;
5308
5309 return;
5310 }
5311
/*
 * qlnx_osal_vf_fill_acquire_resc_req
 *	OSAL wrapper around __qlnx_osal_vf_fill_acquire_resc_req();
 *	p_resc_req is not used here.
 */
void
qlnx_osal_vf_fill_acquire_resc_req(void *p_hwfn, void *p_resc_req,
	void *p_sw_info)
{
	__qlnx_osal_vf_fill_acquire_resc_req(p_hwfn, p_sw_info);
}
5320
/*
 * qlnx_fill_link
 *	Populate *if_link (state, speed, our capabilities, and link
 *	partner capabilities) from the MFW (PF path) or from the VF
 *	bulletin board (VF path); qlnx_vf_device() != 0 selects the
 *	PF path.
 */
void
qlnx_fill_link(qlnx_host_t *ha, struct ecore_hwfn *hwfn,
	struct qlnx_link_output *if_link)
{
	struct ecore_mcp_link_params    link_params;
	struct ecore_mcp_link_state     link_state;
	uint8_t				p_change;
	struct ecore_ptt *p_ptt = NULL;

	memset(if_link, 0, sizeof(*if_link));
	memset(&link_params, 0, sizeof(struct ecore_mcp_link_params));
	memset(&link_state, 0, sizeof(struct ecore_mcp_link_state));

	ha = (qlnx_host_t *)hwfn->p_dev;

	/* Prepare source inputs */
	/* we only deal with physical functions */
	if (qlnx_vf_device(ha) != 0) {
		/* PF: query the MFW through a PTT window */
        	p_ptt = ecore_ptt_acquire(hwfn);

	        if (p_ptt == NULL) {
			QL_DPRINT1(ha, "ecore_ptt_acquire failed\n");
                	return;
		}

		ecore_mcp_get_media_type(hwfn, p_ptt, &if_link->media_type);
		ecore_ptt_release(hwfn, p_ptt);

		memcpy(&link_params, ecore_mcp_get_link_params(hwfn),
			sizeof(link_params));

		memcpy(&link_state, ecore_mcp_get_link_state(hwfn),
			sizeof(link_state));
	} else {
		/* VF: read the bulletin published by the PF */
		ecore_mcp_get_media_type(hwfn, NULL, &if_link->media_type);
		ecore_vf_read_bulletin(hwfn, &p_change);
		ecore_vf_get_link_params(hwfn, &link_params);
		ecore_vf_get_link_state(hwfn, &link_state);
	}

	/* Set the link parameters to pass to protocol driver */
	if (link_state.link_up) {
		if_link->link_up = true;
		if_link->speed = link_state.speed;
	}

	if_link->supported_caps = QLNX_LINK_CAP_FIBRE;

	if (link_params.speed.autoneg)
		if_link->supported_caps |= QLNX_LINK_CAP_Autoneg;

	if (link_params.pause.autoneg ||
		(link_params.pause.forced_rx && link_params.pause.forced_tx))
		if_link->supported_caps |= QLNX_LINK_CAP_Asym_Pause;

	if (link_params.pause.autoneg || link_params.pause.forced_rx ||
		link_params.pause.forced_tx)
		if_link->supported_caps |= QLNX_LINK_CAP_Pause;

	/* map NVM advertised-speed bits onto our capability flags */
	if (link_params.speed.advertised_speeds &
		NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
		if_link->supported_caps |= QLNX_LINK_CAP_1000baseT_Half |
			QLNX_LINK_CAP_1000baseT_Full;

	if (link_params.speed.advertised_speeds &
		NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
		if_link->supported_caps |= QLNX_LINK_CAP_10000baseKR_Full;

	if (link_params.speed.advertised_speeds &
		NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
		if_link->supported_caps |= QLNX_LINK_CAP_25000baseKR_Full;

	/* NOTE(review): 40G tests NVM_CFG1_PORT_DRV_LINK_SPEED_40G while
	 * the other speeds use *_SPEED_CAPABILITY_MASK_* — confirm the
	 * two macros select the same bit. */
	if (link_params.speed.advertised_speeds &
		NVM_CFG1_PORT_DRV_LINK_SPEED_40G)
		if_link->supported_caps |= QLNX_LINK_CAP_40000baseLR4_Full;

	if (link_params.speed.advertised_speeds &
		NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
		if_link->supported_caps |= QLNX_LINK_CAP_50000baseKR2_Full;

	if (link_params.speed.advertised_speeds &
		NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
		if_link->supported_caps |= QLNX_LINK_CAP_100000baseKR4_Full;

	if_link->advertised_caps = if_link->supported_caps;

	if_link->autoneg = link_params.speed.autoneg;
	if_link->duplex = QLNX_LINK_DUPLEX;

	/* Link partner capabilities */

	if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_1G_HD)
		if_link->link_partner_caps |= QLNX_LINK_CAP_1000baseT_Half;

	if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_1G_FD)
		if_link->link_partner_caps |= QLNX_LINK_CAP_1000baseT_Full;

	if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_10G)
		if_link->link_partner_caps |= QLNX_LINK_CAP_10000baseKR_Full;

	if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_25G)
		if_link->link_partner_caps |= QLNX_LINK_CAP_25000baseKR_Full;

	if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_40G)
		if_link->link_partner_caps |= QLNX_LINK_CAP_40000baseLR4_Full;

	if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_50G)
		if_link->link_partner_caps |= QLNX_LINK_CAP_50000baseKR2_Full;

	if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_100G)
		if_link->link_partner_caps |= QLNX_LINK_CAP_100000baseKR4_Full;

	if (link_state.an_complete)
		if_link->link_partner_caps |= QLNX_LINK_CAP_Autoneg;

	if (link_state.partner_adv_pause)
		if_link->link_partner_caps |= QLNX_LINK_CAP_Pause;

	if ((link_state.partner_adv_pause ==
		ECORE_LINK_PARTNER_ASYMMETRIC_PAUSE) ||
		(link_state.partner_adv_pause ==
			ECORE_LINK_PARTNER_BOTH_PAUSE))
		if_link->link_partner_caps |= QLNX_LINK_CAP_Asym_Pause;

	return;
}
5446
5447 void
qlnx_schedule_recovery(void * p_hwfn)5448 qlnx_schedule_recovery(void *p_hwfn)
5449 {
5450 qlnx_host_t *ha;
5451
5452 ha = (qlnx_host_t *)((struct ecore_hwfn *)p_hwfn)->p_dev;
5453
5454 if (qlnx_vf_device(ha) != 0) {
5455 taskqueue_enqueue(ha->err_taskqueue, &ha->err_task);
5456 }
5457
5458 return;
5459 }
5460
5461 static int
qlnx_nic_setup(struct ecore_dev * cdev,struct ecore_pf_params * func_params)5462 qlnx_nic_setup(struct ecore_dev *cdev, struct ecore_pf_params *func_params)
5463 {
5464 int rc, i;
5465
5466 for (i = 0; i < cdev->num_hwfns; i++) {
5467 struct ecore_hwfn *p_hwfn = &cdev->hwfns[i];
5468 p_hwfn->pf_params = *func_params;
5469
5470 #ifdef QLNX_ENABLE_IWARP
5471 if (qlnx_vf_device((qlnx_host_t *)cdev) != 0) {
5472 p_hwfn->using_ll2 = true;
5473 }
5474 #endif /* #ifdef QLNX_ENABLE_IWARP */
5475 }
5476
5477 rc = ecore_resc_alloc(cdev);
5478 if (rc)
5479 goto qlnx_nic_setup_exit;
5480
5481 ecore_resc_setup(cdev);
5482
5483 qlnx_nic_setup_exit:
5484
5485 return rc;
5486 }
5487
5488 static int
qlnx_nic_start(struct ecore_dev * cdev)5489 qlnx_nic_start(struct ecore_dev *cdev)
5490 {
5491 int rc;
5492 struct ecore_hw_init_params params;
5493
5494 bzero(¶ms, sizeof (struct ecore_hw_init_params));
5495
5496 params.p_tunn = NULL;
5497 params.b_hw_start = true;
5498 params.int_mode = cdev->int_mode;
5499 params.allow_npar_tx_switch = true;
5500 params.bin_fw_data = NULL;
5501
5502 rc = ecore_hw_init(cdev, ¶ms);
5503 if (rc) {
5504 ecore_resc_free(cdev);
5505 return rc;
5506 }
5507
5508 return 0;
5509 }
5510
/*
 * qlnx_slowpath_start
 *	Bring up the ecore slow path: build the PF parameters (with
 *	RDMA parameters for iWARP/RoCE personalities when
 *	QLNX_ENABLE_IWARP is built in), allocate/set up resources via
 *	qlnx_nic_setup(), and initialize the hardware via
 *	qlnx_nic_start().  Returns 0 on success.
 */
static int
qlnx_slowpath_start(qlnx_host_t *ha)
{
	struct ecore_dev	*cdev;
	struct ecore_pf_params	pf_params;
	int			rc;

	memset(&pf_params, 0, sizeof(struct ecore_pf_params));
	/* one connection per RSS queue per traffic class (+1) */
	pf_params.eth_pf_params.num_cons  =
		(ha->num_rss) * (ha->num_tc + 1);

#ifdef QLNX_ENABLE_IWARP
	if (qlnx_vf_device(ha) != 0) {
		if(ha->personality == ECORE_PCI_ETH_IWARP) {
			device_printf(ha->pci_dev, "setting parameters required by iWARP dev\n");
			pf_params.rdma_pf_params.num_qps = 1024;
			pf_params.rdma_pf_params.num_srqs = 1024;
			pf_params.rdma_pf_params.gl_pi = ECORE_ROCE_PROTOCOL_INDEX;
			pf_params.rdma_pf_params.rdma_protocol = ECORE_RDMA_PROTOCOL_IWARP;
		} else if(ha->personality == ECORE_PCI_ETH_ROCE) {
			device_printf(ha->pci_dev, "setting parameters required by RoCE dev\n");
			pf_params.rdma_pf_params.num_qps = 8192;
			pf_params.rdma_pf_params.num_srqs = 8192;
			//pf_params.rdma_pf_params.min_dpis = 0;
			pf_params.rdma_pf_params.min_dpis = 8;
			pf_params.rdma_pf_params.roce_edpm_mode = 0;
			pf_params.rdma_pf_params.gl_pi = ECORE_ROCE_PROTOCOL_INDEX;
			pf_params.rdma_pf_params.rdma_protocol = ECORE_RDMA_PROTOCOL_ROCE;
		}
	}
#endif /* #ifdef QLNX_ENABLE_IWARP */

	cdev = &ha->cdev;

	rc = qlnx_nic_setup(cdev, &pf_params);
	if (rc)
		goto qlnx_slowpath_start_exit;

	cdev->int_mode = ECORE_INT_MODE_MSIX;
	cdev->int_coalescing_mode = ECORE_COAL_MODE_ENABLE;

#ifdef QLNX_MAX_COALESCE
	/* 255us is the maximum coalescing interval the device accepts */
	cdev->rx_coalesce_usecs = 255;
	cdev->tx_coalesce_usecs = 255;
#endif

	rc = qlnx_nic_start(cdev);

	/* cache the (possibly firmware-adjusted) coalescing values */
	ha->rx_coalesce_usecs = cdev->rx_coalesce_usecs;
	ha->tx_coalesce_usecs = cdev->tx_coalesce_usecs;

#ifdef QLNX_USER_LLDP
	(void)qlnx_set_lldp_tlvx(ha, NULL);
#endif /* #ifdef QLNX_USER_LLDP */

qlnx_slowpath_start_exit:

	return (rc);
}
5570
5571 static int
qlnx_slowpath_stop(qlnx_host_t * ha)5572 qlnx_slowpath_stop(qlnx_host_t *ha)
5573 {
5574 struct ecore_dev *cdev;
5575 device_t dev = ha->pci_dev;
5576 int i;
5577
5578 cdev = &ha->cdev;
5579
5580 ecore_hw_stop(cdev);
5581
5582 for (i = 0; i < ha->cdev.num_hwfns; i++) {
5583 if (ha->sp_handle[i])
5584 (void)bus_teardown_intr(dev, ha->sp_irq[i],
5585 ha->sp_handle[i]);
5586
5587 ha->sp_handle[i] = NULL;
5588
5589 if (ha->sp_irq[i])
5590 (void) bus_release_resource(dev, SYS_RES_IRQ,
5591 ha->sp_irq_rid[i], ha->sp_irq[i]);
5592 ha->sp_irq[i] = NULL;
5593 }
5594
5595 ecore_resc_free(cdev);
5596
5597 return 0;
5598 }
5599
5600 static void
qlnx_set_id(struct ecore_dev * cdev,char name[NAME_SIZE],char ver_str[VER_SIZE])5601 qlnx_set_id(struct ecore_dev *cdev, char name[NAME_SIZE],
5602 char ver_str[VER_SIZE])
5603 {
5604 int i;
5605
5606 memcpy(cdev->name, name, NAME_SIZE);
5607
5608 for_each_hwfn(cdev, i) {
5609 snprintf(cdev->hwfns[i].name, NAME_SIZE, "%s-%d", name, i);
5610 }
5611
5612 cdev->drv_type = DRV_ID_DRV_TYPE_FREEBSD;
5613
5614 return ;
5615 }
5616
5617 void
qlnx_get_protocol_stats(void * cdev,int proto_type,void * proto_stats)5618 qlnx_get_protocol_stats(void *cdev, int proto_type, void *proto_stats)
5619 {
5620 enum ecore_mcp_protocol_type type;
5621 union ecore_mcp_protocol_stats *stats;
5622 struct ecore_eth_stats eth_stats;
5623 qlnx_host_t *ha;
5624
5625 ha = cdev;
5626 stats = proto_stats;
5627 type = proto_type;
5628
5629 switch (type) {
5630 case ECORE_MCP_LAN_STATS:
5631 ecore_get_vport_stats((struct ecore_dev *)cdev, ð_stats);
5632 stats->lan_stats.ucast_rx_pkts = eth_stats.common.rx_ucast_pkts;
5633 stats->lan_stats.ucast_tx_pkts = eth_stats.common.tx_ucast_pkts;
5634 stats->lan_stats.fcs_err = -1;
5635 break;
5636
5637 default:
5638 ha->err_get_proto_invalid_type++;
5639
5640 QL_DPRINT1(ha, "invalid protocol type 0x%x\n", type);
5641 break;
5642 }
5643 return;
5644 }
5645
5646 static int
qlnx_get_mfw_version(qlnx_host_t * ha,uint32_t * mfw_ver)5647 qlnx_get_mfw_version(qlnx_host_t *ha, uint32_t *mfw_ver)
5648 {
5649 struct ecore_hwfn *p_hwfn;
5650 struct ecore_ptt *p_ptt;
5651
5652 p_hwfn = &ha->cdev.hwfns[0];
5653 p_ptt = ecore_ptt_acquire(p_hwfn);
5654
5655 if (p_ptt == NULL) {
5656 QL_DPRINT1(ha, "ecore_ptt_acquire failed\n");
5657 return (-1);
5658 }
5659 ecore_mcp_get_mfw_ver(p_hwfn, p_ptt, mfw_ver, NULL);
5660
5661 ecore_ptt_release(p_hwfn, p_ptt);
5662
5663 return (0);
5664 }
5665
5666 static int
qlnx_get_flash_size(qlnx_host_t * ha,uint32_t * flash_size)5667 qlnx_get_flash_size(qlnx_host_t *ha, uint32_t *flash_size)
5668 {
5669 struct ecore_hwfn *p_hwfn;
5670 struct ecore_ptt *p_ptt;
5671
5672 p_hwfn = &ha->cdev.hwfns[0];
5673 p_ptt = ecore_ptt_acquire(p_hwfn);
5674
5675 if (p_ptt == NULL) {
5676 QL_DPRINT1(ha,"ecore_ptt_acquire failed\n");
5677 return (-1);
5678 }
5679 ecore_mcp_get_flash_size(p_hwfn, p_ptt, flash_size);
5680
5681 ecore_ptt_release(p_hwfn, p_ptt);
5682
5683 return (0);
5684 }
5685
5686 static int
qlnx_alloc_mem_arrays(qlnx_host_t * ha)5687 qlnx_alloc_mem_arrays(qlnx_host_t *ha)
5688 {
5689 bzero(&ha->txq_array[0], (sizeof(struct qlnx_tx_queue) * QLNX_MAX_RSS));
5690 bzero(&ha->rxq_array[0], (sizeof(struct qlnx_rx_queue) * QLNX_MAX_RSS));
5691 bzero(&ha->sb_array[0], (sizeof(struct ecore_sb_info) * QLNX_MAX_RSS));
5692
5693 return 0;
5694 }
5695
5696 static void
qlnx_init_fp(qlnx_host_t * ha)5697 qlnx_init_fp(qlnx_host_t *ha)
5698 {
5699 int rss_id, txq_array_index, tc;
5700
5701 for (rss_id = 0; rss_id < ha->num_rss; rss_id++) {
5702 struct qlnx_fastpath *fp = &ha->fp_array[rss_id];
5703
5704 fp->rss_id = rss_id;
5705 fp->edev = ha;
5706 fp->sb_info = &ha->sb_array[rss_id];
5707 fp->rxq = &ha->rxq_array[rss_id];
5708 fp->rxq->rxq_id = rss_id;
5709
5710 for (tc = 0; tc < ha->num_tc; tc++) {
5711 txq_array_index = tc * ha->num_rss + rss_id;
5712 fp->txq[tc] = &ha->txq_array[txq_array_index];
5713 fp->txq[tc]->index = txq_array_index;
5714 }
5715
5716 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d", qlnx_name_str,
5717 rss_id);
5718
5719 fp->tx_ring_full = 0;
5720
5721 /* reset all the statistics counters */
5722
5723 fp->tx_pkts_processed = 0;
5724 fp->tx_pkts_freed = 0;
5725 fp->tx_pkts_transmitted = 0;
5726 fp->tx_pkts_completed = 0;
5727
5728 #ifdef QLNX_TRACE_PERF_DATA
5729 fp->tx_pkts_trans_ctx = 0;
5730 fp->tx_pkts_compl_ctx = 0;
5731 fp->tx_pkts_trans_fp = 0;
5732 fp->tx_pkts_compl_fp = 0;
5733 fp->tx_pkts_compl_intr = 0;
5734 #endif
5735 fp->tx_lso_wnd_min_len = 0;
5736 fp->tx_defrag = 0;
5737 fp->tx_nsegs_gt_elem_left = 0;
5738 fp->tx_tso_max_nsegs = 0;
5739 fp->tx_tso_min_nsegs = 0;
5740 fp->err_tx_nsegs_gt_elem_left = 0;
5741 fp->err_tx_dmamap_create = 0;
5742 fp->err_tx_defrag_dmamap_load = 0;
5743 fp->err_tx_non_tso_max_seg = 0;
5744 fp->err_tx_dmamap_load = 0;
5745 fp->err_tx_defrag = 0;
5746 fp->err_tx_free_pkt_null = 0;
5747 fp->err_tx_cons_idx_conflict = 0;
5748
5749 fp->rx_pkts = 0;
5750 fp->err_m_getcl = 0;
5751 fp->err_m_getjcl = 0;
5752 }
5753 return;
5754 }
5755
5756 void
qlnx_free_mem_sb(qlnx_host_t * ha,struct ecore_sb_info * sb_info)5757 qlnx_free_mem_sb(qlnx_host_t *ha, struct ecore_sb_info *sb_info)
5758 {
5759 struct ecore_dev *cdev;
5760
5761 cdev = &ha->cdev;
5762
5763 if (sb_info->sb_virt) {
5764 OSAL_DMA_FREE_COHERENT(cdev, ((void *)sb_info->sb_virt),
5765 (sb_info->sb_phys), (sizeof(*sb_info->sb_virt)));
5766 sb_info->sb_virt = NULL;
5767 }
5768 }
5769
/*
 * Register a status block with ecore.  Global status-block ids are striped
 * across the hw-functions: sb_id % num_hwfns selects the owning hwfn and
 * sb_id / num_hwfns is the id relative to that hwfn.
 */
static int
qlnx_sb_init(struct ecore_dev *cdev, struct ecore_sb_info *sb_info,
	void *sb_virt_addr, bus_addr_t sb_phy_addr, u16 sb_id)
{
	struct ecore_hwfn *p_hwfn;
	int hwfn_index, rc;
	u16 rel_sb_id;

	hwfn_index = sb_id % cdev->num_hwfns;
	p_hwfn = &cdev->hwfns[hwfn_index];
	rel_sb_id = sb_id / cdev->num_hwfns;

	/* The driver casts cdev <-> qlnx_host_t elsewhere too (e.g.
	 * qlnx_start_vport) — presumably cdev is the first member of
	 * qlnx_host_t; confirm against the struct definition. */
	QL_DPRINT2(((qlnx_host_t *)cdev),
		"hwfn_index = %d p_hwfn = %p sb_id = 0x%x rel_sb_id = 0x%x \
		sb_info = %p sb_virt_addr = %p sb_phy_addr = %p\n",
		hwfn_index, p_hwfn, sb_id, rel_sb_id, sb_info,
		sb_virt_addr, (void *)sb_phy_addr);

	rc = ecore_int_sb_init(p_hwfn, p_hwfn->p_main_ptt, sb_info,
		sb_virt_addr, sb_phy_addr, rel_sb_id);

	return rc;
}
5793
5794 /* This function allocates fast-path status block memory */
5795 int
qlnx_alloc_mem_sb(qlnx_host_t * ha,struct ecore_sb_info * sb_info,u16 sb_id)5796 qlnx_alloc_mem_sb(qlnx_host_t *ha, struct ecore_sb_info *sb_info, u16 sb_id)
5797 {
5798 struct status_block_e4 *sb_virt;
5799 bus_addr_t sb_phys;
5800 int rc;
5801 uint32_t size;
5802 struct ecore_dev *cdev;
5803
5804 cdev = &ha->cdev;
5805
5806 size = sizeof(*sb_virt);
5807 sb_virt = OSAL_DMA_ALLOC_COHERENT(cdev, (&sb_phys), size);
5808
5809 if (!sb_virt) {
5810 QL_DPRINT1(ha, "Status block allocation failed\n");
5811 return -ENOMEM;
5812 }
5813
5814 rc = qlnx_sb_init(cdev, sb_info, sb_virt, sb_phys, sb_id);
5815 if (rc) {
5816 OSAL_DMA_FREE_COHERENT(cdev, sb_virt, sb_phys, size);
5817 }
5818
5819 return rc;
5820 }
5821
5822 static void
qlnx_free_rx_buffers(qlnx_host_t * ha,struct qlnx_rx_queue * rxq)5823 qlnx_free_rx_buffers(qlnx_host_t *ha, struct qlnx_rx_queue *rxq)
5824 {
5825 int i;
5826 struct sw_rx_data *rx_buf;
5827
5828 for (i = 0; i < rxq->num_rx_buffers; i++) {
5829 rx_buf = &rxq->sw_rx_ring[i];
5830
5831 if (rx_buf->data != NULL) {
5832 if (rx_buf->map != NULL) {
5833 bus_dmamap_unload(ha->rx_tag, rx_buf->map);
5834 bus_dmamap_destroy(ha->rx_tag, rx_buf->map);
5835 rx_buf->map = NULL;
5836 }
5837 m_freem(rx_buf->data);
5838 rx_buf->data = NULL;
5839 }
5840 }
5841 return;
5842 }
5843
/*
 * Release everything attached to one Rx queue: posted receive buffers,
 * TPA aggregation buffers, the software ring, and the two firmware rings.
 */
static void
qlnx_free_mem_rxq(qlnx_host_t *ha, struct qlnx_rx_queue *rxq)
{
	struct ecore_dev *cdev;
	int i;

	cdev = &ha->cdev;

	qlnx_free_rx_buffers(ha, rxq);

	/* Free each TPA context's pre-allocated buffer plus any
	 * partially assembled packet (mpf) still attached to it. */
	for (i = 0; i < ETH_TPA_MAX_AGGS_NUM; i++) {
		qlnx_free_tpa_mbuf(ha, &rxq->tpa_info[i]);
		if (rxq->tpa_info[i].mpf != NULL)
			m_freem(rxq->tpa_info[i].mpf);
	}

	bzero((void *)&rxq->sw_rx_ring[0],
		(sizeof (struct sw_rx_data) * RX_RING_SIZE));

	/* Free the real RQ ring used by FW */
	if (rxq->rx_bd_ring.p_virt_addr) {
		ecore_chain_free(cdev, &rxq->rx_bd_ring);
		rxq->rx_bd_ring.p_virt_addr = NULL;
	}

	/* Free the real completion ring used by FW */
	if (rxq->rx_comp_ring.p_virt_addr &&
	    rxq->rx_comp_ring.pbl_sp.p_virt_table) {
		ecore_chain_free(cdev, &rxq->rx_comp_ring);
		rxq->rx_comp_ring.p_virt_addr = NULL;
		rxq->rx_comp_ring.pbl_sp.p_virt_table = NULL;
	}

#ifdef QLNX_SOFT_LRO
	{
		struct lro_ctrl *lro;

		lro = &rxq->lro;
		tcp_lro_free(lro);
	}
#endif /* #ifdef QLNX_SOFT_LRO */

	return;
}
5888
/*
 * Allocate one cluster mbuf, DMA-map it, record it at the software ring's
 * producer slot and post a BD for it on the firmware Rx ring.
 * Returns 0 on success or -ENOMEM.
 */
static int
qlnx_alloc_rx_buffer(qlnx_host_t *ha, struct qlnx_rx_queue *rxq)
{
	register struct mbuf *mp;
	uint16_t rx_buf_size;
	struct sw_rx_data *sw_rx_data;
	struct eth_rx_bd *rx_bd;
	dma_addr_t dma_addr;
	bus_dmamap_t map;
	bus_dma_segment_t segs[1];
	int nsegs;
	int ret;

	rx_buf_size = rxq->rx_buf_size;

	mp = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, rx_buf_size);

	if (mp == NULL) {
		QL_DPRINT1(ha, "Failed to allocate Rx data\n");
		return -ENOMEM;
	}

	mp->m_len = mp->m_pkthdr.len = rx_buf_size;

	/* NOTE(review): a NULL dmamap is passed to the load below —
	 * presumably valid for ha->rx_tag on this platform; confirm
	 * against bus_dma(9). */
	map = (bus_dmamap_t)0;

	ret = bus_dmamap_load_mbuf_sg(ha->rx_tag, map, mp, segs, &nsegs,
	    BUS_DMA_NOWAIT);
	/* NOTE(review): segs[0] is read before ret is checked; if the
	 * load failed this may read an indeterminate value (used only
	 * in the failure test/print). */
	dma_addr = segs[0].ds_addr;

	/* The whole buffer must map to exactly one DMA segment. */
	if (ret || !dma_addr || (nsegs != 1)) {
		m_freem(mp);
		QL_DPRINT1(ha, "bus_dmamap_load failed[%d, 0x%016llx, %d]\n",
			ret, (long long unsigned int)dma_addr, nsegs);
		return -ENOMEM;
	}

	sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_prod];
	sw_rx_data->data = mp;
	sw_rx_data->dma_addr = dma_addr;
	sw_rx_data->map = map;

	/* Advance PROD and get BD pointer */
	rx_bd = (struct eth_rx_bd *)ecore_chain_produce(&rxq->rx_bd_ring);
	rx_bd->addr.hi = htole32(U64_HI(dma_addr));
	rx_bd->addr.lo = htole32(U64_LO(dma_addr));
	bus_dmamap_sync(ha->rx_tag, map, BUS_DMASYNC_PREREAD);

	/* Producer index wraps at RX_RING_SIZE (a power of two). */
	rxq->sw_rx_prod = (rxq->sw_rx_prod + 1) & (RX_RING_SIZE - 1);

	return 0;
}
5941
/*
 * Allocate and DMA-map one mbuf to back a TPA (aggregation) context.
 * Returns 0 on success or -ENOMEM.
 */
static int
qlnx_alloc_tpa_mbuf(qlnx_host_t *ha, uint16_t rx_buf_size,
	struct qlnx_agg_info *tpa)
{
	struct mbuf *mp;
	dma_addr_t dma_addr;
	bus_dmamap_t map;
	bus_dma_segment_t segs[1];
	int nsegs;
	int ret;
	struct sw_rx_data *rx_buf;

	mp = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, rx_buf_size);

	if (mp == NULL) {
		QL_DPRINT1(ha, "Failed to allocate Rx data\n");
		return -ENOMEM;
	}

	mp->m_len = mp->m_pkthdr.len = rx_buf_size;

	/* NOTE(review): a NULL dmamap is passed to the load below —
	 * presumably valid for ha->rx_tag on this platform; confirm
	 * against bus_dma(9). */
	map = (bus_dmamap_t)0;

	ret = bus_dmamap_load_mbuf_sg(ha->rx_tag, map, mp, segs, &nsegs,
		BUS_DMA_NOWAIT);
	/* NOTE(review): segs[0] is read before ret is checked; see the
	 * identical pattern in qlnx_alloc_rx_buffer(). */
	dma_addr = segs[0].ds_addr;

	if (ret || !dma_addr || (nsegs != 1)) {
		m_freem(mp);
		QL_DPRINT1(ha, "bus_dmamap_load failed[%d, 0x%016llx, %d]\n",
			ret, (long long unsigned int)dma_addr, nsegs);
		return -ENOMEM;
	}

	rx_buf = &tpa->rx_buf;

	memset(rx_buf, 0, sizeof (struct sw_rx_data));

	rx_buf->data = mp;
	rx_buf->dma_addr = dma_addr;
	rx_buf->map = map;

	/* Make the buffer device-visible before Rx DMA uses it. */
	bus_dmamap_sync(ha->rx_tag, map, BUS_DMASYNC_PREREAD);

	return (0);
}
5988
5989 static void
qlnx_free_tpa_mbuf(qlnx_host_t * ha,struct qlnx_agg_info * tpa)5990 qlnx_free_tpa_mbuf(qlnx_host_t *ha, struct qlnx_agg_info *tpa)
5991 {
5992 struct sw_rx_data *rx_buf;
5993
5994 rx_buf = &tpa->rx_buf;
5995
5996 if (rx_buf->data != NULL) {
5997 if (rx_buf->map != NULL) {
5998 bus_dmamap_unload(ha->rx_tag, rx_buf->map);
5999 bus_dmamap_destroy(ha->rx_tag, rx_buf->map);
6000 rx_buf->map = NULL;
6001 }
6002 m_freem(rx_buf->data);
6003 rx_buf->data = NULL;
6004 }
6005 return;
6006 }
6007
6008 /* This function allocates all memory needed per Rx queue */
6009 static int
qlnx_alloc_mem_rxq(qlnx_host_t * ha,struct qlnx_rx_queue * rxq)6010 qlnx_alloc_mem_rxq(qlnx_host_t *ha, struct qlnx_rx_queue *rxq)
6011 {
6012 int i, rc, num_allocated;
6013 struct ecore_dev *cdev;
6014
6015 cdev = &ha->cdev;
6016
6017 rxq->num_rx_buffers = RX_RING_SIZE;
6018
6019 rxq->rx_buf_size = ha->rx_buf_size;
6020
6021 /* Allocate the parallel driver ring for Rx buffers */
6022 bzero((void *)&rxq->sw_rx_ring[0],
6023 (sizeof (struct sw_rx_data) * RX_RING_SIZE));
6024
6025 /* Allocate FW Rx ring */
6026
6027 rc = ecore_chain_alloc(cdev,
6028 ECORE_CHAIN_USE_TO_CONSUME_PRODUCE,
6029 ECORE_CHAIN_MODE_NEXT_PTR,
6030 ECORE_CHAIN_CNT_TYPE_U16,
6031 RX_RING_SIZE,
6032 sizeof(struct eth_rx_bd),
6033 &rxq->rx_bd_ring, NULL);
6034
6035 if (rc)
6036 goto err;
6037
6038 /* Allocate FW completion ring */
6039 rc = ecore_chain_alloc(cdev,
6040 ECORE_CHAIN_USE_TO_CONSUME,
6041 ECORE_CHAIN_MODE_PBL,
6042 ECORE_CHAIN_CNT_TYPE_U16,
6043 RX_RING_SIZE,
6044 sizeof(union eth_rx_cqe),
6045 &rxq->rx_comp_ring, NULL);
6046
6047 if (rc)
6048 goto err;
6049
6050 /* Allocate buffers for the Rx ring */
6051
6052 for (i = 0; i < ETH_TPA_MAX_AGGS_NUM; i++) {
6053 rc = qlnx_alloc_tpa_mbuf(ha, rxq->rx_buf_size,
6054 &rxq->tpa_info[i]);
6055 if (rc)
6056 break;
6057 }
6058
6059 for (i = 0; i < rxq->num_rx_buffers; i++) {
6060 rc = qlnx_alloc_rx_buffer(ha, rxq);
6061 if (rc)
6062 break;
6063 }
6064 num_allocated = i;
6065 if (!num_allocated) {
6066 QL_DPRINT1(ha, "Rx buffers allocation failed\n");
6067 goto err;
6068 } else if (num_allocated < rxq->num_rx_buffers) {
6069 QL_DPRINT1(ha, "Allocated less buffers than"
6070 " desired (%d allocated)\n", num_allocated);
6071 }
6072
6073 #ifdef QLNX_SOFT_LRO
6074
6075 {
6076 struct lro_ctrl *lro;
6077
6078 lro = &rxq->lro;
6079
6080 if (tcp_lro_init_args(lro, ifp, 0, rxq->num_rx_buffers)) {
6081 QL_DPRINT1(ha, "tcp_lro_init[%d] failed\n",
6082 rxq->rxq_id);
6083 goto err;
6084 }
6085
6086 lro->ifp = ha->ifp;
6087 }
6088 #endif /* #ifdef QLNX_SOFT_LRO */
6089 return 0;
6090
6091 err:
6092 qlnx_free_mem_rxq(ha, rxq);
6093 return -ENOMEM;
6094 }
6095
6096 static void
qlnx_free_mem_txq(qlnx_host_t * ha,struct qlnx_fastpath * fp,struct qlnx_tx_queue * txq)6097 qlnx_free_mem_txq(qlnx_host_t *ha, struct qlnx_fastpath *fp,
6098 struct qlnx_tx_queue *txq)
6099 {
6100 struct ecore_dev *cdev;
6101
6102 cdev = &ha->cdev;
6103
6104 bzero((void *)&txq->sw_tx_ring[0],
6105 (sizeof (struct sw_tx_bd) * TX_RING_SIZE));
6106
6107 /* Free the real RQ ring used by FW */
6108 if (txq->tx_pbl.p_virt_addr) {
6109 ecore_chain_free(cdev, &txq->tx_pbl);
6110 txq->tx_pbl.p_virt_addr = NULL;
6111 }
6112 return;
6113 }
6114
6115 /* This function allocates all memory needed per Tx queue */
6116 static int
qlnx_alloc_mem_txq(qlnx_host_t * ha,struct qlnx_fastpath * fp,struct qlnx_tx_queue * txq)6117 qlnx_alloc_mem_txq(qlnx_host_t *ha, struct qlnx_fastpath *fp,
6118 struct qlnx_tx_queue *txq)
6119 {
6120 int ret = ECORE_SUCCESS;
6121 union eth_tx_bd_types *p_virt;
6122 struct ecore_dev *cdev;
6123
6124 cdev = &ha->cdev;
6125
6126 bzero((void *)&txq->sw_tx_ring[0],
6127 (sizeof (struct sw_tx_bd) * TX_RING_SIZE));
6128
6129 /* Allocate the real Tx ring to be used by FW */
6130 ret = ecore_chain_alloc(cdev,
6131 ECORE_CHAIN_USE_TO_CONSUME_PRODUCE,
6132 ECORE_CHAIN_MODE_PBL,
6133 ECORE_CHAIN_CNT_TYPE_U16,
6134 TX_RING_SIZE,
6135 sizeof(*p_virt),
6136 &txq->tx_pbl, NULL);
6137
6138 if (ret != ECORE_SUCCESS) {
6139 goto err;
6140 }
6141
6142 txq->num_tx_buffers = TX_RING_SIZE;
6143
6144 return 0;
6145
6146 err:
6147 qlnx_free_mem_txq(ha, fp, txq);
6148 return -ENOMEM;
6149 }
6150
6151 static void
qlnx_free_tx_br(qlnx_host_t * ha,struct qlnx_fastpath * fp)6152 qlnx_free_tx_br(qlnx_host_t *ha, struct qlnx_fastpath *fp)
6153 {
6154 struct mbuf *mp;
6155 if_t ifp = ha->ifp;
6156
6157 if (mtx_initialized(&fp->tx_mtx)) {
6158 if (fp->tx_br != NULL) {
6159 mtx_lock(&fp->tx_mtx);
6160
6161 while ((mp = drbr_dequeue(ifp, fp->tx_br)) != NULL) {
6162 fp->tx_pkts_freed++;
6163 m_freem(mp);
6164 }
6165
6166 mtx_unlock(&fp->tx_mtx);
6167
6168 buf_ring_free(fp->tx_br, M_DEVBUF);
6169 fp->tx_br = NULL;
6170 }
6171 mtx_destroy(&fp->tx_mtx);
6172 }
6173 return;
6174 }
6175
6176 static void
qlnx_free_mem_fp(qlnx_host_t * ha,struct qlnx_fastpath * fp)6177 qlnx_free_mem_fp(qlnx_host_t *ha, struct qlnx_fastpath *fp)
6178 {
6179 int tc;
6180
6181 qlnx_free_mem_sb(ha, fp->sb_info);
6182
6183 qlnx_free_mem_rxq(ha, fp->rxq);
6184
6185 for (tc = 0; tc < ha->num_tc; tc++)
6186 qlnx_free_mem_txq(ha, fp, fp->txq[tc]);
6187
6188 return;
6189 }
6190
6191 static int
qlnx_alloc_tx_br(qlnx_host_t * ha,struct qlnx_fastpath * fp)6192 qlnx_alloc_tx_br(qlnx_host_t *ha, struct qlnx_fastpath *fp)
6193 {
6194 snprintf(fp->tx_mtx_name, sizeof(fp->tx_mtx_name),
6195 "qlnx%d_fp%d_tx_mq_lock", ha->dev_unit, fp->rss_id);
6196
6197 mtx_init(&fp->tx_mtx, fp->tx_mtx_name, NULL, MTX_DEF);
6198
6199 fp->tx_br = buf_ring_alloc(TX_RING_SIZE, M_DEVBUF,
6200 M_NOWAIT, &fp->tx_mtx);
6201 if (fp->tx_br == NULL) {
6202 QL_DPRINT1(ha, "buf_ring_alloc failed for fp[%d, %d]\n",
6203 ha->dev_unit, fp->rss_id);
6204 return -ENOMEM;
6205 }
6206 return 0;
6207 }
6208
/*
 * Allocate all per-fastpath resources: status block, Rx queue and one Tx
 * queue per traffic class.  On any failure everything allocated so far is
 * released and -ENOMEM is returned.
 */
static int
qlnx_alloc_mem_fp(qlnx_host_t *ha, struct qlnx_fastpath *fp)
{
	int rc, tc;

	rc = qlnx_alloc_mem_sb(ha, fp->sb_info, fp->rss_id);
	if (rc)
		goto err;

	/*
	 * Pick the Rx cluster size from the maximum frame size.
	 * NOTE(review): with rx_jumbo_buf_eq_mtu set and a max_frame_size
	 * above MJUM16BYTES, rx_buf_size is left at its previous value.
	 */
	if (ha->rx_jumbo_buf_eq_mtu) {
		if (ha->max_frame_size <= MCLBYTES)
			ha->rx_buf_size = MCLBYTES;
		else if (ha->max_frame_size <= MJUMPAGESIZE)
			ha->rx_buf_size = MJUMPAGESIZE;
		else if (ha->max_frame_size <= MJUM9BYTES)
			ha->rx_buf_size = MJUM9BYTES;
		else if (ha->max_frame_size <= MJUM16BYTES)
			ha->rx_buf_size = MJUM16BYTES;
	} else {
		if (ha->max_frame_size <= MCLBYTES)
			ha->rx_buf_size = MCLBYTES;
		else
			ha->rx_buf_size = MJUMPAGESIZE;
	}

	rc = qlnx_alloc_mem_rxq(ha, fp->rxq);
	if (rc)
		goto err;

	/* One Tx queue per traffic class. */
	for (tc = 0; tc < ha->num_tc; tc++) {
		rc = qlnx_alloc_mem_txq(ha, fp, fp->txq[tc]);
		if (rc)
			goto err;
	}

	return 0;

err:
	qlnx_free_mem_fp(ha, fp);
	return -ENOMEM;
}
6250
6251 static void
qlnx_free_mem_load(qlnx_host_t * ha)6252 qlnx_free_mem_load(qlnx_host_t *ha)
6253 {
6254 int i;
6255
6256 for (i = 0; i < ha->num_rss; i++) {
6257 struct qlnx_fastpath *fp = &ha->fp_array[i];
6258
6259 qlnx_free_mem_fp(ha, fp);
6260 }
6261 return;
6262 }
6263
6264 static int
qlnx_alloc_mem_load(qlnx_host_t * ha)6265 qlnx_alloc_mem_load(qlnx_host_t *ha)
6266 {
6267 int rc = 0, rss_id;
6268
6269 for (rss_id = 0; rss_id < ha->num_rss; rss_id++) {
6270 struct qlnx_fastpath *fp = &ha->fp_array[rss_id];
6271
6272 rc = qlnx_alloc_mem_fp(ha, fp);
6273 if (rc)
6274 break;
6275 }
6276 return (rc);
6277 }
6278
/*
 * Send the vport-start ramrod on every hw-function (required for CMT
 * devices) and open the fastpath.  Returns 0 on success or -ENOMEM.
 */
static int
qlnx_start_vport(struct ecore_dev *cdev,
		u8 vport_id,
		u16 mtu,
		u8 drop_ttl0_flg,
		u8 inner_vlan_removal_en_flg,
		u8 tx_switching,
		u8 hw_lro_enable)
{
	int rc, i;
	struct ecore_sp_vport_start_params vport_start_params = { 0 };
	qlnx_host_t *ha __unused;

	ha = (qlnx_host_t *)cdev;

	vport_start_params.remove_inner_vlan = inner_vlan_removal_en_flg;
	/* NOTE(review): the tx_switching parameter is ignored — the
	 * field is hard-wired to 0 here. */
	vport_start_params.tx_switching = 0;
	vport_start_params.handle_ptp_pkts = 0;
	vport_start_params.only_untagged = 0;
	vport_start_params.drop_ttl0 = drop_ttl0_flg;

	/* Hardware LRO maps to the RSC TPA mode. */
	vport_start_params.tpa_mode =
		(hw_lro_enable ? ECORE_TPA_MODE_RSC : ECORE_TPA_MODE_NONE);
	vport_start_params.max_buffers_per_cqe = QLNX_TPA_MAX_AGG_BUFFERS;

	vport_start_params.vport_id = vport_id;
	vport_start_params.mtu = mtu;

	QL_DPRINT2(ha, "Setting mtu to %d and VPORT ID = %d\n", mtu, vport_id);

	for_each_hwfn(cdev, i) {
		struct ecore_hwfn *p_hwfn = &cdev->hwfns[i];

		vport_start_params.concrete_fid = p_hwfn->hw_info.concrete_fid;
		vport_start_params.opaque_fid = p_hwfn->hw_info.opaque_fid;

		rc = ecore_sp_vport_start(p_hwfn, &vport_start_params);

		if (rc) {
			QL_DPRINT1(ha, "Failed to start VPORT V-PORT %d"
				" with MTU %d\n" , vport_id, mtu);
			return -ENOMEM;
		}

		ecore_hw_start_fastpath(p_hwfn);

		QL_DPRINT2(ha, "Started V-PORT %d with MTU %d\n",
			vport_id, mtu);
	}
	return 0;
}
6330
/*
 * Translate the driver's vport-update parameters into ecore sp params and
 * issue the update ramrod on every hw-function.  For CMT (multi-hwfn)
 * devices the RSS indirection table is rewritten per engine.
 */
static int
qlnx_update_vport(struct ecore_dev *cdev,
	struct qlnx_update_vport_params *params)
{
	struct ecore_sp_vport_update_params sp_params;
	int rc, i, j, fp_index;
	struct ecore_hwfn *p_hwfn;
	struct ecore_rss_params *rss;
	qlnx_host_t *ha = (qlnx_host_t *)cdev;
	struct qlnx_fastpath *fp;

	memset(&sp_params, 0, sizeof(sp_params));
	/* Translate protocol params into sp params */
	sp_params.vport_id = params->vport_id;

	sp_params.update_vport_active_rx_flg =
		params->update_vport_active_rx_flg;
	sp_params.vport_active_rx_flg = params->vport_active_rx_flg;

	sp_params.update_vport_active_tx_flg =
		params->update_vport_active_tx_flg;
	sp_params.vport_active_tx_flg = params->vport_active_tx_flg;

	sp_params.update_inner_vlan_removal_flg =
		params->update_inner_vlan_removal_flg;
	sp_params.inner_vlan_removal_flg = params->inner_vlan_removal_flg;

	sp_params.sge_tpa_params = params->sge_tpa_params;

	/* RSS - is a bit tricky, since upper-layer isn't familiar with hwfns.
	 * We need to re-fix the rss values per engine for CMT.
	 */
	if (params->rss_params->update_rss_config)
		sp_params.rss_params = params->rss_params;
	else
		sp_params.rss_params = NULL;

	for_each_hwfn(cdev, i) {
		p_hwfn = &cdev->hwfns[i];

		/* On multi-hwfn devices, rewrite the indirection table so
		 * each engine references the queues striped onto it. */
		if ((cdev->num_hwfns > 1) &&
			params->rss_params->update_rss_config &&
			params->rss_params->rss_enable) {
			rss = params->rss_params;

			for (j = 0; j < ECORE_RSS_IND_TABLE_SIZE; j++) {
				fp_index = ((cdev->num_hwfns * j) + i) %
						ha->num_rss;

				fp = &ha->fp_array[fp_index];
				rss->rss_ind_table[j] = fp->rxq->handle;
			}

			/* Dump the resulting table, eight entries per line. */
			for (j = 0; j < ECORE_RSS_IND_TABLE_SIZE;) {
				QL_DPRINT3(ha, "%p %p %p %p %p %p %p %p \n",
					rss->rss_ind_table[j],
					rss->rss_ind_table[j+1],
					rss->rss_ind_table[j+2],
					rss->rss_ind_table[j+3],
					rss->rss_ind_table[j+4],
					rss->rss_ind_table[j+5],
					rss->rss_ind_table[j+6],
					rss->rss_ind_table[j+7]);
					j += 8;
			}
		}

		sp_params.opaque_fid = p_hwfn->hw_info.opaque_fid;

		QL_DPRINT1(ha, "Update sp vport ID=%d\n", params->vport_id);

		rc = ecore_sp_vport_update(p_hwfn, &sp_params,
				ECORE_SPQ_MODE_EBLOCK, NULL);
		if (rc) {
			QL_DPRINT1(ha, "Failed to update VPORT\n");
			return rc;
		}

		QL_DPRINT2(ha, "Updated V-PORT %d: tx_active_flag %d, \
		    rx_active_flag %d [tx_update %d], [rx_update %d]\n",
			params->vport_id, params->vport_active_tx_flg,
			params->vport_active_rx_flg,
			params->update_vport_active_tx_flg,
			params->update_vport_active_rx_flg);
	}

	return 0;
}
6419
6420 static void
qlnx_reuse_rx_data(struct qlnx_rx_queue * rxq)6421 qlnx_reuse_rx_data(struct qlnx_rx_queue *rxq)
6422 {
6423 struct eth_rx_bd *rx_bd_cons =
6424 ecore_chain_consume(&rxq->rx_bd_ring);
6425 struct eth_rx_bd *rx_bd_prod =
6426 ecore_chain_produce(&rxq->rx_bd_ring);
6427 struct sw_rx_data *sw_rx_data_cons =
6428 &rxq->sw_rx_ring[rxq->sw_rx_cons];
6429 struct sw_rx_data *sw_rx_data_prod =
6430 &rxq->sw_rx_ring[rxq->sw_rx_prod];
6431
6432 sw_rx_data_prod->data = sw_rx_data_cons->data;
6433 memcpy(rx_bd_prod, rx_bd_cons, sizeof(struct eth_rx_bd));
6434
6435 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
6436 rxq->sw_rx_prod = (rxq->sw_rx_prod + 1) & (RX_RING_SIZE - 1);
6437
6438 return;
6439 }
6440
/*
 * Publish the current Rx BD and CQE producer indices to the device with a
 * single 32-bit internal-RAM write (both 16-bit producers packed via the
 * union), fenced so the device never sees a producer ahead of its BDs.
 */
static void
qlnx_update_rx_prod(struct ecore_hwfn *p_hwfn, struct qlnx_rx_queue *rxq)
{

	uint16_t bd_prod;
	uint16_t cqe_prod;
	union {
		struct eth_rx_prod_data rx_prod_data;
		uint32_t data32;
	} rx_prods;

	bd_prod = ecore_chain_get_prod_idx(&rxq->rx_bd_ring);
	cqe_prod = ecore_chain_get_prod_idx(&rxq->rx_comp_ring);

	/* Update producers */
	rx_prods.rx_prod_data.bd_prod = htole16(bd_prod);
	rx_prods.rx_prod_data.cqe_prod = htole16(cqe_prod);

	/* Make sure that the BD and SGE data is updated before updating the
	 * producers since FW might read the BD/SGE right after the producer
	 * is updated.
	 */
	wmb();

#ifdef ECORE_CONFIG_DIRECT_HWFN
	internal_ram_wr(p_hwfn, rxq->hw_rxq_prod_addr,
		sizeof(rx_prods), &rx_prods.data32);
#else
	internal_ram_wr(rxq->hw_rxq_prod_addr,
		sizeof(rx_prods), &rx_prods.data32);
#endif

	/* mmiowb is needed to synchronize doorbell writes from more than one
	 * processor. It guarantees that the write arrives to the device before
	 * the napi lock is released and another qlnx_poll is called (possibly
	 * on another CPU). Without this barrier, the next doorbell can bypass
	 * this doorbell. This is applicable to IA64/Altix systems.
	 */
	wmb();

	return;
}
6483
6484 static uint32_t qlnx_hash_key[] = {
6485 ((0x6d << 24)|(0x5a << 16)|(0x56 << 8)|0xda),
6486 ((0x25 << 24)|(0x5b << 16)|(0x0e << 8)|0xc2),
6487 ((0x41 << 24)|(0x67 << 16)|(0x25 << 8)|0x3d),
6488 ((0x43 << 24)|(0xa3 << 16)|(0x8f << 8)|0xb0),
6489 ((0xd0 << 24)|(0xca << 16)|(0x2b << 8)|0xcb),
6490 ((0xae << 24)|(0x7b << 16)|(0x30 << 8)|0xb4),
6491 ((0x77 << 24)|(0xcb << 16)|(0x2d << 8)|0xa3),
6492 ((0x80 << 24)|(0x30 << 16)|(0xf2 << 8)|0x0c),
6493 ((0x6a << 24)|(0x42 << 16)|(0xb7 << 8)|0x3b),
6494 ((0xbe << 24)|(0xac << 16)|(0x01 << 8)|0xfa)};
6495
/*
 * Bring the datapath online: start the vport, start one Rx queue and
 * num_tc Tx queues per RSS context, program the RSS indirection/key, and
 * finally send the vport-update (enable) ramrod.  Returns 0 on success.
 */
static int
qlnx_start_queues(qlnx_host_t *ha)
{
	int rc, tc, i, vport_id = 0,
		drop_ttl0_flg = 1, vlan_removal_en = 1,
		tx_switching = 0, hw_lro_enable = 0;
	struct ecore_dev *cdev = &ha->cdev;
	struct ecore_rss_params *rss_params = &ha->rss_params;
	struct qlnx_update_vport_params vport_update_params;
	if_t ifp;
	struct ecore_hwfn *p_hwfn;
	struct ecore_sge_tpa_params tpa_params;
	struct ecore_queue_start_common_params qparams;
	struct qlnx_fastpath *fp;

	ifp = ha->ifp;

	QL_DPRINT1(ha, "Num RSS = %d\n", ha->num_rss);

	if (!ha->num_rss) {
		QL_DPRINT1(ha, "Cannot update V-VPORT as active as there"
			" are no Rx queues\n");
		return -EINVAL;
	}

	/* Hardware LRO is only considered when soft LRO is compiled out. */
#ifndef QLNX_SOFT_LRO
	hw_lro_enable = if_getcapenable(ifp) & IFCAP_LRO;
#endif /* #ifndef QLNX_SOFT_LRO */

	rc = qlnx_start_vport(cdev, vport_id, if_getmtu(ifp), drop_ttl0_flg,
			vlan_removal_en, tx_switching, hw_lro_enable);

	if (rc) {
		QL_DPRINT1(ha, "Start V-PORT failed %d\n", rc);
		return rc;
	}

	QL_DPRINT2(ha, "Start vport ramrod passed, "
		"vport_id = %d, MTU = %d, vlan_removal_en = %d\n",
		vport_id, (int)(if_getmtu(ifp) + 0xe), vlan_removal_en);

	for_each_rss(i) {
		struct ecore_rxq_start_ret_params rx_ret_params;
		struct ecore_txq_start_ret_params tx_ret_params;

		fp = &ha->fp_array[i];
		/* Queues are striped across hw-functions on CMT devices. */
		p_hwfn = &cdev->hwfns[(fp->rss_id % cdev->num_hwfns)];

		bzero(&qparams, sizeof(struct ecore_queue_start_common_params));
		bzero(&rx_ret_params,
			sizeof (struct ecore_rxq_start_ret_params));

		qparams.queue_id = i ;
		qparams.vport_id = vport_id;
		qparams.stats_id = vport_id;
		qparams.p_sb = fp->sb_info;
		qparams.sb_idx = RX_PI;


		rc = ecore_eth_rx_queue_start(p_hwfn,
			p_hwfn->hw_info.opaque_fid,
			&qparams,
			fp->rxq->rx_buf_size,	/* bd_max_bytes */
			/* bd_chain_phys_addr */
			fp->rxq->rx_bd_ring.p_phys_addr,
			/* cqe_pbl_addr */
			ecore_chain_get_pbl_phys(&fp->rxq->rx_comp_ring),
			/* cqe_pbl_size */
			ecore_chain_get_page_cnt(&fp->rxq->rx_comp_ring),
			&rx_ret_params);

		if (rc) {
			QL_DPRINT1(ha, "Start RXQ #%d failed %d\n", i, rc);
			return rc;
		}

		/* Cache the producer doorbell location, the queue handle and
		 * the hardware consumer pointer in the status block. */
		fp->rxq->hw_rxq_prod_addr = rx_ret_params.p_prod;
		fp->rxq->handle = rx_ret_params.p_handle;
		fp->rxq->hw_cons_ptr =
				&fp->sb_info->sb_virt->pi_array[RX_PI];

		/* Publish the initial BD/CQE producers to the device. */
		qlnx_update_rx_prod(p_hwfn, fp->rxq);

		for (tc = 0; tc < ha->num_tc; tc++) {
			struct qlnx_tx_queue *txq = fp->txq[tc];

			bzero(&qparams,
				sizeof(struct ecore_queue_start_common_params));
			bzero(&tx_ret_params,
				sizeof (struct ecore_txq_start_ret_params));

			qparams.queue_id = txq->index / cdev->num_hwfns ;
			qparams.vport_id = vport_id;
			qparams.stats_id = vport_id;
			qparams.p_sb = fp->sb_info;
			qparams.sb_idx = TX_PI(tc);

			rc = ecore_eth_tx_queue_start(p_hwfn,
				p_hwfn->hw_info.opaque_fid,
				&qparams, tc,
				/* bd_chain_phys_addr */
				ecore_chain_get_pbl_phys(&txq->tx_pbl),
				ecore_chain_get_page_cnt(&txq->tx_pbl),
				&tx_ret_params);

			if (rc) {
				QL_DPRINT1(ha, "Start TXQ #%d failed %d\n",
					txq->index, rc);
				return rc;
			}

			txq->doorbell_addr = tx_ret_params.p_doorbell;
			txq->handle = tx_ret_params.p_handle;

			txq->hw_cons_ptr =
				&fp->sb_info->sb_virt->pi_array[TX_PI(tc)];
			/* Pre-build the constant part of the Tx doorbell. */
			SET_FIELD(txq->tx_db.data.params,
				ETH_DB_DATA_DEST, DB_DEST_XCM);
			SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_AGG_CMD,
				DB_AGG_CMD_SET);
			SET_FIELD(txq->tx_db.data.params,
				ETH_DB_DATA_AGG_VAL_SEL,
				DQ_XCM_ETH_TX_BD_PROD_CMD);

			txq->tx_db.data.agg_flags = DQ_XCM_ETH_DQ_CF_CMD;
		}
	}

	/* Fill struct with RSS params */
	if (ha->num_rss > 1) {
		rss_params->update_rss_config = 1;
		rss_params->rss_enable = 1;
		rss_params->update_rss_capabilities = 1;
		rss_params->update_rss_ind_table = 1;
		rss_params->update_rss_key = 1;
		rss_params->rss_caps = ECORE_RSS_IPV4 | ECORE_RSS_IPV6 |
			ECORE_RSS_IPV4_TCP | ECORE_RSS_IPV6_TCP;
		rss_params->rss_table_size_log = 7; /* 2^7 = 128 */

		/* Indirection table spreads round-robin over the queues. */
		for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++) {
			fp = &ha->fp_array[(i % ha->num_rss)];
			rss_params->rss_ind_table[i] = fp->rxq->handle;
		}

		for (i = 0; i < ECORE_RSS_KEY_SIZE; i++)
			rss_params->rss_key[i] = (__le32)qlnx_hash_key[i];

	} else {
		memset(rss_params, 0, sizeof(*rss_params));
	}

	/* Prepare and send the vport enable */
	memset(&vport_update_params, 0, sizeof(vport_update_params));
	vport_update_params.vport_id = vport_id;
	vport_update_params.update_vport_active_tx_flg = 1;
	vport_update_params.vport_active_tx_flg = 1;
	vport_update_params.update_vport_active_rx_flg = 1;
	vport_update_params.vport_active_rx_flg = 1;
	vport_update_params.rss_params = rss_params;
	vport_update_params.update_inner_vlan_removal_flg = 1;
	vport_update_params.inner_vlan_removal_flg = 1;

	if (hw_lro_enable) {
		/* Configure hardware TPA (RSC) aggregation parameters. */
		memset(&tpa_params, 0, sizeof (struct ecore_sge_tpa_params));

		tpa_params.max_buffers_per_cqe = QLNX_TPA_MAX_AGG_BUFFERS;

		tpa_params.update_tpa_en_flg = 1;
		tpa_params.tpa_ipv4_en_flg = 1;
		tpa_params.tpa_ipv6_en_flg = 1;

		tpa_params.update_tpa_param_flg = 1;
		tpa_params.tpa_pkt_split_flg = 0;
		tpa_params.tpa_hdr_data_split_flg = 0;
		tpa_params.tpa_gro_consistent_flg = 0;
		tpa_params.tpa_max_aggs_num = ETH_TPA_MAX_AGGS_NUM;
		tpa_params.tpa_max_size = (uint16_t)(-1);
		tpa_params.tpa_min_size_to_start = if_getmtu(ifp) / 2;
		tpa_params.tpa_min_size_to_cont = if_getmtu(ifp) / 2;

		vport_update_params.sge_tpa_params = &tpa_params;
	}

	rc = qlnx_update_vport(cdev, &vport_update_params);
	if (rc) {
		QL_DPRINT1(ha, "Update V-PORT failed %d\n", rc);
		return rc;
	}

	return 0;
}
6687
6688 static int
qlnx_drain_txq(qlnx_host_t * ha,struct qlnx_fastpath * fp,struct qlnx_tx_queue * txq)6689 qlnx_drain_txq(qlnx_host_t *ha, struct qlnx_fastpath *fp,
6690 struct qlnx_tx_queue *txq)
6691 {
6692 uint16_t hw_bd_cons;
6693 uint16_t ecore_cons_idx;
6694
6695 QL_DPRINT2(ha, "enter\n");
6696
6697 hw_bd_cons = le16toh(*txq->hw_cons_ptr);
6698
6699 while (hw_bd_cons !=
6700 (ecore_cons_idx = ecore_chain_get_cons_idx(&txq->tx_pbl))) {
6701 mtx_lock(&fp->tx_mtx);
6702
6703 (void)qlnx_tx_int(ha, fp, txq);
6704
6705 mtx_unlock(&fp->tx_mtx);
6706
6707 qlnx_mdelay(__func__, 2);
6708
6709 hw_bd_cons = le16toh(*txq->hw_cons_ptr);
6710 }
6711
6712 QL_DPRINT2(ha, "[%d, %d]: done\n", fp->rss_id, txq->index);
6713
6714 return 0;
6715 }
6716
/*
 * qlnx_stop_queues - quiesce the data path: disable the vport, drain all
 * Tx queues, stop every Tx/Rx queue in reverse creation order, and
 * finally stop the vport on every hw-function.
 *
 * Returns 0 on success, or the first ecore error code encountered.
 */
static int
qlnx_stop_queues(qlnx_host_t *ha)
{
	struct qlnx_update_vport_params vport_update_params;
	struct ecore_dev *cdev;
	struct qlnx_fastpath *fp;
	int rc, tc, i;

	cdev = &ha->cdev;

	/* Disable the vport */

	memset(&vport_update_params, 0, sizeof(vport_update_params));

	vport_update_params.vport_id = 0;
	vport_update_params.update_vport_active_tx_flg = 1;
	vport_update_params.vport_active_tx_flg = 0;
	vport_update_params.update_vport_active_rx_flg = 1;
	vport_update_params.vport_active_rx_flg = 0;
	/* Reuse the saved RSS params but mark RSS disabled for this update. */
	vport_update_params.rss_params = &ha->rss_params;
	vport_update_params.rss_params->update_rss_config = 0;
	vport_update_params.rss_params->rss_enable = 0;
	vport_update_params.update_inner_vlan_removal_flg = 0;
	vport_update_params.inner_vlan_removal_flg = 0;

	QL_DPRINT1(ha, "Update vport ID= %d\n", vport_update_params.vport_id);

	rc = qlnx_update_vport(cdev, &vport_update_params);
	if (rc) {
		QL_DPRINT1(ha, "Failed to update vport\n");
		return rc;
	}

	/* Flush Tx queues. If needed, request drain from MCP */
	for_each_rss(i) {
		fp = &ha->fp_array[i];

		for (tc = 0; tc < ha->num_tc; tc++) {
			struct qlnx_tx_queue *txq = fp->txq[tc];

			rc = qlnx_drain_txq(ha, fp, txq);
			if (rc)
				return rc;
		}
	}

	/* Stop all Queues in reverse order*/
	for (i = ha->num_rss - 1; i >= 0; i--) {
		/* RSS queues alternate between engines on multi-hwfn parts. */
		struct ecore_hwfn *p_hwfn = &cdev->hwfns[(i % cdev->num_hwfns)];

		fp = &ha->fp_array[i];

		/* Stop the Tx Queue(s)*/
		for (tc = 0; tc < ha->num_tc; tc++) {
			int tx_queue_id __unused;

			tx_queue_id = tc * ha->num_rss + i;
			rc = ecore_eth_tx_queue_stop(p_hwfn,
					fp->txq[tc]->handle);

			if (rc) {
				QL_DPRINT1(ha, "Failed to stop TXQ #%d\n",
					   tx_queue_id);
				return rc;
			}
		}

		/* Stop the Rx Queue*/
		rc = ecore_eth_rx_queue_stop(p_hwfn, fp->rxq->handle, false,
				false);
		if (rc) {
			QL_DPRINT1(ha, "Failed to stop RXQ #%d\n", i);
			return rc;
		}
	}

	/* Stop the vport */
	for_each_hwfn(cdev, i) {
		struct ecore_hwfn *p_hwfn = &cdev->hwfns[i];

		rc = ecore_sp_vport_stop(p_hwfn, p_hwfn->hw_info.opaque_fid, 0);

		if (rc) {
			QL_DPRINT1(ha, "Failed to stop VPORT\n");
			return rc;
		}
	}

	/* rc is 0 here: every failing path above returned early. */
	return rc;
}
6807
6808 static int
qlnx_set_ucast_rx_mac(qlnx_host_t * ha,enum ecore_filter_opcode opcode,unsigned char mac[ETH_ALEN])6809 qlnx_set_ucast_rx_mac(qlnx_host_t *ha,
6810 enum ecore_filter_opcode opcode,
6811 unsigned char mac[ETH_ALEN])
6812 {
6813 struct ecore_filter_ucast ucast;
6814 struct ecore_dev *cdev;
6815 int rc;
6816
6817 cdev = &ha->cdev;
6818
6819 bzero(&ucast, sizeof(struct ecore_filter_ucast));
6820
6821 ucast.opcode = opcode;
6822 ucast.type = ECORE_FILTER_MAC;
6823 ucast.is_rx_filter = 1;
6824 ucast.vport_to_add_to = 0;
6825 memcpy(&ucast.mac[0], mac, ETH_ALEN);
6826
6827 rc = ecore_filter_ucast_cmd(cdev, &ucast, ECORE_SPQ_MODE_CB, NULL);
6828
6829 return (rc);
6830 }
6831
6832 static int
qlnx_remove_all_ucast_mac(qlnx_host_t * ha)6833 qlnx_remove_all_ucast_mac(qlnx_host_t *ha)
6834 {
6835 struct ecore_filter_ucast ucast;
6836 struct ecore_dev *cdev;
6837 int rc;
6838
6839 bzero(&ucast, sizeof(struct ecore_filter_ucast));
6840
6841 ucast.opcode = ECORE_FILTER_REPLACE;
6842 ucast.type = ECORE_FILTER_MAC;
6843 ucast.is_rx_filter = 1;
6844
6845 cdev = &ha->cdev;
6846
6847 rc = ecore_filter_ucast_cmd(cdev, &ucast, ECORE_SPQ_MODE_CB, NULL);
6848
6849 return (rc);
6850 }
6851
6852 static int
qlnx_remove_all_mcast_mac(qlnx_host_t * ha)6853 qlnx_remove_all_mcast_mac(qlnx_host_t *ha)
6854 {
6855 struct ecore_filter_mcast mcast;
6856 struct ecore_dev *cdev;
6857 int rc;
6858
6859 cdev = &ha->cdev;
6860
6861 bzero(&mcast, sizeof(struct ecore_filter_mcast));
6862 mcast.opcode = ECORE_FILTER_FLUSH;
6863
6864 rc = ecore_filter_mcast_cmd(cdev, &mcast, ECORE_SPQ_MODE_CB, NULL);
6865 if (rc == 0)
6866 bzero(ha->ecore_mcast_bins, sizeof(ha->ecore_mcast_bins));
6867
6868 return (rc);
6869 }
6870
6871 static int
qlnx_clean_filters(qlnx_host_t * ha)6872 qlnx_clean_filters(qlnx_host_t *ha)
6873 {
6874 int rc = 0;
6875
6876 /* Reset rx filter */
6877 ha->filter = 0;
6878
6879 /* Remove all unicast macs */
6880 rc = qlnx_remove_all_ucast_mac(ha);
6881 if (rc)
6882 return rc;
6883
6884 /* Remove all multicast macs */
6885 rc = qlnx_remove_all_mcast_mac(ha);
6886 if (rc)
6887 return rc;
6888
6889 rc = qlnx_set_ucast_rx_mac(ha, ECORE_FILTER_FLUSH, ha->primary_mac);
6890
6891 return (rc);
6892 }
6893
6894 static int
qlnx_set_rx_accept_filter(qlnx_host_t * ha,uint8_t filter)6895 qlnx_set_rx_accept_filter(qlnx_host_t *ha, uint8_t filter)
6896 {
6897 struct ecore_filter_accept_flags accept;
6898 int rc = 0;
6899 struct ecore_dev *cdev;
6900
6901 cdev = &ha->cdev;
6902
6903 bzero(&accept, sizeof(struct ecore_filter_accept_flags));
6904
6905 accept.update_rx_mode_config = 1;
6906 accept.rx_accept_filter = filter;
6907
6908 accept.update_tx_mode_config = 1;
6909 accept.tx_accept_filter = ECORE_ACCEPT_UCAST_MATCHED |
6910 ECORE_ACCEPT_MCAST_MATCHED | ECORE_ACCEPT_BCAST;
6911
6912 rc = ecore_filter_accept_cmd(cdev, 0, accept, false, false,
6913 ECORE_SPQ_MODE_CB, NULL);
6914
6915 return (rc);
6916 }
6917
6918 static int
qlnx_set_rx_mode(qlnx_host_t * ha)6919 qlnx_set_rx_mode(qlnx_host_t *ha)
6920 {
6921 int rc = 0;
6922 const if_t ifp = ha->ifp;
6923
6924 rc = qlnx_set_ucast_rx_mac(ha, ECORE_FILTER_REPLACE, if_getlladdr(ifp));
6925 if (rc)
6926 return rc;
6927
6928 rc = qlnx_set_multi(ha);
6929 if (rc)
6930 return rc;
6931
6932 if (qlnx_vf_device(ha) == 0)
6933 rc = _qlnx_set_promisc_allmulti(ha, true, true);
6934 else
6935 rc = qlnx_set_promisc_allmulti(ha, if_getflags(ifp));
6936
6937 return (rc);
6938 }
6939
6940 static int
qlnx_set_link(qlnx_host_t * ha,bool link_up)6941 qlnx_set_link(qlnx_host_t *ha, bool link_up)
6942 {
6943 int i, rc = 0;
6944 struct ecore_dev *cdev;
6945 struct ecore_hwfn *hwfn;
6946 struct ecore_ptt *ptt;
6947
6948 if (qlnx_vf_device(ha) == 0)
6949 return (0);
6950
6951 cdev = &ha->cdev;
6952
6953 for_each_hwfn(cdev, i) {
6954 hwfn = &cdev->hwfns[i];
6955
6956 ptt = ecore_ptt_acquire(hwfn);
6957 if (!ptt)
6958 return -EBUSY;
6959
6960 rc = ecore_mcp_set_link(hwfn, ptt, link_up);
6961
6962 ecore_ptt_release(hwfn, ptt);
6963
6964 if (rc)
6965 return rc;
6966 }
6967 return (rc);
6968 }
6969
6970 static uint64_t
qlnx_get_counter(if_t ifp,ift_counter cnt)6971 qlnx_get_counter(if_t ifp, ift_counter cnt)
6972 {
6973 qlnx_host_t *ha;
6974 uint64_t count;
6975
6976 ha = (qlnx_host_t *)if_getsoftc(ifp);
6977
6978 switch (cnt) {
6979 case IFCOUNTER_IPACKETS:
6980 count = ha->hw_stats.common.rx_ucast_pkts +
6981 ha->hw_stats.common.rx_mcast_pkts +
6982 ha->hw_stats.common.rx_bcast_pkts;
6983 break;
6984
6985 case IFCOUNTER_IERRORS:
6986 count = ha->hw_stats.common.rx_crc_errors +
6987 ha->hw_stats.common.rx_align_errors +
6988 ha->hw_stats.common.rx_oversize_packets +
6989 ha->hw_stats.common.rx_undersize_packets;
6990 break;
6991
6992 case IFCOUNTER_OPACKETS:
6993 count = ha->hw_stats.common.tx_ucast_pkts +
6994 ha->hw_stats.common.tx_mcast_pkts +
6995 ha->hw_stats.common.tx_bcast_pkts;
6996 break;
6997
6998 case IFCOUNTER_OERRORS:
6999 count = ha->hw_stats.common.tx_err_drop_pkts;
7000 break;
7001
7002 case IFCOUNTER_COLLISIONS:
7003 return (0);
7004
7005 case IFCOUNTER_IBYTES:
7006 count = ha->hw_stats.common.rx_ucast_bytes +
7007 ha->hw_stats.common.rx_mcast_bytes +
7008 ha->hw_stats.common.rx_bcast_bytes;
7009 break;
7010
7011 case IFCOUNTER_OBYTES:
7012 count = ha->hw_stats.common.tx_ucast_bytes +
7013 ha->hw_stats.common.tx_mcast_bytes +
7014 ha->hw_stats.common.tx_bcast_bytes;
7015 break;
7016
7017 case IFCOUNTER_IMCASTS:
7018 count = ha->hw_stats.common.rx_mcast_bytes;
7019 break;
7020
7021 case IFCOUNTER_OMCASTS:
7022 count = ha->hw_stats.common.tx_mcast_bytes;
7023 break;
7024
7025 case IFCOUNTER_IQDROPS:
7026 case IFCOUNTER_OQDROPS:
7027 case IFCOUNTER_NOPROTO:
7028
7029 default:
7030 return (if_get_counter_default(ifp, cnt));
7031 }
7032 return (count);
7033 }
7034
7035 static void
qlnx_timer(void * arg)7036 qlnx_timer(void *arg)
7037 {
7038 qlnx_host_t *ha;
7039
7040 ha = (qlnx_host_t *)arg;
7041
7042 if (ha->error_recovery) {
7043 ha->error_recovery = 0;
7044 taskqueue_enqueue(ha->err_taskqueue, &ha->err_task);
7045 return;
7046 }
7047
7048 ecore_get_vport_stats(&ha->cdev, &ha->hw_stats);
7049
7050 if (ha->storm_stats_gather)
7051 qlnx_sample_storm_stats(ha);
7052
7053 callout_reset(&ha->qlnx_callout, hz, qlnx_timer, ha);
7054
7055 return;
7056 }
7057
/*
 * qlnx_load - bring the interface up: allocate fastpath resources, hook
 * up the per-RSS interrupts, start the vport/queues, program Rx filters
 * and request link.  On success the state becomes QLNX_STATE_OPEN and
 * the 1 Hz stats callout is armed.
 *
 * Returns 0 on success; on failure the goto-unwind labels below release
 * whatever was acquired up to the failing step.
 */
static int
qlnx_load(qlnx_host_t *ha)
{
	int i;
	int rc = 0;
	device_t dev;

	dev = ha->pci_dev;

	QL_DPRINT2(ha, "enter\n");

	rc = qlnx_alloc_mem_arrays(ha);
	if (rc)
		goto qlnx_load_exit0;

	qlnx_init_fp(ha);

	rc = qlnx_alloc_mem_load(ha);
	if (rc)
		goto qlnx_load_exit1;

	QL_DPRINT2(ha, "Allocated %d RSS queues on %d TC/s\n",
		ha->num_rss, ha->num_tc);

	for (i = 0; i < ha->num_rss; i++) {
		if ((rc = bus_setup_intr(dev, ha->irq_vec[i].irq,
			(INTR_TYPE_NET | INTR_MPSAFE),
			NULL, qlnx_fp_isr, &ha->irq_vec[i],
			&ha->irq_vec[i].handle))) {
			QL_DPRINT1(ha, "could not setup interrupt\n");
			goto qlnx_load_exit2;
		}

		QL_DPRINT2(ha, "rss_id = %d irq_rid %d \
			irq %p handle %p\n", i,
			ha->irq_vec[i].irq_rid,
			ha->irq_vec[i].irq, ha->irq_vec[i].handle);

		/* Spread the fastpath interrupts across CPUs. */
		bus_bind_intr(dev, ha->irq_vec[i].irq, (i % mp_ncpus));
	}

	rc = qlnx_start_queues(ha);
	if (rc)
		goto qlnx_load_exit2;

	QL_DPRINT2(ha, "Start VPORT, RXQ and TXQ succeeded\n");

	/* Add primary mac and set Rx filters */
	rc = qlnx_set_rx_mode(ha);
	if (rc)
		goto qlnx_load_exit2;

	/* Ask for link-up using current configuration */
	qlnx_set_link(ha, true);

	if (qlnx_vf_device(ha) == 0)
		qlnx_link_update(&ha->cdev.hwfns[0]);

	ha->state = QLNX_STATE_OPEN;

	/* Start statistics from a clean slate for this up/down cycle. */
	bzero(&ha->hw_stats, sizeof(struct ecore_eth_stats));

	if (ha->flags.callout_init)
		callout_reset(&ha->qlnx_callout, hz, qlnx_timer, ha);

	goto qlnx_load_exit0;

qlnx_load_exit2:
	qlnx_free_mem_load(ha);

qlnx_load_exit1:
	ha->num_rss = 0;

qlnx_load_exit0:
	QL_DPRINT2(ha, "exit [%d]\n", rc);
	return rc;
}
7135
7136 static void
qlnx_drain_soft_lro(qlnx_host_t * ha)7137 qlnx_drain_soft_lro(qlnx_host_t *ha)
7138 {
7139 #ifdef QLNX_SOFT_LRO
7140
7141 if_t ifp;
7142 int i;
7143
7144 ifp = ha->ifp;
7145
7146 if (if_getcapenable(ifp) & IFCAP_LRO) {
7147 for (i = 0; i < ha->num_rss; i++) {
7148 struct qlnx_fastpath *fp = &ha->fp_array[i];
7149 struct lro_ctrl *lro;
7150
7151 lro = &fp->rxq->lro;
7152
7153 tcp_lro_flush_all(lro);
7154 }
7155 }
7156
7157 #endif /* #ifdef QLNX_SOFT_LRO */
7158
7159 return;
7160 }
7161
/*
 * qlnx_unload - bring the interface down: drop link, remove all Rx
 * filters, stop the vport/queues and fastpath, tear down interrupts,
 * and release load-time memory.  Leaves the device in
 * QLNX_STATE_CLOSED.
 */
static void
qlnx_unload(qlnx_host_t *ha)
{
	struct ecore_dev *cdev;
	device_t dev;
	int i;

	cdev = &ha->cdev;
	dev = ha->pci_dev;

	QL_DPRINT2(ha, "enter\n");
	QL_DPRINT1(ha, " QLNX STATE = %d\n",ha->state);

	if (ha->state == QLNX_STATE_OPEN) {
		/* Teardown mirrors qlnx_load() in reverse order. */
		qlnx_set_link(ha, false);
		qlnx_clean_filters(ha);
		qlnx_stop_queues(ha);
		ecore_hw_stop_fastpath(cdev);

		for (i = 0; i < ha->num_rss; i++) {
			if (ha->irq_vec[i].handle) {
				(void)bus_teardown_intr(dev,
					ha->irq_vec[i].irq,
					ha->irq_vec[i].handle);
				ha->irq_vec[i].handle = NULL;
			}
		}

		qlnx_drain_fp_taskqueues(ha);
		qlnx_drain_soft_lro(ha);
		qlnx_free_mem_load(ha);
	}

	if (ha->flags.callout_init)
		callout_drain(&ha->qlnx_callout);

	/* Give in-flight hardware/firmware activity time to settle. */
	qlnx_mdelay(__func__, 1000);

	ha->state = QLNX_STATE_CLOSED;

	QL_DPRINT2(ha, "exit\n");
	return;
}
7205
7206 static int
qlnx_grc_dumpsize(qlnx_host_t * ha,uint32_t * num_dwords,int hwfn_index)7207 qlnx_grc_dumpsize(qlnx_host_t *ha, uint32_t *num_dwords, int hwfn_index)
7208 {
7209 int rval = -1;
7210 struct ecore_hwfn *p_hwfn;
7211 struct ecore_ptt *p_ptt;
7212
7213 ecore_dbg_set_app_ver(ecore_dbg_get_fw_func_ver());
7214
7215 p_hwfn = &ha->cdev.hwfns[hwfn_index];
7216 p_ptt = ecore_ptt_acquire(p_hwfn);
7217
7218 if (!p_ptt) {
7219 QL_DPRINT1(ha, "ecore_ptt_acquire failed\n");
7220 return (rval);
7221 }
7222
7223 rval = ecore_dbg_grc_get_dump_buf_size(p_hwfn, p_ptt, num_dwords);
7224
7225 if (rval == DBG_STATUS_OK)
7226 rval = 0;
7227 else {
7228 QL_DPRINT1(ha, "ecore_dbg_grc_get_dump_buf_size failed"
7229 "[0x%x]\n", rval);
7230 }
7231
7232 ecore_ptt_release(p_hwfn, p_ptt);
7233
7234 return (rval);
7235 }
7236
7237 static int
qlnx_idle_chk_size(qlnx_host_t * ha,uint32_t * num_dwords,int hwfn_index)7238 qlnx_idle_chk_size(qlnx_host_t *ha, uint32_t *num_dwords, int hwfn_index)
7239 {
7240 int rval = -1;
7241 struct ecore_hwfn *p_hwfn;
7242 struct ecore_ptt *p_ptt;
7243
7244 ecore_dbg_set_app_ver(ecore_dbg_get_fw_func_ver());
7245
7246 p_hwfn = &ha->cdev.hwfns[hwfn_index];
7247 p_ptt = ecore_ptt_acquire(p_hwfn);
7248
7249 if (!p_ptt) {
7250 QL_DPRINT1(ha, "ecore_ptt_acquire failed\n");
7251 return (rval);
7252 }
7253
7254 rval = ecore_dbg_idle_chk_get_dump_buf_size(p_hwfn, p_ptt, num_dwords);
7255
7256 if (rval == DBG_STATUS_OK)
7257 rval = 0;
7258 else {
7259 QL_DPRINT1(ha, "ecore_dbg_idle_chk_get_dump_buf_size failed"
7260 " [0x%x]\n", rval);
7261 }
7262
7263 ecore_ptt_release(p_hwfn, p_ptt);
7264
7265 return (rval);
7266 }
7267
7268 static void
qlnx_sample_storm_stats(qlnx_host_t * ha)7269 qlnx_sample_storm_stats(qlnx_host_t *ha)
7270 {
7271 int i, index;
7272 struct ecore_dev *cdev;
7273 qlnx_storm_stats_t *s_stats;
7274 uint32_t reg;
7275 struct ecore_ptt *p_ptt;
7276 struct ecore_hwfn *hwfn;
7277
7278 if (ha->storm_stats_index >= QLNX_STORM_STATS_SAMPLES_PER_HWFN) {
7279 ha->storm_stats_gather = 0;
7280 return;
7281 }
7282
7283 cdev = &ha->cdev;
7284
7285 for_each_hwfn(cdev, i) {
7286 hwfn = &cdev->hwfns[i];
7287
7288 p_ptt = ecore_ptt_acquire(hwfn);
7289 if (!p_ptt)
7290 return;
7291
7292 index = ha->storm_stats_index +
7293 (i * QLNX_STORM_STATS_SAMPLES_PER_HWFN);
7294
7295 s_stats = &ha->storm_stats[index];
7296
7297 /* XSTORM */
7298 reg = XSEM_REG_FAST_MEMORY +
7299 SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2;
7300 s_stats->xstorm_active_cycles = ecore_rd(hwfn, p_ptt, reg);
7301
7302 reg = XSEM_REG_FAST_MEMORY +
7303 SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2;
7304 s_stats->xstorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg);
7305
7306 reg = XSEM_REG_FAST_MEMORY +
7307 SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2;
7308 s_stats->xstorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg);
7309
7310 reg = XSEM_REG_FAST_MEMORY +
7311 SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2;
7312 s_stats->xstorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg);
7313
7314 /* YSTORM */
7315 reg = YSEM_REG_FAST_MEMORY +
7316 SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2;
7317 s_stats->ystorm_active_cycles = ecore_rd(hwfn, p_ptt, reg);
7318
7319 reg = YSEM_REG_FAST_MEMORY +
7320 SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2;
7321 s_stats->ystorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg);
7322
7323 reg = YSEM_REG_FAST_MEMORY +
7324 SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2;
7325 s_stats->ystorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg);
7326
7327 reg = YSEM_REG_FAST_MEMORY +
7328 SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2;
7329 s_stats->ystorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg);
7330
7331 /* PSTORM */
7332 reg = PSEM_REG_FAST_MEMORY +
7333 SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2;
7334 s_stats->pstorm_active_cycles = ecore_rd(hwfn, p_ptt, reg);
7335
7336 reg = PSEM_REG_FAST_MEMORY +
7337 SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2;
7338 s_stats->pstorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg);
7339
7340 reg = PSEM_REG_FAST_MEMORY +
7341 SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2;
7342 s_stats->pstorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg);
7343
7344 reg = PSEM_REG_FAST_MEMORY +
7345 SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2;
7346 s_stats->pstorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg);
7347
7348 /* TSTORM */
7349 reg = TSEM_REG_FAST_MEMORY +
7350 SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2;
7351 s_stats->tstorm_active_cycles = ecore_rd(hwfn, p_ptt, reg);
7352
7353 reg = TSEM_REG_FAST_MEMORY +
7354 SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2;
7355 s_stats->tstorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg);
7356
7357 reg = TSEM_REG_FAST_MEMORY +
7358 SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2;
7359 s_stats->tstorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg);
7360
7361 reg = TSEM_REG_FAST_MEMORY +
7362 SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2;
7363 s_stats->tstorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg);
7364
7365 /* MSTORM */
7366 reg = MSEM_REG_FAST_MEMORY +
7367 SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2;
7368 s_stats->mstorm_active_cycles = ecore_rd(hwfn, p_ptt, reg);
7369
7370 reg = MSEM_REG_FAST_MEMORY +
7371 SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2;
7372 s_stats->mstorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg);
7373
7374 reg = MSEM_REG_FAST_MEMORY +
7375 SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2;
7376 s_stats->mstorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg);
7377
7378 reg = MSEM_REG_FAST_MEMORY +
7379 SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2;
7380 s_stats->mstorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg);
7381
7382 /* USTORM */
7383 reg = USEM_REG_FAST_MEMORY +
7384 SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2;
7385 s_stats->ustorm_active_cycles = ecore_rd(hwfn, p_ptt, reg);
7386
7387 reg = USEM_REG_FAST_MEMORY +
7388 SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2;
7389 s_stats->ustorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg);
7390
7391 reg = USEM_REG_FAST_MEMORY +
7392 SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2;
7393 s_stats->ustorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg);
7394
7395 reg = USEM_REG_FAST_MEMORY +
7396 SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2;
7397 s_stats->ustorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg);
7398
7399 ecore_ptt_release(hwfn, p_ptt);
7400 }
7401
7402 ha->storm_stats_index++;
7403
7404 return;
7405 }
7406
7407 /*
7408 * Name: qlnx_dump_buf8
7409 * Function: dumps a buffer as bytes
7410 */
7411 static void
qlnx_dump_buf8(qlnx_host_t * ha,const char * msg,void * dbuf,uint32_t len)7412 qlnx_dump_buf8(qlnx_host_t *ha, const char *msg, void *dbuf, uint32_t len)
7413 {
7414 device_t dev;
7415 uint32_t i = 0;
7416 uint8_t *buf;
7417
7418 dev = ha->pci_dev;
7419 buf = dbuf;
7420
7421 device_printf(dev, "%s: %s 0x%x dump start\n", __func__, msg, len);
7422
7423 while (len >= 16) {
7424 device_printf(dev,"0x%08x:"
7425 " %02x %02x %02x %02x %02x %02x %02x %02x"
7426 " %02x %02x %02x %02x %02x %02x %02x %02x\n", i,
7427 buf[0], buf[1], buf[2], buf[3],
7428 buf[4], buf[5], buf[6], buf[7],
7429 buf[8], buf[9], buf[10], buf[11],
7430 buf[12], buf[13], buf[14], buf[15]);
7431 i += 16;
7432 len -= 16;
7433 buf += 16;
7434 }
7435 switch (len) {
7436 case 1:
7437 device_printf(dev,"0x%08x: %02x\n", i, buf[0]);
7438 break;
7439 case 2:
7440 device_printf(dev,"0x%08x: %02x %02x\n", i, buf[0], buf[1]);
7441 break;
7442 case 3:
7443 device_printf(dev,"0x%08x: %02x %02x %02x\n",
7444 i, buf[0], buf[1], buf[2]);
7445 break;
7446 case 4:
7447 device_printf(dev,"0x%08x: %02x %02x %02x %02x\n", i,
7448 buf[0], buf[1], buf[2], buf[3]);
7449 break;
7450 case 5:
7451 device_printf(dev,"0x%08x:"
7452 " %02x %02x %02x %02x %02x\n", i,
7453 buf[0], buf[1], buf[2], buf[3], buf[4]);
7454 break;
7455 case 6:
7456 device_printf(dev,"0x%08x:"
7457 " %02x %02x %02x %02x %02x %02x\n", i,
7458 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5]);
7459 break;
7460 case 7:
7461 device_printf(dev,"0x%08x:"
7462 " %02x %02x %02x %02x %02x %02x %02x\n", i,
7463 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6]);
7464 break;
7465 case 8:
7466 device_printf(dev,"0x%08x:"
7467 " %02x %02x %02x %02x %02x %02x %02x %02x\n", i,
7468 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
7469 buf[7]);
7470 break;
7471 case 9:
7472 device_printf(dev,"0x%08x:"
7473 " %02x %02x %02x %02x %02x %02x %02x %02x"
7474 " %02x\n", i,
7475 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
7476 buf[7], buf[8]);
7477 break;
7478 case 10:
7479 device_printf(dev,"0x%08x:"
7480 " %02x %02x %02x %02x %02x %02x %02x %02x"
7481 " %02x %02x\n", i,
7482 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
7483 buf[7], buf[8], buf[9]);
7484 break;
7485 case 11:
7486 device_printf(dev,"0x%08x:"
7487 " %02x %02x %02x %02x %02x %02x %02x %02x"
7488 " %02x %02x %02x\n", i,
7489 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
7490 buf[7], buf[8], buf[9], buf[10]);
7491 break;
7492 case 12:
7493 device_printf(dev,"0x%08x:"
7494 " %02x %02x %02x %02x %02x %02x %02x %02x"
7495 " %02x %02x %02x %02x\n", i,
7496 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
7497 buf[7], buf[8], buf[9], buf[10], buf[11]);
7498 break;
7499 case 13:
7500 device_printf(dev,"0x%08x:"
7501 " %02x %02x %02x %02x %02x %02x %02x %02x"
7502 " %02x %02x %02x %02x %02x\n", i,
7503 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
7504 buf[7], buf[8], buf[9], buf[10], buf[11], buf[12]);
7505 break;
7506 case 14:
7507 device_printf(dev,"0x%08x:"
7508 " %02x %02x %02x %02x %02x %02x %02x %02x"
7509 " %02x %02x %02x %02x %02x %02x\n", i,
7510 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
7511 buf[7], buf[8], buf[9], buf[10], buf[11], buf[12],
7512 buf[13]);
7513 break;
7514 case 15:
7515 device_printf(dev,"0x%08x:"
7516 " %02x %02x %02x %02x %02x %02x %02x %02x"
7517 " %02x %02x %02x %02x %02x %02x %02x\n", i,
7518 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
7519 buf[7], buf[8], buf[9], buf[10], buf[11], buf[12],
7520 buf[13], buf[14]);
7521 break;
7522 default:
7523 break;
7524 }
7525
7526 device_printf(dev, "%s: %s dump end\n", __func__, msg);
7527
7528 return;
7529 }
7530
7531 #ifdef CONFIG_ECORE_SRIOV
7532
7533 static void
__qlnx_osal_iov_vf_cleanup(struct ecore_hwfn * p_hwfn,uint8_t rel_vf_id)7534 __qlnx_osal_iov_vf_cleanup(struct ecore_hwfn *p_hwfn, uint8_t rel_vf_id)
7535 {
7536 struct ecore_public_vf_info *vf_info;
7537
7538 vf_info = ecore_iov_get_public_vf_info(p_hwfn, rel_vf_id, false);
7539
7540 if (!vf_info)
7541 return;
7542
7543 /* Clear the VF mac */
7544 memset(vf_info->forced_mac, 0, ETH_ALEN);
7545
7546 vf_info->forced_vlan = 0;
7547
7548 return;
7549 }
7550
/* ecore OSAL hook: forward to the typed implementation above. */
void
qlnx_osal_iov_vf_cleanup(void *p_hwfn, uint8_t relative_vf_id)
{
	__qlnx_osal_iov_vf_cleanup(p_hwfn, relative_vf_id);
}
7557
7558 static int
__qlnx_iov_chk_ucast(struct ecore_hwfn * p_hwfn,int vfid,struct ecore_filter_ucast * params)7559 __qlnx_iov_chk_ucast(struct ecore_hwfn *p_hwfn, int vfid,
7560 struct ecore_filter_ucast *params)
7561 {
7562 struct ecore_public_vf_info *vf;
7563
7564 if (!ecore_iov_vf_has_vport_instance(p_hwfn, vfid)) {
7565 QL_DPRINT1(((qlnx_host_t *)p_hwfn->p_dev),
7566 "VF[%d] vport not initialized\n", vfid);
7567 return ECORE_INVAL;
7568 }
7569
7570 vf = ecore_iov_get_public_vf_info(p_hwfn, vfid, true);
7571 if (!vf)
7572 return -EINVAL;
7573
7574 /* No real decision to make; Store the configured MAC */
7575 if (params->type == ECORE_FILTER_MAC ||
7576 params->type == ECORE_FILTER_MAC_VLAN)
7577 memcpy(params->mac, vf->forced_mac, ETH_ALEN);
7578
7579 return 0;
7580 }
7581
/* ecore OSAL hook: forward to the typed implementation above. */
int
qlnx_iov_chk_ucast(void *p_hwfn, int vfid, void *params)
{
	return (__qlnx_iov_chk_ucast(p_hwfn, vfid, params));
}
7587
/*
 * Validate a VF-requested vport update.  The PF does not allow VFs to
 * alter accept-mode parameters; it reports success regardless and keeps
 * the real configuration under PF control.
 *
 * Returns ECORE_INVAL if the VF has no vport instance, 0 otherwise.
 *
 * NOTE(review): both branches below currently return 0; the TLV test
 * only documents intent.  Confirm whether accept-param filtering is
 * still pending implementation.
 */
static int
__qlnx_iov_update_vport(struct ecore_hwfn *hwfn, uint8_t vfid,
	struct ecore_sp_vport_update_params *params, uint16_t * tlvs)
{
	if (!ecore_iov_vf_has_vport_instance(hwfn, vfid)) {
		QL_DPRINT1(((qlnx_host_t *)hwfn->p_dev),
		    "VF[%d] vport not initialized\n", vfid);
		return ECORE_INVAL;
	}

	/* Untrusted VFs can't even be trusted to know that fact.
	 * Simply indicate everything is configured fine, and trace
	 * configuration 'behind their back'.
	 */
	if (!(*tlvs & BIT(ECORE_IOV_VP_UPDATE_ACCEPT_PARAM)))
		return 0;

	return 0;

}
/* ecore OSAL hook: forward to the typed implementation above. */
int
qlnx_iov_update_vport(void *hwfn, uint8_t vfid, void *params, uint16_t *tlvs)
{
	return (__qlnx_iov_update_vport(hwfn, vfid, params, tlvs));
}
7613
7614 static int
qlnx_find_hwfn_index(struct ecore_hwfn * p_hwfn)7615 qlnx_find_hwfn_index(struct ecore_hwfn *p_hwfn)
7616 {
7617 int i;
7618 struct ecore_dev *cdev;
7619
7620 cdev = p_hwfn->p_dev;
7621
7622 for (i = 0; i < cdev->num_hwfns; i++) {
7623 if (&cdev->hwfns[i] == p_hwfn)
7624 break;
7625 }
7626
7627 if (i >= cdev->num_hwfns)
7628 return (-1);
7629
7630 return (i);
7631 }
7632
7633 static int
__qlnx_pf_vf_msg(struct ecore_hwfn * p_hwfn,uint16_t rel_vf_id)7634 __qlnx_pf_vf_msg(struct ecore_hwfn *p_hwfn, uint16_t rel_vf_id)
7635 {
7636 qlnx_host_t *ha = (qlnx_host_t *)p_hwfn->p_dev;
7637 int i;
7638
7639 QL_DPRINT2(ha, "ha = %p cdev = %p p_hwfn = %p rel_vf_id = %d\n",
7640 ha, p_hwfn->p_dev, p_hwfn, rel_vf_id);
7641
7642 if ((i = qlnx_find_hwfn_index(p_hwfn)) == -1)
7643 return (-1);
7644
7645 if (ha->sriov_task[i].pf_taskqueue != NULL) {
7646 atomic_testandset_32(&ha->sriov_task[i].flags,
7647 QLNX_SRIOV_TASK_FLAGS_VF_PF_MSG);
7648
7649 taskqueue_enqueue(ha->sriov_task[i].pf_taskqueue,
7650 &ha->sriov_task[i].pf_task);
7651 }
7652
7653 return (ECORE_SUCCESS);
7654 }
7655
/* ecore OSAL hook: forward to the typed implementation above. */
int
qlnx_pf_vf_msg(void *p_hwfn, uint16_t relative_vf_id)
{
	return (__qlnx_pf_vf_msg(p_hwfn, relative_vf_id));
}
7661
7662 static void
__qlnx_vf_flr_update(struct ecore_hwfn * p_hwfn)7663 __qlnx_vf_flr_update(struct ecore_hwfn *p_hwfn)
7664 {
7665 qlnx_host_t *ha = (qlnx_host_t *)p_hwfn->p_dev;
7666 int i;
7667
7668 if (!ha->sriov_initialized)
7669 return;
7670
7671 QL_DPRINT2(ha, "ha = %p cdev = %p p_hwfn = %p \n",
7672 ha, p_hwfn->p_dev, p_hwfn);
7673
7674 if ((i = qlnx_find_hwfn_index(p_hwfn)) == -1)
7675 return;
7676
7677 if (ha->sriov_task[i].pf_taskqueue != NULL) {
7678 atomic_testandset_32(&ha->sriov_task[i].flags,
7679 QLNX_SRIOV_TASK_FLAGS_VF_FLR_UPDATE);
7680
7681 taskqueue_enqueue(ha->sriov_task[i].pf_taskqueue,
7682 &ha->sriov_task[i].pf_task);
7683 }
7684
7685 return;
7686 }
7687
/* ecore OSAL hook: forward to the typed implementation above. */
void
qlnx_vf_flr_update(void *p_hwfn)
{
	__qlnx_vf_flr_update(p_hwfn);
}
7695
7696 #ifndef QLNX_VF
7697
7698 static void
qlnx_vf_bulleting_update(struct ecore_hwfn * p_hwfn)7699 qlnx_vf_bulleting_update(struct ecore_hwfn *p_hwfn)
7700 {
7701 qlnx_host_t *ha = (qlnx_host_t *)p_hwfn->p_dev;
7702 int i;
7703
7704 QL_DPRINT2(ha, "ha = %p cdev = %p p_hwfn = %p \n",
7705 ha, p_hwfn->p_dev, p_hwfn);
7706
7707 if ((i = qlnx_find_hwfn_index(p_hwfn)) == -1)
7708 return;
7709
7710 QL_DPRINT2(ha, "ha = %p cdev = %p p_hwfn = %p i = %d\n",
7711 ha, p_hwfn->p_dev, p_hwfn, i);
7712
7713 if (ha->sriov_task[i].pf_taskqueue != NULL) {
7714 atomic_testandset_32(&ha->sriov_task[i].flags,
7715 QLNX_SRIOV_TASK_FLAGS_BULLETIN_UPDATE);
7716
7717 taskqueue_enqueue(ha->sriov_task[i].pf_taskqueue,
7718 &ha->sriov_task[i].pf_task);
7719 }
7720 }
7721
7722 static void
qlnx_initialize_sriov(qlnx_host_t * ha)7723 qlnx_initialize_sriov(qlnx_host_t *ha)
7724 {
7725 device_t dev;
7726 nvlist_t *pf_schema, *vf_schema;
7727 int iov_error;
7728
7729 dev = ha->pci_dev;
7730
7731 pf_schema = pci_iov_schema_alloc_node();
7732 vf_schema = pci_iov_schema_alloc_node();
7733
7734 pci_iov_schema_add_unicast_mac(vf_schema, "mac-addr", 0, NULL);
7735 pci_iov_schema_add_bool(vf_schema, "allow-set-mac",
7736 IOV_SCHEMA_HASDEFAULT, FALSE);
7737 pci_iov_schema_add_bool(vf_schema, "allow-promisc",
7738 IOV_SCHEMA_HASDEFAULT, FALSE);
7739 pci_iov_schema_add_uint16(vf_schema, "num-queues",
7740 IOV_SCHEMA_HASDEFAULT, 1);
7741
7742 iov_error = pci_iov_attach(dev, pf_schema, vf_schema);
7743
7744 if (iov_error != 0) {
7745 ha->sriov_initialized = 0;
7746 } else {
7747 device_printf(dev, "SRIOV initialized\n");
7748 ha->sriov_initialized = 1;
7749 }
7750
7751 return;
7752 }
7753
/*
 * qlnx_sriov_disable - disable all VFs: mark VFs as to-be-disabled,
 * clean the WFQ DB, then for every valid VF wait (up to ~1s) for it to
 * stop before releasing its hardware resources.
 */
static void
qlnx_sriov_disable(qlnx_host_t *ha)
{
	struct ecore_dev *cdev;
	int i, j;

	cdev = &ha->cdev;

	ecore_iov_set_vfs_to_disable(cdev, true);

	for_each_hwfn(cdev, i) {
		struct ecore_hwfn *hwfn = &cdev->hwfns[i];
		struct ecore_ptt *ptt = ecore_ptt_acquire(hwfn);

		if (!ptt) {
			QL_DPRINT1(ha, "Failed to acquire ptt\n");
			/* NOTE(review): early return leaves VFs flagged
			 * disabled — confirm this is intended. */
			return;
		}
		/* Clean WFQ db and configure equal weight for all vports */
		ecore_clean_wfq_db(hwfn, ptt);

		ecore_for_each_vf(hwfn, j) {
			int k = 0;

			if (!ecore_iov_is_valid_vfid(hwfn, j, true, false))
				continue;

			if (ecore_iov_is_vf_started(hwfn, j)) {
				/* Wait until VF is disabled before releasing */

				/* Poll up to 100 x 10ms for the VF to stop. */
				for (k = 0; k < 100; k++) {
					if (!ecore_iov_is_vf_stopped(hwfn, j)) {
						qlnx_mdelay(__func__, 10);
					} else
						break;
				}
			}

			/* k < 100: VF stopped in time, or was never started. */
			if (k < 100)
				ecore_iov_release_hw_for_vf(&cdev->hwfns[i],
				    ptt, j);
			else {
				QL_DPRINT1(ha,
				    "Timeout waiting for VF's FLR to end\n");
			}
		}
		ecore_ptt_release(hwfn, ptt);
	}

	ecore_iov_set_vfs_to_disable(cdev, false);

	return;
}
7807
7808 static void
qlnx_sriov_enable_qid_config(struct ecore_hwfn * hwfn,u16 vfid,struct ecore_iov_vf_init_params * params)7809 qlnx_sriov_enable_qid_config(struct ecore_hwfn *hwfn, u16 vfid,
7810 struct ecore_iov_vf_init_params *params)
7811 {
7812 u16 base, i;
7813
7814 /* Since we have an equal resource distribution per-VF, and we assume
7815 * PF has acquired the ECORE_PF_L2_QUE first queues, we start setting
7816 * sequentially from there.
7817 */
7818 base = FEAT_NUM(hwfn, ECORE_PF_L2_QUE) + vfid * params->num_queues;
7819
7820 params->rel_vf_id = vfid;
7821
7822 for (i = 0; i < params->num_queues; i++) {
7823 params->req_rx_queue[i] = base + i;
7824 params->req_tx_queue[i] = base + i;
7825 }
7826
7827 /* PF uses indices 0 for itself; Set vport/RSS afterwards */
7828 params->vport_id = vfid + 1;
7829 params->rss_eng_id = vfid + 1;
7830
7831 return;
7832 }
7833
7834 static int
qlnx_iov_init(device_t dev,uint16_t num_vfs,const nvlist_t * nvlist_params)7835 qlnx_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t *nvlist_params)
7836 {
7837 qlnx_host_t *ha;
7838 struct ecore_dev *cdev;
7839 struct ecore_iov_vf_init_params params;
7840 int ret, j, i;
7841 uint32_t max_vfs;
7842
7843 if ((ha = device_get_softc(dev)) == NULL) {
7844 device_printf(dev, "%s: cannot get softc\n", __func__);
7845 return (-1);
7846 }
7847
7848 if (qlnx_create_pf_taskqueues(ha) != 0)
7849 goto qlnx_iov_init_err0;
7850
7851 cdev = &ha->cdev;
7852
7853 max_vfs = RESC_NUM(&cdev->hwfns[0], ECORE_VPORT);
7854
7855 QL_DPRINT2(ha," dev = %p enter num_vfs = %d max_vfs = %d\n",
7856 dev, num_vfs, max_vfs);
7857
7858 if (num_vfs >= max_vfs) {
7859 QL_DPRINT1(ha, "Can start at most %d VFs\n",
7860 (RESC_NUM(&cdev->hwfns[0], ECORE_VPORT) - 1));
7861 goto qlnx_iov_init_err0;
7862 }
7863
7864 ha->vf_attr = malloc(((sizeof (qlnx_vf_attr_t) * num_vfs)), M_QLNXBUF,
7865 M_NOWAIT);
7866
7867 if (ha->vf_attr == NULL)
7868 goto qlnx_iov_init_err0;
7869
7870 memset(¶ms, 0, sizeof(params));
7871
7872 /* Initialize HW for VF access */
7873 for_each_hwfn(cdev, j) {
7874 struct ecore_hwfn *hwfn = &cdev->hwfns[j];
7875 struct ecore_ptt *ptt = ecore_ptt_acquire(hwfn);
7876
7877 /* Make sure not to use more than 16 queues per VF */
7878 params.num_queues = min_t(int,
7879 (FEAT_NUM(hwfn, ECORE_VF_L2_QUE) / num_vfs),
7880 16);
7881
7882 if (!ptt) {
7883 QL_DPRINT1(ha, "Failed to acquire ptt\n");
7884 goto qlnx_iov_init_err1;
7885 }
7886
7887 for (i = 0; i < num_vfs; i++) {
7888 if (!ecore_iov_is_valid_vfid(hwfn, i, false, true))
7889 continue;
7890
7891 qlnx_sriov_enable_qid_config(hwfn, i, ¶ms);
7892
7893 ret = ecore_iov_init_hw_for_vf(hwfn, ptt, ¶ms);
7894
7895 if (ret) {
7896 QL_DPRINT1(ha, "Failed to enable VF[%d]\n", i);
7897 ecore_ptt_release(hwfn, ptt);
7898 goto qlnx_iov_init_err1;
7899 }
7900 }
7901
7902 ecore_ptt_release(hwfn, ptt);
7903 }
7904
7905 ha->num_vfs = num_vfs;
7906 qlnx_inform_vf_link_state(&cdev->hwfns[0], ha);
7907
7908 QL_DPRINT2(ha," dev = %p exit num_vfs = %d\n", dev, num_vfs);
7909
7910 return (0);
7911
7912 qlnx_iov_init_err1:
7913 qlnx_sriov_disable(ha);
7914
7915 qlnx_iov_init_err0:
7916 qlnx_destroy_pf_taskqueues(ha);
7917 ha->num_vfs = 0;
7918
7919 return (-1);
7920 }
7921
7922 static void
qlnx_iov_uninit(device_t dev)7923 qlnx_iov_uninit(device_t dev)
7924 {
7925 qlnx_host_t *ha;
7926
7927 if ((ha = device_get_softc(dev)) == NULL) {
7928 device_printf(dev, "%s: cannot get softc\n", __func__);
7929 return;
7930 }
7931
7932 QL_DPRINT2(ha," dev = %p enter\n", dev);
7933
7934 qlnx_sriov_disable(ha);
7935 qlnx_destroy_pf_taskqueues(ha);
7936
7937 free(ha->vf_attr, M_QLNXBUF);
7938 ha->vf_attr = NULL;
7939
7940 ha->num_vfs = 0;
7941
7942 QL_DPRINT2(ha," dev = %p exit\n", dev);
7943 return;
7944 }
7945
7946 static int
qlnx_iov_add_vf(device_t dev,uint16_t vfnum,const nvlist_t * params)7947 qlnx_iov_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *params)
7948 {
7949 qlnx_host_t *ha;
7950 qlnx_vf_attr_t *vf_attr;
7951 unsigned const char *mac;
7952 size_t size;
7953 struct ecore_hwfn *p_hwfn;
7954
7955 if ((ha = device_get_softc(dev)) == NULL) {
7956 device_printf(dev, "%s: cannot get softc\n", __func__);
7957 return (-1);
7958 }
7959
7960 QL_DPRINT2(ha," dev = %p enter vfnum = %d\n", dev, vfnum);
7961
7962 if (vfnum > (ha->num_vfs - 1)) {
7963 QL_DPRINT1(ha, " VF[%d] is greater than max allowed [%d]\n",
7964 vfnum, (ha->num_vfs - 1));
7965 }
7966
7967 vf_attr = &ha->vf_attr[vfnum];
7968
7969 if (nvlist_exists_binary(params, "mac-addr")) {
7970 mac = nvlist_get_binary(params, "mac-addr", &size);
7971 bcopy(mac, vf_attr->mac_addr, ETHER_ADDR_LEN);
7972 device_printf(dev,
7973 "%s: mac_addr = %02x:%02x:%02x:%02x:%02x:%02x\n",
7974 __func__, vf_attr->mac_addr[0],
7975 vf_attr->mac_addr[1], vf_attr->mac_addr[2],
7976 vf_attr->mac_addr[3], vf_attr->mac_addr[4],
7977 vf_attr->mac_addr[5]);
7978 p_hwfn = &ha->cdev.hwfns[0];
7979 ecore_iov_bulletin_set_mac(p_hwfn, vf_attr->mac_addr,
7980 vfnum);
7981 }
7982
7983 QL_DPRINT2(ha," dev = %p exit vfnum = %d\n", dev, vfnum);
7984 return (0);
7985 }
7986
7987 static void
qlnx_handle_vf_msg(qlnx_host_t * ha,struct ecore_hwfn * p_hwfn)7988 qlnx_handle_vf_msg(qlnx_host_t *ha, struct ecore_hwfn *p_hwfn)
7989 {
7990 uint64_t events[ECORE_VF_ARRAY_LENGTH];
7991 struct ecore_ptt *ptt;
7992 int i;
7993
7994 ptt = ecore_ptt_acquire(p_hwfn);
7995 if (!ptt) {
7996 QL_DPRINT1(ha, "Can't acquire PTT; re-scheduling\n");
7997 __qlnx_pf_vf_msg(p_hwfn, 0);
7998 return;
7999 }
8000
8001 ecore_iov_pf_get_pending_events(p_hwfn, events);
8002
8003 QL_DPRINT2(ha, "Event mask of VF events:"
8004 "0x%" PRIu64 "0x%" PRIu64 " 0x%" PRIu64 "\n",
8005 events[0], events[1], events[2]);
8006
8007 ecore_for_each_vf(p_hwfn, i) {
8008 /* Skip VFs with no pending messages */
8009 if (!(events[i / 64] & (1ULL << (i % 64))))
8010 continue;
8011
8012 QL_DPRINT2(ha,
8013 "Handling VF message from VF 0x%02x [Abs 0x%02x]\n",
8014 i, p_hwfn->p_dev->p_iov_info->first_vf_in_pf + i);
8015
8016 /* Copy VF's message to PF's request buffer for that VF */
8017 if (ecore_iov_copy_vf_msg(p_hwfn, ptt, i))
8018 continue;
8019
8020 ecore_iov_process_mbx_req(p_hwfn, ptt, i);
8021 }
8022
8023 ecore_ptt_release(p_hwfn, ptt);
8024
8025 return;
8026 }
8027
8028 static void
qlnx_handle_vf_flr_update(qlnx_host_t * ha,struct ecore_hwfn * p_hwfn)8029 qlnx_handle_vf_flr_update(qlnx_host_t *ha, struct ecore_hwfn *p_hwfn)
8030 {
8031 struct ecore_ptt *ptt;
8032 int ret;
8033
8034 ptt = ecore_ptt_acquire(p_hwfn);
8035
8036 if (!ptt) {
8037 QL_DPRINT1(ha, "Can't acquire PTT; re-scheduling\n");
8038 __qlnx_vf_flr_update(p_hwfn);
8039 return;
8040 }
8041
8042 ret = ecore_iov_vf_flr_cleanup(p_hwfn, ptt);
8043
8044 if (ret) {
8045 QL_DPRINT1(ha, "ecore_iov_vf_flr_cleanup failed; re-scheduling\n");
8046 }
8047
8048 ecore_ptt_release(p_hwfn, ptt);
8049
8050 return;
8051 }
8052
8053 static void
qlnx_handle_bulletin_update(qlnx_host_t * ha,struct ecore_hwfn * p_hwfn)8054 qlnx_handle_bulletin_update(qlnx_host_t *ha, struct ecore_hwfn *p_hwfn)
8055 {
8056 struct ecore_ptt *ptt;
8057 int i;
8058
8059 ptt = ecore_ptt_acquire(p_hwfn);
8060
8061 if (!ptt) {
8062 QL_DPRINT1(ha, "Can't acquire PTT; re-scheduling\n");
8063 qlnx_vf_bulleting_update(p_hwfn);
8064 return;
8065 }
8066
8067 ecore_for_each_vf(p_hwfn, i) {
8068 QL_DPRINT1(ha, "ecore_iov_post_vf_bulletin[%p, %d]\n",
8069 p_hwfn, i);
8070 ecore_iov_post_vf_bulletin(p_hwfn, i, ptt);
8071 }
8072
8073 ecore_ptt_release(p_hwfn, ptt);
8074
8075 return;
8076 }
8077
8078 static void
qlnx_pf_taskqueue(void * context,int pending)8079 qlnx_pf_taskqueue(void *context, int pending)
8080 {
8081 struct ecore_hwfn *p_hwfn;
8082 qlnx_host_t *ha;
8083 int i;
8084
8085 p_hwfn = context;
8086
8087 if (p_hwfn == NULL)
8088 return;
8089
8090 ha = (qlnx_host_t *)(p_hwfn->p_dev);
8091
8092 if ((i = qlnx_find_hwfn_index(p_hwfn)) == -1)
8093 return;
8094
8095 if (atomic_testandclear_32(&ha->sriov_task[i].flags,
8096 QLNX_SRIOV_TASK_FLAGS_VF_PF_MSG))
8097 qlnx_handle_vf_msg(ha, p_hwfn);
8098
8099 if (atomic_testandclear_32(&ha->sriov_task[i].flags,
8100 QLNX_SRIOV_TASK_FLAGS_VF_FLR_UPDATE))
8101 qlnx_handle_vf_flr_update(ha, p_hwfn);
8102
8103 if (atomic_testandclear_32(&ha->sriov_task[i].flags,
8104 QLNX_SRIOV_TASK_FLAGS_BULLETIN_UPDATE))
8105 qlnx_handle_bulletin_update(ha, p_hwfn);
8106
8107 return;
8108 }
8109
8110 static int
qlnx_create_pf_taskqueues(qlnx_host_t * ha)8111 qlnx_create_pf_taskqueues(qlnx_host_t *ha)
8112 {
8113 int i;
8114 uint8_t tq_name[32];
8115
8116 for (i = 0; i < ha->cdev.num_hwfns; i++) {
8117 struct ecore_hwfn *p_hwfn = &ha->cdev.hwfns[i];
8118
8119 bzero(tq_name, sizeof (tq_name));
8120 snprintf(tq_name, sizeof (tq_name), "ql_pf_tq_%d", i);
8121
8122 TASK_INIT(&ha->sriov_task[i].pf_task, 0, qlnx_pf_taskqueue, p_hwfn);
8123
8124 ha->sriov_task[i].pf_taskqueue = taskqueue_create(tq_name, M_NOWAIT,
8125 taskqueue_thread_enqueue,
8126 &ha->sriov_task[i].pf_taskqueue);
8127
8128 if (ha->sriov_task[i].pf_taskqueue == NULL)
8129 return (-1);
8130
8131 taskqueue_start_threads(&ha->sriov_task[i].pf_taskqueue, 1,
8132 PI_NET, "%s", tq_name);
8133
8134 QL_DPRINT1(ha, "%p\n", ha->sriov_task[i].pf_taskqueue);
8135 }
8136
8137 return (0);
8138 }
8139
8140 static void
qlnx_destroy_pf_taskqueues(qlnx_host_t * ha)8141 qlnx_destroy_pf_taskqueues(qlnx_host_t *ha)
8142 {
8143 int i;
8144
8145 for (i = 0; i < ha->cdev.num_hwfns; i++) {
8146 if (ha->sriov_task[i].pf_taskqueue != NULL) {
8147 taskqueue_drain(ha->sriov_task[i].pf_taskqueue,
8148 &ha->sriov_task[i].pf_task);
8149 taskqueue_free(ha->sriov_task[i].pf_taskqueue);
8150 ha->sriov_task[i].pf_taskqueue = NULL;
8151 }
8152 }
8153 return;
8154 }
8155
8156 static void
qlnx_inform_vf_link_state(struct ecore_hwfn * p_hwfn,qlnx_host_t * ha)8157 qlnx_inform_vf_link_state(struct ecore_hwfn *p_hwfn, qlnx_host_t *ha)
8158 {
8159 struct ecore_mcp_link_capabilities caps;
8160 struct ecore_mcp_link_params params;
8161 struct ecore_mcp_link_state link;
8162 int i;
8163
8164 if (!p_hwfn->pf_iov_info)
8165 return;
8166
8167 memset(¶ms, 0, sizeof(struct ecore_mcp_link_params));
8168 memset(&link, 0, sizeof(struct ecore_mcp_link_state));
8169 memset(&caps, 0, sizeof(struct ecore_mcp_link_capabilities));
8170
8171 memcpy(&caps, ecore_mcp_get_link_capabilities(p_hwfn), sizeof(caps));
8172 memcpy(&link, ecore_mcp_get_link_state(p_hwfn), sizeof(link));
8173 memcpy(¶ms, ecore_mcp_get_link_params(p_hwfn), sizeof(params));
8174
8175 QL_DPRINT2(ha, "called\n");
8176
8177 /* Update bulletin of all future possible VFs with link configuration */
8178 for (i = 0; i < p_hwfn->p_dev->p_iov_info->total_vfs; i++) {
8179 /* Modify link according to the VF's configured link state */
8180
8181 link.link_up = false;
8182
8183 if (ha->link_up) {
8184 link.link_up = true;
8185 /* Set speed according to maximum supported by HW.
8186 * that is 40G for regular devices and 100G for CMT
8187 * mode devices.
8188 */
8189 link.speed = (p_hwfn->p_dev->num_hwfns > 1) ?
8190 100000 : link.speed;
8191 }
8192 QL_DPRINT2(ha, "link [%d] = %d\n", i, link.link_up);
8193 ecore_iov_set_link(p_hwfn, i, ¶ms, &link, &caps);
8194 }
8195
8196 qlnx_vf_bulleting_update(p_hwfn);
8197
8198 return;
8199 }
8200 #endif /* #ifndef QLNX_VF */
8201 #endif /* #ifdef CONFIG_ECORE_SRIOV */
8202