// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2017 - 2019 Pensando Systems, Inc */

#include <linux/ethtool.h>
#include <linux/printk.h>
#include <linux/dynamic_debug.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/rtnetlink.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/cpumask.h>
#include <linux/crash_dump.h>
#include <linux/vmalloc.h>

#include "ionic.h"
#include "ionic_bus.h"
#include "ionic_dev.h"
#include "ionic_lif.h"
#include "ionic_txrx.h"
#include "ionic_ethtool.h"
#include "ionic_debugfs.h"

/* queuetype support level */
static const u8 ionic_qtype_versions[IONIC_QTYPE_MAX] = {
	[IONIC_QTYPE_ADMINQ]  = 0,	/* 0 = Base version with CQ support */
	[IONIC_QTYPE_NOTIFYQ] = 0,	/* 0 = Base version */
	[IONIC_QTYPE_RXQ]     = 2,	/* 0 = Base version with CQ+SG support
					 * 2 =       ... with CMB rings
					 */
	[IONIC_QTYPE_TXQ]     = 3,	/* 0 = Base version with CQ+SG support
					 * 1 =       ... with Tx SG version 1
					 * 3 =       ... with CMB rings
					 */
};

static void ionic_link_status_check(struct ionic_lif *lif);
static void ionic_lif_handle_fw_down(struct ionic_lif *lif);
static void ionic_lif_handle_fw_up(struct ionic_lif *lif);
static void ionic_lif_set_netdev_info(struct ionic_lif *lif);

static void ionic_txrx_deinit(struct ionic_lif *lif);
static int ionic_txrx_init(struct ionic_lif *lif);
static int ionic_start_queues(struct ionic_lif *lif);
static void ionic_stop_queues(struct ionic_lif *lif);
static void ionic_lif_queue_identify(struct ionic_lif *lif);

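/* Deferred by the dim library: translate the chosen Rx moderation
 * profile into a hardware coalesce value and reprogram the interrupt
 * only if the value changed, then restart the dim measurement cycle.
 */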
static void ionic_dim_work(struct work_struct *work)
{
	struct dim *dim = container_of(work, struct dim, work);
	struct ionic_intr_info *intr;
	struct dim_cq_moder cur_moder;
	struct ionic_qcq *qcq;
	struct ionic_lif *lif;
	u32 new_coal;

	cur_moder = net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
	qcq = container_of(dim, struct ionic_qcq, dim);
	lif = qcq->q.lif;
	new_coal = ionic_coal_usec_to_hw(lif->ionic, cur_moder.usec);
	new_coal = new_coal ? new_coal : 1;

	intr = &qcq->intr;
	if (intr->dim_coal_hw != new_coal) {
		intr->dim_coal_hw = new_coal;

		ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
				     intr->index, intr->dim_coal_hw);
	}

	dim->state = DIM_START_MEASURE;
}

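/* Process the deferred-work list one entry at a time, taking the lock
 * only to pop each entry so handlers can run (and re-queue work)
 * without holding it.
 */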
static void ionic_lif_deferred_work(struct work_struct *work)
{
	struct ionic_lif *lif = container_of(work, struct ionic_lif, deferred.work);
	struct ionic_deferred *def = &lif->deferred;
	struct ionic_deferred_work *w = NULL;

	do {
		spin_lock_bh(&def->lock);
		if (!list_empty(&def->list)) {
			w = list_first_entry(&def->list,
					     struct ionic_deferred_work, list);
			list_del(&w->list);
		}
		spin_unlock_bh(&def->lock);

		if (!w)
			break;

		switch (w->type) {
		case IONIC_DW_TYPE_RX_MODE:
			ionic_lif_rx_mode(lif);
			break;
		case IONIC_DW_TYPE_LINK_STATUS:
			ionic_link_status_check(lif);
			break;
		case IONIC_DW_TYPE_LIF_RESET:
			if (w->fw_status) {
				ionic_lif_handle_fw_up(lif);
			} else {
				ionic_lif_handle_fw_down(lif);

				/* Fire off another watchdog to see
				 * if the FW is already back rather than
				 * waiting another whole cycle
				 */
				mod_timer(&lif->ionic->watchdog_timer, jiffies + 1);
			}
			break;
		default:
			break;
		}
		kfree(w);
		w = NULL;
	} while (true);
}

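/* Add a work item to the deferred-work list and poke the worker. */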
void ionic_lif_deferred_enqueue(struct ionic_deferred *def,
				struct ionic_deferred_work *work)
{
	spin_lock_bh(&def->lock);
	list_add_tail(&work->list, &def->list);
	spin_unlock_bh(&def->lock);
	schedule_work(&def->work);
}

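/* Bring the netdev carrier and queue state in line with the link
 * status last reported by the firmware; does nothing unless a check
 * was requested, and won't re-enable carrier on a broken LIF.
 */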
static void ionic_link_status_check(struct ionic_lif *lif)
{
	struct net_device *netdev = lif->netdev;
	u16 link_status;
	bool link_up;

	if (!test_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state))
		return;

	/* Don't put carrier back up if we're in a broken state */
	if (test_bit(IONIC_LIF_F_BROKEN, lif->state)) {
		clear_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state);
		return;
	}

	link_status = le16_to_cpu(lif->info->status.link_status);
	link_up = link_status == IONIC_PORT_OPER_STATUS_UP;

	if (link_up) {
		int err = 0;

		if (netdev->flags & IFF_UP && netif_running(netdev)) {
			mutex_lock(&lif->queue_lock);
			err = ionic_start_queues(lif);
			if (err && err != -EBUSY) {
				netdev_err(netdev,
					   "Failed to start queues: %d\n", err);
				set_bit(IONIC_LIF_F_BROKEN, lif->state);
				netif_carrier_off(lif->netdev);
			}
			mutex_unlock(&lif->queue_lock);
		}

		if (!err && !netif_carrier_ok(netdev)) {
			ionic_port_identify(lif->ionic);
			netdev_info(netdev, "Link up - %d Gbps\n",
				    le32_to_cpu(lif->info->status.link_speed) / 1000);
			netif_carrier_on(netdev);
		}
	} else {
		if (netif_carrier_ok(netdev)) {
			lif->link_down_count++;
			netdev_info(netdev, "Link down\n");
			netif_carrier_off(netdev);
		}

		if (netdev->flags & IFF_UP && netif_running(netdev)) {
			mutex_lock(&lif->queue_lock);
			ionic_stop_queues(lif);
			mutex_unlock(&lif->queue_lock);
		}
	}

	clear_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state);
}

void ionic_link_status_check_request(struct ionic_lif *lif, bool can_sleep)
{
	struct ionic_deferred_work *work;

	/* we only need one request outstanding at a time */
	if (test_and_set_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state))
		return;

	if (!can_sleep) {
		work = kzalloc(sizeof(*work), GFP_ATOMIC);
		if (!work) {
			clear_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state);
			return;
		}

		work->type = IONIC_DW_TYPE_LINK_STATUS;
		ionic_lif_deferred_enqueue(&lif->deferred, work);
	} else {
		ionic_link_status_check(lif);
	}
}

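/* Deadline timer handler: schedule the queue's napi so the poll
 * routine runs again even if no completion interrupt has fired.
 */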
static void ionic_napi_deadline(struct timer_list *timer)
{
	struct ionic_qcq *qcq = container_of(timer, struct ionic_qcq, napi_deadline);

	napi_schedule(&qcq->napi);
}

static irqreturn_t ionic_isr(int irq, void *data)
{
	struct napi_struct *napi = data;

	napi_schedule_irqoff(napi);

	return IRQ_HANDLED;
}

static int ionic_request_irq(struct ionic_lif *lif, struct ionic_qcq *qcq)
{
	struct ionic_intr_info *intr = &qcq->intr;
	struct device *dev = lif->ionic->dev;
	struct ionic_queue *q = &qcq->q;
	const char *name;

	if (lif->registered)
		name = lif->netdev->name;
	else
		name = dev_name(dev);

	snprintf(intr->name, sizeof(intr->name),
		 "%s-%s-%s", IONIC_DRV_NAME, name, q->name);

	return devm_request_irq(dev, intr->vector, ionic_isr,
				0, intr->name, &qcq->napi);
}

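/* Claim the first free slot in the device-wide interrupt bitmap. */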
static int ionic_intr_alloc(struct ionic_lif *lif, struct ionic_intr_info *intr)
{
	struct ionic *ionic = lif->ionic;
	int index;

	index = find_first_zero_bit(ionic->intrs, ionic->nintrs);
	if (index == ionic->nintrs) {
		netdev_warn(lif->netdev, "%s: no intr, index=%d nintrs=%d\n",
			    __func__, index, ionic->nintrs);
		return -ENOSPC;
	}

	set_bit(index, ionic->intrs);
	ionic_intr_init(&ionic->idev, intr, index);

	return 0;
}

static void ionic_intr_free(struct ionic *ionic, int index)
{
	if (index != IONIC_INTR_INDEX_NOT_ASSIGNED && index < ionic->nintrs)
		clear_bit(index, ionic->intrs);
}

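/* Ask the FW to enable the queue, then turn on napi and unmask the
 * queue's interrupt with a fresh affinity hint.
 */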
static int ionic_qcq_enable(struct ionic_qcq *qcq)
{
	struct ionic_queue *q = &qcq->q;
	struct ionic_lif *lif = q->lif;
	struct ionic_dev *idev;
	struct device *dev;

	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.q_control = {
			.opcode = IONIC_CMD_Q_CONTROL,
			.lif_index = cpu_to_le16(lif->index),
			.type = q->type,
			.index = cpu_to_le32(q->index),
			.oper = IONIC_Q_ENABLE,
		},
	};
	int ret;

	idev = &lif->ionic->idev;
	dev = lif->ionic->dev;

	dev_dbg(dev, "q_enable.index %d q_enable.qtype %d\n",
		ctx.cmd.q_control.index, ctx.cmd.q_control.type);

	if (qcq->flags & IONIC_QCQ_F_INTR)
		ionic_intr_clean(idev->intr_ctrl, qcq->intr.index);

	ret = ionic_adminq_post_wait(lif, &ctx);
	if (ret)
		return ret;

	if (qcq->napi.poll)
		napi_enable(&qcq->napi);

	if (qcq->flags & IONIC_QCQ_F_INTR) {
		irq_set_affinity_hint(qcq->intr.vector,
				      &qcq->intr.affinity_mask);
		ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
				IONIC_INTR_MASK_CLEAR);
	}

	return 0;
}

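/* Quiesce napi and mask the interrupt before asking the FW to disable
 * the queue; if a FW communication error was already seen, skip the
 * adminq command and just pass that error back.
 */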
static int ionic_qcq_disable(struct ionic_lif *lif, struct ionic_qcq *qcq, int fw_err)
{
	struct ionic_queue *q;

	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.q_control = {
			.opcode = IONIC_CMD_Q_CONTROL,
			.oper = IONIC_Q_DISABLE,
		},
	};

	if (!qcq) {
		netdev_err(lif->netdev, "%s: bad qcq\n", __func__);
		return -ENXIO;
	}

	q = &qcq->q;

	if (qcq->flags & IONIC_QCQ_F_INTR) {
		struct ionic_dev *idev = &lif->ionic->idev;

		cancel_work_sync(&qcq->dim.work);
		ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
				IONIC_INTR_MASK_SET);
		synchronize_irq(qcq->intr.vector);
		irq_set_affinity_hint(qcq->intr.vector, NULL);
		napi_disable(&qcq->napi);
		del_timer_sync(&qcq->napi_deadline);
	}

	/* If there was a previous fw communication error, don't bother with
	 * sending the adminq command and just return the same error value.
	 */
	if (fw_err == -ETIMEDOUT || fw_err == -ENXIO)
		return fw_err;

	ctx.cmd.q_control.lif_index = cpu_to_le16(lif->index);
	ctx.cmd.q_control.type = q->type;
	ctx.cmd.q_control.index = cpu_to_le32(q->index);
	dev_dbg(lif->ionic->dev, "q_disable.index %d q_disable.qtype %d\n",
		ctx.cmd.q_control.index, ctx.cmd.q_control.type);

	return ionic_adminq_post_wait(lif, &ctx);
}

static void ionic_lif_qcq_deinit(struct ionic_lif *lif, struct ionic_qcq *qcq)
{
	struct ionic_dev *idev = &lif->ionic->idev;

	if (!qcq)
		return;

	if (!(qcq->flags & IONIC_QCQ_F_INITED))
		return;

	if (qcq->flags & IONIC_QCQ_F_INTR) {
		ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
				IONIC_INTR_MASK_SET);
		netif_napi_del(&qcq->napi);
	}

	qcq->flags &= ~IONIC_QCQ_F_INITED;
}

static void ionic_qcq_intr_free(struct ionic_lif *lif, struct ionic_qcq *qcq)
{
	if (!(qcq->flags & IONIC_QCQ_F_INTR) || qcq->intr.vector == 0)
		return;

	irq_set_affinity_hint(qcq->intr.vector, NULL);
	devm_free_irq(lif->ionic->dev, qcq->intr.vector, &qcq->napi);
	qcq->intr.vector = 0;
	ionic_intr_free(lif->ionic, qcq->intr.index);
	qcq->intr.index = IONIC_INTR_INDEX_NOT_ASSIGNED;
}

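/* Undo ionic_qcq_alloc(): free descriptor DMA rings, CMB mapping,
 * interrupt, and the q/cq info arrays.
 */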
static void ionic_qcq_free(struct ionic_lif *lif, struct ionic_qcq *qcq)
{
	struct device *dev = lif->ionic->dev;

	if (!qcq)
		return;

	ionic_debugfs_del_qcq(qcq);

	if (qcq->q_base) {
		dma_free_coherent(dev, qcq->q_size, qcq->q_base, qcq->q_base_pa);
		qcq->q_base = NULL;
		qcq->q_base_pa = 0;
	}

	if (qcq->cmb_q_base) {
		iounmap(qcq->cmb_q_base);
		ionic_put_cmb(lif, qcq->cmb_pgid, qcq->cmb_order);
		qcq->cmb_pgid = 0;
		qcq->cmb_order = 0;
		qcq->cmb_q_base = NULL;
		qcq->cmb_q_base_pa = 0;
	}

	if (qcq->cq_base) {
		dma_free_coherent(dev, qcq->cq_size, qcq->cq_base, qcq->cq_base_pa);
		qcq->cq_base = NULL;
		qcq->cq_base_pa = 0;
	}

	if (qcq->sg_base) {
		dma_free_coherent(dev, qcq->sg_size, qcq->sg_base, qcq->sg_base_pa);
		qcq->sg_base = NULL;
		qcq->sg_base_pa = 0;
	}

	ionic_qcq_intr_free(lif, qcq);

	vfree(qcq->cq.info);
	qcq->cq.info = NULL;
	vfree(qcq->q.info);
	qcq->q.info = NULL;
}

void ionic_qcqs_free(struct ionic_lif *lif)
{
	struct device *dev = lif->ionic->dev;
	struct ionic_qcq *adminqcq;
	unsigned long irqflags;

	if (lif->notifyqcq) {
		ionic_qcq_free(lif, lif->notifyqcq);
		devm_kfree(dev, lif->notifyqcq);
		lif->notifyqcq = NULL;
	}

	if (lif->adminqcq) {
		spin_lock_irqsave(&lif->adminq_lock, irqflags);
		adminqcq = READ_ONCE(lif->adminqcq);
		lif->adminqcq = NULL;
		spin_unlock_irqrestore(&lif->adminq_lock, irqflags);
		if (adminqcq) {
			ionic_qcq_free(lif, adminqcq);
			devm_kfree(dev, adminqcq);
		}
	}

	if (lif->rxqcqs) {
		devm_kfree(dev, lif->rxqstats);
		lif->rxqstats = NULL;
		devm_kfree(dev, lif->rxqcqs);
		lif->rxqcqs = NULL;
	}

	if (lif->txqcqs) {
		devm_kfree(dev, lif->txqstats);
		lif->txqstats = NULL;
		devm_kfree(dev, lif->txqcqs);
		lif->txqcqs = NULL;
	}
}

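/* Let n_qcq share src_qcq's interrupt (e.g. notifyq on the adminq irq). */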
static void ionic_link_qcq_interrupts(struct ionic_qcq *src_qcq,
				      struct ionic_qcq *n_qcq)
{
	n_qcq->intr.vector = src_qcq->intr.vector;
	n_qcq->intr.index = src_qcq->intr.index;
	n_qcq->napi_qcq = src_qcq->napi_qcq;
}

static int ionic_alloc_qcq_interrupt(struct ionic_lif *lif, struct ionic_qcq *qcq)
{
	int err;

	if (!(qcq->flags & IONIC_QCQ_F_INTR)) {
		qcq->intr.index = IONIC_INTR_INDEX_NOT_ASSIGNED;
		return 0;
	}

	err = ionic_intr_alloc(lif, &qcq->intr);
	if (err) {
		netdev_warn(lif->netdev, "no intr for %s: %d\n",
			    qcq->q.name, err);
		goto err_out;
	}

	err = ionic_bus_get_irq(lif->ionic, qcq->intr.index);
	if (err < 0) {
		netdev_warn(lif->netdev, "no vector for %s: %d\n",
			    qcq->q.name, err);
		goto err_out_free_intr;
	}
	qcq->intr.vector = err;
	ionic_intr_mask_assert(lif->ionic->idev.intr_ctrl, qcq->intr.index,
			       IONIC_INTR_MASK_SET);

	err = ionic_request_irq(lif, qcq);
	if (err) {
		netdev_warn(lif->netdev, "irq request failed %d\n", err);
		goto err_out_free_intr;
	}

	/* try to get the irq on the local numa node first */
	qcq->intr.cpu = cpumask_local_spread(qcq->intr.index,
					     dev_to_node(lif->ionic->dev));
	if (qcq->intr.cpu != -1)
		cpumask_set_cpu(qcq->intr.cpu, &qcq->intr.affinity_mask);

	netdev_dbg(lif->netdev, "%s: Interrupt index %d\n", qcq->q.name, qcq->intr.index);
	return 0;

err_out_free_intr:
	ionic_intr_free(lif->ionic, qcq->intr.index);
err_out:
	return err;
}

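/* Allocate one queue/completion-queue pair: info arrays, an optional
 * interrupt, and the descriptor rings, either in coherent DMA memory
 * or (for IONIC_QCQ_F_CMB_RINGS) in on-chip controller memory.
 * A NotifyQ gets its q and cq carved from one contiguous allocation.
 */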
static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type,
			   unsigned int index,
			   const char *name, unsigned int flags,
			   unsigned int num_descs, unsigned int desc_size,
			   unsigned int cq_desc_size,
			   unsigned int sg_desc_size,
			   unsigned int pid, struct ionic_qcq **qcq)
{
	struct ionic_dev *idev = &lif->ionic->idev;
	struct device *dev = lif->ionic->dev;
	void *q_base, *cq_base, *sg_base;
	dma_addr_t cq_base_pa = 0;
	dma_addr_t sg_base_pa = 0;
	dma_addr_t q_base_pa = 0;
	struct ionic_qcq *new;
	int err;

	*qcq = NULL;

	new = devm_kzalloc(dev, sizeof(*new), GFP_KERNEL);
	if (!new) {
		netdev_err(lif->netdev, "Cannot allocate queue structure\n");
		err = -ENOMEM;
		goto err_out;
	}

	new->q.dev = dev;
	new->flags = flags;

	new->q.info = vcalloc(num_descs, sizeof(*new->q.info));
	if (!new->q.info) {
		netdev_err(lif->netdev, "Cannot allocate queue info\n");
		err = -ENOMEM;
		goto err_out_free_qcq;
	}

	new->q.type = type;
	new->q.max_sg_elems = lif->qtype_info[type].max_sg_elems;

	err = ionic_q_init(lif, idev, &new->q, index, name, num_descs,
			   desc_size, sg_desc_size, pid);
	if (err) {
		netdev_err(lif->netdev, "Cannot initialize queue\n");
		goto err_out_free_q_info;
	}

	err = ionic_alloc_qcq_interrupt(lif, new);
	if (err)
		goto err_out;

	new->cq.info = vcalloc(num_descs, sizeof(*new->cq.info));
	if (!new->cq.info) {
		netdev_err(lif->netdev, "Cannot allocate completion queue info\n");
		err = -ENOMEM;
		goto err_out_free_irq;
	}

	err = ionic_cq_init(lif, &new->cq, &new->intr, num_descs, cq_desc_size);
	if (err) {
		netdev_err(lif->netdev, "Cannot initialize completion queue\n");
		goto err_out_free_cq_info;
	}

	if (flags & IONIC_QCQ_F_NOTIFYQ) {
		int q_size;

		/* q & cq need to be contiguous in NotifyQ, so alloc it all in q
		 * and don't alloc qc.  We leave new->qc_size and new->qc_base
		 * as 0 to be sure we don't try to free it later.
		 */
		q_size = ALIGN(num_descs * desc_size, PAGE_SIZE);
		new->q_size = PAGE_SIZE + q_size +
			      ALIGN(num_descs * cq_desc_size, PAGE_SIZE);
		new->q_base = dma_alloc_coherent(dev, new->q_size,
						 &new->q_base_pa, GFP_KERNEL);
		if (!new->q_base) {
			netdev_err(lif->netdev, "Cannot allocate qcq DMA memory\n");
			err = -ENOMEM;
			goto err_out_free_cq_info;
		}
		q_base = PTR_ALIGN(new->q_base, PAGE_SIZE);
		q_base_pa = ALIGN(new->q_base_pa, PAGE_SIZE);
		ionic_q_map(&new->q, q_base, q_base_pa);

		cq_base = PTR_ALIGN(q_base + q_size, PAGE_SIZE);
		cq_base_pa = ALIGN(new->q_base_pa + q_size, PAGE_SIZE);
		ionic_cq_map(&new->cq, cq_base, cq_base_pa);
		ionic_cq_bind(&new->cq, &new->q);
	} else {
		/* regular DMA q descriptors */
		new->q_size = PAGE_SIZE + (num_descs * desc_size);
		new->q_base = dma_alloc_coherent(dev, new->q_size, &new->q_base_pa,
						 GFP_KERNEL);
		if (!new->q_base) {
			netdev_err(lif->netdev, "Cannot allocate queue DMA memory\n");
			err = -ENOMEM;
			goto err_out_free_cq_info;
		}
		q_base = PTR_ALIGN(new->q_base, PAGE_SIZE);
		q_base_pa = ALIGN(new->q_base_pa, PAGE_SIZE);
		ionic_q_map(&new->q, q_base, q_base_pa);

		if (flags & IONIC_QCQ_F_CMB_RINGS) {
			/* on-chip CMB q descriptors */
			new->cmb_q_size = num_descs * desc_size;
			new->cmb_order = order_base_2(new->cmb_q_size / PAGE_SIZE);

			err = ionic_get_cmb(lif, &new->cmb_pgid, &new->cmb_q_base_pa,
					    new->cmb_order);
			if (err) {
				netdev_err(lif->netdev,
					   "Cannot allocate queue order %d from cmb: err %d\n",
					   new->cmb_order, err);
				goto err_out_free_q;
			}

			new->cmb_q_base = ioremap_wc(new->cmb_q_base_pa, new->cmb_q_size);
			if (!new->cmb_q_base) {
				netdev_err(lif->netdev, "Cannot map queue from cmb\n");
				ionic_put_cmb(lif, new->cmb_pgid, new->cmb_order);
				err = -ENOMEM;
				goto err_out_free_q;
			}

			new->cmb_q_base_pa -= idev->phy_cmb_pages;
			ionic_q_cmb_map(&new->q, new->cmb_q_base, new->cmb_q_base_pa);
		}

		/* cq DMA descriptors */
		new->cq_size = PAGE_SIZE + (num_descs * cq_desc_size);
		new->cq_base = dma_alloc_coherent(dev, new->cq_size, &new->cq_base_pa,
						  GFP_KERNEL);
		if (!new->cq_base) {
			netdev_err(lif->netdev, "Cannot allocate cq DMA memory\n");
			err = -ENOMEM;
			goto err_out_free_q;
		}
		cq_base = PTR_ALIGN(new->cq_base, PAGE_SIZE);
		cq_base_pa = ALIGN(new->cq_base_pa, PAGE_SIZE);
		ionic_cq_map(&new->cq, cq_base, cq_base_pa);
		ionic_cq_bind(&new->cq, &new->q);
	}

	if (flags & IONIC_QCQ_F_SG) {
		new->sg_size = PAGE_SIZE + (num_descs * sg_desc_size);
		new->sg_base = dma_alloc_coherent(dev, new->sg_size, &new->sg_base_pa,
						  GFP_KERNEL);
		if (!new->sg_base) {
			netdev_err(lif->netdev, "Cannot allocate sg DMA memory\n");
			err = -ENOMEM;
			goto err_out_free_cq;
		}
		sg_base = PTR_ALIGN(new->sg_base, PAGE_SIZE);
		sg_base_pa = ALIGN(new->sg_base_pa, PAGE_SIZE);
		ionic_q_sg_map(&new->q, sg_base, sg_base_pa);
	}

	INIT_WORK(&new->dim.work, ionic_dim_work);
	new->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;

	*qcq = new;

	return 0;

err_out_free_cq:
	dma_free_coherent(dev, new->cq_size, new->cq_base, new->cq_base_pa);
err_out_free_q:
	if (new->cmb_q_base) {
		iounmap(new->cmb_q_base);
		ionic_put_cmb(lif, new->cmb_pgid, new->cmb_order);
	}
	dma_free_coherent(dev, new->q_size, new->q_base, new->q_base_pa);
err_out_free_cq_info:
	vfree(new->cq.info);
err_out_free_irq:
	if (flags & IONIC_QCQ_F_INTR) {
		devm_free_irq(dev, new->intr.vector, &new->napi);
		ionic_intr_free(lif->ionic, new->intr.index);
	}
err_out_free_q_info:
	vfree(new->q.info);
err_out_free_qcq:
	devm_kfree(dev, new);
err_out:
	dev_err(dev, "qcq alloc of %s%d failed %d\n", name, index, err);
	return err;
}

static int ionic_qcqs_alloc(struct ionic_lif *lif)
{
	struct device *dev = lif->ionic->dev;
	unsigned int flags;
	int err;

	flags = IONIC_QCQ_F_INTR;
	err = ionic_qcq_alloc(lif, IONIC_QTYPE_ADMINQ, 0, "admin", flags,
			      IONIC_ADMINQ_LENGTH,
			      sizeof(struct ionic_admin_cmd),
			      sizeof(struct ionic_admin_comp),
			      0, lif->kern_pid, &lif->adminqcq);
	if (err)
		return err;
	ionic_debugfs_add_qcq(lif, lif->adminqcq);

	if (lif->ionic->nnqs_per_lif) {
		flags = IONIC_QCQ_F_NOTIFYQ;
		err = ionic_qcq_alloc(lif, IONIC_QTYPE_NOTIFYQ, 0, "notifyq",
				      flags, IONIC_NOTIFYQ_LENGTH,
				      sizeof(struct ionic_notifyq_cmd),
				      sizeof(union ionic_notifyq_comp),
				      0, lif->kern_pid, &lif->notifyqcq);
		if (err)
			goto err_out;
		ionic_debugfs_add_qcq(lif, lif->notifyqcq);

		/* Let the notifyq ride on the adminq interrupt */
		ionic_link_qcq_interrupts(lif->adminqcq, lif->notifyqcq);
	}

	err = -ENOMEM;
	lif->txqcqs = devm_kcalloc(dev, lif->ionic->ntxqs_per_lif,
				   sizeof(*lif->txqcqs), GFP_KERNEL);
	if (!lif->txqcqs)
		goto err_out;
	lif->rxqcqs = devm_kcalloc(dev, lif->ionic->nrxqs_per_lif,
				   sizeof(*lif->rxqcqs), GFP_KERNEL);
	if (!lif->rxqcqs)
		goto err_out;

	lif->txqstats = devm_kcalloc(dev, lif->ionic->ntxqs_per_lif + 1,
				     sizeof(*lif->txqstats), GFP_KERNEL);
	if (!lif->txqstats)
		goto err_out;
	lif->rxqstats = devm_kcalloc(dev, lif->ionic->nrxqs_per_lif + 1,
				     sizeof(*lif->rxqstats), GFP_KERNEL);
	if (!lif->rxqstats)
		goto err_out;

	return 0;

err_out:
	ionic_qcqs_free(lif);
	return err;
}

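/* Reset the ring indices and zero the descriptor memory so a re-used
 * qcq starts from a clean state.
 */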
static void ionic_qcq_sanitize(struct ionic_qcq *qcq)
{
	qcq->q.tail_idx = 0;
	qcq->q.head_idx = 0;
	qcq->cq.tail_idx = 0;
	qcq->cq.done_color = 1;
	memset(qcq->q_base, 0, qcq->q_size);
	if (qcq->cmb_q_base)
		memset_io(qcq->cmb_q_base, 0, qcq->cmb_q_size);
	memset(qcq->cq_base, 0, qcq->cq_size);
	memset(qcq->sg_base, 0, qcq->sg_size);
}

static int ionic_lif_txq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
{
	struct device *dev = lif->ionic->dev;
	struct ionic_queue *q = &qcq->q;
	struct ionic_cq *cq = &qcq->cq;
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.q_init = {
			.opcode = IONIC_CMD_Q_INIT,
			.lif_index = cpu_to_le16(lif->index),
			.type = q->type,
			.ver = lif->qtype_info[q->type].version,
			.index = cpu_to_le32(q->index),
			.flags = cpu_to_le16(IONIC_QINIT_F_IRQ |
					     IONIC_QINIT_F_SG),
			.intr_index = cpu_to_le16(qcq->intr.index),
			.pid = cpu_to_le16(q->pid),
			.ring_size = ilog2(q->num_descs),
			.ring_base = cpu_to_le64(q->base_pa),
			.cq_ring_base = cpu_to_le64(cq->base_pa),
			.sg_ring_base = cpu_to_le64(q->sg_base_pa),
			.features = cpu_to_le64(q->features),
		},
	};
	int err;

	if (qcq->flags & IONIC_QCQ_F_CMB_RINGS) {
		ctx.cmd.q_init.flags |= cpu_to_le16(IONIC_QINIT_F_CMB);
		ctx.cmd.q_init.ring_base = cpu_to_le64(qcq->cmb_q_base_pa);
	}

	dev_dbg(dev, "txq_init.pid %d\n", ctx.cmd.q_init.pid);
	dev_dbg(dev, "txq_init.index %d\n", ctx.cmd.q_init.index);
	dev_dbg(dev, "txq_init.ring_base 0x%llx\n", ctx.cmd.q_init.ring_base);
	dev_dbg(dev, "txq_init.ring_size %d\n", ctx.cmd.q_init.ring_size);
	dev_dbg(dev, "txq_init.cq_ring_base 0x%llx\n", ctx.cmd.q_init.cq_ring_base);
	dev_dbg(dev, "txq_init.sg_ring_base 0x%llx\n", ctx.cmd.q_init.sg_ring_base);
	dev_dbg(dev, "txq_init.flags 0x%x\n", ctx.cmd.q_init.flags);
	dev_dbg(dev, "txq_init.ver %d\n", ctx.cmd.q_init.ver);
	dev_dbg(dev, "txq_init.intr_index %d\n", ctx.cmd.q_init.intr_index);

	ionic_qcq_sanitize(qcq);

	err = ionic_adminq_post_wait(lif, &ctx);
	if (err)
		return err;

	q->hw_type = ctx.comp.q_init.hw_type;
	q->hw_index = le32_to_cpu(ctx.comp.q_init.hw_index);
	q->dbval = IONIC_DBELL_QID(q->hw_index);

	dev_dbg(dev, "txq->hw_type %d\n", q->hw_type);
	dev_dbg(dev, "txq->hw_index %d\n", q->hw_index);

	q->dbell_deadline = IONIC_TX_DOORBELL_DEADLINE;
	q->dbell_jiffies = jiffies;

	if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state)) {
		netif_napi_add(lif->netdev, &qcq->napi, ionic_tx_napi);
		qcq->napi_qcq = qcq;
		timer_setup(&qcq->napi_deadline, ionic_napi_deadline, 0);
	}

	qcq->flags |= IONIC_QCQ_F_INITED;

	return 0;
}

static int ionic_lif_rxq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
{
	struct device *dev = lif->ionic->dev;
	struct ionic_queue *q = &qcq->q;
	struct ionic_cq *cq = &qcq->cq;
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.q_init = {
			.opcode = IONIC_CMD_Q_INIT,
			.lif_index = cpu_to_le16(lif->index),
			.type = q->type,
			.ver = lif->qtype_info[q->type].version,
			.index = cpu_to_le32(q->index),
			.flags = cpu_to_le16(IONIC_QINIT_F_IRQ |
					     IONIC_QINIT_F_SG),
			.intr_index = cpu_to_le16(cq->bound_intr->index),
			.pid = cpu_to_le16(q->pid),
			.ring_size = ilog2(q->num_descs),
			.ring_base = cpu_to_le64(q->base_pa),
			.cq_ring_base = cpu_to_le64(cq->base_pa),
			.sg_ring_base = cpu_to_le64(q->sg_base_pa),
			.features = cpu_to_le64(q->features),
		},
	};
	int err;

	if (qcq->flags & IONIC_QCQ_F_CMB_RINGS) {
		ctx.cmd.q_init.flags |= cpu_to_le16(IONIC_QINIT_F_CMB);
		ctx.cmd.q_init.ring_base = cpu_to_le64(qcq->cmb_q_base_pa);
	}

	dev_dbg(dev, "rxq_init.pid %d\n", ctx.cmd.q_init.pid);
	dev_dbg(dev, "rxq_init.index %d\n", ctx.cmd.q_init.index);
	dev_dbg(dev, "rxq_init.ring_base 0x%llx\n", ctx.cmd.q_init.ring_base);
	dev_dbg(dev, "rxq_init.ring_size %d\n", ctx.cmd.q_init.ring_size);
	dev_dbg(dev, "rxq_init.flags 0x%x\n", ctx.cmd.q_init.flags);
	dev_dbg(dev, "rxq_init.ver %d\n", ctx.cmd.q_init.ver);
	dev_dbg(dev, "rxq_init.intr_index %d\n", ctx.cmd.q_init.intr_index);

	ionic_qcq_sanitize(qcq);

	err = ionic_adminq_post_wait(lif, &ctx);
	if (err)
		return err;

	q->hw_type = ctx.comp.q_init.hw_type;
	q->hw_index = le32_to_cpu(ctx.comp.q_init.hw_index);
	q->dbval = IONIC_DBELL_QID(q->hw_index);

	dev_dbg(dev, "rxq->hw_type %d\n", q->hw_type);
	dev_dbg(dev, "rxq->hw_index %d\n", q->hw_index);

	q->dbell_deadline = IONIC_RX_MIN_DOORBELL_DEADLINE;
	q->dbell_jiffies = jiffies;

	if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state))
		netif_napi_add(lif->netdev, &qcq->napi, ionic_rx_napi);
	else
		netif_napi_add(lif->netdev, &qcq->napi, ionic_txrx_napi);

	qcq->napi_qcq = qcq;
	timer_setup(&qcq->napi_deadline, ionic_napi_deadline, 0);

	qcq->flags |= IONIC_QCQ_F_INITED;

	return 0;
}

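/* Create the dedicated PTP Tx queue, numbered one past the normal txq
 * range and riding on the adminq interrupt; if the netdev is already
 * running, init and enable it immediately.
 */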
int ionic_lif_create_hwstamp_txq(struct ionic_lif *lif)
{
	unsigned int num_desc, desc_sz, comp_sz, sg_desc_sz;
	unsigned int txq_i, flags;
	struct ionic_qcq *txq;
	u64 features;
	int err;

	if (lif->hwstamp_txq)
		return 0;

	features = IONIC_Q_F_2X_CQ_DESC | IONIC_TXQ_F_HWSTAMP;

	num_desc = IONIC_MIN_TXRX_DESC;
	desc_sz = sizeof(struct ionic_txq_desc);
	comp_sz = 2 * sizeof(struct ionic_txq_comp);

	if (lif->qtype_info[IONIC_QTYPE_TXQ].version >= 1 &&
	    lif->qtype_info[IONIC_QTYPE_TXQ].sg_desc_sz == sizeof(struct ionic_txq_sg_desc_v1))
		sg_desc_sz = sizeof(struct ionic_txq_sg_desc_v1);
	else
		sg_desc_sz = sizeof(struct ionic_txq_sg_desc);

	txq_i = lif->ionic->ntxqs_per_lif;
	flags = IONIC_QCQ_F_TX_STATS | IONIC_QCQ_F_SG;

	err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, txq_i, "hwstamp_tx", flags,
			      num_desc, desc_sz, comp_sz, sg_desc_sz,
			      lif->kern_pid, &txq);
	if (err)
		goto err_qcq_alloc;

	txq->q.features = features;

	ionic_link_qcq_interrupts(lif->adminqcq, txq);
	ionic_debugfs_add_qcq(lif, txq);

	lif->hwstamp_txq = txq;

	if (netif_running(lif->netdev)) {
		err = ionic_lif_txq_init(lif, txq);
		if (err)
			goto err_qcq_init;

		if (test_bit(IONIC_LIF_F_UP, lif->state)) {
			err = ionic_qcq_enable(txq);
			if (err)
				goto err_qcq_enable;
		}
	}

	return 0;

err_qcq_enable:
	ionic_lif_qcq_deinit(lif, txq);
err_qcq_init:
	lif->hwstamp_txq = NULL;
	ionic_debugfs_del_qcq(txq);
	ionic_qcq_free(lif, txq);
	devm_kfree(lif->ionic->dev, txq);
err_qcq_alloc:
	return err;
}

int ionic_lif_create_hwstamp_rxq(struct ionic_lif *lif)
{
	unsigned int num_desc, desc_sz, comp_sz, sg_desc_sz;
	unsigned int rxq_i, flags;
	struct ionic_qcq *rxq;
	u64 features;
	int err;

	if (lif->hwstamp_rxq)
		return 0;

	features = IONIC_Q_F_2X_CQ_DESC | IONIC_RXQ_F_HWSTAMP;

	num_desc = IONIC_MIN_TXRX_DESC;
	desc_sz = sizeof(struct ionic_rxq_desc);
	comp_sz = 2 * sizeof(struct ionic_rxq_comp);
	sg_desc_sz = sizeof(struct ionic_rxq_sg_desc);

	rxq_i = lif->ionic->nrxqs_per_lif;
	flags = IONIC_QCQ_F_RX_STATS | IONIC_QCQ_F_SG;

	err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, rxq_i, "hwstamp_rx", flags,
			      num_desc, desc_sz, comp_sz, sg_desc_sz,
			      lif->kern_pid, &rxq);
	if (err)
		goto err_qcq_alloc;

	rxq->q.features = features;

	ionic_link_qcq_interrupts(lif->adminqcq, rxq);
	ionic_debugfs_add_qcq(lif, rxq);

	lif->hwstamp_rxq = rxq;

	if (netif_running(lif->netdev)) {
		err = ionic_lif_rxq_init(lif, rxq);
		if (err)
			goto err_qcq_init;

		if (test_bit(IONIC_LIF_F_UP, lif->state)) {
			ionic_rx_fill(&rxq->q);
			err = ionic_qcq_enable(rxq);
			if (err)
				goto err_qcq_enable;
		}
	}

	return 0;

err_qcq_enable:
	ionic_lif_qcq_deinit(lif, rxq);
err_qcq_init:
	lif->hwstamp_rxq = NULL;
	ionic_debugfs_del_qcq(rxq);
	ionic_qcq_free(lif, rxq);
	devm_kfree(lif->ionic->dev, rxq);
err_qcq_alloc:
	return err;
}

int ionic_lif_config_hwstamp_rxq_all(struct ionic_lif *lif, bool rx_all)
{
	struct ionic_queue_params qparam;

	ionic_init_queue_params(lif, &qparam);

	if (rx_all)
		qparam.rxq_features = IONIC_Q_F_2X_CQ_DESC | IONIC_RXQ_F_HWSTAMP;
	else
		qparam.rxq_features = 0;

	/* if we're not running, just set the values and return */
	if (!netif_running(lif->netdev)) {
		lif->rxq_features = qparam.rxq_features;
		return 0;
	}

	return ionic_reconfigure_queues(lif, &qparam);
}

int ionic_lif_set_hwstamp_txmode(struct ionic_lif *lif, u16 txstamp_mode)
{
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.lif_setattr = {
			.opcode = IONIC_CMD_LIF_SETATTR,
			.index = cpu_to_le16(lif->index),
			.attr = IONIC_LIF_ATTR_TXSTAMP,
			.txstamp_mode = cpu_to_le16(txstamp_mode),
		},
	};

	return ionic_adminq_post_wait(lif, &ctx);
}

static void ionic_lif_del_hwstamp_rxfilt(struct ionic_lif *lif)
{
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.rx_filter_del = {
			.opcode = IONIC_CMD_RX_FILTER_DEL,
			.lif_index = cpu_to_le16(lif->index),
		},
	};
	struct ionic_rx_filter *f;
	u32 filter_id;
	int err;

	spin_lock_bh(&lif->rx_filters.lock);

	f = ionic_rx_filter_rxsteer(lif);
	if (!f) {
		spin_unlock_bh(&lif->rx_filters.lock);
		return;
	}

	filter_id = f->filter_id;
	ionic_rx_filter_free(lif, f);

	spin_unlock_bh(&lif->rx_filters.lock);

	netdev_dbg(lif->netdev, "rx_filter del RXSTEER (id %d)\n", filter_id);

	ctx.cmd.rx_filter_del.filter_id = cpu_to_le32(filter_id);

	err = ionic_adminq_post_wait(lif, &ctx);
	if (err && err != -EEXIST)
		netdev_dbg(lif->netdev, "failed to delete rx_filter RXSTEER (id %d)\n", filter_id);
}

static int ionic_lif_add_hwstamp_rxfilt(struct ionic_lif *lif, u64 pkt_class)
{
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.rx_filter_add = {
			.opcode = IONIC_CMD_RX_FILTER_ADD,
			.lif_index = cpu_to_le16(lif->index),
			.match = cpu_to_le16(IONIC_RX_FILTER_STEER_PKTCLASS),
			.pkt_class = cpu_to_le64(pkt_class),
		},
	};
	u8 qtype;
	u32 qid;
	int err;

	if (!lif->hwstamp_rxq)
		return -EINVAL;

	qtype = lif->hwstamp_rxq->q.type;
	ctx.cmd.rx_filter_add.qtype = qtype;

	qid = lif->hwstamp_rxq->q.index;
	ctx.cmd.rx_filter_add.qid = cpu_to_le32(qid);

	netdev_dbg(lif->netdev, "rx_filter add RXSTEER\n");
	err = ionic_adminq_post_wait(lif, &ctx);
	if (err && err != -EEXIST)
		return err;

	spin_lock_bh(&lif->rx_filters.lock);
	err = ionic_rx_filter_save(lif, 0, qid, 0, &ctx, IONIC_FILTER_STATE_SYNCED);
	spin_unlock_bh(&lif->rx_filters.lock);

	return err;
}

int ionic_lif_set_hwstamp_rxfilt(struct ionic_lif *lif, u64 pkt_class)
{
	ionic_lif_del_hwstamp_rxfilt(lif);

	if (!pkt_class)
		return 0;

	return ionic_lif_add_hwstamp_rxfilt(lif, pkt_class);
}

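/* Handle one NotifyQ event; returns false once the event id stops
 * advancing, meaning there are no new completions to process.
 */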
static bool ionic_notifyq_service(struct ionic_cq *cq,
				  struct ionic_cq_info *cq_info)
{
	union ionic_notifyq_comp *comp = cq_info->cq_desc;
	struct ionic_deferred_work *work;
	struct net_device *netdev;
	struct ionic_queue *q;
	struct ionic_lif *lif;
	u64 eid;

	q = cq->bound_q;
	lif = q->info[0].cb_arg;
	netdev = lif->netdev;
	eid = le64_to_cpu(comp->event.eid);

	/* Have we run out of new completions to process? */
	if ((s64)(eid - lif->last_eid) <= 0)
		return false;

	lif->last_eid = eid;

	dev_dbg(lif->ionic->dev, "notifyq event:\n");
	dynamic_hex_dump("event ", DUMP_PREFIX_OFFSET, 16, 1,
			 comp, sizeof(*comp), true);

	switch (le16_to_cpu(comp->event.ecode)) {
	case IONIC_EVENT_LINK_CHANGE:
		ionic_link_status_check_request(lif, CAN_NOT_SLEEP);
		break;
	case IONIC_EVENT_RESET:
		if (lif->ionic->idev.fw_status_ready &&
		    !test_bit(IONIC_LIF_F_FW_RESET, lif->state) &&
		    !test_and_set_bit(IONIC_LIF_F_FW_STOPPING, lif->state)) {
			work = kzalloc(sizeof(*work), GFP_ATOMIC);
			if (!work) {
				netdev_err(lif->netdev, "Reset event dropped\n");
				clear_bit(IONIC_LIF_F_FW_STOPPING, lif->state);
			} else {
				work->type = IONIC_DW_TYPE_LIF_RESET;
				ionic_lif_deferred_enqueue(&lif->deferred, work);
			}
		}
		break;
	default:
		netdev_warn(netdev, "Notifyq event ecode=%d eid=%lld\n",
			    comp->event.ecode, eid);
		break;
	}

	return true;
}

static bool ionic_adminq_service(struct ionic_cq *cq,
				 struct ionic_cq_info *cq_info)
{
	struct ionic_admin_comp *comp = cq_info->cq_desc;

	if (!color_match(comp->color, cq->done_color))
		return false;

	ionic_q_service(cq->bound_q, cq_info, le16_to_cpu(comp->comp_index));

	return true;
}

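/* NAPI poll shared by the adminq interrupt: services the notifyq,
 * adminq, and PTP hwstamp queues, re-arms interrupt credits, and
 * restarts the deadline timer if any idle queue may still need a
 * doorbell poke.
 */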
static int ionic_adminq_napi(struct napi_struct *napi, int budget)
{
	struct ionic_intr_info *intr = napi_to_cq(napi)->bound_intr;
	struct ionic_lif *lif = napi_to_cq(napi)->lif;
	struct ionic_dev *idev = &lif->ionic->idev;
	unsigned long irqflags;
	unsigned int flags = 0;
	bool resched = false;
	int rx_work = 0;
	int tx_work = 0;
	int n_work = 0;
	int a_work = 0;
	int work_done;
	int credits;

	if (lif->notifyqcq && lif->notifyqcq->flags & IONIC_QCQ_F_INITED)
		n_work = ionic_cq_service(&lif->notifyqcq->cq, budget,
					  ionic_notifyq_service, NULL, NULL);

	spin_lock_irqsave(&lif->adminq_lock, irqflags);
	if (lif->adminqcq && lif->adminqcq->flags & IONIC_QCQ_F_INITED)
		a_work = ionic_cq_service(&lif->adminqcq->cq, budget,
					  ionic_adminq_service, NULL, NULL);
	spin_unlock_irqrestore(&lif->adminq_lock, irqflags);

	if (lif->hwstamp_rxq)
		rx_work = ionic_cq_service(&lif->hwstamp_rxq->cq, budget,
					   ionic_rx_service, NULL, NULL);

	if (lif->hwstamp_txq)
		tx_work = ionic_cq_service(&lif->hwstamp_txq->cq, budget,
					   ionic_tx_service, NULL, NULL);

	work_done = max(max(n_work, a_work), max(rx_work, tx_work));
	if (work_done < budget && napi_complete_done(napi, work_done)) {
		flags |= IONIC_INTR_CRED_UNMASK;
		intr->rearm_count++;
	}

	if (work_done || flags) {
		flags |= IONIC_INTR_CRED_RESET_COALESCE;
		credits = n_work + a_work + rx_work + tx_work;
		ionic_intr_credits(idev->intr_ctrl, intr->index, credits, flags);
	}

	if (!a_work && ionic_adminq_poke_doorbell(&lif->adminqcq->q))
		resched = true;
	if (lif->hwstamp_rxq && !rx_work && ionic_rxq_poke_doorbell(&lif->hwstamp_rxq->q))
		resched = true;
	if (lif->hwstamp_txq && !tx_work && ionic_txq_poke_doorbell(&lif->hwstamp_txq->q))
		resched = true;
	if (resched)
		mod_timer(&lif->adminqcq->napi_deadline,
			  jiffies + IONIC_NAPI_DEADLINE);

	return work_done;
}

void ionic_get_stats64(struct net_device *netdev,
		       struct rtnl_link_stats64 *ns)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic_lif_stats *ls;

	memset(ns, 0, sizeof(*ns));
	ls = &lif->info->stats;

	ns->rx_packets = le64_to_cpu(ls->rx_ucast_packets) +
			 le64_to_cpu(ls->rx_mcast_packets) +
			 le64_to_cpu(ls->rx_bcast_packets);

	ns->tx_packets = le64_to_cpu(ls->tx_ucast_packets) +
			 le64_to_cpu(ls->tx_mcast_packets) +
			 le64_to_cpu(ls->tx_bcast_packets);

	ns->rx_bytes = le64_to_cpu(ls->rx_ucast_bytes) +
		       le64_to_cpu(ls->rx_mcast_bytes) +
		       le64_to_cpu(ls->rx_bcast_bytes);

	ns->tx_bytes = le64_to_cpu(ls->tx_ucast_bytes) +
		       le64_to_cpu(ls->tx_mcast_bytes) +
		       le64_to_cpu(ls->tx_bcast_bytes);

	ns->rx_dropped = le64_to_cpu(ls->rx_ucast_drop_packets) +
			 le64_to_cpu(ls->rx_mcast_drop_packets) +
			 le64_to_cpu(ls->rx_bcast_drop_packets);

	ns->tx_dropped = le64_to_cpu(ls->tx_ucast_drop_packets) +
			 le64_to_cpu(ls->tx_mcast_drop_packets) +
			 le64_to_cpu(ls->tx_bcast_drop_packets);

	ns->multicast = le64_to_cpu(ls->rx_mcast_packets);

	ns->rx_over_errors = le64_to_cpu(ls->rx_queue_empty);

	ns->rx_missed_errors = le64_to_cpu(ls->rx_dma_error) +
			       le64_to_cpu(ls->rx_queue_disabled) +
			       le64_to_cpu(ls->rx_desc_fetch_error) +
			       le64_to_cpu(ls->rx_desc_data_error);

	ns->tx_aborted_errors = le64_to_cpu(ls->tx_dma_error) +
				le64_to_cpu(ls->tx_queue_disabled) +
				le64_to_cpu(ls->tx_desc_fetch_error) +
				le64_to_cpu(ls->tx_desc_data_error);

	ns->rx_errors = ns->rx_over_errors +
			ns->rx_missed_errors;

	ns->tx_errors = ns->tx_aborted_errors;
}

static int ionic_addr_add(struct net_device *netdev, const u8 *addr)
{
	return ionic_lif_list_addr(netdev_priv(netdev), addr, ADD_ADDR);
}

static int ionic_addr_del(struct net_device *netdev, const u8 *addr)
{
	/* Don't delete our own address from the uc list */
	if (ether_addr_equal(addr, netdev->dev_addr))
		return 0;

	return ionic_lif_list_addr(netdev_priv(netdev), addr, DEL_ADDR);
}

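/* Compute the wanted rx_mode from the netdev flags and the filter
 * overflow state (overflow forces PROMISC/ALLMULTI), and push it to
 * the FW only when it differs from the current mode.
 */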
void ionic_lif_rx_mode(struct ionic_lif *lif)
{
	struct net_device *netdev = lif->netdev;
	unsigned int nfilters;
	unsigned int nd_flags;
	char buf[128];
	u16 rx_mode;
	int i;
#define REMAIN(__x) (sizeof(buf) - (__x))

	mutex_lock(&lif->config_lock);

	/* grab the flags once for local use */
	nd_flags = netdev->flags;

	rx_mode = IONIC_RX_MODE_F_UNICAST;
	rx_mode |= (nd_flags & IFF_MULTICAST) ? IONIC_RX_MODE_F_MULTICAST : 0;
	rx_mode |= (nd_flags & IFF_BROADCAST) ? IONIC_RX_MODE_F_BROADCAST : 0;
	rx_mode |= (nd_flags & IFF_PROMISC) ? IONIC_RX_MODE_F_PROMISC : 0;
	rx_mode |= (nd_flags & IFF_ALLMULTI) ? IONIC_RX_MODE_F_ALLMULTI : 0;

	/* sync the filters */
	ionic_rx_filter_sync(lif);

	/* check for overflow state
	 *    if so, we track that we overflowed and enable NIC PROMISC
	 *    else if the overflow is set and not needed
	 *       we remove our overflow flag and check the netdev flags
	 *       to see if we can disable NIC PROMISC
	 */
	nfilters = le32_to_cpu(lif->identity->eth.max_ucast_filters);

	if (((lif->nucast + lif->nmcast) >= nfilters) ||
	    (lif->max_vlans && lif->nvlans >= lif->max_vlans)) {
		rx_mode |= IONIC_RX_MODE_F_PROMISC;
		rx_mode |= IONIC_RX_MODE_F_ALLMULTI;
	} else {
		if (!(nd_flags & IFF_PROMISC))
			rx_mode &= ~IONIC_RX_MODE_F_PROMISC;
		if (!(nd_flags & IFF_ALLMULTI))
			rx_mode &= ~IONIC_RX_MODE_F_ALLMULTI;
	}

	i = scnprintf(buf, sizeof(buf), "rx_mode 0x%04x -> 0x%04x:",
		      lif->rx_mode, rx_mode);
	if (rx_mode & IONIC_RX_MODE_F_UNICAST)
		i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_UNICAST");
	if (rx_mode & IONIC_RX_MODE_F_MULTICAST)
		i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_MULTICAST");
	if (rx_mode & IONIC_RX_MODE_F_BROADCAST)
		i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_BROADCAST");
	if (rx_mode & IONIC_RX_MODE_F_PROMISC)
		i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_PROMISC");
	if (rx_mode & IONIC_RX_MODE_F_ALLMULTI)
		i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_ALLMULTI");
	if (rx_mode & IONIC_RX_MODE_F_RDMA_SNIFFER)
		i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_RDMA_SNIFFER");
	netdev_dbg(netdev, "lif%d %s\n", lif->index, buf);

	if (lif->rx_mode != rx_mode) {
		struct ionic_admin_ctx ctx = {
			.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
			.cmd.rx_mode_set = {
				.opcode = IONIC_CMD_RX_MODE_SET,
				.lif_index = cpu_to_le16(lif->index),
			},
		};
		int err;

		ctx.cmd.rx_mode_set.rx_mode = cpu_to_le16(rx_mode);
		err = ionic_adminq_post_wait(lif, &ctx);
		if (err)
			netdev_warn(netdev, "set rx_mode 0x%04x failed: %d\n",
				    rx_mode, err);
		else
			lif->rx_mode = rx_mode;
	}

	mutex_unlock(&lif->config_lock);
}

static void ionic_ndo_set_rx_mode(struct net_device *netdev)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic_deferred_work *work;

	/* Sync the kernel filter list with the driver filter list */
	__dev_uc_sync(netdev, ionic_addr_add, ionic_addr_del);
	__dev_mc_sync(netdev, ionic_addr_add, ionic_addr_del);

	/* Shove off the rest of the rxmode work to the work task
	 * which will include syncing the filters to the firmware.
	 */
	work = kzalloc(sizeof(*work), GFP_ATOMIC);
	if (!work) {
		netdev_err(lif->netdev, "rxmode change dropped\n");
		return;
	}
	work->type = IONIC_DW_TYPE_RX_MODE;
	netdev_dbg(lif->netdev, "deferred: rx_mode\n");
	ionic_lif_deferred_enqueue(&lif->deferred, work);
}

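/* Map netdev feature bits to the device's feature bits. */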
static __le64 ionic_netdev_features_to_nic(netdev_features_t features)
{
	u64 wanted = 0;

	if (features & NETIF_F_HW_VLAN_CTAG_TX)
		wanted |= IONIC_ETH_HW_VLAN_TX_TAG;
	if (features & NETIF_F_HW_VLAN_CTAG_RX)
		wanted |= IONIC_ETH_HW_VLAN_RX_STRIP;
	if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
		wanted |= IONIC_ETH_HW_VLAN_RX_FILTER;
	if (features & NETIF_F_RXHASH)
		wanted |= IONIC_ETH_HW_RX_HASH;
	if (features & NETIF_F_RXCSUM)
		wanted |= IONIC_ETH_HW_RX_CSUM;
	if (features & NETIF_F_SG)
		wanted |= IONIC_ETH_HW_TX_SG;
	if (features & NETIF_F_HW_CSUM)
		wanted |= IONIC_ETH_HW_TX_CSUM;
	if (features & NETIF_F_TSO)
		wanted |= IONIC_ETH_HW_TSO;
	if (features & NETIF_F_TSO6)
		wanted |= IONIC_ETH_HW_TSO_IPV6;
	if (features & NETIF_F_TSO_ECN)
		wanted |= IONIC_ETH_HW_TSO_ECN;
	if (features & NETIF_F_GSO_GRE)
		wanted |= IONIC_ETH_HW_TSO_GRE;
	if (features & NETIF_F_GSO_GRE_CSUM)
		wanted |= IONIC_ETH_HW_TSO_GRE_CSUM;
	if (features & NETIF_F_GSO_IPXIP4)
		wanted |= IONIC_ETH_HW_TSO_IPXIP4;
	if (features & NETIF_F_GSO_IPXIP6)
		wanted |= IONIC_ETH_HW_TSO_IPXIP6;
	if (features & NETIF_F_GSO_UDP_TUNNEL)
		wanted |= IONIC_ETH_HW_TSO_UDP;
	if (features & NETIF_F_GSO_UDP_TUNNEL_CSUM)
		wanted |= IONIC_ETH_HW_TSO_UDP_CSUM;

	return cpu_to_le64(wanted);
}

static int ionic_set_nic_features(struct ionic_lif *lif,
				  netdev_features_t features)
{
	struct device *dev = lif->ionic->dev;
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.lif_setattr = {
			.opcode = IONIC_CMD_LIF_SETATTR,
			.index = cpu_to_le16(lif->index),
			.attr = IONIC_LIF_ATTR_FEATURES,
		},
	};
	u64 vlan_flags = IONIC_ETH_HW_VLAN_TX_TAG |
			 IONIC_ETH_HW_VLAN_RX_STRIP |
			 IONIC_ETH_HW_VLAN_RX_FILTER;
	u64 old_hw_features;
	int err;

	ctx.cmd.lif_setattr.features = ionic_netdev_features_to_nic(features);

	if (lif->phc)
		ctx.cmd.lif_setattr.features |= cpu_to_le64(IONIC_ETH_HW_TIMESTAMP);

	err = ionic_adminq_post_wait(lif, &ctx);
	if (err)
		return err;

	old_hw_features = lif->hw_features;
	lif->hw_features = le64_to_cpu(ctx.cmd.lif_setattr.features &
				       ctx.comp.lif_setattr.features);

	if ((old_hw_features ^ lif->hw_features) & IONIC_ETH_HW_RX_HASH)
		ionic_lif_rss_config(lif, lif->rss_types, NULL, NULL);

	if ((vlan_flags & le64_to_cpu(ctx.cmd.lif_setattr.features)) &&
	    !(vlan_flags & le64_to_cpu(ctx.comp.lif_setattr.features)))
		dev_info_once(lif->ionic->dev, "NIC is not supporting vlan offload, likely in SmartNIC mode\n");

	if (lif->hw_features & IONIC_ETH_HW_VLAN_TX_TAG)
		dev_dbg(dev, "feature ETH_HW_VLAN_TX_TAG\n");
	if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_STRIP)
		dev_dbg(dev, "feature ETH_HW_VLAN_RX_STRIP\n");
	if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_FILTER)
		dev_dbg(dev, "feature ETH_HW_VLAN_RX_FILTER\n");
	if (lif->hw_features & IONIC_ETH_HW_RX_HASH)
		dev_dbg(dev, "feature ETH_HW_RX_HASH\n");
	if (lif->hw_features & IONIC_ETH_HW_TX_SG)
		dev_dbg(dev, "feature ETH_HW_TX_SG\n");
	if (lif->hw_features & IONIC_ETH_HW_TX_CSUM)
		dev_dbg(dev, "feature ETH_HW_TX_CSUM\n");
	if (lif->hw_features & IONIC_ETH_HW_RX_CSUM)
		dev_dbg(dev, "feature ETH_HW_RX_CSUM\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO)
		dev_dbg(dev, "feature ETH_HW_TSO\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_IPV6)
		dev_dbg(dev, "feature ETH_HW_TSO_IPV6\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_ECN)
		dev_dbg(dev, "feature ETH_HW_TSO_ECN\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_GRE)
		dev_dbg(dev, "feature ETH_HW_TSO_GRE\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_GRE_CSUM)
		dev_dbg(dev, "feature ETH_HW_TSO_GRE_CSUM\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP4)
		dev_dbg(dev, "feature ETH_HW_TSO_IPXIP4\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP6)
		dev_dbg(dev, "feature ETH_HW_TSO_IPXIP6\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_UDP)
		dev_dbg(dev, "feature ETH_HW_TSO_UDP\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_UDP_CSUM)
		dev_dbg(dev, "feature ETH_HW_TSO_UDP_CSUM\n");
	if (lif->hw_features & IONIC_ETH_HW_TIMESTAMP)
		dev_dbg(dev, "feature ETH_HW_TIMESTAMP\n");

	return 0;
}

static int ionic_init_nic_features(struct ionic_lif *lif)
{
	struct net_device *netdev = lif->netdev;
	netdev_features_t features;
	int err;

	/* set up what we expect to support by default */
	features = NETIF_F_HW_VLAN_CTAG_TX |
		   NETIF_F_HW_VLAN_CTAG_RX |
		   NETIF_F_HW_VLAN_CTAG_FILTER |
		   NETIF_F_SG |
		   NETIF_F_HW_CSUM |
		   NETIF_F_RXCSUM |
		   NETIF_F_TSO |
		   NETIF_F_TSO6 |
		   NETIF_F_TSO_ECN |
		   NETIF_F_GSO_GRE |
		   NETIF_F_GSO_GRE_CSUM |
		   NETIF_F_GSO_IPXIP4 |
		   NETIF_F_GSO_IPXIP6 |
		   NETIF_F_GSO_UDP_TUNNEL |
		   NETIF_F_GSO_UDP_TUNNEL_CSUM;

	if (lif->nxqs > 1)
		features |= NETIF_F_RXHASH;

	err = ionic_set_nic_features(lif, features);
	if (err)
		return err;

	/* tell the netdev what we actually can support */
	netdev->features |= NETIF_F_HIGHDMA;

	if (lif->hw_features & IONIC_ETH_HW_VLAN_TX_TAG)
		netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
	if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_STRIP)
		netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
	if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_FILTER)
		netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
	if (lif->hw_features & IONIC_ETH_HW_RX_HASH)
		netdev->hw_features |= NETIF_F_RXHASH;
	if (lif->hw_features & IONIC_ETH_HW_TX_SG)
		netdev->hw_features |= NETIF_F_SG;

	if (lif->hw_features & IONIC_ETH_HW_TX_CSUM)
		netdev->hw_enc_features |= NETIF_F_HW_CSUM;
	if (lif->hw_features & IONIC_ETH_HW_RX_CSUM)
		netdev->hw_enc_features |= NETIF_F_RXCSUM;
	if (lif->hw_features & IONIC_ETH_HW_TSO)
		netdev->hw_enc_features |= NETIF_F_TSO;
	if (lif->hw_features & IONIC_ETH_HW_TSO_IPV6)
		netdev->hw_enc_features |= NETIF_F_TSO6;
	if (lif->hw_features & IONIC_ETH_HW_TSO_ECN)
		netdev->hw_enc_features |= NETIF_F_TSO_ECN;
	if (lif->hw_features & IONIC_ETH_HW_TSO_GRE)
		netdev->hw_enc_features |= NETIF_F_GSO_GRE;
	if (lif->hw_features & IONIC_ETH_HW_TSO_GRE_CSUM)
		netdev->hw_enc_features |= NETIF_F_GSO_GRE_CSUM;
	if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP4)
		netdev->hw_enc_features |= NETIF_F_GSO_IPXIP4;
	if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP6)
		netdev->hw_enc_features |= NETIF_F_GSO_IPXIP6;
	if (lif->hw_features & IONIC_ETH_HW_TSO_UDP)
		netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL;
	if (lif->hw_features & IONIC_ETH_HW_TSO_UDP_CSUM)
		netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;

	netdev->hw_features |= netdev->hw_enc_features;
	netdev->features |= netdev->hw_features;
	netdev->vlan_features |= netdev->features & ~NETIF_F_VLAN_FEATURES;

	netdev->priv_flags |= IFF_UNICAST_FLT |
			      IFF_LIVE_ADDR_CHANGE;

	return 0;
}

static int ionic_set_features(struct net_device *netdev,
			      netdev_features_t features)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	int err;

	netdev_dbg(netdev, "%s: lif->features=0x%08llx new_features=0x%08llx\n",
		   __func__, (u64)lif->netdev->features, (u64)features);

	err = ionic_set_nic_features(lif, features);

	return err;
}

static int ionic_set_attr_mac(struct ionic_lif *lif, u8 *mac)
{
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.lif_setattr = {
			.opcode = IONIC_CMD_LIF_SETATTR,
			.index = cpu_to_le16(lif->index),
			.attr = IONIC_LIF_ATTR_MAC,
		},
	};

	ether_addr_copy(ctx.cmd.lif_setattr.mac, mac);
	return ionic_adminq_post_wait(lif, &ctx);
}

static int ionic_get_attr_mac(struct ionic_lif *lif, u8 *mac_addr)
{
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.lif_getattr = {
			.opcode = IONIC_CMD_LIF_GETATTR,
			.index = cpu_to_le16(lif->index),
			.attr = IONIC_LIF_ATTR_MAC,
		},
	};
	int err;

	err = ionic_adminq_post_wait(lif, &ctx);
	if (err)
		return err;

	ether_addr_copy(mac_addr, ctx.comp.lif_getattr.mac);
	return 0;
}

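/* Set the mac address attribute and read it back: older firmware can
 * silently ignore the set, so a mismatch is reported with a return of
 * 1 rather than an error.
 */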
ionic_program_mac(struct ionic_lif * lif,u8 * mac)1695 static int ionic_program_mac(struct ionic_lif *lif, u8 *mac)
1696 {
1697 u8 get_mac[ETH_ALEN];
1698 int err;
1699
1700 err = ionic_set_attr_mac(lif, mac);
1701 if (err)
1702 return err;
1703
1704 err = ionic_get_attr_mac(lif, get_mac);
1705 if (err)
1706 return err;
1707
1708 /* To deal with older firmware that silently ignores the set attr mac:
1709 * doesn't actually change the mac and doesn't return an error, so we
1710 * do the get attr to verify whether or not the set actually happened
1711 */
1712 if (!ether_addr_equal(get_mac, mac))
1713 return 1;
1714
1715 return 0;
1716 }
1717
ionic_set_mac_address(struct net_device * netdev,void * sa)1718 static int ionic_set_mac_address(struct net_device *netdev, void *sa)
1719 {
1720 struct ionic_lif *lif = netdev_priv(netdev);
1721 struct sockaddr *addr = sa;
1722 u8 *mac;
1723 int err;
1724
1725 mac = (u8 *)addr->sa_data;
1726 if (ether_addr_equal(netdev->dev_addr, mac))
1727 return 0;
1728
1729 err = ionic_program_mac(lif, mac);
1730 if (err < 0)
1731 return err;
1732
1733 if (err > 0)
1734 netdev_dbg(netdev, "%s: SET and GET ATTR Mac are not equal-due to old FW running\n",
1735 __func__);
1736
1737 err = eth_prepare_mac_addr_change(netdev, addr);
1738 if (err)
1739 return err;
1740
1741 if (!is_zero_ether_addr(netdev->dev_addr)) {
1742 netdev_info(netdev, "deleting mac addr %pM\n",
1743 netdev->dev_addr);
1744 ionic_lif_addr_del(netdev_priv(netdev), netdev->dev_addr);
1745 }
1746
1747 eth_commit_mac_addr_change(netdev, addr);
1748 netdev_info(netdev, "updating mac addr %pM\n", mac);
1749
1750 return ionic_lif_addr_add(netdev_priv(netdev), mac);
1751 }
1752
ionic_stop_queues_reconfig(struct ionic_lif * lif)1753 void ionic_stop_queues_reconfig(struct ionic_lif *lif)
1754 {
1755 /* Stop and clean the queues before reconfiguration */
1756 netif_device_detach(lif->netdev);
1757 ionic_stop_queues(lif);
1758 ionic_txrx_deinit(lif);
1759 }
1760
1761 static int ionic_start_queues_reconfig(struct ionic_lif *lif)
1762 {
1763 int err;
1764
1765 /* Re-init the queues after reconfiguration */
1766
1767 /* The only way txrx_init can fail here is if communication
1768 * with FW is suddenly broken. There's not much we can do
1769 * at this point - error messages have already been printed,
1770 * so we can continue on and the user can eventually do a
1771 * DOWN and UP to try to reset and clear the issue.
1772 */
1773 err = ionic_txrx_init(lif);
1774 ionic_link_status_check_request(lif, CAN_NOT_SLEEP);
1775 netif_device_attach(lif->netdev);
1776
1777 return err;
1778 }
1779
1780 static int ionic_change_mtu(struct net_device *netdev, int new_mtu)
1781 {
1782 struct ionic_lif *lif = netdev_priv(netdev);
1783 struct ionic_admin_ctx ctx = {
1784 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
1785 .cmd.lif_setattr = {
1786 .opcode = IONIC_CMD_LIF_SETATTR,
1787 .index = cpu_to_le16(lif->index),
1788 .attr = IONIC_LIF_ATTR_MTU,
1789 .mtu = cpu_to_le32(new_mtu),
1790 },
1791 };
1792 int err;
1793
1794 err = ionic_adminq_post_wait(lif, &ctx);
1795 if (err)
1796 return err;
1797
1798 /* if we're not running, nothing more to do */
1799 if (!netif_running(netdev)) {
1800 netdev->mtu = new_mtu;
1801 return 0;
1802 }
1803
1804 mutex_lock(&lif->queue_lock);
1805 ionic_stop_queues_reconfig(lif);
1806 netdev->mtu = new_mtu;
1807 err = ionic_start_queues_reconfig(lif);
1808 mutex_unlock(&lif->queue_lock);
1809
1810 return err;
1811 }
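
/* Usage sketch (hypothetical interface name): this is the
 * .ndo_change_mtu hook, reached from userspace via e.g.
 *
 *	ip link set dev eth0 mtu 9000
 *
 * The new MTU is pushed to the firmware first; only if the device
 * accepts it and the interface is running are the queues torn down and
 * rebuilt under queue_lock so rx buffers match the new size.
 */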
1812
1813 static void ionic_tx_timeout_work(struct work_struct *ws)
1814 {
1815 struct ionic_lif *lif = container_of(ws, struct ionic_lif, tx_timeout_work);
1816 int err;
1817
1818 if (test_bit(IONIC_LIF_F_FW_RESET, lif->state))
1819 return;
1820
1821 /* if we were stopped before this scheduled job was launched,
1822 * don't bother the queues as they are already stopped.
1823 */
1824 if (!netif_running(lif->netdev))
1825 return;
1826
1827 mutex_lock(&lif->queue_lock);
1828 ionic_stop_queues_reconfig(lif);
1829 err = ionic_start_queues_reconfig(lif);
1830 mutex_unlock(&lif->queue_lock);
1831
1832 if (err)
1833 dev_err(lif->ionic->dev, "%s: Restarting queues failed\n", __func__);
1834 }
1835
1836 static void ionic_tx_timeout(struct net_device *netdev, unsigned int txqueue)
1837 {
1838 struct ionic_lif *lif = netdev_priv(netdev);
1839
1840 netdev_info(lif->netdev, "Tx Timeout triggered - txq %d\n", txqueue);
1841 schedule_work(&lif->tx_timeout_work);
1842 }
1843
1844 static int ionic_vlan_rx_add_vid(struct net_device *netdev, __be16 proto,
1845 u16 vid)
1846 {
1847 struct ionic_lif *lif = netdev_priv(netdev);
1848 int err;
1849
1850 err = ionic_lif_vlan_add(lif, vid);
1851 if (err)
1852 return err;
1853
1854 ionic_lif_rx_mode(lif);
1855
1856 return 0;
1857 }
1858
1859 static int ionic_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto,
1860 u16 vid)
1861 {
1862 struct ionic_lif *lif = netdev_priv(netdev);
1863 int err;
1864
1865 err = ionic_lif_vlan_del(lif, vid);
1866 if (err)
1867 return err;
1868
1869 ionic_lif_rx_mode(lif);
1870
1871 return 0;
1872 }
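
/* Usage sketch (hypothetical names): these two hooks service the 8021q
 * VLAN filter, e.g.
 *
 *	ip link add link eth0 name eth0.100 type vlan id 100	(add_vid)
 *	ip link del eth0.100					(kill_vid)
 *
 * Each refreshes the rx mode afterward so the filter change takes
 * effect right away.
 */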
1873
1874 int ionic_lif_rss_config(struct ionic_lif *lif, const u16 types,
1875 const u8 *key, const u32 *indir)
1876 {
1877 struct ionic_admin_ctx ctx = {
1878 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
1879 .cmd.lif_setattr = {
1880 .opcode = IONIC_CMD_LIF_SETATTR,
1881 .attr = IONIC_LIF_ATTR_RSS,
1882 .rss.addr = cpu_to_le64(lif->rss_ind_tbl_pa),
1883 },
1884 };
1885 unsigned int i, tbl_sz;
1886
1887 if (lif->hw_features & IONIC_ETH_HW_RX_HASH) {
1888 lif->rss_types = types;
1889 ctx.cmd.lif_setattr.rss.types = cpu_to_le16(types);
1890 }
1891
1892 if (key)
1893 memcpy(lif->rss_hash_key, key, IONIC_RSS_HASH_KEY_SIZE);
1894
1895 if (indir) {
1896 tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz);
1897 for (i = 0; i < tbl_sz; i++)
1898 lif->rss_ind_tbl[i] = indir[i];
1899 }
1900
1901 memcpy(ctx.cmd.lif_setattr.rss.key, lif->rss_hash_key,
1902 IONIC_RSS_HASH_KEY_SIZE);
1903
1904 return ionic_adminq_post_wait(lif, &ctx);
1905 }
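
/* Usage sketch (hypothetical interface name): ionic_lif_rss_config() is
 * reached from the ethtool rxfh ops, e.g.
 *
 *	ethtool -X eth0 equal 8		(rewrite the indirection table)
 *	ethtool -X eth0 hkey <key>	(rewrite the hash key)
 *
 * Only the pieces the caller supplies (types, key, indir) are changed;
 * everything else is re-sent from the copies cached in the lif so the
 * device always gets a complete RSS configuration.
 */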
1906
1907 static int ionic_lif_rss_init(struct ionic_lif *lif)
1908 {
1909 unsigned int tbl_sz;
1910 unsigned int i;
1911
1912 lif->rss_types = IONIC_RSS_TYPE_IPV4 |
1913 IONIC_RSS_TYPE_IPV4_TCP |
1914 IONIC_RSS_TYPE_IPV4_UDP |
1915 IONIC_RSS_TYPE_IPV6 |
1916 IONIC_RSS_TYPE_IPV6_TCP |
1917 IONIC_RSS_TYPE_IPV6_UDP;
1918
1919 /* Fill indirection table with 'default' values */
1920 tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz);
1921 for (i = 0; i < tbl_sz; i++)
1922 lif->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, lif->nxqs);
1923
1924 return ionic_lif_rss_config(lif, lif->rss_types, NULL, NULL);
1925 }
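
/* Worked example: ethtool_rxfh_indir_default(i, n) is simply i % n, so
 * with e.g. a 128-entry table and nxqs = 4 the default table is
 *
 *	0 1 2 3 0 1 2 3 ... 0 1 2 3
 *
 * spreading flows round-robin over the four rx queues until the user
 * installs a custom table.
 */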
1926
1927 static void ionic_lif_rss_deinit(struct ionic_lif *lif)
1928 {
1929 int tbl_sz;
1930
1931 tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz);
1932 memset(lif->rss_ind_tbl, 0, tbl_sz);
1933 memset(lif->rss_hash_key, 0, IONIC_RSS_HASH_KEY_SIZE);
1934
1935 ionic_lif_rss_config(lif, 0x0, NULL, NULL);
1936 }
1937
1938 static void ionic_lif_quiesce(struct ionic_lif *lif)
1939 {
1940 struct ionic_admin_ctx ctx = {
1941 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
1942 .cmd.lif_setattr = {
1943 .opcode = IONIC_CMD_LIF_SETATTR,
1944 .index = cpu_to_le16(lif->index),
1945 .attr = IONIC_LIF_ATTR_STATE,
1946 .state = IONIC_LIF_QUIESCE,
1947 },
1948 };
1949 int err;
1950
1951 err = ionic_adminq_post_wait(lif, &ctx);
1952 if (err)
1953 netdev_dbg(lif->netdev, "lif quiesce failed %d\n", err);
1954 }
1955
1956 static void ionic_txrx_disable(struct ionic_lif *lif)
1957 {
1958 unsigned int i;
1959 int err = 0;
1960
1961 if (lif->txqcqs) {
1962 for (i = 0; i < lif->nxqs; i++)
1963 err = ionic_qcq_disable(lif, lif->txqcqs[i], err);
1964 }
1965
1966 if (lif->hwstamp_txq)
1967 err = ionic_qcq_disable(lif, lif->hwstamp_txq, err);
1968
1969 if (lif->rxqcqs) {
1970 for (i = 0; i < lif->nxqs; i++)
1971 err = ionic_qcq_disable(lif, lif->rxqcqs[i], err);
1972 }
1973
1974 if (lif->hwstamp_rxq)
1975 err = ionic_qcq_disable(lif, lif->hwstamp_rxq, err);
1976
1977 ionic_lif_quiesce(lif);
1978 }
1979
1980 static void ionic_txrx_deinit(struct ionic_lif *lif)
1981 {
1982 unsigned int i;
1983
1984 if (lif->txqcqs) {
1985 for (i = 0; i < lif->nxqs && lif->txqcqs[i]; i++) {
1986 ionic_lif_qcq_deinit(lif, lif->txqcqs[i]);
1987 ionic_tx_flush(&lif->txqcqs[i]->cq);
1988 ionic_tx_empty(&lif->txqcqs[i]->q);
1989 }
1990 }
1991
1992 if (lif->rxqcqs) {
1993 for (i = 0; i < lif->nxqs && lif->rxqcqs[i]; i++) {
1994 ionic_lif_qcq_deinit(lif, lif->rxqcqs[i]);
1995 ionic_rx_empty(&lif->rxqcqs[i]->q);
1996 }
1997 }
1998 lif->rx_mode = 0;
1999
2000 if (lif->hwstamp_txq) {
2001 ionic_lif_qcq_deinit(lif, lif->hwstamp_txq);
2002 ionic_tx_flush(&lif->hwstamp_txq->cq);
2003 ionic_tx_empty(&lif->hwstamp_txq->q);
2004 }
2005
2006 if (lif->hwstamp_rxq) {
2007 ionic_lif_qcq_deinit(lif, lif->hwstamp_rxq);
2008 ionic_rx_empty(&lif->hwstamp_rxq->q);
2009 }
2010 }
2011
2012 void ionic_txrx_free(struct ionic_lif *lif)
2013 {
2014 unsigned int i;
2015
2016 if (lif->txqcqs) {
2017 for (i = 0; i < lif->ionic->ntxqs_per_lif && lif->txqcqs[i]; i++) {
2018 ionic_qcq_free(lif, lif->txqcqs[i]);
2019 devm_kfree(lif->ionic->dev, lif->txqcqs[i]);
2020 lif->txqcqs[i] = NULL;
2021 }
2022 }
2023
2024 if (lif->rxqcqs) {
2025 for (i = 0; i < lif->ionic->nrxqs_per_lif && lif->rxqcqs[i]; i++) {
2026 ionic_qcq_free(lif, lif->rxqcqs[i]);
2027 devm_kfree(lif->ionic->dev, lif->rxqcqs[i]);
2028 lif->rxqcqs[i] = NULL;
2029 }
2030 }
2031
2032 if (lif->hwstamp_txq) {
2033 ionic_qcq_free(lif, lif->hwstamp_txq);
2034 devm_kfree(lif->ionic->dev, lif->hwstamp_txq);
2035 lif->hwstamp_txq = NULL;
2036 }
2037
2038 if (lif->hwstamp_rxq) {
2039 ionic_qcq_free(lif, lif->hwstamp_rxq);
2040 devm_kfree(lif->ionic->dev, lif->hwstamp_rxq);
2041 lif->hwstamp_rxq = NULL;
2042 }
2043 }
2044
2045 static int ionic_txrx_alloc(struct ionic_lif *lif)
2046 {
2047 unsigned int comp_sz, desc_sz, num_desc, sg_desc_sz;
2048 unsigned int flags, i;
2049 int err = 0;
2050
2051 num_desc = lif->ntxq_descs;
2052 desc_sz = sizeof(struct ionic_txq_desc);
2053 comp_sz = sizeof(struct ionic_txq_comp);
2054
2055 if (lif->qtype_info[IONIC_QTYPE_TXQ].version >= 1 &&
2056 lif->qtype_info[IONIC_QTYPE_TXQ].sg_desc_sz ==
2057 sizeof(struct ionic_txq_sg_desc_v1))
2058 sg_desc_sz = sizeof(struct ionic_txq_sg_desc_v1);
2059 else
2060 sg_desc_sz = sizeof(struct ionic_txq_sg_desc);
2061
2062 flags = IONIC_QCQ_F_TX_STATS | IONIC_QCQ_F_SG;
2063
2064 if (test_bit(IONIC_LIF_F_CMB_TX_RINGS, lif->state))
2065 flags |= IONIC_QCQ_F_CMB_RINGS;
2066
2067 if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state))
2068 flags |= IONIC_QCQ_F_INTR;
2069
2070 for (i = 0; i < lif->nxqs; i++) {
2071 err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, i, "tx", flags,
2072 num_desc, desc_sz, comp_sz, sg_desc_sz,
2073 lif->kern_pid, &lif->txqcqs[i]);
2074 if (err)
2075 goto err_out;
2076
2077 if (flags & IONIC_QCQ_F_INTR) {
2078 ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
2079 lif->txqcqs[i]->intr.index,
2080 lif->tx_coalesce_hw);
2081 if (test_bit(IONIC_LIF_F_TX_DIM_INTR, lif->state))
2082 lif->txqcqs[i]->intr.dim_coal_hw = lif->tx_coalesce_hw;
2083 }
2084
2085 ionic_debugfs_add_qcq(lif, lif->txqcqs[i]);
2086 }
2087
2088 flags = IONIC_QCQ_F_RX_STATS | IONIC_QCQ_F_SG | IONIC_QCQ_F_INTR;
2089
2090 if (test_bit(IONIC_LIF_F_CMB_RX_RINGS, lif->state))
2091 flags |= IONIC_QCQ_F_CMB_RINGS;
2092
2093 num_desc = lif->nrxq_descs;
2094 desc_sz = sizeof(struct ionic_rxq_desc);
2095 comp_sz = sizeof(struct ionic_rxq_comp);
2096 sg_desc_sz = sizeof(struct ionic_rxq_sg_desc);
2097
2098 if (lif->rxq_features & IONIC_Q_F_2X_CQ_DESC)
2099 comp_sz *= 2;
2100
2101 for (i = 0; i < lif->nxqs; i++) {
2102 err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, i, "rx", flags,
2103 num_desc, desc_sz, comp_sz, sg_desc_sz,
2104 lif->kern_pid, &lif->rxqcqs[i]);
2105 if (err)
2106 goto err_out;
2107
2108 lif->rxqcqs[i]->q.features = lif->rxq_features;
2109
2110 ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
2111 lif->rxqcqs[i]->intr.index,
2112 lif->rx_coalesce_hw);
2113 if (test_bit(IONIC_LIF_F_RX_DIM_INTR, lif->state))
2114 lif->rxqcqs[i]->intr.dim_coal_hw = lif->rx_coalesce_hw;
2115
2116 if (!test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state))
2117 ionic_link_qcq_interrupts(lif->rxqcqs[i],
2118 lif->txqcqs[i]);
2119
2120 ionic_debugfs_add_qcq(lif, lif->rxqcqs[i]);
2121 }
2122
2123 return 0;
2124
2125 err_out:
2126 ionic_txrx_free(lif);
2127
2128 return err;
2129 }
2130
2131 static int ionic_txrx_init(struct ionic_lif *lif)
2132 {
2133 unsigned int i;
2134 int err;
2135
2136 for (i = 0; i < lif->nxqs; i++) {
2137 err = ionic_lif_txq_init(lif, lif->txqcqs[i]);
2138 if (err)
2139 goto err_out;
2140
2141 err = ionic_lif_rxq_init(lif, lif->rxqcqs[i]);
2142 if (err) {
2143 ionic_lif_qcq_deinit(lif, lif->txqcqs[i]);
2144 goto err_out;
2145 }
2146 }
2147
2148 if (lif->netdev->features & NETIF_F_RXHASH)
2149 ionic_lif_rss_init(lif);
2150
2151 ionic_lif_rx_mode(lif);
2152
2153 return 0;
2154
2155 err_out:
2156 while (i--) {
2157 ionic_lif_qcq_deinit(lif, lif->txqcqs[i]);
2158 ionic_lif_qcq_deinit(lif, lif->rxqcqs[i]);
2159 }
2160
2161 return err;
2162 }
2163
2164 static int ionic_txrx_enable(struct ionic_lif *lif)
2165 {
2166 int derr = 0;
2167 int i, err;
2168
2169 for (i = 0; i < lif->nxqs; i++) {
2170 if (!(lif->rxqcqs[i] && lif->txqcqs[i])) {
2171 dev_err(lif->ionic->dev, "%s: bad qcq %d\n", __func__, i);
2172 err = -ENXIO;
2173 goto err_out;
2174 }
2175
2176 ionic_rx_fill(&lif->rxqcqs[i]->q);
2177 err = ionic_qcq_enable(lif->rxqcqs[i]);
2178 if (err)
2179 goto err_out;
2180
2181 err = ionic_qcq_enable(lif->txqcqs[i]);
2182 if (err) {
2183 derr = ionic_qcq_disable(lif, lif->rxqcqs[i], err);
2184 goto err_out;
2185 }
2186 }
2187
2188 if (lif->hwstamp_rxq) {
2189 ionic_rx_fill(&lif->hwstamp_rxq->q);
2190 err = ionic_qcq_enable(lif->hwstamp_rxq);
2191 if (err)
2192 goto err_out_hwstamp_rx;
2193 }
2194
2195 if (lif->hwstamp_txq) {
2196 err = ionic_qcq_enable(lif->hwstamp_txq);
2197 if (err)
2198 goto err_out_hwstamp_tx;
2199 }
2200
2201 return 0;
2202
2203 err_out_hwstamp_tx:
2204 if (lif->hwstamp_rxq)
2205 derr = ionic_qcq_disable(lif, lif->hwstamp_rxq, derr);
2206 err_out_hwstamp_rx:
2207 i = lif->nxqs;
2208 err_out:
2209 while (i--) {
2210 derr = ionic_qcq_disable(lif, lif->txqcqs[i], derr);
2211 derr = ionic_qcq_disable(lif, lif->rxqcqs[i], derr);
2212 }
2213
2214 return err;
2215 }
2216
2217 static int ionic_start_queues(struct ionic_lif *lif)
2218 {
2219 int err;
2220
2221 if (test_bit(IONIC_LIF_F_BROKEN, lif->state))
2222 return -EIO;
2223
2224 if (test_bit(IONIC_LIF_F_FW_RESET, lif->state))
2225 return -EBUSY;
2226
2227 if (test_and_set_bit(IONIC_LIF_F_UP, lif->state))
2228 return 0;
2229
2230 err = ionic_txrx_enable(lif);
2231 if (err) {
2232 clear_bit(IONIC_LIF_F_UP, lif->state);
2233 return err;
2234 }
2235 netif_tx_wake_all_queues(lif->netdev);
2236
2237 return 0;
2238 }
2239
2240 static int ionic_open(struct net_device *netdev)
2241 {
2242 struct ionic_lif *lif = netdev_priv(netdev);
2243 int err;
2244
2245 /* If recovering from a broken state, clear the bit and we'll try again */
2246 if (test_and_clear_bit(IONIC_LIF_F_BROKEN, lif->state))
2247 netdev_info(netdev, "clearing broken state\n");
2248
2249 mutex_lock(&lif->queue_lock);
2250
2251 err = ionic_txrx_alloc(lif);
2252 if (err)
2253 goto err_unlock;
2254
2255 err = ionic_txrx_init(lif);
2256 if (err)
2257 goto err_txrx_free;
2258
2259 err = netif_set_real_num_tx_queues(netdev, lif->nxqs);
2260 if (err)
2261 goto err_txrx_deinit;
2262
2263 err = netif_set_real_num_rx_queues(netdev, lif->nxqs);
2264 if (err)
2265 goto err_txrx_deinit;
2266
2267 /* don't start the queues until we have link */
2268 if (netif_carrier_ok(netdev)) {
2269 err = ionic_start_queues(lif);
2270 if (err)
2271 goto err_txrx_deinit;
2272 }
2273
2274 /* If hardware timestamping is enabled, but the queues were freed by
2275 * ionic_stop, those need to be reallocated and initialized, too.
2276 */
2277 ionic_lif_hwstamp_recreate_queues(lif);
2278
2279 mutex_unlock(&lif->queue_lock);
2280
2281 return 0;
2282
2283 err_txrx_deinit:
2284 ionic_txrx_deinit(lif);
2285 err_txrx_free:
2286 ionic_txrx_free(lif);
2287 err_unlock:
2288 mutex_unlock(&lif->queue_lock);
2289 return err;
2290 }
2291
2292 static void ionic_stop_queues(struct ionic_lif *lif)
2293 {
2294 if (!test_and_clear_bit(IONIC_LIF_F_UP, lif->state))
2295 return;
2296
2297 netif_tx_disable(lif->netdev);
2298 ionic_txrx_disable(lif);
2299 }
2300
2301 static int ionic_stop(struct net_device *netdev)
2302 {
2303 struct ionic_lif *lif = netdev_priv(netdev);
2304
2305 if (test_bit(IONIC_LIF_F_FW_RESET, lif->state))
2306 return 0;
2307
2308 mutex_lock(&lif->queue_lock);
2309 ionic_stop_queues(lif);
2310 ionic_txrx_deinit(lif);
2311 ionic_txrx_free(lif);
2312 mutex_unlock(&lif->queue_lock);
2313
2314 return 0;
2315 }
2316
2317 static int ionic_eth_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
2318 {
2319 struct ionic_lif *lif = netdev_priv(netdev);
2320
2321 switch (cmd) {
2322 case SIOCSHWTSTAMP:
2323 return ionic_lif_hwstamp_set(lif, ifr);
2324 case SIOCGHWTSTAMP:
2325 return ionic_lif_hwstamp_get(lif, ifr);
2326 default:
2327 return -EOPNOTSUPP;
2328 }
2329 }
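
/* Userspace sketch (hypothetical device name): the two ioctls above
 * follow the standard SIOC[SG]HWTSTAMP contract (see
 * Documentation/networking/timestamping.rst), so tools like hwstamp_ctl
 * or ptp4l reach ionic_lif_hwstamp_set() with roughly:
 *
 *	struct hwtstamp_config cfg = {
 *		.tx_type = HWTSTAMP_TX_ON,
 *		.rx_filter = HWTSTAMP_FILTER_ALL,
 *	};
 *	struct ifreq ifr = {};
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ifr.ifr_data = (char *)&cfg;
 *	ioctl(sock_fd, SIOCSHWTSTAMP, &ifr);
 */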
2330
2331 static int ionic_get_vf_config(struct net_device *netdev,
2332 int vf, struct ifla_vf_info *ivf)
2333 {
2334 struct ionic_lif *lif = netdev_priv(netdev);
2335 struct ionic *ionic = lif->ionic;
2336 int ret = 0;
2337
2338 if (!netif_device_present(netdev))
2339 return -EBUSY;
2340
2341 down_read(&ionic->vf_op_lock);
2342
2343 if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
2344 ret = -EINVAL;
2345 } else {
2346 struct ionic_vf *vfdata = &ionic->vfs[vf];
2347
2348 ivf->vf = vf;
2349 ivf->qos = 0;
2350 ivf->vlan = le16_to_cpu(vfdata->vlanid);
2351 ivf->spoofchk = vfdata->spoofchk;
2352 ivf->linkstate = vfdata->linkstate;
2353 ivf->max_tx_rate = le32_to_cpu(vfdata->maxrate);
2354 ivf->trusted = vfdata->trusted;
2355 ether_addr_copy(ivf->mac, vfdata->macaddr);
2356 }
2357
2358 up_read(&ionic->vf_op_lock);
2359 return ret;
2360 }
2361
2362 static int ionic_get_vf_stats(struct net_device *netdev, int vf,
2363 struct ifla_vf_stats *vf_stats)
2364 {
2365 struct ionic_lif *lif = netdev_priv(netdev);
2366 struct ionic *ionic = lif->ionic;
2367 struct ionic_lif_stats *vs;
2368 int ret = 0;
2369
2370 if (!netif_device_present(netdev))
2371 return -EBUSY;
2372
2373 down_read(&ionic->vf_op_lock);
2374
2375 if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
2376 ret = -EINVAL;
2377 } else {
2378 memset(vf_stats, 0, sizeof(*vf_stats));
2379 vs = &ionic->vfs[vf].stats;
2380
2381 vf_stats->rx_packets = le64_to_cpu(vs->rx_ucast_packets);
2382 vf_stats->tx_packets = le64_to_cpu(vs->tx_ucast_packets);
2383 vf_stats->rx_bytes = le64_to_cpu(vs->rx_ucast_bytes);
2384 vf_stats->tx_bytes = le64_to_cpu(vs->tx_ucast_bytes);
2385 vf_stats->broadcast = le64_to_cpu(vs->rx_bcast_packets);
2386 vf_stats->multicast = le64_to_cpu(vs->rx_mcast_packets);
2387 vf_stats->rx_dropped = le64_to_cpu(vs->rx_ucast_drop_packets) +
2388 le64_to_cpu(vs->rx_mcast_drop_packets) +
2389 le64_to_cpu(vs->rx_bcast_drop_packets);
2390 vf_stats->tx_dropped = le64_to_cpu(vs->tx_ucast_drop_packets) +
2391 le64_to_cpu(vs->tx_mcast_drop_packets) +
2392 le64_to_cpu(vs->tx_bcast_drop_packets);
2393 }
2394
2395 up_read(&ionic->vf_op_lock);
2396 return ret;
2397 }
2398
2399 static int ionic_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
2400 {
2401 struct ionic_vf_setattr_cmd vfc = { .attr = IONIC_VF_ATTR_MAC };
2402 struct ionic_lif *lif = netdev_priv(netdev);
2403 struct ionic *ionic = lif->ionic;
2404 int ret;
2405
2406 if (!(is_zero_ether_addr(mac) || is_valid_ether_addr(mac)))
2407 return -EINVAL;
2408
2409 if (!netif_device_present(netdev))
2410 return -EBUSY;
2411
2412 down_write(&ionic->vf_op_lock);
2413
2414 if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
2415 ret = -EINVAL;
2416 } else {
2417 ether_addr_copy(vfc.macaddr, mac);
2418 dev_dbg(ionic->dev, "%s: vf %d macaddr %pM\n",
2419 __func__, vf, vfc.macaddr);
2420
2421 ret = ionic_set_vf_config(ionic, vf, &vfc);
2422 if (!ret)
2423 ether_addr_copy(ionic->vfs[vf].macaddr, mac);
2424 }
2425
2426 up_write(&ionic->vf_op_lock);
2427 return ret;
2428 }
2429
2430 static int ionic_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan,
2431 u8 qos, __be16 proto)
2432 {
2433 struct ionic_vf_setattr_cmd vfc = { .attr = IONIC_VF_ATTR_VLAN };
2434 struct ionic_lif *lif = netdev_priv(netdev);
2435 struct ionic *ionic = lif->ionic;
2436 int ret;
2437
2438 /* until someday when we support qos */
2439 if (qos)
2440 return -EINVAL;
2441
2442 if (vlan > 4095)
2443 return -EINVAL;
2444
2445 if (proto != htons(ETH_P_8021Q))
2446 return -EPROTONOSUPPORT;
2447
2448 if (!netif_device_present(netdev))
2449 return -EBUSY;
2450
2451 down_write(&ionic->vf_op_lock);
2452
2453 if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
2454 ret = -EINVAL;
2455 } else {
2456 vfc.vlanid = cpu_to_le16(vlan);
2457 dev_dbg(ionic->dev, "%s: vf %d vlan %d\n",
2458 __func__, vf, le16_to_cpu(vfc.vlanid));
2459
2460 ret = ionic_set_vf_config(ionic, vf, &vfc);
2461 if (!ret)
2462 ionic->vfs[vf].vlanid = cpu_to_le16(vlan);
2463 }
2464
2465 up_write(&ionic->vf_op_lock);
2466 return ret;
2467 }
2468
2469 static int ionic_set_vf_rate(struct net_device *netdev, int vf,
2470 int tx_min, int tx_max)
2471 {
2472 struct ionic_vf_setattr_cmd vfc = { .attr = IONIC_VF_ATTR_RATE };
2473 struct ionic_lif *lif = netdev_priv(netdev);
2474 struct ionic *ionic = lif->ionic;
2475 int ret;
2476
2477 /* setting the min just seems silly */
2478 if (tx_min)
2479 return -EINVAL;
2480
2481 if (!netif_device_present(netdev))
2482 return -EBUSY;
2483
2484 down_write(&ionic->vf_op_lock);
2485
2486 if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
2487 ret = -EINVAL;
2488 } else {
2489 vfc.maxrate = cpu_to_le32(tx_max);
2490 dev_dbg(ionic->dev, "%s: vf %d maxrate %d\n",
2491 __func__, vf, le32_to_cpu(vfc.maxrate));
2492
2493 ret = ionic_set_vf_config(ionic, vf, &vfc);
2494 if (!ret)
2495 ionic->vfs[vf].maxrate = cpu_to_le32(tx_max);
2496 }
2497
2498 up_write(&ionic->vf_op_lock);
2499 return ret;
2500 }
2501
2502 static int ionic_set_vf_spoofchk(struct net_device *netdev, int vf, bool set)
2503 {
2504 struct ionic_vf_setattr_cmd vfc = { .attr = IONIC_VF_ATTR_SPOOFCHK };
2505 struct ionic_lif *lif = netdev_priv(netdev);
2506 struct ionic *ionic = lif->ionic;
2507 int ret;
2508
2509 if (!netif_device_present(netdev))
2510 return -EBUSY;
2511
2512 down_write(&ionic->vf_op_lock);
2513
2514 if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
2515 ret = -EINVAL;
2516 } else {
2517 vfc.spoofchk = set;
2518 dev_dbg(ionic->dev, "%s: vf %d spoof %d\n",
2519 __func__, vf, vfc.spoofchk);
2520
2521 ret = ionic_set_vf_config(ionic, vf, &vfc);
2522 if (!ret)
2523 ionic->vfs[vf].spoofchk = set;
2524 }
2525
2526 up_write(&ionic->vf_op_lock);
2527 return ret;
2528 }
2529
2530 static int ionic_set_vf_trust(struct net_device *netdev, int vf, bool set)
2531 {
2532 struct ionic_vf_setattr_cmd vfc = { .attr = IONIC_VF_ATTR_TRUST };
2533 struct ionic_lif *lif = netdev_priv(netdev);
2534 struct ionic *ionic = lif->ionic;
2535 int ret;
2536
2537 if (!netif_device_present(netdev))
2538 return -EBUSY;
2539
2540 down_write(&ionic->vf_op_lock);
2541
2542 if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
2543 ret = -EINVAL;
2544 } else {
2545 vfc.trust = set;
2546 dev_dbg(ionic->dev, "%s: vf %d trust %d\n",
2547 __func__, vf, vfc.trust);
2548
2549 ret = ionic_set_vf_config(ionic, vf, &vfc);
2550 if (!ret)
2551 ionic->vfs[vf].trusted = set;
2552 }
2553
2554 up_write(&ionic->vf_op_lock);
2555 return ret;
2556 }
2557
2558 static int ionic_set_vf_link_state(struct net_device *netdev, int vf, int set)
2559 {
2560 struct ionic_vf_setattr_cmd vfc = { .attr = IONIC_VF_ATTR_LINKSTATE };
2561 struct ionic_lif *lif = netdev_priv(netdev);
2562 struct ionic *ionic = lif->ionic;
2563 u8 vfls;
2564 int ret;
2565
2566 switch (set) {
2567 case IFLA_VF_LINK_STATE_ENABLE:
2568 vfls = IONIC_VF_LINK_STATUS_UP;
2569 break;
2570 case IFLA_VF_LINK_STATE_DISABLE:
2571 vfls = IONIC_VF_LINK_STATUS_DOWN;
2572 break;
2573 case IFLA_VF_LINK_STATE_AUTO:
2574 vfls = IONIC_VF_LINK_STATUS_AUTO;
2575 break;
2576 default:
2577 return -EINVAL;
2578 }
2579
2580 if (!netif_device_present(netdev))
2581 return -EBUSY;
2582
2583 down_write(&ionic->vf_op_lock);
2584
2585 if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
2586 ret = -EINVAL;
2587 } else {
2588 vfc.linkstate = vfls;
2589 dev_dbg(ionic->dev, "%s: vf %d linkstate %d\n",
2590 __func__, vf, vfc.linkstate);
2591
2592 ret = ionic_set_vf_config(ionic, vf, &vfc);
2593 if (!ret)
2594 ionic->vfs[vf].linkstate = set;
2595 }
2596
2597 up_write(&ionic->vf_op_lock);
2598 return ret;
2599 }
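
/* Usage sketch (hypothetical PF name): the .ndo_set_vf_* handlers above
 * map one-to-one onto the iproute2 VF sub-commands, e.g.
 *
 *	ip link set eth0 vf 0 mac 02:11:22:33:44:55
 *	ip link set eth0 vf 0 vlan 100
 *	ip link set eth0 vf 0 max_tx_rate 1000
 *	ip link set eth0 vf 0 spoofchk on trust on state auto
 *
 * Each handler validates under vf_op_lock, pushes the attribute with
 * ionic_set_vf_config(), and caches it in ionic->vfs[] only on success
 * so ionic_vf_attr_replay() can restore it after a FW reset.
 */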
2600
2601 static void ionic_vf_attr_replay(struct ionic_lif *lif)
2602 {
2603 struct ionic_vf_setattr_cmd vfc = { };
2604 struct ionic *ionic = lif->ionic;
2605 struct ionic_vf *v;
2606 int i;
2607
2608 if (!ionic->vfs)
2609 return;
2610
2611 down_read(&ionic->vf_op_lock);
2612
2613 for (i = 0; i < ionic->num_vfs; i++) {
2614 v = &ionic->vfs[i];
2615
2616 if (v->stats_pa) {
2617 vfc.attr = IONIC_VF_ATTR_STATSADDR;
2618 vfc.stats_pa = cpu_to_le64(v->stats_pa);
2619 ionic_set_vf_config(ionic, i, &vfc);
2620 vfc.stats_pa = 0;
2621 }
2622
2623 if (!is_zero_ether_addr(v->macaddr)) {
2624 vfc.attr = IONIC_VF_ATTR_MAC;
2625 ether_addr_copy(vfc.macaddr, v->macaddr);
2626 ionic_set_vf_config(ionic, i, &vfc);
2627 eth_zero_addr(vfc.macaddr);
2628 }
2629
2630 if (v->vlanid) {
2631 vfc.attr = IONIC_VF_ATTR_VLAN;
2632 vfc.vlanid = v->vlanid;
2633 ionic_set_vf_config(ionic, i, &vfc);
2634 vfc.vlanid = 0;
2635 }
2636
2637 if (v->maxrate) {
2638 vfc.attr = IONIC_VF_ATTR_RATE;
2639 vfc.maxrate = v->maxrate;
2640 ionic_set_vf_config(ionic, i, &vfc);
2641 vfc.maxrate = 0;
2642 }
2643
2644 if (v->spoofchk) {
2645 vfc.attr = IONIC_VF_ATTR_SPOOFCHK;
2646 vfc.spoofchk = v->spoofchk;
2647 ionic_set_vf_config(ionic, i, &vfc);
2648 vfc.spoofchk = 0;
2649 }
2650
2651 if (v->trusted) {
2652 vfc.attr = IONIC_VF_ATTR_TRUST;
2653 vfc.trust = v->trusted;
2654 ionic_set_vf_config(ionic, i, &vfc);
2655 vfc.trust = 0;
2656 }
2657
2658 if (v->linkstate) {
2659 vfc.attr = IONIC_VF_ATTR_LINKSTATE;
2660 vfc.linkstate = v->linkstate;
2661 ionic_set_vf_config(ionic, i, &vfc);
2662 vfc.linkstate = 0;
2663 }
2664 }
2665
2666 up_read(&ionic->vf_op_lock);
2667
2668 ionic_vf_start(ionic);
2669 }
2670
2671 static const struct net_device_ops ionic_netdev_ops = {
2672 .ndo_open = ionic_open,
2673 .ndo_stop = ionic_stop,
2674 .ndo_eth_ioctl = ionic_eth_ioctl,
2675 .ndo_start_xmit = ionic_start_xmit,
2676 .ndo_get_stats64 = ionic_get_stats64,
2677 .ndo_set_rx_mode = ionic_ndo_set_rx_mode,
2678 .ndo_set_features = ionic_set_features,
2679 .ndo_set_mac_address = ionic_set_mac_address,
2680 .ndo_validate_addr = eth_validate_addr,
2681 .ndo_tx_timeout = ionic_tx_timeout,
2682 .ndo_change_mtu = ionic_change_mtu,
2683 .ndo_vlan_rx_add_vid = ionic_vlan_rx_add_vid,
2684 .ndo_vlan_rx_kill_vid = ionic_vlan_rx_kill_vid,
2685 .ndo_set_vf_vlan = ionic_set_vf_vlan,
2686 .ndo_set_vf_trust = ionic_set_vf_trust,
2687 .ndo_set_vf_mac = ionic_set_vf_mac,
2688 .ndo_set_vf_rate = ionic_set_vf_rate,
2689 .ndo_set_vf_spoofchk = ionic_set_vf_spoofchk,
2690 .ndo_get_vf_config = ionic_get_vf_config,
2691 .ndo_set_vf_link_state = ionic_set_vf_link_state,
2692 .ndo_get_vf_stats = ionic_get_vf_stats,
2693 };
2694
2695 static int ionic_cmb_reconfig(struct ionic_lif *lif,
2696 struct ionic_queue_params *qparam)
2697 {
2698 struct ionic_queue_params start_qparams;
2699 int err = 0;
2700
2701 /* When changing CMB queue parameters, we're using limited
2702 * on-device memory and don't have extra memory to use for
2703 * duplicate allocations, so we free it all first then
2704 * re-allocate with the new parameters.
2705 */
2706
2707 /* Checkpoint for possible unwind */
2708 ionic_init_queue_params(lif, &start_qparams);
2709
2710 /* Stop and free the queues */
2711 ionic_stop_queues_reconfig(lif);
2712 ionic_txrx_free(lif);
2713
2714 /* Set up new qparams */
2715 ionic_set_queue_params(lif, qparam);
2716
2717 if (netif_running(lif->netdev)) {
2718 /* Alloc and start the new configuration */
2719 err = ionic_txrx_alloc(lif);
2720 if (err) {
2721 dev_warn(lif->ionic->dev,
2722 "CMB reconfig failed, restoring values: %d\n", err);
2723
2724 /* Back out the changes */
2725 ionic_set_queue_params(lif, &start_qparams);
2726 err = ionic_txrx_alloc(lif);
2727 if (err) {
2728 dev_err(lif->ionic->dev,
2729 "CMB restore failed: %d\n", err);
2730 goto err_out;
2731 }
2732 }
2733
2734 err = ionic_start_queues_reconfig(lif);
2735 if (err) {
2736 dev_err(lif->ionic->dev,
2737 "CMB reconfig failed: %d\n", err);
2738 goto err_out;
2739 }
2740 }
2741
2742 err_out:
2743 /* This was detached in ionic_stop_queues_reconfig() */
2744 netif_device_attach(lif->netdev);
2745
2746 return err;
2747 }
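
/* The checkpoint/unwind flow above, as a sketch:
 *
 *	save current qparams			(checkpoint)
 *	stop queues, free all rings		(CMB has no room for both)
 *	apply new qparams, alloc rings
 *	on failure: restore saved qparams and alloc the old config again
 *
 * If even the restore allocation fails, the rings stay gone until a
 * later DOWN/UP cycle succeeds, which is why both failure paths log.
 */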
2748
2749 static void ionic_swap_queues(struct ionic_qcq *a, struct ionic_qcq *b)
2750 {
2751 /* only swapping the queues, not the napi, flags, or other stuff */
2752 swap(a->q.features, b->q.features);
2753 swap(a->q.num_descs, b->q.num_descs);
2754 swap(a->q.desc_size, b->q.desc_size);
2755 swap(a->q.base, b->q.base);
2756 swap(a->q.base_pa, b->q.base_pa);
2757 swap(a->q.info, b->q.info);
2758 swap(a->q_base, b->q_base);
2759 swap(a->q_base_pa, b->q_base_pa);
2760 swap(a->q_size, b->q_size);
2761
2762 swap(a->q.sg_desc_size, b->q.sg_desc_size);
2763 swap(a->q.sg_base, b->q.sg_base);
2764 swap(a->q.sg_base_pa, b->q.sg_base_pa);
2765 swap(a->sg_base, b->sg_base);
2766 swap(a->sg_base_pa, b->sg_base_pa);
2767 swap(a->sg_size, b->sg_size);
2768
2769 swap(a->cq.num_descs, b->cq.num_descs);
2770 swap(a->cq.desc_size, b->cq.desc_size);
2771 swap(a->cq.base, b->cq.base);
2772 swap(a->cq.base_pa, b->cq.base_pa);
2773 swap(a->cq.info, b->cq.info);
2774 swap(a->cq_base, b->cq_base);
2775 swap(a->cq_base_pa, b->cq_base_pa);
2776 swap(a->cq_size, b->cq_size);
2777
2778 ionic_debugfs_del_qcq(a);
2779 ionic_debugfs_add_qcq(a->q.lif, a);
2780 }
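
/* Note: swap() is the kernel macro (include/linux/minmax.h), roughly
 *
 *	#define swap(a, b) \
 *		do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0)
 *
 * Exchanging only ring pointers and sizes - not napi, flags, or the
 * interrupt binding - lets ionic_reconfigure_queues() splice freshly
 * allocated rings into live qcq shells without redoing IRQ setup.
 */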
2781
2782 int ionic_reconfigure_queues(struct ionic_lif *lif,
2783 struct ionic_queue_params *qparam)
2784 {
2785 unsigned int comp_sz, desc_sz, num_desc, sg_desc_sz;
2786 struct ionic_qcq **tx_qcqs = NULL;
2787 struct ionic_qcq **rx_qcqs = NULL;
2788 unsigned int flags, i;
2789 int err = 0;
2790
2791 	/* Are we changing q params while CMB is on? */
2792 if ((test_bit(IONIC_LIF_F_CMB_TX_RINGS, lif->state) && qparam->cmb_tx) ||
2793 (test_bit(IONIC_LIF_F_CMB_RX_RINGS, lif->state) && qparam->cmb_rx))
2794 return ionic_cmb_reconfig(lif, qparam);
2795
2796 /* allocate temporary qcq arrays to hold new queue structs */
2797 if (qparam->nxqs != lif->nxqs || qparam->ntxq_descs != lif->ntxq_descs) {
2798 tx_qcqs = devm_kcalloc(lif->ionic->dev, lif->ionic->ntxqs_per_lif,
2799 sizeof(struct ionic_qcq *), GFP_KERNEL);
2800 if (!tx_qcqs) {
2801 err = -ENOMEM;
2802 goto err_out;
2803 }
2804 }
2805 if (qparam->nxqs != lif->nxqs ||
2806 qparam->nrxq_descs != lif->nrxq_descs ||
2807 qparam->rxq_features != lif->rxq_features) {
2808 rx_qcqs = devm_kcalloc(lif->ionic->dev, lif->ionic->nrxqs_per_lif,
2809 sizeof(struct ionic_qcq *), GFP_KERNEL);
2810 if (!rx_qcqs) {
2811 err = -ENOMEM;
2812 goto err_out;
2813 }
2814 }
2815
2816 /* allocate new desc_info and rings, but leave the interrupt setup
2817 * until later so as to not mess with the still-running queues
2818 */
2819 if (tx_qcqs) {
2820 num_desc = qparam->ntxq_descs;
2821 desc_sz = sizeof(struct ionic_txq_desc);
2822 comp_sz = sizeof(struct ionic_txq_comp);
2823
2824 if (lif->qtype_info[IONIC_QTYPE_TXQ].version >= 1 &&
2825 lif->qtype_info[IONIC_QTYPE_TXQ].sg_desc_sz ==
2826 sizeof(struct ionic_txq_sg_desc_v1))
2827 sg_desc_sz = sizeof(struct ionic_txq_sg_desc_v1);
2828 else
2829 sg_desc_sz = sizeof(struct ionic_txq_sg_desc);
2830
2831 for (i = 0; i < qparam->nxqs; i++) {
2832 /* If missing, short placeholder qcq needed for swap */
2833 if (!lif->txqcqs[i]) {
2834 flags = IONIC_QCQ_F_TX_STATS | IONIC_QCQ_F_SG;
2835 err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, i, "tx", flags,
2836 4, desc_sz, comp_sz, sg_desc_sz,
2837 lif->kern_pid, &lif->txqcqs[i]);
2838 if (err)
2839 goto err_out;
2840 }
2841
2842 flags = lif->txqcqs[i]->flags & ~IONIC_QCQ_F_INTR;
2843 err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, i, "tx", flags,
2844 num_desc, desc_sz, comp_sz, sg_desc_sz,
2845 lif->kern_pid, &tx_qcqs[i]);
2846 if (err)
2847 goto err_out;
2848 }
2849 }
2850
2851 if (rx_qcqs) {
2852 num_desc = qparam->nrxq_descs;
2853 desc_sz = sizeof(struct ionic_rxq_desc);
2854 comp_sz = sizeof(struct ionic_rxq_comp);
2855 sg_desc_sz = sizeof(struct ionic_rxq_sg_desc);
2856
2857 if (qparam->rxq_features & IONIC_Q_F_2X_CQ_DESC)
2858 comp_sz *= 2;
2859
2860 for (i = 0; i < qparam->nxqs; i++) {
2861 /* If missing, short placeholder qcq needed for swap */
2862 if (!lif->rxqcqs[i]) {
2863 flags = IONIC_QCQ_F_RX_STATS | IONIC_QCQ_F_SG;
2864 err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, i, "rx", flags,
2865 4, desc_sz, comp_sz, sg_desc_sz,
2866 lif->kern_pid, &lif->rxqcqs[i]);
2867 if (err)
2868 goto err_out;
2869 }
2870
2871 flags = lif->rxqcqs[i]->flags & ~IONIC_QCQ_F_INTR;
2872 err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, i, "rx", flags,
2873 num_desc, desc_sz, comp_sz, sg_desc_sz,
2874 lif->kern_pid, &rx_qcqs[i]);
2875 if (err)
2876 goto err_out;
2877
2878 rx_qcqs[i]->q.features = qparam->rxq_features;
2879 }
2880 }
2881
2882 /* stop and clean the queues */
2883 ionic_stop_queues_reconfig(lif);
2884
2885 if (qparam->nxqs != lif->nxqs) {
2886 err = netif_set_real_num_tx_queues(lif->netdev, qparam->nxqs);
2887 if (err)
2888 goto err_out_reinit_unlock;
2889 err = netif_set_real_num_rx_queues(lif->netdev, qparam->nxqs);
2890 if (err) {
2891 netif_set_real_num_tx_queues(lif->netdev, lif->nxqs);
2892 goto err_out_reinit_unlock;
2893 }
2894 }
2895
2896 /* swap new desc_info and rings, keeping existing interrupt config */
2897 if (tx_qcqs) {
2898 lif->ntxq_descs = qparam->ntxq_descs;
2899 for (i = 0; i < qparam->nxqs; i++)
2900 ionic_swap_queues(lif->txqcqs[i], tx_qcqs[i]);
2901 }
2902
2903 if (rx_qcqs) {
2904 lif->nrxq_descs = qparam->nrxq_descs;
2905 for (i = 0; i < qparam->nxqs; i++)
2906 ionic_swap_queues(lif->rxqcqs[i], rx_qcqs[i]);
2907 }
2908
2909 /* if we need to change the interrupt layout, this is the time */
2910 if (qparam->intr_split != test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state) ||
2911 qparam->nxqs != lif->nxqs) {
2912 if (qparam->intr_split) {
2913 set_bit(IONIC_LIF_F_SPLIT_INTR, lif->state);
2914 } else {
2915 clear_bit(IONIC_LIF_F_SPLIT_INTR, lif->state);
2916 lif->tx_coalesce_usecs = lif->rx_coalesce_usecs;
2917 lif->tx_coalesce_hw = lif->rx_coalesce_hw;
2918 }
2919
2920 /* Clear existing interrupt assignments. We check for NULL here
2921 * because we're checking the whole array for potential qcqs, not
2922 * just those qcqs that have just been set up.
2923 */
2924 for (i = 0; i < lif->ionic->ntxqs_per_lif; i++) {
2925 if (lif->txqcqs[i])
2926 ionic_qcq_intr_free(lif, lif->txqcqs[i]);
2927 if (lif->rxqcqs[i])
2928 ionic_qcq_intr_free(lif, lif->rxqcqs[i]);
2929 }
2930
2931 /* re-assign the interrupts */
2932 for (i = 0; i < qparam->nxqs; i++) {
2933 lif->rxqcqs[i]->flags |= IONIC_QCQ_F_INTR;
2934 err = ionic_alloc_qcq_interrupt(lif, lif->rxqcqs[i]);
2935 ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
2936 lif->rxqcqs[i]->intr.index,
2937 lif->rx_coalesce_hw);
2938
2939 if (qparam->intr_split) {
2940 lif->txqcqs[i]->flags |= IONIC_QCQ_F_INTR;
2941 err = ionic_alloc_qcq_interrupt(lif, lif->txqcqs[i]);
2942 ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
2943 lif->txqcqs[i]->intr.index,
2944 lif->tx_coalesce_hw);
2945 if (test_bit(IONIC_LIF_F_TX_DIM_INTR, lif->state))
2946 lif->txqcqs[i]->intr.dim_coal_hw = lif->tx_coalesce_hw;
2947 } else {
2948 lif->txqcqs[i]->flags &= ~IONIC_QCQ_F_INTR;
2949 ionic_link_qcq_interrupts(lif->rxqcqs[i], lif->txqcqs[i]);
2950 }
2951 }
2952 }
2953
2954 /* now we can rework the debugfs mappings */
2955 if (tx_qcqs) {
2956 for (i = 0; i < qparam->nxqs; i++) {
2957 ionic_debugfs_del_qcq(lif->txqcqs[i]);
2958 ionic_debugfs_add_qcq(lif, lif->txqcqs[i]);
2959 }
2960 }
2961
2962 if (rx_qcqs) {
2963 for (i = 0; i < qparam->nxqs; i++) {
2964 ionic_debugfs_del_qcq(lif->rxqcqs[i]);
2965 ionic_debugfs_add_qcq(lif, lif->rxqcqs[i]);
2966 }
2967 }
2968
2969 swap(lif->nxqs, qparam->nxqs);
2970 swap(lif->rxq_features, qparam->rxq_features);
2971
2972 err_out_reinit_unlock:
2973 /* re-init the queues, but don't lose an error code */
2974 if (err)
2975 ionic_start_queues_reconfig(lif);
2976 else
2977 err = ionic_start_queues_reconfig(lif);
2978
2979 err_out:
2980 /* free old allocs without cleaning intr */
2981 for (i = 0; i < qparam->nxqs; i++) {
2982 if (tx_qcqs && tx_qcqs[i]) {
2983 tx_qcqs[i]->flags &= ~IONIC_QCQ_F_INTR;
2984 ionic_qcq_free(lif, tx_qcqs[i]);
2985 devm_kfree(lif->ionic->dev, tx_qcqs[i]);
2986 tx_qcqs[i] = NULL;
2987 }
2988 if (rx_qcqs && rx_qcqs[i]) {
2989 rx_qcqs[i]->flags &= ~IONIC_QCQ_F_INTR;
2990 ionic_qcq_free(lif, rx_qcqs[i]);
2991 devm_kfree(lif->ionic->dev, rx_qcqs[i]);
2992 rx_qcqs[i] = NULL;
2993 }
2994 }
2995
2996 /* free q array */
2997 if (rx_qcqs) {
2998 devm_kfree(lif->ionic->dev, rx_qcqs);
2999 rx_qcqs = NULL;
3000 }
3001 if (tx_qcqs) {
3002 devm_kfree(lif->ionic->dev, tx_qcqs);
3003 tx_qcqs = NULL;
3004 }
3005
3006 /* clean the unused dma and info allocations when new set is smaller
3007 * than the full array, but leave the qcq shells in place
3008 */
3009 for (i = lif->nxqs; i < lif->ionic->ntxqs_per_lif; i++) {
3010 if (lif->txqcqs && lif->txqcqs[i]) {
3011 lif->txqcqs[i]->flags &= ~IONIC_QCQ_F_INTR;
3012 ionic_qcq_free(lif, lif->txqcqs[i]);
3013 }
3014
3015 if (lif->rxqcqs && lif->rxqcqs[i]) {
3016 lif->rxqcqs[i]->flags &= ~IONIC_QCQ_F_INTR;
3017 ionic_qcq_free(lif, lif->rxqcqs[i]);
3018 }
3019 }
3020
3021 if (err)
3022 netdev_info(lif->netdev, "%s: failed %d\n", __func__, err);
3023
3024 return err;
3025 }
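
/* Usage sketch (hypothetical interface name): this reconfig path backs
 * the ethtool ring and channel ops, e.g.
 *
 *	ethtool -G eth0 rx 4096 tx 4096		(resize descriptor rings)
 *	ethtool -L eth0 combined 8		(change queue count)
 *
 * New rings are allocated alongside the old ones, the queues are stopped
 * only for the swap, and the old rings are freed afterward - so a failed
 * allocation leaves the original configuration in service.
 */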
3026
3027 int ionic_lif_alloc(struct ionic *ionic)
3028 {
3029 struct device *dev = ionic->dev;
3030 union ionic_lif_identity *lid;
3031 struct net_device *netdev;
3032 struct ionic_lif *lif;
3033 int tbl_sz;
3034 int err;
3035
3036 lid = kzalloc(sizeof(*lid), GFP_KERNEL);
3037 if (!lid)
3038 return -ENOMEM;
3039
3040 netdev = alloc_etherdev_mqs(sizeof(*lif),
3041 ionic->ntxqs_per_lif, ionic->ntxqs_per_lif);
3042 if (!netdev) {
3043 dev_err(dev, "Cannot allocate netdev, aborting\n");
3044 err = -ENOMEM;
3045 goto err_out_free_lid;
3046 }
3047
3048 SET_NETDEV_DEV(netdev, dev);
3049
3050 lif = netdev_priv(netdev);
3051 lif->netdev = netdev;
3052 ionic->lif = lif;
3053 lif->ionic = ionic;
3054 netdev->netdev_ops = &ionic_netdev_ops;
3055 ionic_ethtool_set_ops(netdev);
3056
3057 netdev->watchdog_timeo = 2 * HZ;
3058 netif_carrier_off(netdev);
3059
3060 lif->identity = lid;
3061 lif->lif_type = IONIC_LIF_TYPE_CLASSIC;
3062 err = ionic_lif_identify(ionic, lif->lif_type, lif->identity);
3063 if (err) {
3064 dev_err(ionic->dev, "Cannot identify type %d: %d\n",
3065 lif->lif_type, err);
3066 goto err_out_free_netdev;
3067 }
3068 lif->netdev->min_mtu = max_t(unsigned int, ETH_MIN_MTU,
3069 le32_to_cpu(lif->identity->eth.min_frame_size));
3070 lif->netdev->max_mtu =
3071 le32_to_cpu(lif->identity->eth.max_frame_size) - ETH_HLEN - VLAN_HLEN;
3072
3073 lif->neqs = ionic->neqs_per_lif;
3074 lif->nxqs = ionic->ntxqs_per_lif;
3075
3076 lif->index = 0;
3077
3078 if (is_kdump_kernel()) {
3079 lif->ntxq_descs = IONIC_MIN_TXRX_DESC;
3080 lif->nrxq_descs = IONIC_MIN_TXRX_DESC;
3081 } else {
3082 lif->ntxq_descs = IONIC_DEF_TXRX_DESC;
3083 lif->nrxq_descs = IONIC_DEF_TXRX_DESC;
3084 }
3085
3086 /* Convert the default coalesce value to actual hw resolution */
3087 lif->rx_coalesce_usecs = IONIC_ITR_COAL_USEC_DEFAULT;
3088 lif->rx_coalesce_hw = ionic_coal_usec_to_hw(lif->ionic,
3089 lif->rx_coalesce_usecs);
3090 lif->tx_coalesce_usecs = lif->rx_coalesce_usecs;
3091 lif->tx_coalesce_hw = lif->rx_coalesce_hw;
3092 set_bit(IONIC_LIF_F_RX_DIM_INTR, lif->state);
3093 set_bit(IONIC_LIF_F_TX_DIM_INTR, lif->state);
3094
3095 snprintf(lif->name, sizeof(lif->name), "lif%u", lif->index);
3096
3097 mutex_init(&lif->queue_lock);
3098 mutex_init(&lif->config_lock);
3099
3100 spin_lock_init(&lif->adminq_lock);
3101
3102 spin_lock_init(&lif->deferred.lock);
3103 INIT_LIST_HEAD(&lif->deferred.list);
3104 INIT_WORK(&lif->deferred.work, ionic_lif_deferred_work);
3105
3106 /* allocate lif info */
3107 lif->info_sz = ALIGN(sizeof(*lif->info), PAGE_SIZE);
3108 lif->info = dma_alloc_coherent(dev, lif->info_sz,
3109 &lif->info_pa, GFP_KERNEL);
3110 if (!lif->info) {
3111 dev_err(dev, "Failed to allocate lif info, aborting\n");
3112 err = -ENOMEM;
3113 goto err_out_free_mutex;
3114 }
3115
3116 ionic_debugfs_add_lif(lif);
3117
3118 /* allocate control queues and txrx queue arrays */
3119 ionic_lif_queue_identify(lif);
3120 err = ionic_qcqs_alloc(lif);
3121 if (err)
3122 goto err_out_free_lif_info;
3123
3124 /* allocate rss indirection table */
3125 tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz);
3126 lif->rss_ind_tbl_sz = sizeof(*lif->rss_ind_tbl) * tbl_sz;
3127 lif->rss_ind_tbl = dma_alloc_coherent(dev, lif->rss_ind_tbl_sz,
3128 &lif->rss_ind_tbl_pa,
3129 GFP_KERNEL);
3130
3131 if (!lif->rss_ind_tbl) {
3132 err = -ENOMEM;
3133 dev_err(dev, "Failed to allocate rss indirection table, aborting\n");
3134 goto err_out_free_qcqs;
3135 }
3136 netdev_rss_key_fill(lif->rss_hash_key, IONIC_RSS_HASH_KEY_SIZE);
3137
3138 ionic_lif_alloc_phc(lif);
3139
3140 return 0;
3141
3142 err_out_free_qcqs:
3143 ionic_qcqs_free(lif);
3144 err_out_free_lif_info:
3145 dma_free_coherent(dev, lif->info_sz, lif->info, lif->info_pa);
3146 lif->info = NULL;
3147 lif->info_pa = 0;
3148 err_out_free_mutex:
3149 mutex_destroy(&lif->config_lock);
3150 mutex_destroy(&lif->queue_lock);
3151 err_out_free_netdev:
3152 free_netdev(lif->netdev);
3153 lif = NULL;
3154 err_out_free_lid:
3155 kfree(lid);
3156
3157 return err;
3158 }
3159
3160 static void ionic_lif_reset(struct ionic_lif *lif)
3161 {
3162 struct ionic_dev *idev = &lif->ionic->idev;
3163
3164 if (!ionic_is_fw_running(idev))
3165 return;
3166
3167 mutex_lock(&lif->ionic->dev_cmd_lock);
3168 ionic_dev_cmd_lif_reset(idev, lif->index);
3169 ionic_dev_cmd_wait(lif->ionic, DEVCMD_TIMEOUT);
3170 mutex_unlock(&lif->ionic->dev_cmd_lock);
3171 }
3172
3173 static void ionic_lif_handle_fw_down(struct ionic_lif *lif)
3174 {
3175 struct ionic *ionic = lif->ionic;
3176
3177 if (test_and_set_bit(IONIC_LIF_F_FW_RESET, lif->state))
3178 return;
3179
3180 dev_info(ionic->dev, "FW Down: Stopping LIFs\n");
3181
3182 netif_device_detach(lif->netdev);
3183
3184 mutex_lock(&lif->queue_lock);
3185 if (test_bit(IONIC_LIF_F_UP, lif->state)) {
3186 dev_info(ionic->dev, "Surprise FW stop, stopping queues\n");
3187 ionic_stop_queues(lif);
3188 }
3189
3190 if (netif_running(lif->netdev)) {
3191 ionic_txrx_deinit(lif);
3192 ionic_txrx_free(lif);
3193 }
3194 ionic_lif_deinit(lif);
3195 ionic_reset(ionic);
3196 ionic_qcqs_free(lif);
3197
3198 mutex_unlock(&lif->queue_lock);
3199
3200 clear_bit(IONIC_LIF_F_FW_STOPPING, lif->state);
3201 dev_info(ionic->dev, "FW Down: LIFs stopped\n");
3202 }
3203
3204 int ionic_restart_lif(struct ionic_lif *lif)
3205 {
3206 struct ionic *ionic = lif->ionic;
3207 int err;
3208
3209 mutex_lock(&lif->queue_lock);
3210
3211 if (test_and_clear_bit(IONIC_LIF_F_BROKEN, lif->state))
3212 dev_info(ionic->dev, "FW Up: clearing broken state\n");
3213
3214 err = ionic_qcqs_alloc(lif);
3215 if (err)
3216 goto err_unlock;
3217
3218 err = ionic_lif_init(lif);
3219 if (err)
3220 goto err_qcqs_free;
3221
3222 ionic_vf_attr_replay(lif);
3223
3224 if (lif->registered)
3225 ionic_lif_set_netdev_info(lif);
3226
3227 ionic_rx_filter_replay(lif);
3228
3229 if (netif_running(lif->netdev)) {
3230 err = ionic_txrx_alloc(lif);
3231 if (err)
3232 goto err_lifs_deinit;
3233
3234 err = ionic_txrx_init(lif);
3235 if (err)
3236 goto err_txrx_free;
3237 }
3238
3239 mutex_unlock(&lif->queue_lock);
3240
3241 clear_bit(IONIC_LIF_F_FW_RESET, lif->state);
3242 ionic_link_status_check_request(lif, CAN_SLEEP);
3243 netif_device_attach(lif->netdev);
3244
3245 return 0;
3246
3247 err_txrx_free:
3248 ionic_txrx_free(lif);
3249 err_lifs_deinit:
3250 ionic_lif_deinit(lif);
3251 err_qcqs_free:
3252 ionic_qcqs_free(lif);
3253 err_unlock:
3254 mutex_unlock(&lif->queue_lock);
3255
3256 return err;
3257 }
3258
3259 static void ionic_lif_handle_fw_up(struct ionic_lif *lif)
3260 {
3261 struct ionic *ionic = lif->ionic;
3262 int err;
3263
3264 if (!test_bit(IONIC_LIF_F_FW_RESET, lif->state))
3265 return;
3266
3267 dev_info(ionic->dev, "FW Up: restarting LIFs\n");
3268
3269 /* This is a little different from what happens at
3270 * probe time because the LIF already exists so we
3271 * just need to reanimate it.
3272 */
3273 ionic_init_devinfo(ionic);
3274 err = ionic_identify(ionic);
3275 if (err)
3276 goto err_out;
3277 err = ionic_port_identify(ionic);
3278 if (err)
3279 goto err_out;
3280 err = ionic_port_init(ionic);
3281 if (err)
3282 goto err_out;
3283
3284 err = ionic_restart_lif(lif);
3285 if (err)
3286 goto err_out;
3287
3288 dev_info(ionic->dev, "FW Up: LIFs restarted\n");
3289
3290 /* restore the hardware timestamping queues */
3291 ionic_lif_hwstamp_replay(lif);
3292
3293 return;
3294
3295 err_out:
3296 dev_err(ionic->dev, "FW Up: LIFs restart failed - err %d\n", err);
3297 }
3298
3299 void ionic_lif_free(struct ionic_lif *lif)
3300 {
3301 struct device *dev = lif->ionic->dev;
3302
3303 ionic_lif_free_phc(lif);
3304
3305 /* free rss indirection table */
3306 dma_free_coherent(dev, lif->rss_ind_tbl_sz, lif->rss_ind_tbl,
3307 lif->rss_ind_tbl_pa);
3308 lif->rss_ind_tbl = NULL;
3309 lif->rss_ind_tbl_pa = 0;
3310
3311 /* free queues */
3312 ionic_qcqs_free(lif);
3313 if (!test_bit(IONIC_LIF_F_FW_RESET, lif->state))
3314 ionic_lif_reset(lif);
3315
3316 /* free lif info */
3317 kfree(lif->identity);
3318 dma_free_coherent(dev, lif->info_sz, lif->info, lif->info_pa);
3319 lif->info = NULL;
3320 lif->info_pa = 0;
3321
3322 /* unmap doorbell page */
3323 ionic_bus_unmap_dbpage(lif->ionic, lif->kern_dbpage);
3324 lif->kern_dbpage = NULL;
3325
3326 mutex_destroy(&lif->config_lock);
3327 mutex_destroy(&lif->queue_lock);
3328
3329 /* free netdev & lif */
3330 ionic_debugfs_del_lif(lif);
3331 free_netdev(lif->netdev);
3332 }
3333
3334 void ionic_lif_deinit(struct ionic_lif *lif)
3335 {
3336 if (!test_and_clear_bit(IONIC_LIF_F_INITED, lif->state))
3337 return;
3338
3339 if (!test_bit(IONIC_LIF_F_FW_RESET, lif->state)) {
3340 cancel_work_sync(&lif->deferred.work);
3341 cancel_work_sync(&lif->tx_timeout_work);
3342 ionic_rx_filters_deinit(lif);
3343 if (lif->netdev->features & NETIF_F_RXHASH)
3344 ionic_lif_rss_deinit(lif);
3345 }
3346
3347 napi_disable(&lif->adminqcq->napi);
3348 ionic_lif_qcq_deinit(lif, lif->notifyqcq);
3349 ionic_lif_qcq_deinit(lif, lif->adminqcq);
3350
3351 ionic_lif_reset(lif);
3352 }
3353
3354 static int ionic_lif_adminq_init(struct ionic_lif *lif)
3355 {
3356 struct device *dev = lif->ionic->dev;
3357 struct ionic_q_init_comp comp;
3358 struct ionic_dev *idev;
3359 struct ionic_qcq *qcq;
3360 struct ionic_queue *q;
3361 int err;
3362
3363 idev = &lif->ionic->idev;
3364 qcq = lif->adminqcq;
3365 q = &qcq->q;
3366
3367 mutex_lock(&lif->ionic->dev_cmd_lock);
3368 ionic_dev_cmd_adminq_init(idev, qcq, lif->index, qcq->intr.index);
3369 err = ionic_dev_cmd_wait(lif->ionic, DEVCMD_TIMEOUT);
3370 ionic_dev_cmd_comp(idev, (union ionic_dev_cmd_comp *)&comp);
3371 mutex_unlock(&lif->ionic->dev_cmd_lock);
3372 if (err) {
3373 netdev_err(lif->netdev, "adminq init failed %d\n", err);
3374 return err;
3375 }
3376
3377 q->hw_type = comp.hw_type;
3378 q->hw_index = le32_to_cpu(comp.hw_index);
3379 q->dbval = IONIC_DBELL_QID(q->hw_index);
3380
3381 dev_dbg(dev, "adminq->hw_type %d\n", q->hw_type);
3382 dev_dbg(dev, "adminq->hw_index %d\n", q->hw_index);
3383
3384 q->dbell_deadline = IONIC_ADMIN_DOORBELL_DEADLINE;
3385 q->dbell_jiffies = jiffies;
3386
3387 netif_napi_add(lif->netdev, &qcq->napi, ionic_adminq_napi);
3388
3389 qcq->napi_qcq = qcq;
3390 timer_setup(&qcq->napi_deadline, ionic_napi_deadline, 0);
3391
3392 napi_enable(&qcq->napi);
3393
3394 if (qcq->flags & IONIC_QCQ_F_INTR)
3395 ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
3396 IONIC_INTR_MASK_CLEAR);
3397
3398 qcq->flags |= IONIC_QCQ_F_INITED;
3399
3400 return 0;
3401 }
3402
3403 static int ionic_lif_notifyq_init(struct ionic_lif *lif)
3404 {
3405 struct ionic_qcq *qcq = lif->notifyqcq;
3406 struct device *dev = lif->ionic->dev;
3407 struct ionic_queue *q = &qcq->q;
3408 int err;
3409
3410 struct ionic_admin_ctx ctx = {
3411 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
3412 .cmd.q_init = {
3413 .opcode = IONIC_CMD_Q_INIT,
3414 .lif_index = cpu_to_le16(lif->index),
3415 .type = q->type,
3416 .ver = lif->qtype_info[q->type].version,
3417 .index = cpu_to_le32(q->index),
3418 .flags = cpu_to_le16(IONIC_QINIT_F_IRQ |
3419 IONIC_QINIT_F_ENA),
3420 .intr_index = cpu_to_le16(lif->adminqcq->intr.index),
3421 .pid = cpu_to_le16(q->pid),
3422 .ring_size = ilog2(q->num_descs),
3423 .ring_base = cpu_to_le64(q->base_pa),
3424 }
3425 };
3426
3427 dev_dbg(dev, "notifyq_init.pid %d\n", ctx.cmd.q_init.pid);
3428 dev_dbg(dev, "notifyq_init.index %d\n", ctx.cmd.q_init.index);
3429 dev_dbg(dev, "notifyq_init.ring_base 0x%llx\n", ctx.cmd.q_init.ring_base);
3430 dev_dbg(dev, "notifyq_init.ring_size %d\n", ctx.cmd.q_init.ring_size);
3431
3432 err = ionic_adminq_post_wait(lif, &ctx);
3433 if (err)
3434 return err;
3435
3436 lif->last_eid = 0;
3437 q->hw_type = ctx.comp.q_init.hw_type;
3438 q->hw_index = le32_to_cpu(ctx.comp.q_init.hw_index);
3439 q->dbval = IONIC_DBELL_QID(q->hw_index);
3440
3441 dev_dbg(dev, "notifyq->hw_type %d\n", q->hw_type);
3442 dev_dbg(dev, "notifyq->hw_index %d\n", q->hw_index);
3443
3444 /* preset the callback info */
3445 q->info[0].cb_arg = lif;
3446
3447 qcq->flags |= IONIC_QCQ_F_INITED;
3448
3449 return 0;
3450 }
3451
3452 static int ionic_station_set(struct ionic_lif *lif)
3453 {
3454 struct net_device *netdev = lif->netdev;
3455 struct ionic_admin_ctx ctx = {
3456 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
3457 .cmd.lif_getattr = {
3458 .opcode = IONIC_CMD_LIF_GETATTR,
3459 .index = cpu_to_le16(lif->index),
3460 .attr = IONIC_LIF_ATTR_MAC,
3461 },
3462 };
3463 u8 mac_address[ETH_ALEN];
3464 struct sockaddr addr;
3465 int err;
3466
3467 err = ionic_adminq_post_wait(lif, &ctx);
3468 if (err)
3469 return err;
3470 netdev_dbg(lif->netdev, "found initial MAC addr %pM\n",
3471 ctx.comp.lif_getattr.mac);
3472 ether_addr_copy(mac_address, ctx.comp.lif_getattr.mac);
3473
3474 if (is_zero_ether_addr(mac_address)) {
3475 eth_hw_addr_random(netdev);
3476 netdev_dbg(netdev, "Random Mac generated: %pM\n", netdev->dev_addr);
3477 ether_addr_copy(mac_address, netdev->dev_addr);
3478
3479 err = ionic_program_mac(lif, mac_address);
3480 if (err < 0)
3481 return err;
3482
3483 if (err > 0) {
3484 			netdev_dbg(netdev, "%s: SET/GET ATTR Mac are not the same - due to old FW running\n",
3485 __func__);
3486 return 0;
3487 }
3488 }
3489
3490 if (!is_zero_ether_addr(netdev->dev_addr)) {
3491 /* If the netdev mac is non-zero and doesn't match the default
3492 * device address, it was set by something earlier and we're
3493 * likely here again after a fw-upgrade reset. We need to be
3494 * sure the netdev mac is in our filter list.
3495 */
3496 if (!ether_addr_equal(mac_address, netdev->dev_addr))
3497 ionic_lif_addr_add(lif, netdev->dev_addr);
3498 } else {
3499 /* Update the netdev mac with the device's mac */
3500 ether_addr_copy(addr.sa_data, mac_address);
3501 addr.sa_family = AF_INET;
3502 err = eth_prepare_mac_addr_change(netdev, &addr);
3503 if (err) {
3504 netdev_warn(lif->netdev, "ignoring bad MAC addr from NIC %pM - err %d\n",
3505 addr.sa_data, err);
3506 return 0;
3507 }
3508
3509 eth_commit_mac_addr_change(netdev, &addr);
3510 }
3511
3512 netdev_dbg(lif->netdev, "adding station MAC addr %pM\n",
3513 netdev->dev_addr);
3514 ionic_lif_addr_add(lif, netdev->dev_addr);
3515
3516 return 0;
3517 }
3518
3519 int ionic_lif_init(struct ionic_lif *lif)
3520 {
3521 struct ionic_dev *idev = &lif->ionic->idev;
3522 struct device *dev = lif->ionic->dev;
3523 struct ionic_lif_init_comp comp;
3524 int dbpage_num;
3525 int err;
3526
3527 mutex_lock(&lif->ionic->dev_cmd_lock);
3528 ionic_dev_cmd_lif_init(idev, lif->index, lif->info_pa);
3529 err = ionic_dev_cmd_wait(lif->ionic, DEVCMD_TIMEOUT);
3530 ionic_dev_cmd_comp(idev, (union ionic_dev_cmd_comp *)&comp);
3531 mutex_unlock(&lif->ionic->dev_cmd_lock);
3532 if (err)
3533 return err;
3534
3535 lif->hw_index = le16_to_cpu(comp.hw_index);
3536
3537 /* now that we have the hw_index we can figure out our doorbell page */
3538 lif->dbid_count = le32_to_cpu(lif->ionic->ident.dev.ndbpgs_per_lif);
3539 if (!lif->dbid_count) {
3540 dev_err(dev, "No doorbell pages, aborting\n");
3541 return -EINVAL;
3542 }
3543
3544 lif->kern_pid = 0;
3545 dbpage_num = ionic_db_page_num(lif, lif->kern_pid);
3546 lif->kern_dbpage = ionic_bus_map_dbpage(lif->ionic, dbpage_num);
3547 if (!lif->kern_dbpage) {
3548 dev_err(dev, "Cannot map dbpage, aborting\n");
3549 return -ENOMEM;
3550 }
3551
3552 err = ionic_lif_adminq_init(lif);
3553 if (err)
3554 goto err_out_adminq_deinit;
3555
3556 if (lif->ionic->nnqs_per_lif) {
3557 err = ionic_lif_notifyq_init(lif);
3558 if (err)
3559 goto err_out_notifyq_deinit;
3560 }
3561
3562 if (test_bit(IONIC_LIF_F_FW_RESET, lif->state))
3563 err = ionic_set_nic_features(lif, lif->netdev->features);
3564 else
3565 err = ionic_init_nic_features(lif);
3566 if (err)
3567 goto err_out_notifyq_deinit;
3568
3569 if (!test_bit(IONIC_LIF_F_FW_RESET, lif->state)) {
3570 err = ionic_rx_filters_init(lif);
3571 if (err)
3572 goto err_out_notifyq_deinit;
3573 }
3574
3575 err = ionic_station_set(lif);
3576 if (err)
3577 goto err_out_notifyq_deinit;
3578
3579 lif->rx_copybreak = IONIC_RX_COPYBREAK_DEFAULT;
3580
3581 set_bit(IONIC_LIF_F_INITED, lif->state);
3582
3583 INIT_WORK(&lif->tx_timeout_work, ionic_tx_timeout_work);
3584
3585 return 0;
3586
3587 err_out_notifyq_deinit:
3588 napi_disable(&lif->adminqcq->napi);
3589 ionic_lif_qcq_deinit(lif, lif->notifyqcq);
3590 err_out_adminq_deinit:
3591 ionic_lif_qcq_deinit(lif, lif->adminqcq);
3592 ionic_lif_reset(lif);
3593 ionic_bus_unmap_dbpage(lif->ionic, lif->kern_dbpage);
3594 lif->kern_dbpage = NULL;
3595
3596 return err;
3597 }
3598
3599 static void ionic_lif_notify_work(struct work_struct *ws)
3600 {
3601 }
3602
3603 static void ionic_lif_set_netdev_info(struct ionic_lif *lif)
3604 {
3605 struct ionic_admin_ctx ctx = {
3606 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
3607 .cmd.lif_setattr = {
3608 .opcode = IONIC_CMD_LIF_SETATTR,
3609 .index = cpu_to_le16(lif->index),
3610 .attr = IONIC_LIF_ATTR_NAME,
3611 },
3612 };
3613
3614 strscpy(ctx.cmd.lif_setattr.name, lif->netdev->name,
3615 sizeof(ctx.cmd.lif_setattr.name));
3616
3617 ionic_adminq_post_wait(lif, &ctx);
3618 }
3619
3620 static struct ionic_lif *ionic_netdev_lif(struct net_device *netdev)
3621 {
3622 if (!netdev || netdev->netdev_ops->ndo_start_xmit != ionic_start_xmit)
3623 return NULL;
3624
3625 return netdev_priv(netdev);
3626 }
3627
3628 static int ionic_lif_notify(struct notifier_block *nb,
3629 unsigned long event, void *info)
3630 {
3631 struct net_device *ndev = netdev_notifier_info_to_dev(info);
3632 struct ionic *ionic = container_of(nb, struct ionic, nb);
3633 struct ionic_lif *lif = ionic_netdev_lif(ndev);
3634
3635 if (!lif || lif->ionic != ionic)
3636 return NOTIFY_DONE;
3637
3638 switch (event) {
3639 case NETDEV_CHANGENAME:
3640 ionic_lif_set_netdev_info(lif);
3641 break;
3642 }
3643
3644 return NOTIFY_DONE;
3645 }
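
/* Example (hypothetical names): renaming the interface, e.g.
 *
 *	ip link set eth0 name mgmt0
 *
 * fires NETDEV_CHANGENAME, and the notifier pushes the new name to the
 * firmware via IONIC_LIF_ATTR_NAME so device-side tooling can report
 * the host's netdev name.
 */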
3646
3647 int ionic_lif_register(struct ionic_lif *lif)
3648 {
3649 int err;
3650
3651 ionic_lif_register_phc(lif);
3652
3653 INIT_WORK(&lif->ionic->nb_work, ionic_lif_notify_work);
3654
3655 lif->ionic->nb.notifier_call = ionic_lif_notify;
3656
3657 err = register_netdevice_notifier(&lif->ionic->nb);
3658 if (err)
3659 lif->ionic->nb.notifier_call = NULL;
3660
3661 /* only register LIF0 for now */
3662 err = register_netdev(lif->netdev);
3663 if (err) {
3664 dev_err(lif->ionic->dev, "Cannot register net device, aborting\n");
3665 ionic_lif_unregister_phc(lif);
3666 return err;
3667 }
3668
3669 ionic_link_status_check_request(lif, CAN_SLEEP);
3670 lif->registered = true;
3671 ionic_lif_set_netdev_info(lif);
3672
3673 return 0;
3674 }
3675
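/* Tear down in the reverse order of ionic_lif_register(): notifier
 * first, then the netdev (only if it actually registered), then the PHC.
 */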
void ionic_lif_unregister(struct ionic_lif *lif)
{
	if (lif->ionic->nb.notifier_call) {
		unregister_netdevice_notifier(&lif->ionic->nb);
		cancel_work_sync(&lif->ionic->nb_work);
		lif->ionic->nb.notifier_call = NULL;
	}

	if (lif->netdev->reg_state == NETREG_REGISTERED)
		unregister_netdev(lif->netdev);

	ionic_lif_unregister_phc(lif);

	lif->registered = false;
}

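/* Query the FW for the capabilities of each queue type we know about,
 * at the version requested in ionic_qtype_versions[].  The results are
 * cached in lif->qtype_info[] and the SG limits are capped to what the
 * stack can use.  -EINVAL means that one qtype isn't supported; -EIO
 * means the FW predates the q_ident command entirely.
 */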
static void ionic_lif_queue_identify(struct ionic_lif *lif)
{
	union ionic_q_identity __iomem *q_ident;
	struct ionic *ionic = lif->ionic;
	struct ionic_dev *idev;
	int qtype;
	int err;

	idev = &lif->ionic->idev;
	q_ident = (union ionic_q_identity __iomem *)&idev->dev_cmd_regs->data;

	for (qtype = 0; qtype < ARRAY_SIZE(ionic_qtype_versions); qtype++) {
		struct ionic_qtype_info *qti = &lif->qtype_info[qtype];

		/* skip the queue types we don't know about */
		switch (qtype) {
		case IONIC_QTYPE_ADMINQ:
		case IONIC_QTYPE_NOTIFYQ:
		case IONIC_QTYPE_RXQ:
		case IONIC_QTYPE_TXQ:
			break;
		default:
			continue;
		}

		memset(qti, 0, sizeof(*qti));

		mutex_lock(&ionic->dev_cmd_lock);
		ionic_dev_cmd_queue_identify(idev, lif->lif_type, qtype,
					     ionic_qtype_versions[qtype]);
		err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT);
		if (!err) {
			qti->version = readb(&q_ident->version);
			qti->supported = readb(&q_ident->supported);
			qti->features = readq(&q_ident->features);
			qti->desc_sz = readw(&q_ident->desc_sz);
			qti->comp_sz = readw(&q_ident->comp_sz);
			qti->sg_desc_sz = readw(&q_ident->sg_desc_sz);
			qti->max_sg_elems = readw(&q_ident->max_sg_elems);
			qti->sg_desc_stride = readw(&q_ident->sg_desc_stride);
		}
		mutex_unlock(&ionic->dev_cmd_lock);

		if (err == -EINVAL) {
			dev_err(ionic->dev, "qtype %d not supported\n", qtype);
			continue;
		} else if (err == -EIO) {
			dev_err(ionic->dev, "q_ident failed, not supported on older FW\n");
			return;
		} else if (err) {
			dev_err(ionic->dev, "q_ident failed, qtype %d: %d\n",
				qtype, err);
			return;
		}

		dev_dbg(ionic->dev, " qtype[%d].version = %d\n",
			qtype, qti->version);
		dev_dbg(ionic->dev, " qtype[%d].supported = 0x%02x\n",
			qtype, qti->supported);
		dev_dbg(ionic->dev, " qtype[%d].features = 0x%04llx\n",
			qtype, qti->features);
		dev_dbg(ionic->dev, " qtype[%d].desc_sz = %d\n",
			qtype, qti->desc_sz);
		dev_dbg(ionic->dev, " qtype[%d].comp_sz = %d\n",
			qtype, qti->comp_sz);
		dev_dbg(ionic->dev, " qtype[%d].sg_desc_sz = %d\n",
			qtype, qti->sg_desc_sz);
		dev_dbg(ionic->dev, " qtype[%d].max_sg_elems = %d\n",
			qtype, qti->max_sg_elems);
		dev_dbg(ionic->dev, " qtype[%d].sg_desc_stride = %d\n",
			qtype, qti->sg_desc_stride);

		if (qti->max_sg_elems >= IONIC_MAX_FRAGS) {
			qti->max_sg_elems = IONIC_MAX_FRAGS - 1;
			dev_dbg(ionic->dev, "limiting qtype %d max_sg_elems to IONIC_MAX_FRAGS-1 %d\n",
				qtype, qti->max_sg_elems);
		}

		if (qti->max_sg_elems > MAX_SKB_FRAGS) {
			qti->max_sg_elems = MAX_SKB_FRAGS;
			dev_dbg(ionic->dev, "limiting qtype %d max_sg_elems to MAX_SKB_FRAGS %d\n",
				qtype, qti->max_sg_elems);
		}
	}
}

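/* Read the LIF identity block from the FW into *lid, clipped to the
 * size of the dev_cmd data window.
 */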
int ionic_lif_identify(struct ionic *ionic, u8 lif_type,
		       union ionic_lif_identity *lid)
{
	struct ionic_dev *idev = &ionic->idev;
	size_t sz;
	int err;

	sz = min(sizeof(*lid), sizeof(idev->dev_cmd_regs->data));

	mutex_lock(&ionic->dev_cmd_lock);
	ionic_dev_cmd_lif_identify(idev, lif_type, IONIC_IDENTITY_VERSION_1);
	err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT);
	memcpy_fromio(lid, &idev->dev_cmd_regs->data, sz);
	mutex_unlock(&ionic->dev_cmd_lock);
	if (err)
		return err;

	dev_dbg(ionic->dev, "capabilities 0x%llx\n",
		le64_to_cpu(lid->capabilities));

	dev_dbg(ionic->dev, "eth.max_ucast_filters %d\n",
		le32_to_cpu(lid->eth.max_ucast_filters));
	dev_dbg(ionic->dev, "eth.max_mcast_filters %d\n",
		le32_to_cpu(lid->eth.max_mcast_filters));
	dev_dbg(ionic->dev, "eth.features 0x%llx\n",
		le64_to_cpu(lid->eth.config.features));
	dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_ADMINQ] %d\n",
		le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_ADMINQ]));
	dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_NOTIFYQ] %d\n",
		le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_NOTIFYQ]));
	dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_RXQ] %d\n",
		le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_RXQ]));
	dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_TXQ] %d\n",
		le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_TXQ]));
	dev_dbg(ionic->dev, "eth.config.name %s\n", lid->eth.config.name);
	dev_dbg(ionic->dev, "eth.config.mac %pM\n", lid->eth.config.mac);
	dev_dbg(ionic->dev, "eth.config.mtu %d\n",
		le32_to_cpu(lid->eth.config.mtu));

	return 0;
}

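/* Size the queue and interrupt resources for the LIF: start from the
 * FW-advertised maxima (trimmed for kdump and HW timestamping), then
 * request MSI-X vectors, shrinking the queue counts until the request
 * can be satisfied.
 */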
int ionic_lif_size(struct ionic *ionic)
{
	struct ionic_identity *ident = &ionic->ident;
	unsigned int nintrs, dev_nintrs;
	union ionic_lif_config *lc;
	unsigned int ntxqs_per_lif;
	unsigned int nrxqs_per_lif;
	unsigned int neqs_per_lif;
	unsigned int nnqs_per_lif;
	unsigned int nxqs, neqs;
	unsigned int min_intrs;
	int err;

	/* retrieve basic values from FW */
	lc = &ident->lif.eth.config;
	dev_nintrs = le32_to_cpu(ident->dev.nintrs);
	neqs_per_lif = le32_to_cpu(ident->lif.rdma.eq_qtype.qid_count);
	nnqs_per_lif = le32_to_cpu(lc->queue_count[IONIC_QTYPE_NOTIFYQ]);
	ntxqs_per_lif = le32_to_cpu(lc->queue_count[IONIC_QTYPE_TXQ]);
	nrxqs_per_lif = le32_to_cpu(lc->queue_count[IONIC_QTYPE_RXQ]);

	/* limit values to play nice with kdump */
	if (is_kdump_kernel()) {
		dev_nintrs = 2;
		neqs_per_lif = 0;
		nnqs_per_lif = 0;
		ntxqs_per_lif = 1;
		nrxqs_per_lif = 1;
	}

	/* reserve last queue id for hardware timestamping */
	if (lc->features & cpu_to_le64(IONIC_ETH_HW_TIMESTAMP)) {
		if (ntxqs_per_lif <= 1 || nrxqs_per_lif <= 1) {
			lc->features &= cpu_to_le64(~IONIC_ETH_HW_TIMESTAMP);
		} else {
			ntxqs_per_lif -= 1;
			nrxqs_per_lif -= 1;
		}
	}

	nxqs = min(ntxqs_per_lif, nrxqs_per_lif);
	nxqs = min(nxqs, num_online_cpus());
	neqs = min(neqs_per_lif, num_online_cpus());

try_again:
	/* interrupt usage:
	 * 1 for master lif adminq/notifyq
	 * 1 for each CPU for master lif TxRx queue pairs
	 * whatever's left is for RDMA queues
	 */
	nintrs = 1 + nxqs + neqs;
	min_intrs = 2;	/* adminq + 1 TxRx queue pair */

	if (nintrs > dev_nintrs)
		goto try_fewer;

	err = ionic_bus_alloc_irq_vectors(ionic, nintrs);
	if (err < 0 && err != -ENOSPC) {
		dev_err(ionic->dev, "Can't get intrs from OS: %d\n", err);
		return err;
	}
	if (err == -ENOSPC)
		goto try_fewer;

	if (err != nintrs) {
		ionic_bus_free_irq_vectors(ionic);
		goto try_fewer;
	}

	ionic->nnqs_per_lif = nnqs_per_lif;
	ionic->neqs_per_lif = neqs;
	ionic->ntxqs_per_lif = nxqs;
	ionic->nrxqs_per_lif = nxqs;
	ionic->nintrs = nintrs;

	ionic_debugfs_add_sizes(ionic);

	return 0;

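	/* Shed resources in priority order (notifyqs, then event queues,
	 * then TxRx queue pairs) and retry the vector allocation.
	 */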
try_fewer:
	if (nnqs_per_lif > 1) {
		nnqs_per_lif >>= 1;
		goto try_again;
	}
	if (neqs > 1) {
		neqs >>= 1;
		goto try_again;
	}
	if (nxqs > 1) {
		nxqs >>= 1;
		goto try_again;
	}
	dev_err(ionic->dev, "Can't get minimum %d intrs from OS\n", min_intrs);
	return -ENOSPC;
}
