/*-
 * SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
 *
 * Copyright (c) 2015 - 2026 Intel Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenFabrics.org BSD license below:
 *
 *   Redistribution and use in source and binary forms, with or
 *   without modification, are permitted provided that the following
 *   conditions are met:
 *
 *    - Redistributions of source code must retain the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer.
 *
 *    - Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials
 *      provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "irdma_main.h"

static struct irdma_rsrc_limits rsrc_limits_table[] = {
        [0] = {
                .qplimit = SZ_128,
        },
        [1] = {
                .qplimit = SZ_1K,
        },
        [2] = {
                .qplimit = SZ_2K,
        },
        [3] = {
                .qplimit = SZ_4K,
        },
        [4] = {
                .qplimit = SZ_16K,
        },
        [5] = {
                .qplimit = SZ_64K,
        },
        [6] = {
                .qplimit = SZ_128K,
        },
        [7] = {
                .qplimit = SZ_256K,
        },
};
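
/*
 * rsrc_limits_table is indexed by rf->limits_sel (see irdma_hmc_setup()
 * below), so e.g. limits_sel == 3 sizes the function for up to 4K QPs;
 * the remaining HMC object counts are then derived from that QP limit in
 * irdma_cfg_fpm_val().
 */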

/* types of hmc objects */
static enum irdma_hmc_rsrc_type iw_hmc_obj_types[] = {
        IRDMA_HMC_IW_QP,
        IRDMA_HMC_IW_CQ,
        IRDMA_HMC_IW_HTE,
        IRDMA_HMC_IW_ARP,
        IRDMA_HMC_IW_APBVT_ENTRY,
        IRDMA_HMC_IW_MR,
        IRDMA_HMC_IW_XF,
        IRDMA_HMC_IW_XFFL,
        IRDMA_HMC_IW_Q1,
        IRDMA_HMC_IW_Q1FL,
        IRDMA_HMC_IW_PBLE,
        IRDMA_HMC_IW_TIMER,
        IRDMA_HMC_IW_FSIMC,
        IRDMA_HMC_IW_FSIAV,
        IRDMA_HMC_IW_RRF,
        IRDMA_HMC_IW_RRFFL,
        IRDMA_HMC_IW_HDR,
        IRDMA_HMC_IW_MD,
        IRDMA_HMC_IW_OOISC,
        IRDMA_HMC_IW_OOISCFFL,
};
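
/*
 * The create/delete loops below walk iw_hmc_obj_types in order and, on
 * GEN_1 hardware, stop once the loop index reaches IRDMA_HMC_IW_TIMER.
 * That early-out relies on the leading enum values matching their
 * positions in this array (an assumption inferred from the loop bounds),
 * so reordering the entries here would change what GEN_1 sets up.
 */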

/**
 * irdma_iwarp_ce_handler - handle iwarp completions
 * @iwcq: iwarp cq receiving event
 */
static void
irdma_iwarp_ce_handler(struct irdma_sc_cq *iwcq)
{
        struct irdma_cq *cq = iwcq->back_cq;

        if (!cq->user_mode)
                atomic_set(&cq->armed, 0);
        if (cq->ibcq.comp_handler)
                cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
}

/**
 * irdma_puda_ce_handler - handle puda completion events
 * @rf: RDMA PCI function
 * @cq: puda completion q for event
 */
static void
irdma_puda_ce_handler(struct irdma_pci_f *rf,
                      struct irdma_sc_cq *cq)
{
        struct irdma_sc_dev *dev = &rf->sc_dev;
        u32 compl_error;
        int status;

        do {
                status = irdma_puda_poll_cmpl(dev, cq, &compl_error);
                if (status == -ENOENT)
                        break;
                if (status) {
                        irdma_debug(dev, IRDMA_DEBUG_ERR, "puda status = %d\n", status);
                        break;
                }
                if (compl_error) {
                        irdma_debug(dev, IRDMA_DEBUG_ERR,
                                    "puda compl_err = 0x%x\n", compl_error);
                        break;
                }
        } while (1);

        irdma_sc_ccq_arm(cq);
}
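
/*
 * In the loop above, -ENOENT from irdma_puda_poll_cmpl() is the normal
 * "ring drained" exit rather than an error; the CQ is then re-armed so
 * that the next completion raises a fresh interrupt.
 */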

/**
 * irdma_process_ceq - handle ceq for completions
 * @rf: RDMA PCI function
 * @ceq: ceq having cq for completion
 */
static void
irdma_process_ceq(struct irdma_pci_f *rf, struct irdma_ceq *ceq)
{
        struct irdma_sc_dev *dev = &rf->sc_dev;
        struct irdma_sc_ceq *sc_ceq;
        struct irdma_sc_cq *cq;
        unsigned long flags;

        sc_ceq = &ceq->sc_ceq;
        do {
                spin_lock_irqsave(&ceq->ce_lock, flags);

                cq = irdma_sc_process_ceq(dev, sc_ceq);
                if (!cq || rf->reset) {
                        spin_unlock_irqrestore(&ceq->ce_lock, flags);
                        break;
                }

                if (cq->cq_type == IRDMA_CQ_TYPE_IWARP)
                        irdma_iwarp_ce_handler(cq);

                spin_unlock_irqrestore(&ceq->ce_lock, flags);

                if (cq->cq_type == IRDMA_CQ_TYPE_CQP)
                        queue_work(rf->cqp_cmpl_wq, &rf->cqp_cmpl_work);
                else if (cq->cq_type == IRDMA_CQ_TYPE_ILQ ||
                         cq->cq_type == IRDMA_CQ_TYPE_IEQ)
                        irdma_puda_ce_handler(rf, cq);
        } while (1);
}

static void
irdma_set_flush_fields(struct irdma_sc_qp *qp,
                       struct irdma_aeqe_info *info)
{
        struct qp_err_code qp_err;

        qp->sq_flush_code = info->sq;
        qp->rq_flush_code = info->rq;
        qp_err = irdma_ae_to_qp_err_code(info->ae_id);

        qp->flush_code = qp_err.flush_code;
        qp->event_type = qp_err.event_type;
}

/**
 * irdma_complete_cqp_request - perform post-completion cleanup
 * @cqp: device CQP
 * @cqp_request: CQP request
 *
 * Mark CQP request as done, wake up waiting thread or invoke
 * callback function and release/free CQP request.
 */
static void
irdma_complete_cqp_request(struct irdma_cqp *cqp,
                           struct irdma_cqp_request *cqp_request)
{
        WRITE_ONCE(cqp_request->request_done, true);
        if (cqp_request->waiting)
                wake_up(&cqp_request->waitq);
        else if (cqp_request->callback_fcn)
                cqp_request->callback_fcn(cqp_request);
        irdma_put_cqp_request(cqp, cqp_request);
}
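
/*
 * A minimal sketch of the waiting-mode half of this contract as a caller
 * would typically use it (hypothetical usage, not code from this file):
 *
 *      cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
 *      // ...build and post the CQP WQE with cqp_request as scratch...
 *      wait_event_timeout(cqp_request->waitq,
 *                         READ_ONCE(cqp_request->request_done), timeout);
 *
 * Non-waiting requests instead set callback_fcn, which runs here in
 * completion context before the final reference is dropped.
 */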

/**
 * irdma_process_aeq - handle aeq events
 * @rf: RDMA PCI function
 *
 * Return: true if an AE was processed.
 */
static bool
irdma_process_aeq(struct irdma_pci_f *rf)
{
        struct irdma_sc_dev *dev = &rf->sc_dev;
        struct irdma_aeq *aeq = &rf->aeq;
        struct irdma_sc_aeq *sc_aeq = &aeq->sc_aeq;
        struct irdma_aeqe_info aeinfo;
        struct irdma_aeqe_info *info = &aeinfo;
        int ret;
        struct irdma_qp *iwqp = NULL;
        struct irdma_cq *iwcq = NULL;
        struct irdma_sc_qp *qp = NULL;
        struct irdma_device *iwdev = rf->iwdev;
        struct irdma_qp_host_ctx_info *ctx_info = NULL;
        unsigned long flags;
        u32 aeqcnt = 0;

        if (!sc_aeq->size)
                return false;

        do {
                memset(info, 0, sizeof(*info));
                ret = irdma_sc_get_next_aeqe(sc_aeq, info);
                if (ret)
                        break;

                if (info->aeqe_overflow) {
                        irdma_dev_err(&iwdev->ibdev, "AEQ has overflowed\n");
                        rf->reset = true;
                        rf->gen_ops.request_reset(rf);
                        return (aeqcnt > 0);
                }

                aeqcnt++;
                atomic_inc(&iwdev->ae_info.ae_cnt);

                irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_AEQ,
                            "ae_id = 0x%x (%s), is_qp = %d, qp_id = %d, tcp_state = %d, iwarp_state = %d, ae_src = %d\n",
                            info->ae_id, irdma_get_ae_desc(info->ae_id),
                            info->qp, info->qp_cq_id, info->tcp_state,
                            info->iwarp_state, info->ae_src);

                if (info->qp) {
                        spin_lock_irqsave(&rf->qptable_lock, flags);
                        iwqp = rf->qp_table[info->qp_cq_id];
                        if (!iwqp) {
                                spin_unlock_irqrestore(&rf->qptable_lock,
                                                       flags);
                                if (info->ae_id == IRDMA_AE_QP_SUSPEND_COMPLETE) {
                                        struct irdma_device *iwdev = rf->iwdev;

                                        if (!iwdev->vsi.tc_change_pending)
                                                continue;

                                        atomic_dec(&iwdev->vsi.qp_suspend_reqs);
                                        wake_up(&iwdev->suspend_wq);
                                        continue;
                                }
                                irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_AEQ,
                                            "qp_id %d is already freed\n",
                                            info->qp_cq_id);
                                continue;
                        }
                        irdma_qp_add_ref(&iwqp->ibqp);
                        spin_unlock_irqrestore(&rf->qptable_lock, flags);
                        qp = &iwqp->sc_qp;
                        spin_lock_irqsave(&iwqp->lock, flags);
                        iwqp->hw_tcp_state = info->tcp_state;
                        iwqp->hw_iwarp_state = info->iwarp_state;

                        if (info->ae_id != IRDMA_AE_QP_SUSPEND_COMPLETE) {
                                iwqp->last_aeq = info->ae_id;
                                iwqp->ae_src = info->ae_src;
                        }

                        spin_unlock_irqrestore(&iwqp->lock, flags);
                        ctx_info = &iwqp->ctx_info;
                } else {
                        if (info->ae_id != IRDMA_AE_CQ_OPERATION_ERROR)
                                continue;
                }

                switch (info->ae_id) {
                        struct irdma_cm_node *cm_node;
                case IRDMA_AE_LLP_CONNECTION_ESTABLISHED:
                        cm_node = iwqp->cm_node;
                        if (cm_node->accept_pend) {
                                atomic_dec(&cm_node->listener->pend_accepts_cnt);
                                cm_node->accept_pend = 0;
                        }
                        iwqp->rts_ae_rcvd = 1;
                        wake_up_interruptible(&iwqp->waitq);
                        break;
                case IRDMA_AE_LLP_FIN_RECEIVED:
                        if (qp->term_flags)
                                break;
                        if (atomic_inc_return(&iwqp->close_timer_started) == 1) {
                                iwqp->hw_tcp_state = IRDMA_TCP_STATE_CLOSE_WAIT;
                                if (iwqp->ibqp_state == IB_QPS_RTS) {
                                        irdma_next_iw_state(iwqp,
                                                            IRDMA_QP_STATE_CLOSING,
                                                            0, 0, 0);
                                        irdma_cm_disconn(iwqp);
                                }
                                irdma_schedule_cm_timer(iwqp->cm_node,
                                                        (struct irdma_puda_buf *)iwqp,
                                                        IRDMA_TIMER_TYPE_CLOSE,
                                                        1, 0);
                        }
                        break;
                case IRDMA_AE_LLP_CLOSE_COMPLETE:
                        if (qp->term_flags)
                                irdma_terminate_done(qp, 0);
                        else
                                irdma_cm_disconn(iwqp);
                        break;
                case IRDMA_AE_BAD_CLOSE:
                case IRDMA_AE_RESET_SENT:
                        irdma_next_iw_state(iwqp, IRDMA_QP_STATE_ERROR, 1, 0,
                                            0);
                        irdma_cm_disconn(iwqp);
                        break;
                case IRDMA_AE_LLP_CONNECTION_RESET:
                        if (atomic_read(&iwqp->close_timer_started))
                                break;
                        irdma_cm_disconn(iwqp);
                        break;
                case IRDMA_AE_QP_SUSPEND_COMPLETE:
                        if (iwqp->iwdev->vsi.tc_change_pending) {
                                if (!atomic_dec_return(&iwqp->sc_qp.vsi->qp_suspend_reqs))
                                        wake_up(&iwqp->iwdev->suspend_wq);
                        }
                        if (iwqp->suspend_pending) {
                                iwqp->suspend_pending = false;
                                wake_up(&iwqp->iwdev->suspend_wq);
                        }
                        break;
                case IRDMA_AE_TERMINATE_SENT:
                        irdma_terminate_send_fin(qp);
                        break;
                case IRDMA_AE_LLP_TERMINATE_RECEIVED:
                        irdma_terminate_received(qp, info);
                        break;
                case IRDMA_AE_LCE_CQ_CATASTROPHIC:
                case IRDMA_AE_CQ_OPERATION_ERROR:
                        irdma_dev_err(&iwdev->ibdev,
                                      "Processing CQ[0x%x] op error, AE 0x%04X\n",
                                      info->qp_cq_id, info->ae_id);
                        spin_lock_irqsave(&rf->cqtable_lock, flags);
                        iwcq = rf->cq_table[info->qp_cq_id];
                        if (!iwcq) {
                                spin_unlock_irqrestore(&rf->cqtable_lock,
                                                       flags);
                                irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_AEQ,
                                            "cq_id %d is already freed\n",
                                            info->qp_cq_id);
                                continue;
                        }
                        irdma_cq_add_ref(&iwcq->ibcq);
                        spin_unlock_irqrestore(&rf->cqtable_lock, flags);
                        if (iwcq->ibcq.event_handler) {
                                struct ib_event ibevent;

                                ibevent.device = iwcq->ibcq.device;
                                ibevent.event = IB_EVENT_CQ_ERR;
                                ibevent.element.cq = &iwcq->ibcq;
                                iwcq->ibcq.event_handler(&ibevent,
                                                         iwcq->ibcq.cq_context);
                        }
                        irdma_cq_rem_ref(&iwcq->ibcq);
                        break;
                case IRDMA_AE_RESET_NOT_SENT:
                case IRDMA_AE_LLP_DOUBT_REACHABILITY:
                        break;
                case IRDMA_AE_RESOURCE_EXHAUSTION:
                        irdma_dev_err(&iwdev->ibdev,
                                      "Resource exhaustion reason: q1 = %d xmit or rreq = %d\n",
                                      info->ae_src == IRDMA_AE_SOURCE_RSRC_EXHT_Q1,
                                      info->ae_src == IRDMA_AE_SOURCE_RSRC_EXHT_XT_RR);
                        break;
                case IRDMA_AE_PRIV_OPERATION_DENIED:
                case IRDMA_AE_RDMAP_ROE_BAD_LLP_CLOSE:
                case IRDMA_AE_STAG_ZERO_INVALID:
                case IRDMA_AE_IB_RREQ_AND_Q1_FULL:
                case IRDMA_AE_DDP_UBE_INVALID_DDP_VERSION:
                case IRDMA_AE_DDP_UBE_INVALID_MO:
                case IRDMA_AE_DDP_UBE_INVALID_QN:
                case IRDMA_AE_DDP_NO_L_BIT:
                case IRDMA_AE_RDMAP_ROE_INVALID_RDMAP_VERSION:
                case IRDMA_AE_RDMAP_ROE_UNEXPECTED_OPCODE:
                case IRDMA_AE_ROE_INVALID_RDMA_READ_REQUEST:
                case IRDMA_AE_ROE_INVALID_RDMA_WRITE_OR_READ_RESP:
                case IRDMA_AE_INVALID_ARP_ENTRY:
                case IRDMA_AE_INVALID_TCP_OPTION_RCVD:
                case IRDMA_AE_STALE_ARP_ENTRY:
                case IRDMA_AE_LLP_RECEIVED_MPA_CRC_ERROR:
                case IRDMA_AE_LLP_SEGMENT_TOO_SMALL:
                case IRDMA_AE_LLP_SYN_RECEIVED:
                case IRDMA_AE_LLP_TOO_MANY_RETRIES:
                case IRDMA_AE_LCE_QP_CATASTROPHIC:
                case IRDMA_AE_LCE_FUNCTION_CATASTROPHIC:
                case IRDMA_AE_LLP_TOO_MANY_RNRS:
                case IRDMA_AE_UDA_XMIT_DGRAM_TOO_LONG:
                default:
                        if (rdma_protocol_roce(&iwqp->iwdev->ibdev, 1)) {
                                ctx_info->roce_info->err_rq_idx_valid = info->err_rq_idx_valid;
                                if (info->rq) {
                                        ctx_info->roce_info->err_rq_idx = info->wqe_idx;
                                        irdma_sc_qp_setctx_roce(&iwqp->sc_qp, iwqp->host_ctx.va,
                                                                ctx_info);
                                }
                                irdma_set_flush_fields(qp, info);
                                irdma_cm_disconn(iwqp);
                                break;
                        }
                        ctx_info->iwarp_info->err_rq_idx_valid = info->err_rq_idx_valid;
                        if (info->rq) {
                                ctx_info->iwarp_info->err_rq_idx = info->wqe_idx;
                                ctx_info->tcp_info_valid = false;
                                ctx_info->iwarp_info_valid = true;
                                irdma_sc_qp_setctx(&iwqp->sc_qp, iwqp->host_ctx.va,
                                                   ctx_info);
                        }
                        if (iwqp->hw_iwarp_state != IRDMA_QP_STATE_RTS &&
                            iwqp->hw_iwarp_state != IRDMA_QP_STATE_TERMINATE) {
                                irdma_next_iw_state(iwqp, IRDMA_QP_STATE_ERROR, 1, 0, 0);
                                irdma_cm_disconn(iwqp);
                        } else {
                                irdma_terminate_connection(qp, info);
                        }
                        break;
                }
                if (info->qp)
                        irdma_qp_rem_ref(&iwqp->ibqp);
        } while (1);

        if (aeqcnt)
                irdma_sc_repost_aeq_entries(dev, aeqcnt);

        return (aeqcnt > 0);
}
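
/*
 * Note: the consumed AEQ entries are handed back to the hardware in a
 * single irdma_sc_repost_aeq_entries() call per pass rather than one per
 * event; batching the credit update this way is the apparent intent of
 * accumulating the count in aeqcnt.
 */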

/**
 * irdma_ena_intr - set up device interrupts
 * @dev: hardware control device structure
 * @msix_id: id of the interrupt to be enabled
 */
static void
irdma_ena_intr(struct irdma_sc_dev *dev, u32 msix_id)
{
        dev->irq_ops->irdma_en_irq(dev, msix_id);
}

/**
 * irdma_aeq_ceq0_tasklet_cb - tasklet for aeq and ceq 0
 * @t: tasklet_struct ptr
 */
static void
irdma_aeq_ceq0_tasklet_cb(unsigned long t)
{
        struct irdma_pci_f *rf = from_tasklet(rf, (struct tasklet_struct *)t,
                                              dpc_tasklet);

        if (rf->msix_shared)
                irdma_process_ceq(rf, rf->ceqlist);
        irdma_process_aeq(rf);
        irdma_ena_intr(&rf->sc_dev, rf->iw_msixtbl[0].idx);
}

/**
 * irdma_ceq_tasklet_cb - tasklet handler for CEQ
 * @t: tasklet_struct ptr
 */
static void
irdma_ceq_tasklet_cb(unsigned long t)
{
        struct irdma_ceq *iwceq = from_tasklet(iwceq, (struct tasklet_struct *)t,
                                               dpc_tasklet);
        struct irdma_pci_f *rf = iwceq->rf;

        irdma_process_ceq(rf, iwceq);
        irdma_ena_intr(&rf->sc_dev, iwceq->msix_idx);
}

/**
 * irdma_save_msix_info - copy msix vector information to iwarp device
 * @rf: RDMA PCI function
 *
 * Allocate iwdev msix table and copy the msix info to the table
 * Return 0 if successful, otherwise return error
 */
static int
irdma_save_msix_info(struct irdma_pci_f *rf)
{
        struct irdma_qvlist_info *iw_qvlist;
        struct irdma_qv_info *iw_qvinfo;
        u16 ceq_idx;
        u32 i;
        u32 size;

        if (!rf->msix_count) {
                irdma_dev_err(to_ibdev(&rf->sc_dev), "No MSI-X vectors reserved for RDMA.\n");
                return -EINVAL;
        }

        size = sizeof(struct irdma_msix_vector) * rf->msix_count;
        size += sizeof(*iw_qvlist);
        size += sizeof(*iw_qvinfo) * (rf->msix_count - 1);
        rf->iw_msixtbl = kzalloc(size, GFP_KERNEL);
        if (!rf->iw_msixtbl)
                return -ENOMEM;

        rf->iw_qvlist = (struct irdma_qvlist_info *)
            (&rf->iw_msixtbl[rf->msix_count]);
        iw_qvlist = rf->iw_qvlist;
        iw_qvinfo = iw_qvlist->qv_info;
        iw_qvlist->num_vectors = rf->msix_count;
        if (rf->msix_count <= num_online_cpus())
                rf->msix_shared = true;
        else if (rf->msix_count > num_online_cpus() + 1)
                rf->msix_count = num_online_cpus() + 1;

        for (i = 0, ceq_idx = 0; i < rf->msix_count; i++, iw_qvinfo++) {
                rf->iw_msixtbl[i].idx = rf->msix_info.entry + i;
                rf->iw_msixtbl[i].cpu_affinity = ceq_idx;
                if (!i) {
                        iw_qvinfo->aeq_idx = 0;
                        if (rf->msix_shared)
                                iw_qvinfo->ceq_idx = ceq_idx++;
                        else
                                iw_qvinfo->ceq_idx = IRDMA_Q_INVALID_IDX;
                } else {
                        iw_qvinfo->aeq_idx = IRDMA_Q_INVALID_IDX;
                        iw_qvinfo->ceq_idx = ceq_idx++;
                }
                iw_qvinfo->itr_idx = IRDMA_IDX_NOITR;
                iw_qvinfo->v_idx = rf->iw_msixtbl[i].idx;
        }

        return 0;
}
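
/*
 * Layout of the single allocation sized above (a sketch derived from the
 * size arithmetic in irdma_save_msix_info()):
 *
 *      rf->iw_msixtbl -> irdma_msix_vector[msix_count]
 *      rf->iw_qvlist  -> irdma_qvlist_info header
 *                        irdma_qv_info[msix_count - 1]
 *
 * Vector 0 services the AEQ (plus CEQ 0 when msix_shared); every other
 * vector maps 1:1 to a CEQ.
 */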

/**
 * irdma_aeq_ceq0_irq_handler - interrupt handler for aeq and ceq0
 * @data: RDMA PCI function
 */
static void
irdma_aeq_ceq0_irq_handler(void *data)
{
        struct irdma_pci_f *rf = data;

        tasklet_schedule(&rf->dpc_tasklet);
}

/**
 * irdma_ceq_irq_handler - interrupt handler for ceq
 * @data: ceq pointer
 */
static void
irdma_ceq_irq_handler(void *data)
{
        struct irdma_ceq *iwceq = data;

        tasklet_schedule(&iwceq->dpc_tasklet);
}

/**
 * irdma_free_irq - free device interrupts in FreeBSD manner
 * @rf: RDMA PCI function
 * @msix_vec: msix vector to disable irq
 *
 * The function is called when destroying an irq. It tears down
 * the interrupt and releases its resources.
 */
static void
irdma_free_irq(struct irdma_pci_f *rf, struct irdma_msix_vector *msix_vec)
{
        if (msix_vec->tag) {
                bus_teardown_intr(rf->dev_ctx.dev, msix_vec->res,
                                  msix_vec->tag);
                msix_vec->tag = NULL;
        }
        if (msix_vec->res) {
                bus_release_resource(rf->dev_ctx.dev, SYS_RES_IRQ,
                                     msix_vec->idx + 1,
                                     msix_vec->res);
                msix_vec->res = NULL;
        }
}

/**
 * irdma_destroy_irq - destroy device interrupts
 * @rf: RDMA PCI function
 * @msix_vec: msix vector to disable irq
 * @dev_id: parameter to pass to free_irq (used during irq setup)
 *
 * The function is called when destroying aeq/ceq
 */
static void
irdma_destroy_irq(struct irdma_pci_f *rf,
                  struct irdma_msix_vector *msix_vec, void *dev_id)
{
        struct irdma_sc_dev *dev = &rf->sc_dev;

        dev->irq_ops->irdma_dis_irq(dev, msix_vec->idx);
        irdma_free_irq(rf, msix_vec);
        if (rf == dev_id) {
                tasklet_kill(&rf->dpc_tasklet);
        } else {
                struct irdma_ceq *iwceq = (struct irdma_ceq *)dev_id;

                tasklet_kill(&iwceq->dpc_tasklet);
        }
}

/**
 * irdma_destroy_cqp - destroy control qp
 * @rf: RDMA PCI function
 * @free_hwcqp: true if the HW CQP should be freed
 *
 * Issue destroy cqp request and
 * free the resources associated with the cqp
 */
static void
irdma_destroy_cqp(struct irdma_pci_f *rf, bool free_hwcqp)
{
        struct irdma_sc_dev *dev = &rf->sc_dev;
        struct irdma_cqp *cqp = &rf->cqp;
        int status = 0;

        status = irdma_sc_cqp_destroy(dev->cqp, free_hwcqp);
        if (status)
                irdma_debug(dev, IRDMA_DEBUG_ERR, "Destroy CQP failed %d\n", status);

        irdma_cleanup_pending_cqp_op(rf);
        irdma_free_dma_mem(dev->hw, &cqp->sq);
        kfree(cqp->scratch_array);
        cqp->scratch_array = NULL;
        kfree(cqp->cqp_requests);
        cqp->cqp_requests = NULL;
}

static void
irdma_destroy_virt_aeq(struct irdma_pci_f *rf)
{
        struct irdma_aeq *aeq = &rf->aeq;
        u32 pg_cnt = DIV_ROUND_UP(aeq->mem.size, PAGE_SIZE);
        dma_addr_t *pg_arr = (dma_addr_t *)aeq->palloc.level1.addr;

        irdma_unmap_vm_page_list(&rf->hw, pg_arr, pg_cnt);
        irdma_free_pble(rf->pble_rsrc, &aeq->palloc);
        vfree(aeq->mem.va);
}

/**
 * irdma_destroy_aeq - destroy aeq
 * @rf: RDMA PCI function
 *
 * Issue a destroy aeq request and
 * free the resources associated with the aeq
 * The function is called during driver unload
 */
static void
irdma_destroy_aeq(struct irdma_pci_f *rf)
{
        struct irdma_sc_dev *dev = &rf->sc_dev;
        struct irdma_aeq *aeq = &rf->aeq;
        int status = -EBUSY;

        if (!rf->msix_shared) {
                rf->sc_dev.irq_ops->irdma_cfg_aeq(&rf->sc_dev, rf->iw_msixtbl->idx, false);
                irdma_destroy_irq(rf, rf->iw_msixtbl, rf);
        }
        if (rf->reset)
                goto exit;

        aeq->sc_aeq.size = 0;
        status = irdma_cqp_aeq_cmd(dev, &aeq->sc_aeq, IRDMA_OP_AEQ_DESTROY);
        if (status)
                irdma_debug(dev, IRDMA_DEBUG_ERR, "Destroy AEQ failed %d\n", status);

exit:
        if (aeq->virtual_map)
                irdma_destroy_virt_aeq(rf);
        else
                irdma_free_dma_mem(dev->hw, &aeq->mem);
}

/**
 * irdma_destroy_ceq - destroy ceq
 * @rf: RDMA PCI function
 * @iwceq: ceq to be destroyed
 *
 * Issue a destroy ceq request and
 * free the resources associated with the ceq
 */
static void
irdma_destroy_ceq(struct irdma_pci_f *rf, struct irdma_ceq *iwceq)
{
        struct irdma_sc_dev *dev = &rf->sc_dev;
        int status;

        if (rf->reset)
                goto exit;

        status = irdma_sc_ceq_destroy(&iwceq->sc_ceq, 0, 1);
        if (status) {
                irdma_debug(dev, IRDMA_DEBUG_ERR, "CEQ destroy command failed %d\n", status);
                goto exit;
        }

        status = irdma_sc_cceq_destroy_done(&iwceq->sc_ceq);
        if (status)
                irdma_debug(dev, IRDMA_DEBUG_ERR,
                            "CEQ destroy completion failed %d\n", status);
exit:
        spin_lock_destroy(&iwceq->ce_lock);
        spin_lock_destroy(&iwceq->sc_ceq.req_cq_lock);
        kfree(iwceq->sc_ceq.reg_cq);
        irdma_free_dma_mem(dev->hw, &iwceq->mem);
}

/**
 * irdma_del_ceq_0 - destroy ceq 0
 * @rf: RDMA PCI function
 *
 * Disable the ceq 0 interrupt and destroy the ceq 0
 */
static void
irdma_del_ceq_0(struct irdma_pci_f *rf)
{
        struct irdma_ceq *iwceq = rf->ceqlist;
        struct irdma_msix_vector *msix_vec;

        if (rf->msix_shared) {
                msix_vec = &rf->iw_msixtbl[0];
                rf->sc_dev.irq_ops->irdma_cfg_ceq(&rf->sc_dev,
                                                  msix_vec->ceq_id,
                                                  msix_vec->idx, false);
                irdma_destroy_irq(rf, msix_vec, rf);
        } else {
                msix_vec = &rf->iw_msixtbl[1];
                irdma_destroy_irq(rf, msix_vec, iwceq);
        }

        irdma_destroy_ceq(rf, iwceq);
        rf->sc_dev.ceq_valid = false;
        rf->ceqs_count = 0;
}

/**
 * irdma_del_ceqs - destroy all ceq's except CEQ 0
 * @rf: RDMA PCI function
 *
 * Go through all of the device ceq's, except 0, and for each
 * ceq disable the ceq interrupt and destroy the ceq
 */
static void
irdma_del_ceqs(struct irdma_pci_f *rf)
{
        struct irdma_ceq *iwceq = &rf->ceqlist[1];
        struct irdma_msix_vector *msix_vec;
        u32 i = 0;

        if (rf->msix_shared)
                msix_vec = &rf->iw_msixtbl[1];
        else
                msix_vec = &rf->iw_msixtbl[2];

        for (i = 1; i < rf->ceqs_count; i++, msix_vec++, iwceq++) {
                rf->sc_dev.irq_ops->irdma_cfg_ceq(&rf->sc_dev, msix_vec->ceq_id,
                                                  msix_vec->idx, false);
                irdma_destroy_irq(rf, msix_vec, iwceq);
                irdma_cqp_ceq_cmd(&rf->sc_dev, &iwceq->sc_ceq,
                                  IRDMA_OP_CEQ_DESTROY);
                spin_lock_destroy(&iwceq->ce_lock);
                spin_lock_destroy(&iwceq->sc_ceq.req_cq_lock);
                kfree(iwceq->sc_ceq.reg_cq);
                irdma_free_dma_mem(rf->sc_dev.hw, &iwceq->mem);
        }
        rf->ceqs_count = 1;
}

/**
 * irdma_destroy_ccq - destroy control cq
 * @rf: RDMA PCI function
 *
 * Issue destroy ccq request and
 * free the resources associated with the ccq
 */
static void
irdma_destroy_ccq(struct irdma_pci_f *rf)
{
        struct irdma_sc_dev *dev = &rf->sc_dev;
        struct irdma_ccq *ccq = &rf->ccq;
        int status = 0;

        if (rf->cqp_cmpl_wq)
                destroy_workqueue(rf->cqp_cmpl_wq);
        if (!rf->reset)
                status = irdma_sc_ccq_destroy(dev->ccq, 0, true);
        if (status)
                irdma_debug(dev, IRDMA_DEBUG_ERR, "CCQ destroy failed %d\n", status);
        irdma_free_dma_mem(dev->hw, &ccq->mem_cq);
}

/**
 * irdma_close_hmc_objects_type - delete hmc objects of a given type
 * @dev: iwarp device
 * @obj_type: the hmc object type to be deleted
 * @hmc_info: host memory info struct
 * @privileged: permission to close HMC objects
 * @reset: true if called before reset
 */
static void
irdma_close_hmc_objects_type(struct irdma_sc_dev *dev,
                             enum irdma_hmc_rsrc_type obj_type,
                             struct irdma_hmc_info *hmc_info,
                             bool privileged, bool reset)
{
        struct irdma_hmc_del_obj_info info = {0};

        info.hmc_info = hmc_info;
        info.rsrc_type = obj_type;
        info.count = hmc_info->hmc_obj[obj_type].cnt;
        info.privileged = privileged;
        if (irdma_sc_del_hmc_obj(dev, &info, reset))
                irdma_debug(dev, IRDMA_DEBUG_ERR,
                            "del HMC obj of type %d failed\n", obj_type);
}

/**
 * irdma_del_hmc_objects - remove all device hmc objects
 * @dev: iwarp device
 * @hmc_info: hmc_info to free
 * @privileged: permission to delete HMC objects
 * @reset: true if called before reset
 * @vers: hardware version
 */
void
irdma_del_hmc_objects(struct irdma_sc_dev *dev,
                      struct irdma_hmc_info *hmc_info, bool privileged,
                      bool reset, enum irdma_vers vers)
{
        unsigned int i;

        for (i = 0; i < IW_HMC_OBJ_TYPE_NUM; i++) {
                if (dev->hmc_info->hmc_obj[iw_hmc_obj_types[i]].cnt)
                        irdma_close_hmc_objects_type(dev, iw_hmc_obj_types[i],
                                                     hmc_info, privileged, reset);
                if (vers == IRDMA_GEN_1 && i == IRDMA_HMC_IW_TIMER)
                        break;
        }
}

/**
 * irdma_create_hmc_obj_type - create hmc object of a given type
 * @dev: hardware control device structure
 * @info: information for the hmc object to create
 */
static int
irdma_create_hmc_obj_type(struct irdma_sc_dev *dev,
                          struct irdma_hmc_create_obj_info *info)
{
        return irdma_sc_create_hmc_obj(dev, info);
}

/**
 * irdma_create_hmc_objs - create all hmc objects for the device
 * @rf: RDMA PCI function
 * @privileged: permission to create HMC objects
 * @vers: HW version
 *
 * Create the device hmc objects and allocate hmc pages
 * Return 0 if successful, otherwise clean up and return error
 */
static int
irdma_create_hmc_objs(struct irdma_pci_f *rf, bool privileged,
                      enum irdma_vers vers)
{
        struct irdma_sc_dev *dev = &rf->sc_dev;
        struct irdma_hmc_create_obj_info info = {0};
        int i, status = 0;

        info.hmc_info = dev->hmc_info;
        info.privileged = privileged;
        info.entry_type = rf->sd_type;

        for (i = 0; i < IW_HMC_OBJ_TYPE_NUM; i++) {
                if (iw_hmc_obj_types[i] == IRDMA_HMC_IW_PBLE)
                        continue;
                if (dev->hmc_info->hmc_obj[iw_hmc_obj_types[i]].cnt) {
                        info.rsrc_type = iw_hmc_obj_types[i];
                        info.count = dev->hmc_info->hmc_obj[info.rsrc_type].cnt;
                        info.add_sd_cnt = 0;
                        status = irdma_create_hmc_obj_type(dev, &info);
                        if (status) {
                                irdma_debug(dev, IRDMA_DEBUG_ERR,
                                            "create obj type %d status = %d\n",
                                            iw_hmc_obj_types[i], status);
                                break;
                        }
                }
                if (vers == IRDMA_GEN_1 && i == IRDMA_HMC_IW_TIMER)
                        break;
        }

        if (!status)
                return irdma_sc_static_hmc_pages_allocated(dev->cqp, 0, dev->hmc_fn_id,
                                                           true, true);

        while (i) {
                i--;
                /* destroy the hmc objects of a given type */
                if (dev->hmc_info->hmc_obj[iw_hmc_obj_types[i]].cnt)
                        irdma_close_hmc_objects_type(dev, iw_hmc_obj_types[i],
                                                     dev->hmc_info, privileged,
                                                     false);
        }

        return status;
}

/**
 * irdma_obj_aligned_mem - get aligned memory from device allocated memory
 * @rf: RDMA PCI function
 * @memptr: points to the memory addresses
 * @size: size of memory needed
 * @mask: mask for the aligned memory
 *
 * Get aligned memory of the requested size and
 * update the memptr to point to the new aligned memory
 * Return 0 if successful, otherwise return -ENOMEM
 */
static int
irdma_obj_aligned_mem(struct irdma_pci_f *rf,
                      struct irdma_dma_mem *memptr, u32 size,
                      u32 mask)
{
        unsigned long va, newva;
        unsigned long extra;

        va = (unsigned long)rf->obj_next.va;
        newva = va;
        if (mask)
                newva = ALIGN(va, (unsigned long)mask + 1ULL);
        extra = newva - va;
        memptr->va = (u8 *)va + extra;
        memptr->pa = rf->obj_next.pa + extra;
        memptr->size = size;
        if (((u8 *)memptr->va + size) > ((u8 *)rf->obj_mem.va + rf->obj_mem.size))
                return -ENOMEM;

        rf->obj_next.va = (u8 *)memptr->va + size;
        rf->obj_next.pa = memptr->pa + size;

        return 0;
}
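
/*
 * Worked example of the alignment math above: with rf->obj_next.va at
 * 0x1004 and mask == 0xFF (i.e. 256-byte alignment), newva becomes
 * ALIGN(0x1004, 0x100) == 0x1100 and extra == 0xFC; va and pa advance by
 * the same offset, which keeps the returned virtual and physical views
 * of the buffer in step.
 */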

/**
 * irdma_create_cqp - create control qp
 * @rf: RDMA PCI function
 *
 * Return 0, if the cqp and all the resources associated with it
 * are successfully created, otherwise return error
 */
static int
irdma_create_cqp(struct irdma_pci_f *rf)
{
        u32 sqsize = IRDMA_CQP_SW_SQSIZE_MAX;
        struct irdma_dma_mem mem;
        struct irdma_sc_dev *dev = &rf->sc_dev;
        struct irdma_cqp_init_info cqp_init_info = {0};
        struct irdma_cqp *cqp = &rf->cqp;
        u16 maj_err, min_err;
        int i, status;

        cqp->cqp_requests = kcalloc(sqsize, sizeof(*cqp->cqp_requests),
                                    GFP_KERNEL);
        if (!cqp->cqp_requests)
                return -ENOMEM;

        cqp->scratch_array = kcalloc(sqsize, sizeof(*cqp->scratch_array),
                                     GFP_KERNEL);
        if (!cqp->scratch_array) {
                status = -ENOMEM;
                goto err_scratch;
        }

        dev->cqp = &cqp->sc_cqp;
        dev->cqp->dev = dev;
        cqp->sq.size = sizeof(struct irdma_cqp_sq_wqe) * sqsize;
        cqp->sq.va = irdma_allocate_dma_mem(dev->hw, &cqp->sq, cqp->sq.size,
                                            IRDMA_CQP_ALIGNMENT);
        if (!cqp->sq.va) {
                status = -ENOMEM;
                goto err_sq;
        }

        status = irdma_obj_aligned_mem(rf, &mem, sizeof(struct irdma_cqp_ctx),
                                       IRDMA_HOST_CTX_ALIGNMENT_M);
        if (status)
                goto err_ctx;

        dev->cqp->host_ctx_pa = mem.pa;
        dev->cqp->host_ctx = mem.va;
        /* populate the cqp init info */
        cqp_init_info.dev = dev;
        cqp_init_info.sq_size = sqsize;
        cqp_init_info.sq = cqp->sq.va;
        cqp_init_info.sq_pa = cqp->sq.pa;
        cqp_init_info.host_ctx_pa = mem.pa;
        cqp_init_info.host_ctx = mem.va;
        cqp_init_info.hmc_profile = rf->rsrc_profile;
        cqp_init_info.scratch_array = cqp->scratch_array;
        cqp_init_info.protocol_used = rf->protocol_used;
        cqp_init_info.en_rem_endpoint_trk = rf->en_rem_endpoint_trk;
        cqp_init_info.timer_slots = rf->timer_slots;
        memcpy(&cqp_init_info.dcqcn_params, &rf->dcqcn_params,
               sizeof(cqp_init_info.dcqcn_params));

        switch (rf->rdma_ver) {
        case IRDMA_GEN_1:
                cqp_init_info.hw_maj_ver = IRDMA_CQPHC_HW_MAJVER_GEN_1;
                break;
        case IRDMA_GEN_2:
                cqp_init_info.hw_maj_ver = IRDMA_CQPHC_HW_MAJVER_GEN_2;
                break;
        }
        status = irdma_sc_cqp_init(dev->cqp, &cqp_init_info);
        if (status) {
                irdma_debug(dev, IRDMA_DEBUG_ERR, "cqp init status %d\n", status);
                goto err_ctx;
        }

        spin_lock_init(&cqp->req_lock);
        spin_lock_init(&cqp->compl_lock);

        status = irdma_sc_cqp_create(dev->cqp, &maj_err, &min_err);
        if (status) {
                irdma_debug(dev, IRDMA_DEBUG_ERR,
                            "cqp create failed - status %d maj_err %d min_err %d\n",
                            status, maj_err, min_err);
                goto err_ctx;
        }

        INIT_LIST_HEAD(&cqp->cqp_avail_reqs);
        INIT_LIST_HEAD(&cqp->cqp_pending_reqs);

        /* init the waitqueue of the cqp_requests and add them to the list */
        for (i = 0; i < sqsize; i++) {
                init_waitqueue_head(&cqp->cqp_requests[i].waitq);
                list_add_tail(&cqp->cqp_requests[i].list, &cqp->cqp_avail_reqs);
        }
        init_waitqueue_head(&cqp->remove_wq);
        return 0;

err_ctx:
        irdma_free_dma_mem(dev->hw, &cqp->sq);
err_sq:
        kfree(cqp->scratch_array);
        cqp->scratch_array = NULL;
err_scratch:
        kfree(cqp->cqp_requests);
        cqp->cqp_requests = NULL;

        return status;
}

/**
 * irdma_create_ccq - create control cq
 * @rf: RDMA PCI function
 *
 * Return 0, if the ccq and the resources associated with it
 * are successfully created, otherwise return error
 */
static int
irdma_create_ccq(struct irdma_pci_f *rf)
{
        struct irdma_sc_dev *dev = &rf->sc_dev;
        struct irdma_ccq_init_info info = {0};
        struct irdma_ccq *ccq = &rf->ccq;
        int status;
        int ccq_size = IW_CCQ_SIZE;

        dev->ccq = &ccq->sc_cq;
        dev->ccq->dev = dev;
        info.dev = dev;
        ccq->shadow_area.size = sizeof(struct irdma_cq_shadow_area);
        ccq->mem_cq.size = sizeof(struct irdma_cqe) * ccq_size;
        ccq->mem_cq.va = irdma_allocate_dma_mem(dev->hw, &ccq->mem_cq,
                                                ccq->mem_cq.size,
                                                IRDMA_CQ0_ALIGNMENT);
        if (!ccq->mem_cq.va)
                return -ENOMEM;

        status = irdma_obj_aligned_mem(rf, &ccq->shadow_area,
                                       ccq->shadow_area.size,
                                       IRDMA_SHADOWAREA_M);
        if (status)
                goto exit;

        ccq->sc_cq.back_cq = ccq;
        /* populate the ccq init info */
        info.cq_base = ccq->mem_cq.va;
        info.cq_pa = ccq->mem_cq.pa;
        info.num_elem = ccq_size;
        info.shadow_area = ccq->shadow_area.va;
        info.shadow_area_pa = ccq->shadow_area.pa;
        info.ceqe_mask = false;
        info.ceq_id_valid = true;
        info.shadow_read_threshold = 16;
        info.vsi = &rf->default_vsi;
        status = irdma_sc_ccq_init(dev->ccq, &info);
        if (!status)
                status = irdma_sc_ccq_create(dev->ccq, 0, true, true);
exit:
        if (status)
                irdma_free_dma_mem(dev->hw, &ccq->mem_cq);

        return status;
}

/**
 * irdma_alloc_set_mac - set up a mac address table entry
 * @iwdev: irdma device
 *
 * Allocate a mac ip entry and add it to the hw table. Return 0
 * if successful, otherwise return error
 */
static int
irdma_alloc_set_mac(struct irdma_device *iwdev)
{
        int status;

        status = irdma_alloc_local_mac_entry(iwdev->rf,
                                             &iwdev->mac_ip_table_idx);
        if (!status) {
                status = irdma_add_local_mac_entry(iwdev->rf,
                                                   (const u8 *)if_getlladdr(iwdev->netdev),
                                                   (u8)iwdev->mac_ip_table_idx);
                if (status)
                        irdma_del_local_mac_entry(iwdev->rf,
                                                  (u8)iwdev->mac_ip_table_idx);
        }
        return status;
}

/**
 * irdma_irq_request - set up the msix interrupt vector
 * @rf: RDMA PCI function
 * @msix_vec: interrupt vector information
 * @handler: function pointer to associate with interrupt
 * @argument: argument passed to the handler
 *
 * Allocate interrupt resources and set up the interrupt.
 * Return 0 if successful, otherwise return error.
 * Note that bus_describe_intr() must be called after this
 * function returns.
 */
static int
irdma_irq_request(struct irdma_pci_f *rf,
                  struct irdma_msix_vector *msix_vec,
                  driver_intr_t handler, void *argument)
{
        device_t dev = rf->dev_ctx.dev;
        int rid = msix_vec->idx + 1;
        int err, status;

        msix_vec->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
        if (!msix_vec->res) {
                irdma_debug(&rf->sc_dev, IRDMA_DEBUG_ERR,
                            "Unable to allocate bus resource int[%d]\n", rid);
                return -EINVAL;
        }
        err = bus_setup_intr(dev, msix_vec->res, INTR_TYPE_NET | INTR_MPSAFE,
                             NULL, handler, argument, &msix_vec->tag);
        if (err) {
                irdma_debug(&rf->sc_dev, IRDMA_DEBUG_ERR,
                            "Unable to register handler with %x status\n", err);
                status = -EINVAL;
                goto fail_intr;
        }
        return 0;

fail_intr:
        bus_release_resource(dev, SYS_RES_IRQ, rid, msix_vec->res);
        msix_vec->res = NULL;

        return status;
}
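
/*
 * The rid of msix_vec->idx + 1 used above follows the usual FreeBSD
 * convention that SYS_RES_IRQ rid 0 is the legacy INTx line while MSI-X
 * vectors are numbered from 1; this is stated as background and is not
 * derived from anything in this file.
 */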

/**
 * irdma_cfg_ceq_vector - set up the msix interrupt vector for
 * ceq
 * @rf: RDMA PCI function
 * @iwceq: ceq associated with the vector
 * @ceq_id: the id number of the iwceq
 * @msix_vec: interrupt vector information
 *
 * Allocate interrupt resources and enable irq handling
 * Return 0 if successful, otherwise return error
 */
static int
irdma_cfg_ceq_vector(struct irdma_pci_f *rf, struct irdma_ceq *iwceq,
                     u16 ceq_id, struct irdma_msix_vector *msix_vec)
{
        int status;

        if (rf->msix_shared && !ceq_id) {
                snprintf(msix_vec->name, sizeof(msix_vec->name) - 1,
                         "irdma-%s-AEQCEQ-0", dev_name(&rf->pcidev->dev));
                tasklet_setup(&rf->dpc_tasklet, irdma_aeq_ceq0_tasklet_cb);
                status = irdma_irq_request(rf, msix_vec, irdma_aeq_ceq0_irq_handler, rf);
                if (status)
                        return status;
                bus_describe_intr(rf->dev_ctx.dev, msix_vec->res, msix_vec->tag, "%s", msix_vec->name);
        } else {
                snprintf(msix_vec->name, sizeof(msix_vec->name) - 1,
                         "irdma-%s-CEQ-%d",
                         dev_name(&rf->pcidev->dev), ceq_id);
                tasklet_setup(&iwceq->dpc_tasklet, irdma_ceq_tasklet_cb);

                status = irdma_irq_request(rf, msix_vec, irdma_ceq_irq_handler, iwceq);
                if (status)
                        return status;
                bus_describe_intr(rf->dev_ctx.dev, msix_vec->res, msix_vec->tag, "%s", msix_vec->name);
        }
        msix_vec->ceq_id = ceq_id;
        rf->sc_dev.irq_ops->irdma_cfg_ceq(&rf->sc_dev, ceq_id, msix_vec->idx, true);

        return 0;
}

/**
 * irdma_cfg_aeq_vector - set up the msix vector for aeq
 * @rf: RDMA PCI function
 *
 * Allocate interrupt resources and enable irq handling
 * Return 0 if successful, otherwise return error
 */
static int
irdma_cfg_aeq_vector(struct irdma_pci_f *rf)
{
        struct irdma_msix_vector *msix_vec = rf->iw_msixtbl;
        int status = 0;

        if (!rf->msix_shared) {
                snprintf(msix_vec->name, sizeof(msix_vec->name) - 1,
                         "irdma-%s-AEQ", dev_name(&rf->pcidev->dev));
                tasklet_setup(&rf->dpc_tasklet, irdma_aeq_ceq0_tasklet_cb);
                status = irdma_irq_request(rf, msix_vec, irdma_aeq_ceq0_irq_handler, rf);
                if (status)
                        return status;
                bus_describe_intr(rf->dev_ctx.dev, msix_vec->res, msix_vec->tag, "%s", msix_vec->name);
        }

        if (status) {
                irdma_debug(&rf->sc_dev, IRDMA_DEBUG_ERR, "aeq irq config fail\n");
                return status;
        }

        rf->sc_dev.irq_ops->irdma_cfg_aeq(&rf->sc_dev, msix_vec->idx, true);

        return 0;
}

/**
 * irdma_create_ceq - create completion event queue
 * @rf: RDMA PCI function
 * @iwceq: pointer to the ceq resources to be created
 * @ceq_id: the id number of the iwceq
 * @vsi: SC vsi struct
 *
 * Return 0, if the ceq and the resources associated with it
 * are successfully created, otherwise return error
 */
static int
irdma_create_ceq(struct irdma_pci_f *rf, struct irdma_ceq *iwceq,
                 u16 ceq_id, struct irdma_sc_vsi *vsi)
{
        int status;
        struct irdma_ceq_init_info info = {0};
        struct irdma_sc_dev *dev = &rf->sc_dev;
        u32 ceq_size;

        info.ceq_id = ceq_id;
        iwceq->rf = rf;
        ceq_size = min(rf->sc_dev.hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].cnt,
                       dev->hw_attrs.max_hw_ceq_size);
        iwceq->mem.size = sizeof(struct irdma_ceqe) * ceq_size;
        iwceq->mem.va = irdma_allocate_dma_mem(dev->hw, &iwceq->mem,
                                               iwceq->mem.size,
                                               IRDMA_CEQ_ALIGNMENT);
        if (!iwceq->mem.va)
                return -ENOMEM;

        info.ceq_id = ceq_id;
        info.ceqe_base = iwceq->mem.va;
        info.ceqe_pa = iwceq->mem.pa;
        info.elem_cnt = ceq_size;
        info.reg_cq = kzalloc(sizeof(struct irdma_sc_cq *) * info.elem_cnt, GFP_KERNEL);
        if (!info.reg_cq) {
                irdma_free_dma_mem(dev->hw, &iwceq->mem);
                return -ENOMEM;
        }

        iwceq->sc_ceq.ceq_id = ceq_id;
        info.dev = dev;
        info.vsi = vsi;
        status = irdma_sc_ceq_init(&iwceq->sc_ceq, &info);
        if (!status) {
                if (dev->ceq_valid)
                        status = irdma_cqp_ceq_cmd(&rf->sc_dev, &iwceq->sc_ceq,
                                                   IRDMA_OP_CEQ_CREATE);
                else
                        status = irdma_sc_cceq_create(&iwceq->sc_ceq);
        }

        if (status) {
                kfree(info.reg_cq);
                irdma_free_dma_mem(dev->hw, &iwceq->mem);
        }

        return status;
}

/**
 * irdma_setup_ceq_0 - create CEQ 0 and its interrupt resource
 * @rf: RDMA PCI function
 *
 * Allocate a list for all device completion event queues
 * Create the ceq 0 and configure its msix interrupt vector
 * Return 0, if successfully set up, otherwise return error
 */
static int
irdma_setup_ceq_0(struct irdma_pci_f *rf)
{
        struct irdma_ceq *iwceq;
        struct irdma_msix_vector *msix_vec;
        u32 i;
        int status = 0;
        u32 num_ceqs;

        num_ceqs = min(rf->msix_count, rf->sc_dev.hmc_fpm_misc.max_ceqs);
        rf->ceqlist = kcalloc(num_ceqs, sizeof(*rf->ceqlist), GFP_KERNEL);
        if (!rf->ceqlist) {
                status = -ENOMEM;
                goto exit;
        }

        iwceq = &rf->ceqlist[0];
        status = irdma_create_ceq(rf, iwceq, 0, &rf->default_vsi);
        if (status) {
                irdma_debug(&rf->sc_dev, IRDMA_DEBUG_ERR,
                            "create ceq status = %d\n", status);
                goto exit;
        }

        spin_lock_init(&iwceq->ce_lock);
        i = rf->msix_shared ? 0 : 1;
        msix_vec = &rf->iw_msixtbl[i];
        iwceq->irq = msix_vec->irq;
        iwceq->msix_idx = msix_vec->idx;
        status = irdma_cfg_ceq_vector(rf, iwceq, 0, msix_vec);
        if (status) {
                irdma_destroy_ceq(rf, iwceq);
                goto exit;
        }

        irdma_ena_intr(&rf->sc_dev, msix_vec->idx);
        rf->ceqs_count++;

exit:
        if (status && !rf->ceqs_count) {
                kfree(rf->ceqlist);
                rf->ceqlist = NULL;
                return status;
        }
        rf->sc_dev.ceq_valid = true;

        return 0;
}

/**
 * irdma_setup_ceqs - manage the device ceq's and their interrupt resources
 * @rf: RDMA PCI function
 * @vsi: VSI structure for this CEQ
 *
 * Allocate a list for all device completion event queues
 * Create the ceq's and configure their msix interrupt vectors
 * Return 0, if ceqs are successfully set up, otherwise return error
 */
static int
irdma_setup_ceqs(struct irdma_pci_f *rf, struct irdma_sc_vsi *vsi)
{
        u32 i;
        u16 ceq_id;
        struct irdma_ceq *iwceq;
        struct irdma_msix_vector *msix_vec;
        int status;
        u32 num_ceqs;

        num_ceqs = min(rf->msix_count, rf->sc_dev.hmc_fpm_misc.max_ceqs);
        i = (rf->msix_shared) ? 1 : 2;
        for (ceq_id = 1; i < num_ceqs; i++, ceq_id++) {
                iwceq = &rf->ceqlist[ceq_id];
                status = irdma_create_ceq(rf, iwceq, ceq_id, vsi);
                if (status) {
                        irdma_debug(&rf->sc_dev, IRDMA_DEBUG_ERR,
                                    "create ceq status = %d\n", status);
                        goto del_ceqs;
                }
                spin_lock_init(&iwceq->ce_lock);
                msix_vec = &rf->iw_msixtbl[i];
                iwceq->irq = msix_vec->irq;
                iwceq->msix_idx = msix_vec->idx;
                status = irdma_cfg_ceq_vector(rf, iwceq, ceq_id, msix_vec);
                if (status) {
                        irdma_destroy_ceq(rf, iwceq);
                        goto del_ceqs;
                }
                irdma_ena_intr(&rf->sc_dev, msix_vec->idx);
                rf->ceqs_count++;
        }

        return 0;

del_ceqs:
        irdma_del_ceqs(rf);

        return status;
}

static int
irdma_create_virt_aeq(struct irdma_pci_f *rf, u32 size)
{
        struct irdma_aeq *aeq = &rf->aeq;
        dma_addr_t *pg_arr;
        u32 pg_cnt;
        int status;

        if (rf->rdma_ver < IRDMA_GEN_2)
                return -EOPNOTSUPP;

        aeq->mem.size = sizeof(struct irdma_sc_aeqe) * size;
        aeq->mem.va = vzalloc(aeq->mem.size);

        if (!aeq->mem.va)
                return -ENOMEM;

        pg_cnt = DIV_ROUND_UP(aeq->mem.size, PAGE_SIZE);
        status = irdma_get_pble(rf->pble_rsrc, &aeq->palloc, pg_cnt, true);
        if (status) {
                vfree(aeq->mem.va);
                return status;
        }

        pg_arr = (dma_addr_t *)aeq->palloc.level1.addr;
        status = irdma_map_vm_page_list(&rf->hw, aeq->mem.va, pg_arr, pg_cnt);
        if (status) {
                irdma_free_pble(rf->pble_rsrc, &aeq->palloc);
                vfree(aeq->mem.va);
                return status;
        }

        return 0;
}
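
/*
 * The virtual AEQ is the fallback used by irdma_create_aeq() when a
 * physically contiguous DMA ring can't be allocated: the vzalloc'ed
 * buffer is exposed to the device by mapping its pages through a level-1
 * PBLE list, which is presumably why pre-GEN_2 hardware is rejected with
 * -EOPNOTSUPP above.
 */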

/**
 * irdma_create_aeq - create async event queue
 * @rf: RDMA PCI function
 *
 * Return 0, if the aeq and the resources associated with it
 * are successfully created, otherwise return error
 */
static int
irdma_create_aeq(struct irdma_pci_f *rf)
{
        struct irdma_aeq_init_info info = {0};
        struct irdma_sc_dev *dev = &rf->sc_dev;
        struct irdma_aeq *aeq = &rf->aeq;
        struct irdma_hmc_info *hmc_info = rf->sc_dev.hmc_info;
        u32 aeq_size;
        u8 multiplier = (rf->protocol_used == IRDMA_IWARP_PROTOCOL_ONLY) ? 2 : 1;
        int status;

        aeq_size = multiplier * hmc_info->hmc_obj[IRDMA_HMC_IW_QP].cnt +
            hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].cnt;
        aeq_size = min(aeq_size, dev->hw_attrs.max_hw_aeq_size);

        aeq->mem.size = sizeof(struct irdma_sc_aeqe) * aeq_size;
        aeq->mem.va = irdma_allocate_dma_mem(dev->hw, &aeq->mem, aeq->mem.size,
                                             IRDMA_AEQ_ALIGNMENT);
        if (aeq->mem.va)
                goto skip_virt_aeq;

        /* physically mapped aeq failed. setup virtual aeq */
        status = irdma_create_virt_aeq(rf, aeq_size);
        if (status)
                return status;

        info.virtual_map = true;
        aeq->virtual_map = info.virtual_map;
        info.pbl_chunk_size = 1;
        info.first_pm_pbl_idx = aeq->palloc.level1.idx;

skip_virt_aeq:
        info.aeqe_base = aeq->mem.va;
        info.aeq_elem_pa = aeq->mem.pa;
        info.elem_cnt = aeq_size;
        info.dev = dev;
        info.msix_idx = rf->iw_msixtbl->idx;
        status = irdma_sc_aeq_init(&aeq->sc_aeq, &info);
        if (status)
                goto err;

        status = irdma_cqp_aeq_cmd(dev, &aeq->sc_aeq, IRDMA_OP_AEQ_CREATE);
        if (status)
                goto err;

        return 0;

err:
        if (aeq->virtual_map)
                irdma_destroy_virt_aeq(rf);
        else
                irdma_free_dma_mem(dev->hw, &aeq->mem);

        return status;
}

/**
 * irdma_setup_aeq - set up the device aeq
 * @rf: RDMA PCI function
 *
 * Create the aeq and configure its msix interrupt vector
 * Return 0 if successful, otherwise return error
 */
static int
irdma_setup_aeq(struct irdma_pci_f *rf)
{
        struct irdma_sc_dev *dev = &rf->sc_dev;
        int status;

        status = irdma_create_aeq(rf);
        if (status)
                return status;

        status = irdma_cfg_aeq_vector(rf);
        if (status) {
                irdma_destroy_aeq(rf);
                return status;
        }

        if (!rf->msix_shared)
                irdma_ena_intr(dev, rf->iw_msixtbl[0].idx);

        return 0;
}
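
/*
 * When msix_shared is set, vector 0 has already been enabled by
 * irdma_setup_ceq_0(), so the explicit irdma_ena_intr() above only runs
 * for the dedicated-AEQ-vector case.
 */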

/**
 * irdma_initialize_ilq - create iwarp local queue for cm
 * @iwdev: irdma device
 *
 * Return 0 if successful, otherwise return error
 */
static int
irdma_initialize_ilq(struct irdma_device *iwdev)
{
        struct irdma_puda_rsrc_info info = {0};
        int status;

        info.type = IRDMA_PUDA_RSRC_TYPE_ILQ;
        info.cq_id = 1;
        info.qp_id = 1;
        info.count = 1;
        info.pd_id = 1;
        info.abi_ver = IRDMA_ABI_VER;
        info.sq_size = min(iwdev->rf->max_qp / 2, (u32)32768);
        info.rq_size = info.sq_size;
        info.buf_size = 1024;
        info.tx_buf_cnt = 2 * info.sq_size;
        info.receive = irdma_receive_ilq;
        info.xmit_complete = irdma_cm_ilq_cmpl_handler;
        status = irdma_puda_create_rsrc(&iwdev->vsi, &info);
        if (status)
                irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_ERR, "ilq create fail\n");

        return status;
}

/**
 * irdma_initialize_ieq - create iwarp exception queue
 * @iwdev: irdma device
 *
 * Return 0 if successful, otherwise return error
 */
static int
irdma_initialize_ieq(struct irdma_device *iwdev)
{
        struct irdma_puda_rsrc_info info = {0};
        int status;

        info.type = IRDMA_PUDA_RSRC_TYPE_IEQ;
        info.cq_id = 2;
        info.qp_id = iwdev->vsi.exception_lan_q;
        info.count = 1;
        info.pd_id = 2;
        info.abi_ver = IRDMA_ABI_VER;
        info.sq_size = min(iwdev->rf->max_qp / 2, (u32)32768);
        info.rq_size = info.sq_size;
        info.buf_size = iwdev->vsi.mtu + IRDMA_IPV4_PAD;
        info.tx_buf_cnt = 4096;
        status = irdma_puda_create_rsrc(&iwdev->vsi, &info);
        if (status)
                irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_ERR, "ieq create fail\n");

        return status;
}

/**
 * irdma_reinitialize_ieq - destroy and re-create ieq
 * @vsi: VSI structure
 */
void
irdma_reinitialize_ieq(struct irdma_sc_vsi *vsi)
{
        struct irdma_device *iwdev = vsi->back_vsi;
        struct irdma_pci_f *rf = iwdev->rf;

        irdma_puda_dele_rsrc(vsi, IRDMA_PUDA_RSRC_TYPE_IEQ, false);
        if (irdma_initialize_ieq(iwdev)) {
                iwdev->rf->reset = true;
                rf->gen_ops.request_reset(rf);
        }
}

/**
 * irdma_hmc_setup - create hmc objects for the device
 * @rf: RDMA PCI function
 *
 * Set up the device private memory space for the number and size of
 * the hmc objects and create the objects
 * Return 0 if successful, otherwise return error
 */
static int
irdma_hmc_setup(struct irdma_pci_f *rf)
{
        struct irdma_sc_dev *dev = &rf->sc_dev;
        int status;
        u32 qpcnt;

        qpcnt = rsrc_limits_table[rf->limits_sel].qplimit;

        rf->sd_type = IRDMA_SD_TYPE_DIRECT;
        status = irdma_cfg_fpm_val(dev, qpcnt);
        if (status)
                return status;

        status = irdma_create_hmc_objs(rf, true, rf->rdma_ver);

        return status;
}

/**
 * irdma_del_init_mem - deallocate memory resources
 * @rf: RDMA PCI function
 */
static void
irdma_del_init_mem(struct irdma_pci_f *rf)
{
        struct irdma_sc_dev *dev = &rf->sc_dev;

        kfree(dev->hmc_info->sd_table.sd_entry);
        dev->hmc_info->sd_table.sd_entry = NULL;
        vfree(rf->mem_rsrc);
        rf->mem_rsrc = NULL;
        irdma_free_dma_mem(&rf->hw, &rf->obj_mem);
        if (rf->rdma_ver != IRDMA_GEN_1) {
                kfree(rf->allocated_ws_nodes);
                rf->allocated_ws_nodes = NULL;
        }
        mutex_destroy(&dev->ws_mutex);
        kfree(rf->ceqlist);
        rf->ceqlist = NULL;
        kfree(rf->iw_msixtbl);
        rf->iw_msixtbl = NULL;
        kfree(rf->hmc_info_mem);
        rf->hmc_info_mem = NULL;
}

/**
 * irdma_initialize_dev - initialize device
 * @rf: RDMA PCI function
 *
 * Allocate memory for the hmc objects and initialize iwdev
 * Return 0 if successful, otherwise clean up the resources
 * and return error
 */
static int
irdma_initialize_dev(struct irdma_pci_f *rf)
{
        int status;
        struct irdma_sc_dev *dev = &rf->sc_dev;
        struct irdma_device_init_info info = {0};
        struct irdma_dma_mem mem;
        u32 size;

        size = sizeof(struct irdma_hmc_pble_rsrc) +
            sizeof(struct irdma_hmc_info) +
            (sizeof(struct irdma_hmc_obj_info) * IRDMA_HMC_IW_MAX);

        rf->hmc_info_mem = kzalloc(size, GFP_KERNEL);
        if (!rf->hmc_info_mem)
                return -ENOMEM;

        rf->pble_rsrc = (struct irdma_hmc_pble_rsrc *)rf->hmc_info_mem;
        dev->hmc_info = &rf->hw.hmc;
        dev->hmc_info->hmc_obj = (struct irdma_hmc_obj_info *)
            (rf->pble_rsrc + 1);

        status = irdma_obj_aligned_mem(rf, &mem, IRDMA_QUERY_FPM_BUF_SIZE,
                                       IRDMA_FPM_QUERY_BUF_ALIGNMENT_M);
        if (status)
                goto error;

        info.fpm_query_buf_pa = mem.pa;
        info.fpm_query_buf = mem.va;

        status = irdma_obj_aligned_mem(rf, &mem, IRDMA_COMMIT_FPM_BUF_SIZE,
                                       IRDMA_FPM_COMMIT_BUF_ALIGNMENT_M);
        if (status)
                goto error;

        info.fpm_commit_buf_pa = mem.pa;
        info.fpm_commit_buf = mem.va;

        info.bar0 = rf->hw.hw_addr;
        info.hmc_fn_id = rf->peer_info->pf_id;
        /*
         * The debug_mask is already assigned at this point through sysctl,
         * so the value shouldn't be overwritten.
         */
        info.debug_mask = rf->sc_dev.debug_mask;
        info.hw = &rf->hw;
        status = irdma_sc_dev_init(&rf->sc_dev, &info);
        if (status)
                goto error;

        return status;
error:
        kfree(rf->hmc_info_mem);
        rf->hmc_info_mem = NULL;

        return status;
}

/**
 * irdma_rt_deinit_hw - clean up the irdma device resources
 * @iwdev: irdma device
 *
 * remove the mac ip entry and ipv4/ipv6 addresses, destroy the
 * device queues and free the pble and the hmc objects
 */
void
irdma_rt_deinit_hw(struct irdma_device *iwdev)
{
        struct irdma_sc_qp qp = {{0}};

        irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_INIT, "state = %d\n", iwdev->init_state);

        switch (iwdev->init_state) {
        case IP_ADDR_REGISTERED:
                if (iwdev->rf->sc_dev.hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1)
                        irdma_del_local_mac_entry(iwdev->rf,
                                                  (u8)iwdev->mac_ip_table_idx);
                /* fallthrough */
        case AEQ_CREATED:
        case PBLE_CHUNK_MEM:
        case CEQS_CREATED:
        case REM_ENDPOINT_TRK_CREATED:
                if (iwdev->rf->en_rem_endpoint_trk) {
                        qp.dev = &iwdev->rf->sc_dev;
                        qp.qp_uk.qp_id = IRDMA_REM_ENDPOINT_TRK_QPID;
                        qp.qp_uk.qp_type = IRDMA_QP_TYPE_IWARP;
                        irdma_cqp_qp_destroy_cmd(qp.dev, &qp);
                }
                /* fallthrough */
        case IEQ_CREATED:
                if (!iwdev->roce_mode)
                        irdma_puda_dele_rsrc(&iwdev->vsi, IRDMA_PUDA_RSRC_TYPE_IEQ,
                                             iwdev->rf->reset);
                /* fallthrough */
        case ILQ_CREATED:
                if (!iwdev->roce_mode)
                        irdma_puda_dele_rsrc(&iwdev->vsi,
                                             IRDMA_PUDA_RSRC_TYPE_ILQ,
                                             iwdev->rf->reset);
                break;
        default:
                irdma_dev_warn(&iwdev->ibdev, "bad init_state = %d\n", iwdev->init_state);
                break;
        }

        irdma_cleanup_cm_core(&iwdev->cm_core);
        if (iwdev->vsi.pestat) {
                irdma_vsi_stats_free(&iwdev->vsi);
                kfree(iwdev->vsi.pestat);
        }
        if (iwdev->cleanup_wq)
                destroy_workqueue(iwdev->cleanup_wq);
}

static int
irdma_setup_init_state(struct irdma_pci_f *rf)
{
        int status;

        status = irdma_save_msix_info(rf);
        if (status)
                return status;

        rf->obj_mem.size = 8192;
        rf->obj_mem.va = irdma_allocate_dma_mem(&rf->hw, &rf->obj_mem,
                                                rf->obj_mem.size,
                                                IRDMA_HW_PAGE_SIZE);
        if (!rf->obj_mem.va) {
                status = -ENOMEM;
                goto clean_msixtbl;
        }

        rf->obj_next = rf->obj_mem;
        status = irdma_initialize_dev(rf);
        if (status)
                goto clean_obj_mem;

        /* apply sysctl settings to max_hw_ird/ord */
        rf->sc_dev.hw_attrs.max_hw_ird = irdma_sysctl_max_ird;
        rf->sc_dev.hw_attrs.max_hw_ord = irdma_sysctl_max_ord;
        irdma_debug(&rf->sc_dev, IRDMA_DEBUG_INIT,
                    "using max_hw_ird = %d and max_hw_ord = %d\n",
                    rf->sc_dev.hw_attrs.max_hw_ird,
                    rf->sc_dev.hw_attrs.max_hw_ord);

        return 0;

clean_obj_mem:
        irdma_free_dma_mem(&rf->hw, &rf->obj_mem);
clean_msixtbl:
        kfree(rf->iw_msixtbl);
        rf->iw_msixtbl = NULL;
        return status;
}
1862
1863 /**
1864 * irdma_get_used_rsrc - determine resources used internally
1865 * @iwdev: irdma device
1866 *
1867 * Called at the end of open to count internal allocations; resource IDs are allocated bottom-up, so the first zero bit in each bitmap equals the number in use
1868 */
1869 static void
1870 irdma_get_used_rsrc(struct irdma_device *iwdev)
1871 {
1872 iwdev->rf->used_pds = find_first_zero_bit(iwdev->rf->allocated_pds,
1873 iwdev->rf->max_pd);
1874 iwdev->rf->used_qps = find_first_zero_bit(iwdev->rf->allocated_qps,
1875 iwdev->rf->max_qp);
1876 iwdev->rf->used_cqs = find_first_zero_bit(iwdev->rf->allocated_cqs,
1877 iwdev->rf->max_cq);
1878 iwdev->rf->used_mrs = find_first_zero_bit(iwdev->rf->allocated_mrs,
1879 iwdev->rf->max_mr);
1880 }
1881
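/**
 * irdma_ctrl_deinit_hw - destroy the control portion of HW
 * @rf: RDMA PCI function
 *
 * Tear down the AEQ, PBLE and CEQ resources if they were created, then
 * unwind the admin queues, HMC objects and init memory based on the
 * init_state reached.
 */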
1882 void
1883 irdma_ctrl_deinit_hw(struct irdma_pci_f *rf)
1884 {
1885 enum init_completion_state state = rf->init_state;
1886
1887 rf->init_state = INVALID_STATE;
1888 if (rf->rsrc_created) {
1889 irdma_destroy_aeq(rf);
1890 irdma_destroy_pble_prm(rf->pble_rsrc);
1891 irdma_del_ceqs(rf);
1892 rf->rsrc_created = false;
1893 }
1894
1895 switch (state) {
1896 case CEQ0_CREATED:
1897 irdma_del_ceq_0(rf);
1898 /* fallthrough */
1899 case CCQ_CREATED:
1900 irdma_destroy_ccq(rf);
1901 /* fallthrough */
1902 case HW_RSRC_INITIALIZED:
1903 case HMC_OBJS_CREATED:
1904 irdma_del_hmc_objects(&rf->sc_dev, rf->sc_dev.hmc_info, true,
1905 rf->reset, rf->rdma_ver);
1906 /* fallthrough */
1907 case CQP_CREATED:
1908 irdma_destroy_cqp(rf, !rf->reset);
1909 /* fallthrough */
1910 case INITIAL_STATE:
1911 irdma_del_init_mem(rf);
1912 break;
1913 case INVALID_STATE:
1914 default:
1915 irdma_dev_warn(&rf->iwdev->ibdev, "bad init_state = %d\n", state);
1916 break;
1917 }
1918 }
1919
1920 /**
1921 * irdma_rt_init_hw - Initializes runtime portion of HW
1922 * @iwdev: irdma device
1923 * @l2params: qos, tc, mtu info from netdev driver
1924 *
1925 * Create device queues ILQ, IEQ, CEQs and PBLEs. Set up irdma
1926 * device resource objects.
1927 */
1928 int
1929 irdma_rt_init_hw(struct irdma_device *iwdev,
1930 struct irdma_l2params *l2params)
1931 {
1932 struct irdma_pci_f *rf = iwdev->rf;
1933 struct irdma_sc_dev *dev = &rf->sc_dev;
1934 struct irdma_sc_qp qp = {{0}};
1935 struct irdma_vsi_init_info vsi_info = {0};
1936 struct irdma_vsi_stats_info stats_info = {0};
1937 int status;
1938
1939 vsi_info.dev = dev;
1940 vsi_info.back_vsi = iwdev;
1941 vsi_info.params = l2params;
1942 vsi_info.pf_data_vsi_num = iwdev->vsi_num;
1943 vsi_info.register_qset = rf->gen_ops.register_qset;
1944 vsi_info.unregister_qset = rf->gen_ops.unregister_qset;
1945 vsi_info.exception_lan_q = 2;
1946 irdma_sc_vsi_init(&iwdev->vsi, &vsi_info);
1947
1948 status = irdma_setup_cm_core(iwdev, rf->rdma_ver);
1949 if (status)
1950 return status;
1951
1952 stats_info.pestat = kzalloc(sizeof(*stats_info.pestat), GFP_KERNEL);
1953 if (!stats_info.pestat) {
1954 irdma_cleanup_cm_core(&iwdev->cm_core);
1955 return -ENOMEM;
1956 }
1957 stats_info.fcn_id = dev->hmc_fn_id;
1958 status = irdma_vsi_stats_init(&iwdev->vsi, &stats_info);
1959 if (status) {
1960 irdma_cleanup_cm_core(&iwdev->cm_core);
1961 kfree(stats_info.pestat);
1962 return status;
1963 }
1964
1965 do {
1966 if (!iwdev->roce_mode) {
1967 status = irdma_initialize_ilq(iwdev);
1968 if (status)
1969 break;
1970 iwdev->init_state = ILQ_CREATED;
1971 status = irdma_initialize_ieq(iwdev);
1972 if (status)
1973 break;
1974 iwdev->init_state = IEQ_CREATED;
1975 }
1976 if (iwdev->rf->en_rem_endpoint_trk) {
1977 qp.dev = dev;
1978 qp.qp_uk.qp_id = IRDMA_REM_ENDPOINT_TRK_QPID;
1979 qp.qp_uk.qp_type = IRDMA_QP_TYPE_IWARP;
1980 status = irdma_cqp_qp_create_cmd(dev, &qp);
1981 if (status)
1982 break;
1983 iwdev->init_state = REM_ENDPOINT_TRK_CREATED;
1984 }
1985 if (!rf->rsrc_created) {
1986 status = irdma_setup_ceqs(rf, &iwdev->vsi);
1987 if (status)
1988 break;
1989
1990 iwdev->init_state = CEQS_CREATED;
1991
1992 status = irdma_hmc_init_pble(&rf->sc_dev,
1993 rf->pble_rsrc);
1994 if (status) {
1995 irdma_del_ceqs(rf);
1996 break;
1997 }
1998
1999 iwdev->init_state = PBLE_CHUNK_MEM;
2000
2001 status = irdma_setup_aeq(rf);
2002 if (status) {
2003 irdma_destroy_pble_prm(rf->pble_rsrc);
2004 irdma_del_ceqs(rf);
2005 break;
2006 }
2007 iwdev->init_state = AEQ_CREATED;
2008 rf->rsrc_created = true;
2009 }
2010
2011 if (iwdev->rf->sc_dev.hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1)
2012 irdma_alloc_set_mac(iwdev);
2013 irdma_add_ip(iwdev);
2014 iwdev->init_state = IP_ADDR_REGISTERED;
2015
2016 /*
2017  * handles asynchronous cleanup tasks - CM disconnect, QP free, CQ buffer free
2018  */
2019 iwdev->cleanup_wq = alloc_workqueue("irdma-cleanup-wq",
2020 WQ_UNBOUND, WQ_UNBOUND_MAX_ACTIVE);
2021 if (!iwdev->cleanup_wq)
2022 return -ENOMEM;
2023 irdma_get_used_rsrc(iwdev);
2024 init_waitqueue_head(&iwdev->suspend_wq);
2025
2026 return 0;
2027 } while (0);
2028
2029 dev_err(&rf->pcidev->dev, "HW runtime init FAIL status = %d last cmpl = %d\n",
2030 status, iwdev->init_state);
2031 irdma_rt_deinit_hw(iwdev);
2032
2033 return status;
2034 }
2035
2036 /**
2037 * irdma_ctrl_init_hw - Initializes control portion of HW
2038 * @rf: RDMA PCI function
2039 *
2040 * Create admin queues, HMC objects and RF resource objects
2041 */
2042 int
2043 irdma_ctrl_init_hw(struct irdma_pci_f *rf)
2044 {
2045 struct irdma_sc_dev *dev = &rf->sc_dev;
2046 int status;
2047
2048 do {
2049 status = irdma_setup_init_state(rf);
2050 if (status)
2051 break;
2052 rf->init_state = INITIAL_STATE;
2053
2054 status = irdma_create_cqp(rf);
2055 if (status)
2056 break;
2057 rf->init_state = CQP_CREATED;
2058
2059 dev->feature_info[IRDMA_FEATURE_FW_INFO] = IRDMA_FW_VER_DEFAULT;
2060 if (rf->rdma_ver != IRDMA_GEN_1) {
2061 status = irdma_get_rdma_features(dev);
2062 if (status)
2063 break;
2064 }
2065
2066 status = irdma_hmc_setup(rf);
2067 if (status)
2068 break;
2069 rf->init_state = HMC_OBJS_CREATED;
2070
2071 status = irdma_initialize_hw_rsrc(rf);
2072 if (status)
2073 break;
2074 rf->init_state = HW_RSRC_INITIALIZED;
2075
2076 status = irdma_create_ccq(rf);
2077 if (status)
2078 break;
2079 rf->init_state = CCQ_CREATED;
2080
2081 status = irdma_setup_ceq_0(rf);
2082 if (status)
2083 break;
2084 rf->init_state = CEQ0_CREATED;
2085 /* Handles processing of CQP completions */
2086 rf->cqp_cmpl_wq = alloc_ordered_workqueue("irdma-cqp_cmpl_wq",
2087 WQ_HIGHPRI | WQ_UNBOUND);
2088 if (!rf->cqp_cmpl_wq) {
2089 status = -ENOMEM;
2090 break;
2091 }
2092 INIT_WORK(&rf->cqp_cmpl_work, cqp_compl_worker);
2093 irdma_sc_ccq_arm(dev->ccq);
2094 return 0;
2095 } while (0);
2096
2097 pr_err("IRDMA hardware initialization FAILED init_state=%d status=%d\n",
2098 rf->init_state, status);
2099 irdma_ctrl_deinit_hw(rf);
2100 return status;
2101 }
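/*
 * A minimal bring-up sketch (hypothetical probe path, not part of this
 * file): control init must complete before runtime init, and teardown
 * runs in the reverse order:
 *
 *	if (irdma_ctrl_init_hw(rf))
 *		return -EIO;
 *	if (irdma_rt_init_hw(iwdev, &l2params)) {
 *		irdma_ctrl_deinit_hw(rf);
 *		return -EIO;
 *	}
 */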
2102
2103 /**
2104 * irdma_set_hw_rsrc - set hw memory resources.
2105 * @rf: RDMA PCI function
2106 */
2107 static void
2108 irdma_set_hw_rsrc(struct irdma_pci_f *rf)
2109 {
2110 rf->allocated_qps = (void *)(rf->mem_rsrc +
2111 (sizeof(struct irdma_arp_entry) * rf->arp_table_size));
2112 rf->allocated_cqs = &rf->allocated_qps[BITS_TO_LONGS(rf->max_qp)];
2113 rf->allocated_mrs = &rf->allocated_cqs[BITS_TO_LONGS(rf->max_cq)];
2114 rf->allocated_pds = &rf->allocated_mrs[BITS_TO_LONGS(rf->max_mr)];
2115 rf->allocated_ahs = &rf->allocated_pds[BITS_TO_LONGS(rf->max_pd)];
2116 rf->allocated_mcgs = &rf->allocated_ahs[BITS_TO_LONGS(rf->max_ah)];
2117 rf->allocated_arps = &rf->allocated_mcgs[BITS_TO_LONGS(rf->max_mcg)];
2118
2119 rf->qp_table = (struct irdma_qp **)
2120 (&rf->allocated_arps[BITS_TO_LONGS(rf->arp_table_size)]);
2121 rf->cq_table = (struct irdma_cq **)(&rf->qp_table[rf->max_qp]);
2122
2123 spin_lock_init(&rf->rsrc_lock);
2124 spin_lock_init(&rf->arp_lock);
2125 spin_lock_init(&rf->qptable_lock);
2126 spin_lock_init(&rf->cqtable_lock);
2127 spin_lock_init(&rf->qh_list_lock);
2128 }
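/*
 * For reference, the single rf->mem_rsrc allocation carved up above is
 * laid out as follows (irdma_calc_mem_rsrc_size() below sums the same
 * pieces):
 *
 *	arp_table      - arp_table_size entries of struct irdma_arp_entry
 *	allocated_qps  - bitmap of max_qp bits
 *	allocated_cqs  - bitmap of max_cq bits
 *	allocated_mrs  - bitmap of max_mr bits
 *	allocated_pds  - bitmap of max_pd bits
 *	allocated_ahs  - bitmap of max_ah bits
 *	allocated_mcgs - bitmap of max_mcg bits
 *	allocated_arps - bitmap of arp_table_size bits
 *	qp_table       - max_qp pointers to struct irdma_qp
 *	cq_table       - max_cq pointers to struct irdma_cq
 */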
2129
2130 /**
2131 * irdma_calc_mem_rsrc_size - calculate memory resources size.
2132 * @rf: RDMA PCI function
2133 */
2134 static u32
irdma_calc_mem_rsrc_size(struct irdma_pci_f *rf)
{
2135 u32 rsrc_size;
2136
2137 rsrc_size = sizeof(struct irdma_arp_entry) * rf->arp_table_size;
2138 rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_qp);
2139 rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_mr);
2140 rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_cq);
2141 rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_pd);
2142 rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->arp_table_size);
2143 rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_ah);
2144 rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_mcg);
2145 rsrc_size += sizeof(struct irdma_qp **) * rf->max_qp;
2146 rsrc_size += sizeof(struct irdma_cq **) * rf->max_cq;
2147
2148 return rsrc_size;
2149 }
2150
2151 /**
2152 * irdma_initialize_hw_rsrc - initialize hw resource tracking array
2153 * @rf: RDMA PCI function
2154 */
2155 u32
2156 irdma_initialize_hw_rsrc(struct irdma_pci_f *rf)
2157 {
2158 u32 rsrc_size;
2159 u32 mrdrvbits;
2160 u32 ret;
2161
2162 if (rf->rdma_ver != IRDMA_GEN_1) {
2163 rf->allocated_ws_nodes =
2164 kcalloc(BITS_TO_LONGS(IRDMA_MAX_WS_NODES),
2165 sizeof(unsigned long), GFP_KERNEL);
2166 if (!rf->allocated_ws_nodes)
2167 return -ENOMEM;
2168
2169 set_bit(0, rf->allocated_ws_nodes);
2170 rf->max_ws_node_id = IRDMA_MAX_WS_NODES;
2171 }
2172 rf->max_cqe = rf->sc_dev.hw_attrs.uk_attrs.max_hw_cq_size;
2173 rf->max_qp = rf->sc_dev.hmc_info->hmc_obj[IRDMA_HMC_IW_QP].cnt;
2174 rf->max_mr = rf->sc_dev.hmc_info->hmc_obj[IRDMA_HMC_IW_MR].cnt;
2175 rf->max_cq = rf->sc_dev.hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].cnt;
2176 rf->max_pd = rf->sc_dev.hw_attrs.max_hw_pds;
2177 rf->arp_table_size = rf->sc_dev.hmc_info->hmc_obj[IRDMA_HMC_IW_ARP].cnt;
2178 rf->max_ah = rf->sc_dev.hmc_info->hmc_obj[IRDMA_HMC_IW_FSIAV].cnt;
2179 rf->max_mcg = rf->max_qp;
2180
2181 rsrc_size = irdma_calc_mem_rsrc_size(rf);
2182 rf->mem_rsrc = vzalloc(rsrc_size);
2183 if (!rf->mem_rsrc) {
2184 ret = -ENOMEM;
2185 goto mem_rsrc_vmalloc_fail;
2186 }
2187
2188 rf->arp_table = (struct irdma_arp_entry *)rf->mem_rsrc;
2189
2190 irdma_set_hw_rsrc(rf);
2191
2192 set_bit(0, rf->allocated_mrs);
2193 set_bit(0, rf->allocated_qps);
2194 set_bit(0, rf->allocated_cqs);
2195 set_bit(0, rf->allocated_pds);
2196 set_bit(0, rf->allocated_arps);
2197 set_bit(0, rf->allocated_ahs);
2198 set_bit(0, rf->allocated_mcgs);
2199 set_bit(2, rf->allocated_qps); /* qp 2 IEQ */
2200 set_bit(1, rf->allocated_qps); /* qp 1 ILQ */
2201 set_bit(IRDMA_REM_ENDPOINT_TRK_QPID, rf->allocated_qps); /* qp 3 Remote Endpt trk */
2202 set_bit(1, rf->allocated_cqs);
2203 set_bit(1, rf->allocated_pds);
2204 set_bit(2, rf->allocated_cqs);
2205 set_bit(2, rf->allocated_pds);
2206
2207 INIT_LIST_HEAD(&rf->mc_qht_list.list);
2208 /* stag index mask has a minimum of 14 bits */
2209 mrdrvbits = 24 - max(get_count_order(rf->max_mr), 14);
2210 rf->mr_stagmask = ~(((1 << mrdrvbits) - 1) << (32 - mrdrvbits));
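	/*
	 * Worked example (illustrative numbers): with max_mr = 2^21,
	 * get_count_order() returns 21 and mrdrvbits = 24 - 21 = 3, so
	 * mr_stagmask = ~(0x7 << 29) = 0x1fffffff keeps the low 29 stag
	 * bits for the index and leaves the top 3 to the driver.
	 */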
2211
2212 return 0;
2213
2214 mem_rsrc_vmalloc_fail:
2215 kfree(rf->allocated_ws_nodes);
2216 rf->allocated_ws_nodes = NULL;
2217
2218 return ret;
2219 }
2220
2221 /**
2222 * irdma_cqp_ce_handler - handle cqp completions
2223 * @rf: RDMA PCI function
2224 * @cq: cq for cqp completions
2225 */
2226 void
2227 irdma_cqp_ce_handler(struct irdma_pci_f *rf, struct irdma_sc_cq *cq)
2228 {
2229 struct irdma_cqp_request *cqp_request;
2230 struct irdma_sc_dev *dev = &rf->sc_dev;
2231 u32 cqe_count = 0;
2232 struct irdma_ccq_cqe_info info;
2233 unsigned long flags;
2234 int ret;
2235
2236 do {
2237 memset(&info, 0, sizeof(info));
2238 spin_lock_irqsave(&rf->cqp.compl_lock, flags);
2239 ret = irdma_sc_ccq_get_cqe_info(cq, &info);
2240 spin_unlock_irqrestore(&rf->cqp.compl_lock, flags);
2241 if (ret)
2242 break;
2243
2244 cqp_request = (struct irdma_cqp_request *)
2245 (uintptr_t)info.scratch;
2246 if (cqp_request && info.error &&
2247 irdma_cqp_crit_err(dev, cqp_request->info.cqp_cmd,
2248 info.maj_err_code,
2249 info.min_err_code))
2250 irdma_dev_err(&rf->iwdev->ibdev,
2251 "cqp opcode = 0x%x maj_err_code = 0x%x min_err_code = 0x%x\n",
2252 info.op_code, info.maj_err_code,
2253 info.min_err_code);
2254 if (cqp_request) {
2255 cqp_request->compl_info.maj_err_code =
2256 info.maj_err_code;
2257 cqp_request->compl_info.min_err_code =
2258 info.min_err_code;
2259 cqp_request->compl_info.op_ret_val = info.op_ret_val;
2260 cqp_request->compl_info.error = info.error;
2261 irdma_complete_cqp_request(&rf->cqp, cqp_request);
2262 }
2263
2264 cqe_count++;
2265 } while (1);
2266
2267 if (cqe_count) {
2268 irdma_process_bh(dev);
2269 irdma_sc_ccq_arm(dev->ccq);
2270 }
2271 }
2272
2273 /**
2274 * cqp_compl_worker - Handle cqp completions
2275 * @work: Pointer to work structure
2276 */
2277 void
2278 cqp_compl_worker(struct work_struct *work)
2279 {
2280 struct irdma_pci_f *rf = container_of(work, struct irdma_pci_f,
2281 cqp_cmpl_work);
2282 struct irdma_sc_cq *cq = &rf->ccq.sc_cq;
2283
2284 irdma_cqp_ce_handler(rf, cq);
2285 }
2286
2287 /**
2288 * irdma_lookup_apbvt_entry - lookup hash table for an existing apbvt entry corresponding to port
2289 * @cm_core: cm's core
2290 * @port: port to identify apbvt entry
2291 */
2292 static struct irdma_apbvt_entry *
2293 irdma_lookup_apbvt_entry(struct irdma_cm_core *cm_core,
2294 u16 port)
2295 {
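	/* the caller must hold cm_core->apbvt_lock */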
2296 struct irdma_apbvt_entry *entry;
2297
2298 HASH_FOR_EACH_POSSIBLE(cm_core->apbvt_hash_tbl, entry, hlist, port) {
2299 if (entry->port == port) {
2300 entry->use_cnt++;
2301 return entry;
2302 }
2303 }
2304
2305 return NULL;
2306 }
2307
2308 /**
2309 * irdma_next_iw_state - modify qp state
2310 * @iwqp: iwarp qp to modify
2311 * @state: next state for qp
2312 * @del_hash: flag to remove the qp hash table entry
2313 * @term: flags selecting whether a terminate and/or FIN is sent
2314 * @termlen: length of the terminate message
2315 */
2316 void
2317 irdma_next_iw_state(struct irdma_qp *iwqp, u8 state, u8 del_hash, u8 term,
2318 u8 termlen)
2319 {
2320 struct irdma_modify_qp_info info = {0};
2321
2322 info.next_iwarp_state = state;
2323 info.remove_hash_idx = del_hash;
2324 info.cq_num_valid = true;
2325 info.arp_cache_idx_valid = true;
2326 info.dont_send_term = true;
2327 info.dont_send_fin = true;
2328 info.termlen = termlen;
2329
2330 if (term & IRDMAQP_TERM_SEND_TERM_ONLY)
2331 info.dont_send_term = false;
2332 if (term & IRDMAQP_TERM_SEND_FIN_ONLY)
2333 info.dont_send_fin = false;
2334 if (iwqp->sc_qp.term_flags && state == IRDMA_QP_STATE_ERROR)
2335 info.reset_tcp_conn = true;
2336 iwqp->hw_iwarp_state = state;
2337 irdma_hw_modify_qp(iwqp->iwdev, iwqp, &info, 0);
2338 iwqp->iwarp_state = info.next_iwarp_state;
2339 }
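/*
 * A typical call sketch (hypothetical caller, not part of this file):
 * move a qp to ERROR without sending a terminate or FIN and without
 * removing the hash entry:
 *
 *	irdma_next_iw_state(iwqp, IRDMA_QP_STATE_ERROR, 0, 0, 0);
 */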
2340
2341 /**
2342 * irdma_del_local_mac_entry - remove a mac entry from the hw
2343 * table
2344 * @rf: RDMA PCI function
2345 * @idx: the index of the mac ip address to delete
2346 */
2347 void
2348 irdma_del_local_mac_entry(struct irdma_pci_f *rf, u16 idx)
2349 {
2350 struct irdma_cqp *iwcqp = &rf->cqp;
2351 struct irdma_cqp_request *cqp_request;
2352 struct cqp_cmds_info *cqp_info;
2353
2354 cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, true);
2355 if (!cqp_request)
2356 return;
2357
2358 cqp_info = &cqp_request->info;
2359 cqp_info->cqp_cmd = IRDMA_OP_DELETE_LOCAL_MAC_ENTRY;
2360 cqp_info->post_sq = 1;
2361 cqp_info->in.u.del_local_mac_entry.cqp = &iwcqp->sc_cqp;
2362 cqp_info->in.u.del_local_mac_entry.scratch = (uintptr_t)cqp_request;
2363 cqp_info->in.u.del_local_mac_entry.entry_idx = idx;
2364 cqp_info->in.u.del_local_mac_entry.ignore_ref_count = 0;
2365
2366 irdma_handle_cqp_op(rf, cqp_request);
2367 irdma_put_cqp_request(iwcqp, cqp_request);
2368 }
2369
2370 /**
2371 * irdma_add_local_mac_entry - add a mac ip address entry to the
2372 * hw table
2373 * @rf: RDMA PCI function
2374 * @mac_addr: pointer to mac address
2375 * @idx: the index of the mac ip address to add
2376 */
2377 int
2378 irdma_add_local_mac_entry(struct irdma_pci_f *rf, const u8 *mac_addr, u16 idx)
2379 {
2380 struct irdma_local_mac_entry_info *info;
2381 struct irdma_cqp *iwcqp = &rf->cqp;
2382 struct irdma_cqp_request *cqp_request;
2383 struct cqp_cmds_info *cqp_info;
2384 int status;
2385
2386 cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, true);
2387 if (!cqp_request)
2388 return -ENOMEM;
2389
2390 cqp_info = &cqp_request->info;
2391 cqp_info->post_sq = 1;
2392 info = &cqp_info->in.u.add_local_mac_entry.info;
2393 ether_addr_copy(info->mac_addr, mac_addr);
2394 info->entry_idx = idx;
2395 cqp_info->in.u.add_local_mac_entry.scratch = (uintptr_t)cqp_request;
2396 cqp_info->cqp_cmd = IRDMA_OP_ADD_LOCAL_MAC_ENTRY;
2397 cqp_info->in.u.add_local_mac_entry.cqp = &iwcqp->sc_cqp;
2399 cqp_info->create = true;
2400
2401 status = irdma_handle_cqp_op(rf, cqp_request);
2402 irdma_put_cqp_request(iwcqp, cqp_request);
2403
2404 return status;
2405 }
2406
2407 /**
2408 * irdma_alloc_local_mac_entry - allocate a mac entry
2409 * @rf: RDMA PCI function
2410 * @mac_tbl_idx: the index of the new mac address
2411 *
2412 * Allocate a mac address entry and update the mac_tbl_idx
2413 * to hold the index of the newly created mac address
2414 * Return 0 if successful, otherwise return error
2415 */
2416 int
2417 irdma_alloc_local_mac_entry(struct irdma_pci_f *rf, u16 *mac_tbl_idx)
2418 {
2419 struct irdma_cqp *iwcqp = &rf->cqp;
2420 struct irdma_cqp_request *cqp_request;
2421 struct cqp_cmds_info *cqp_info;
2422 int status = 0;
2423
2424 cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, true);
2425 if (!cqp_request)
2426 return -ENOMEM;
2427
2428 cqp_info = &cqp_request->info;
2429 cqp_info->cqp_cmd = IRDMA_OP_ALLOC_LOCAL_MAC_ENTRY;
2430 cqp_info->post_sq = 1;
2431 cqp_info->in.u.alloc_local_mac_entry.cqp = &iwcqp->sc_cqp;
2432 cqp_info->in.u.alloc_local_mac_entry.scratch = (uintptr_t)cqp_request;
2433 cqp_info->create = true;
2434
2435 status = irdma_handle_cqp_op(rf, cqp_request);
2436 if (!status)
2437 *mac_tbl_idx = (u16)cqp_request->compl_info.op_ret_val;
2438
2439 irdma_put_cqp_request(iwcqp, cqp_request);
2440
2441 return status;
2442 }
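/*
 * A minimal sketch of how the MAC entry helpers pair up (hypothetical
 * caller, not part of this file): allocate an index, program the address
 * into it, and release the index if programming fails:
 *
 *	u16 idx;
 *
 *	if (!irdma_alloc_local_mac_entry(rf, &idx) &&
 *	    irdma_add_local_mac_entry(rf, mac_addr, idx))
 *		irdma_del_local_mac_entry(rf, idx);
 */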
2443
2444 /**
2445 * irdma_cqp_manage_apbvt_cmd - send cqp command manage apbvt
2446 * @iwdev: irdma device
2447 * @accel_local_port: port for apbvt
2448 * @add_port: true to add the port, false to delete it
2449 */
2450 static int
2451 irdma_cqp_manage_apbvt_cmd(struct irdma_device *iwdev,
2452 u16 accel_local_port, bool add_port)
2453 {
2454 struct irdma_apbvt_info *info;
2455 struct irdma_cqp_request *cqp_request;
2456 struct cqp_cmds_info *cqp_info;
2457 int status;
2458
2459 cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, add_port);
2460 if (!cqp_request)
2461 return -ENOMEM;
2462
2463 cqp_info = &cqp_request->info;
2464 info = &cqp_info->in.u.manage_apbvt_entry.info;
2465 info->add = add_port;
2466 info->port = accel_local_port;
2467 cqp_info->cqp_cmd = IRDMA_OP_MANAGE_APBVT_ENTRY;
2468 cqp_info->post_sq = 1;
2469 cqp_info->in.u.manage_apbvt_entry.cqp = &iwdev->rf->cqp.sc_cqp;
2470 cqp_info->in.u.manage_apbvt_entry.scratch = (uintptr_t)cqp_request;
2471 irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_DEV,
2472 "%s: port=0x%04x\n", (!add_port) ? "DELETE" : "ADD",
2473 accel_local_port);
2474
2475 status = irdma_handle_cqp_op(iwdev->rf, cqp_request);
2476 irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
2477
2478 return status;
2479 }
2480
2481 /**
2482 * irdma_add_apbvt - add tcp port to HW apbvt table
2483 * @iwdev: irdma device
2484 * @port: port for apbvt
2485 */
2486 struct irdma_apbvt_entry *
2487 irdma_add_apbvt(struct irdma_device *iwdev, u16 port)
2488 {
2489 struct irdma_cm_core *cm_core = &iwdev->cm_core;
2490 struct irdma_apbvt_entry *entry;
2491 unsigned long flags;
2492
2493 spin_lock_irqsave(&cm_core->apbvt_lock, flags);
2494 entry = irdma_lookup_apbvt_entry(cm_core, port);
2495 if (entry) {
2496 spin_unlock_irqrestore(&cm_core->apbvt_lock, flags);
2497 return entry;
2498 }
2499
2500 entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
2501 if (!entry) {
2502 spin_unlock_irqrestore(&cm_core->apbvt_lock, flags);
2503 return NULL;
2504 }
2505
2506 entry->port = port;
2507 entry->use_cnt = 1;
2508 HASH_ADD(cm_core->apbvt_hash_tbl, &entry->hlist, entry->port);
2509 spin_unlock_irqrestore(&cm_core->apbvt_lock, flags);
2510
2511 if (irdma_cqp_manage_apbvt_cmd(iwdev, port, true)) {
 	/* unlink the entry before freeing so lookups cannot find it */
 	spin_lock_irqsave(&cm_core->apbvt_lock, flags);
 	HASH_DEL(cm_core->apbvt_hash_tbl, &entry->hlist);
 	spin_unlock_irqrestore(&cm_core->apbvt_lock, flags);
2512 	kfree(entry);
2513 	return NULL;
2514 }
2515
2516 return entry;
2517 }
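/*
 * A minimal usage sketch (hypothetical caller, not part of this file):
 * apbvt entries are reference counted per port, so each successful
 * irdma_add_apbvt() must eventually be balanced by irdma_del_apbvt():
 *
 *	struct irdma_apbvt_entry *ent;
 *
 *	ent = irdma_add_apbvt(iwdev, tcp_port);
 *	if (!ent)
 *		return -ENOMEM;
 *	...
 *	irdma_del_apbvt(iwdev, ent);
 */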
2518
2519 /**
2520 * irdma_del_apbvt - delete tcp port from HW apbvt table
2521 * @iwdev: irdma device
2522 * @entry: apbvt entry object
2523 */
2524 void
2525 irdma_del_apbvt(struct irdma_device *iwdev,
2526 struct irdma_apbvt_entry *entry)
2527 {
2528 struct irdma_cm_core *cm_core = &iwdev->cm_core;
2529 unsigned long flags;
2530
2531 spin_lock_irqsave(&cm_core->apbvt_lock, flags);
2532 if (--entry->use_cnt) {
2533 spin_unlock_irqrestore(&cm_core->apbvt_lock, flags);
2534 return;
2535 }
2536
2537 HASH_DEL(cm_core->apbvt_hash_tbl, &entry->hlist);
2538 /*
2539 * apbvt_lock is held across the (non-waiting) CQP delete APBVT OP so that
2540 * an add APBVT CQP op for the same port cannot race ahead of the delete
2541 */
2542 irdma_cqp_manage_apbvt_cmd(iwdev, entry->port, false);
2543 kfree(entry);
2544 spin_unlock_irqrestore(&cm_core->apbvt_lock, flags);
2545 }
2546
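/**
 * irdma_arp_cqp_op - post a CQP op to add or delete a hw ARP cache entry
 * @rf: RDMA PCI function
 * @arp_index: index of the entry in the hw arp table
 * @mac_addr: mac address for an add/update, unused for a delete
 * @action: IRDMA_ARP_ADD_UPDATE to add/update the entry, otherwise delete
 */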
2547 void
2548 irdma_arp_cqp_op(struct irdma_pci_f *rf, u16 arp_index,
2549 const unsigned char *mac_addr, u32 action)
2550 {
2551 struct irdma_add_arp_cache_entry_info *info;
2552 struct irdma_cqp_request *cqp_request;
2553 struct cqp_cmds_info *cqp_info;
2554
2555 cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, false);
2556 if (!cqp_request)
2557 return;
2558
2559 cqp_info = &cqp_request->info;
2560 if (action == IRDMA_ARP_ADD_UPDATE) {
2561 cqp_info->cqp_cmd = IRDMA_OP_ADD_ARP_CACHE_ENTRY;
2562 info = &cqp_info->in.u.add_arp_cache_entry.info;
2563 info->arp_index = (u16)arp_index;
2564 info->permanent = true;
2565 ether_addr_copy(info->mac_addr, mac_addr);
2566 cqp_info->in.u.add_arp_cache_entry.scratch =
2567 (uintptr_t)cqp_request;
2568 cqp_info->in.u.add_arp_cache_entry.cqp = &rf->cqp.sc_cqp;
2569 } else {
2570 cqp_info->cqp_cmd = IRDMA_OP_DELETE_ARP_CACHE_ENTRY;
2571 cqp_info->in.u.del_arp_cache_entry.scratch =
2572 (uintptr_t)cqp_request;
2573 cqp_info->in.u.del_arp_cache_entry.cqp = &rf->cqp.sc_cqp;
2574 cqp_info->in.u.del_arp_cache_entry.arp_index = arp_index;
2575 }
2576
2577 cqp_info->post_sq = 1;
2578 irdma_handle_cqp_op(rf, cqp_request);
2579 irdma_put_cqp_request(&rf->cqp, cqp_request);
2580 }
2581
2582 /**
2583 * irdma_manage_arp_cache - manage hw arp cache
2584 * @rf: RDMA PCI function
2585 * @mac_addr: mac address ptr
2586 * @ip_addr: ip addr for arp cache
2587 * @action: add, delete or modify
2588 */
2589 void
2590 irdma_manage_arp_cache(struct irdma_pci_f *rf, const unsigned char *mac_addr,
2591 u32 *ip_addr, u32 action)
2592 {
2593 int arp_index;
2594
2595 arp_index = irdma_arp_table(rf, ip_addr, mac_addr, action);
2596 if (arp_index == -1)
2597 return;
2598
2599 irdma_arp_cqp_op(rf, (u16)arp_index, mac_addr, action);
2600 }
2601
2602 /**
2603 * irdma_send_syn_cqp_callback - do syn/ack after qhash
2604 * @cqp_request: qhash cqp completion
2605 */
2606 static void
2607 irdma_send_syn_cqp_callback(struct irdma_cqp_request *cqp_request)
2608 {
2609 struct irdma_cm_node *cm_node = cqp_request->param;
2610
2611 irdma_send_syn(cm_node, 1);
2612 irdma_rem_ref_cmnode(cm_node);
2613 }
2614
2615 /**
2616 * irdma_qhash_info_prepare - fill info for qhash op
2617 * @iwdev: irdma device
2618 * @cqp_info: cqp info
2619 * @cminfo: cm info for qhash
2620 * @etype: type (syn or quad)
2621 * @mtype: add or delete qhash entry
2622 *
 * The entry is keyed from the incoming packet's point of view: the local
 * address/port are programmed into dest_* and the remote peer into src_*.
 */
2623 static void
2624 irdma_qhash_info_prepare(struct irdma_device *iwdev,
2625 struct cqp_cmds_info *cqp_info,
2626 struct irdma_cm_info *cminfo,
2627 enum irdma_quad_entry_type etype,
2628 enum irdma_quad_hash_manage_type mtype)
2629 {
2630 struct irdma_qhash_table_info *info;
2631
2632 info = &cqp_info->in.u.manage_qhash_table_entry.info;
2633 info->vsi = &iwdev->vsi;
2634 info->manage = mtype;
2635 info->entry_type = etype;
2636 if (cminfo->vlan_id < VLAN_N_VID) {
2637 info->vlan_valid = true;
2638 info->vlan_id = cminfo->vlan_id;
2639 } else {
2640 info->vlan_valid = false;
2641 }
2642 info->ipv4_valid = cminfo->ipv4;
2643 info->user_pri = cminfo->user_pri;
2644 ether_addr_copy(info->mac_addr, if_getlladdr(iwdev->netdev));
2645 info->qp_num = cminfo->qh_qpid;
2646 info->dest_port = cminfo->loc_port;
2647 info->dest_ip[0] = cminfo->loc_addr[0];
2648 info->dest_ip[1] = cminfo->loc_addr[1];
2649 info->dest_ip[2] = cminfo->loc_addr[2];
2650 info->dest_ip[3] = cminfo->loc_addr[3];
2651 if (etype == IRDMA_QHASH_TYPE_TCP_ESTABLISHED ||
2652 etype == IRDMA_QHASH_TYPE_UDP_UNICAST ||
2653 etype == IRDMA_QHASH_TYPE_UDP_MCAST ||
2654 etype == IRDMA_QHASH_TYPE_ROCE_MCAST ||
2655 etype == IRDMA_QHASH_TYPE_ROCEV2_HW) {
2656 info->src_port = cminfo->rem_port;
2657 info->src_ip[0] = cminfo->rem_addr[0];
2658 info->src_ip[1] = cminfo->rem_addr[1];
2659 info->src_ip[2] = cminfo->rem_addr[2];
2660 info->src_ip[3] = cminfo->rem_addr[3];
2661 }
2662 cqp_info->cqp_cmd = IRDMA_OP_MANAGE_QHASH_TABLE_ENTRY;
2663 cqp_info->post_sq = 1;
2664 }
2665
2666 /**
2667 * irdma_add_qhash_wait_no_lock - add qhash entry, busy-polling for completion without holding locks
2668 * @iwdev: irdma device
2669 * @cminfo: cm info for qhash
2670 */
2671 int
2672 irdma_add_qhash_wait_no_lock(struct irdma_device *iwdev,
2673 struct irdma_cm_info *cminfo)
2674 {
2675 struct irdma_qhash_table_info *info;
2676 struct irdma_cqp *iwcqp = &iwdev->rf->cqp;
2677 struct irdma_cqp_request *cqp_request;
2678 struct cqp_cmds_info *cqp_info;
2679 int cnt = iwdev->rf->sc_dev.hw_attrs.max_cqp_compl_wait_time_ms * CQP_TIMEOUT_THRESHOLD;
2680 int status;
2681 int ret_val;
2682
2683 cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, false);
2684 if (!cqp_request)
2685 return -ENOMEM;
2686
2687 cqp_info = &cqp_request->info;
2688 info = &cqp_info->in.u.manage_qhash_table_entry.info;
2689 irdma_qhash_info_prepare(iwdev, cqp_info, cminfo, IRDMA_QHASH_TYPE_TCP_SYN,
2690 IRDMA_QHASH_MANAGE_TYPE_ADD);
2691 if (info->ipv4_valid)
2692 irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_CM,
2693 "ADD caller: %pS loc_port=0x%04x rem_port=0x%04x loc_addr=%x rem_addr=%x mac=%x:%x:%x:%x:%x:%x, vlan_id=%d\n",
2694 __builtin_return_address(0), info->dest_port,
2695 info->src_port, info->dest_ip[0], info->src_ip[0],
2696 info->mac_addr[0], info->mac_addr[1],
2697 info->mac_addr[2], info->mac_addr[3],
2698 info->mac_addr[4], info->mac_addr[5],
2699 cminfo->vlan_id);
2700 else
2701 irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_CM,
2702 "ADD caller: %pS loc_port=0x%04x rem_port=0x%04x loc_addr=%x:%x:%x:%x rem_addr=%x:%x:%x:%x mac=%x:%x:%x:%x:%x:%x, vlan_id=%d\n",
2703 __builtin_return_address(0), info->dest_port,
2704 info->src_port, IRDMA_PRINT_IP6(info->dest_ip),
2705 IRDMA_PRINT_IP6(info->src_ip), info->mac_addr[0],
2706 info->mac_addr[1], info->mac_addr[2],
2707 info->mac_addr[3], info->mac_addr[4],
2708 info->mac_addr[5], cminfo->vlan_id);
2709
2710 cqp_info->in.u.manage_qhash_table_entry.cqp = &iwdev->rf->cqp.sc_cqp;
2711 cqp_info->in.u.manage_qhash_table_entry.scratch = (uintptr_t)cqp_request;
2712 status = irdma_handle_cqp_op(iwdev->rf, cqp_request);
2713 if (status) {
2714 irdma_put_cqp_request(iwcqp, cqp_request);
2715 irdma_dev_warn(&iwdev->ibdev, "manage_qhash cqp op failure %d\n", status);
2716 return status;
2717 }
2718
2719 do {
2720 irdma_cqp_ce_handler(iwdev->rf, &iwdev->rf->ccq.sc_cq);
2721 mdelay(1);
2722 } while (!READ_ONCE(cqp_request->request_done) && --cnt);
2723
2724 ret_val = cqp_request->compl_info.op_ret_val;
2725 status = (cnt) ? ret_val : -ETIMEDOUT;
2726
2727 irdma_put_cqp_request(iwcqp, cqp_request);
2728
2729 return status;
2730 }
2731
2732 /**
2733 * irdma_manage_qhash - add or modify qhash
2734 * @iwdev: irdma device
2735 * @cminfo: cm info for qhash
2736 * @etype: type (syn or quad)
2737 * @mtype: type of qhash
2738 * @cmnode: cmnode associated with connection
2739 * @wait: wait for completion
2740 */
2741 int
2742 irdma_manage_qhash(struct irdma_device *iwdev, struct irdma_cm_info *cminfo,
2743 enum irdma_quad_entry_type etype,
2744 enum irdma_quad_hash_manage_type mtype, void *cmnode,
2745 bool wait)
2746 {
2747 struct irdma_qhash_table_info *info;
2748 struct irdma_cqp *iwcqp = &iwdev->rf->cqp;
2749 struct irdma_cqp_request *cqp_request;
2750 struct cqp_cmds_info *cqp_info;
2751 struct irdma_cm_node *cm_node = cmnode;
2752 int status;
2753
2754 cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, wait);
2755 if (!cqp_request)
2756 return -ENOMEM;
2757
2758 cqp_info = &cqp_request->info;
2759 info = &cqp_info->in.u.manage_qhash_table_entry.info;
2760 irdma_qhash_info_prepare(iwdev, cqp_info, cminfo, etype, mtype);
2761 if (cmnode) {
2762 cqp_request->callback_fcn = irdma_send_syn_cqp_callback;
2763 cqp_request->param = cmnode;
2764 if (!wait)
2765 irdma_add_ref_cmnode(cm_node);
2766 }
2767 if (info->ipv4_valid)
2768 irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_CM,
2769 "%s caller: %pS loc_port=0x%04x rem_port=0x%04x loc_addr=%x rem_addr=%x mac=%x:%x:%x:%x:%x:%x, vlan_id=%d cm_node=%p\n",
2770 (!mtype) ? "DELETE" : "ADD",
2771 __builtin_return_address(0), info->dest_port,
2772 info->src_port, info->dest_ip[0], info->src_ip[0],
2773 info->mac_addr[0], info->mac_addr[1],
2774 info->mac_addr[2], info->mac_addr[3],
2775 info->mac_addr[4], info->mac_addr[5],
2776 cminfo->vlan_id, cmnode);
2777 else
2778 irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_CM,
2779 "%s caller: %pS loc_port=0x%04x rem_port=0x%04x loc_addr=%x:%x:%x:%x rem_addr=%x:%x:%x:%x mac=%x:%x:%x:%x:%x:%x, vlan_id=%d cm_node=%p\n",
2780 (!mtype) ? "DELETE" : "ADD",
2781 __builtin_return_address(0), info->dest_port,
2782 info->src_port, IRDMA_PRINT_IP6(info->dest_ip),
2783 IRDMA_PRINT_IP6(info->src_ip), info->mac_addr[0],
2784 info->mac_addr[1], info->mac_addr[2],
2785 info->mac_addr[3], info->mac_addr[4],
2786 info->mac_addr[5], cminfo->vlan_id,
2787 cmnode);
2788
2789 cqp_info->in.u.manage_qhash_table_entry.cqp = &iwdev->rf->cqp.sc_cqp;
2790 cqp_info->in.u.manage_qhash_table_entry.scratch = (uintptr_t)cqp_request;
2791 status = irdma_handle_cqp_op(iwdev->rf, cqp_request);
2792 if (status && cm_node && !wait)
2793 irdma_rem_ref_cmnode(cm_node);
2794
2795 irdma_put_cqp_request(iwcqp, cqp_request);
2796
2797 return status;
2798 }
2799
2800 /**
2801 * irdma_hw_flush_wqes - flush qp's wqe
2802 * @rf: RDMA PCI function
2803 * @qp: hardware control qp
2804 * @info: info for flush
2805 * @wait: flag wait for completion
2806 */
2807 int
2808 irdma_hw_flush_wqes(struct irdma_pci_f *rf, struct irdma_sc_qp *qp,
2809 struct irdma_qp_flush_info *info, bool wait)
2810 {
2811 int status;
2812 struct irdma_qp_flush_info *hw_info;
2813 struct irdma_cqp_request *cqp_request;
2814 struct cqp_cmds_info *cqp_info;
2815 struct irdma_qp *iwqp = qp->qp_uk.back_qp;
2816
2817 cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, wait);
2818 if (!cqp_request)
2819 return -ENOMEM;
2820
2821 cqp_info = &cqp_request->info;
2822 hw_info = &cqp_request->info.in.u.qp_flush_wqes.info;
2823 memcpy(hw_info, info, sizeof(*hw_info));
2824 cqp_info->cqp_cmd = IRDMA_OP_QP_FLUSH_WQES;
2825 cqp_info->post_sq = 1;
2826 cqp_info->in.u.qp_flush_wqes.qp = qp;
2827 cqp_info->in.u.qp_flush_wqes.scratch = (uintptr_t)cqp_request;
2828 status = irdma_handle_cqp_op(rf, cqp_request);
2829 if (status) {
2830 qp->qp_uk.sq_flush_complete = true;
2831 qp->qp_uk.rq_flush_complete = true;
2832 irdma_put_cqp_request(&rf->cqp, cqp_request);
2833 return status;
2834 }
2835
2836 if (!wait || cqp_request->compl_info.maj_err_code)
2837 goto put_cqp;
2838
2839 if (info->rq) {
2840 if (cqp_request->compl_info.min_err_code == IRDMA_CQP_COMPL_SQ_WQE_FLUSHED ||
2841 cqp_request->compl_info.min_err_code == 0) {
2842 /* RQ WQE flush was requested but did not happen */
2843 qp->qp_uk.rq_flush_complete = true;
2844 }
2845 }
2846 if (info->sq) {
2847 if (cqp_request->compl_info.min_err_code == IRDMA_CQP_COMPL_RQ_WQE_FLUSHED ||
2848 cqp_request->compl_info.min_err_code == 0) {
2849 /* SQ WQE flush was requested but did not happen */
2850 qp->qp_uk.sq_flush_complete = true;
2851 }
2852 }
2853
2854 irdma_debug(&rf->sc_dev, IRDMA_DEBUG_VERBS,
2855 "qp_id=%d qp_type=%d qpstate=%d ibqpstate=%d last_aeq=%d hw_iw_state=%d maj_err_code=%d min_err_code=%d\n",
2856 iwqp->ibqp.qp_num, rf->protocol_used, iwqp->iwarp_state,
2857 iwqp->ibqp_state, iwqp->last_aeq, iwqp->hw_iwarp_state,
2858 cqp_request->compl_info.maj_err_code,
2859 cqp_request->compl_info.min_err_code);
2860 put_cqp:
2861 irdma_put_cqp_request(&rf->cqp, cqp_request);
2862
2863 return status;
2864 }
2865
2866 /**
2867 * irdma_gen_ae - generate AE
2868 * @rf: RDMA PCI function
2869 * @qp: qp associated with AE
2870 * @info: info for ae
2871 * @wait: wait for completion
2872 */
2873 void
2874 irdma_gen_ae(struct irdma_pci_f *rf, struct irdma_sc_qp *qp,
2875 struct irdma_gen_ae_info *info, bool wait)
2876 {
2877 struct irdma_gen_ae_info *ae_info;
2878 struct irdma_cqp_request *cqp_request;
2879 struct cqp_cmds_info *cqp_info;
2880
2881 cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, wait);
2882 if (!cqp_request)
2883 return;
2884
2885 cqp_info = &cqp_request->info;
2886 ae_info = &cqp_request->info.in.u.gen_ae.info;
2887 memcpy(ae_info, info, sizeof(*ae_info));
2888 cqp_info->cqp_cmd = IRDMA_OP_GEN_AE;
2889 cqp_info->post_sq = 1;
2890 cqp_info->in.u.gen_ae.qp = qp;
2891 cqp_info->in.u.gen_ae.scratch = (uintptr_t)cqp_request;
2892
2893 irdma_handle_cqp_op(rf, cqp_request);
2894 irdma_put_cqp_request(&rf->cqp, cqp_request);
2895 }
2896
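/**
 * irdma_flush_wqes - flush the qp's send and/or receive queues
 * @iwqp: qp to flush
 * @flush_mask: combination of IRDMA_FLUSH_SQ, IRDMA_FLUSH_RQ,
 *		IRDMA_FLUSH_WAIT and IRDMA_REFLUSH flags
 */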
2897 void
2898 irdma_flush_wqes(struct irdma_qp *iwqp, u32 flush_mask)
2899 {
2900 struct irdma_qp_flush_info info = {0};
2901 struct irdma_pci_f *rf = iwqp->iwdev->rf;
2902 u8 flush_code = iwqp->sc_qp.flush_code;
2903
2904 if (!(flush_mask & IRDMA_FLUSH_SQ) && !(flush_mask & IRDMA_FLUSH_RQ))
2905 return;
2906
2907 if (atomic_cmpxchg(&iwqp->flush_issued, 0, 1))
2908 return;
2909
2910 /* Set flush info fields */
2911 info.sq = flush_mask & IRDMA_FLUSH_SQ;
2912 info.rq = flush_mask & IRDMA_FLUSH_RQ;
2913
2914 /* Generate userflush errors in CQE */
2915 info.sq_major_code = IRDMA_FLUSH_MAJOR_ERR;
2916 info.sq_minor_code = FLUSH_GENERAL_ERR;
2917 info.rq_major_code = IRDMA_FLUSH_MAJOR_ERR;
2918 info.rq_minor_code = FLUSH_GENERAL_ERR;
2919 info.userflushcode = true;
2920
2921 if (flush_mask & IRDMA_REFLUSH) {
2922 if (info.sq)
2923 iwqp->sc_qp.flush_sq = false;
2924 if (info.rq)
2925 iwqp->sc_qp.flush_rq = false;
2926 } else {
2927 if (flush_code) {
2928 if (info.sq && iwqp->sc_qp.sq_flush_code)
2929 info.sq_minor_code = flush_code;
2930 if (info.rq && iwqp->sc_qp.rq_flush_code)
2931 info.rq_minor_code = flush_code;
2932 }
2933 if (irdma_upload_context &&
2934 irdma_upload_qp_context(rf, iwqp->sc_qp.qp_uk.qp_id,
2935 iwqp->sc_qp.qp_uk.qp_type, 0, 1))
2936 irdma_dev_warn(&iwqp->iwdev->ibdev, "failed to upload QP context\n");
2937 if (!iwqp->user_mode)
2938 irdma_sched_qp_flush_work(iwqp);
2939 }
2940
2941 /* Issue flush */
2942 (void)irdma_hw_flush_wqes(rf, &iwqp->sc_qp, &info,
2943 flush_mask & IRDMA_FLUSH_WAIT);
2944 }
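/*
 * A typical invocation sketch (hypothetical caller, not part of this
 * file): flush both queues and wait for the CQP op to complete:
 *
 *	irdma_flush_wqes(iwqp, IRDMA_FLUSH_SQ | IRDMA_FLUSH_RQ | IRDMA_FLUSH_WAIT);
 */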
2945