/*-
 * SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
 *
 * Copyright (c) 2015 - 2026 Intel Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenFabrics.org BSD license below:
 *
 *   Redistribution and use in source and binary forms, with or
 *   without modification, are permitted provided that the following
 *   conditions are met:
 *
 *    - Redistributions of source code must retain the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer.
 *
 *    - Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials
 *      provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "osdep.h"
#include "irdma_hmc.h"
#include "irdma_defs.h"
#include "irdma_type.h"
#include "irdma_protos.h"
#include "irdma_puda.h"
#include "irdma_ws.h"

static void
irdma_ieq_receive(struct irdma_sc_vsi *vsi,
                  struct irdma_puda_buf *buf);
static void irdma_ieq_tx_compl(struct irdma_sc_vsi *vsi, void *sqwrid);
static void
irdma_ilq_putback_rcvbuf(struct irdma_sc_qp *qp,
                         struct irdma_puda_buf *buf, u32 wqe_idx);

/**
 * irdma_puda_get_listbuf - get buffer from puda list
 * @list: list to use for buffers (ILQ or IEQ)
 */
static struct irdma_puda_buf *
irdma_puda_get_listbuf(struct list_head *list)
{
    struct irdma_puda_buf *buf = NULL;

    if (!list_empty(list)) {
        buf = (struct irdma_puda_buf *)(list)->next;
        list_del((struct list_head *)&buf->list);
    }

    return buf;
}

/**
 * irdma_puda_get_bufpool - return buffer from resource
 * @rsrc: resource to use for buffer
 */
struct irdma_puda_buf *
irdma_puda_get_bufpool(struct irdma_puda_rsrc *rsrc)
{
    struct irdma_puda_buf *buf = NULL;
    struct list_head *list = &rsrc->bufpool;
    unsigned long flags;

    spin_lock_irqsave(&rsrc->bufpool_lock, flags);
    buf = irdma_puda_get_listbuf(list);
    if (buf) {
        rsrc->avail_buf_count--;
        buf->vsi = rsrc->vsi;
    } else {
        rsrc->stats_buf_alloc_fail++;
    }
    spin_unlock_irqrestore(&rsrc->bufpool_lock, flags);

    return buf;
}

/**
 * irdma_puda_ret_bufpool - return buffer to rsrc list
 * @rsrc: resource to use for buffer
 * @buf: buffer to return to resource
 */
void
irdma_puda_ret_bufpool(struct irdma_puda_rsrc *rsrc,
                       struct irdma_puda_buf *buf)
{
    unsigned long flags;

    buf->do_lpb = false;
    spin_lock_irqsave(&rsrc->bufpool_lock, flags);
    list_add(&buf->list, &rsrc->bufpool);
    spin_unlock_irqrestore(&rsrc->bufpool_lock, flags);
    rsrc->avail_buf_count++;
}

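/*
 * Note on pool accounting: buffers move on and off the bufpool list
 * under bufpool_lock, while avail_buf_count is only a usage statistic
 * (in irdma_puda_ret_bufpool() it is even incremented after the lock is
 * dropped), so it should not be relied on for exact synchronization.
 */
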
/**
 * irdma_puda_post_recvbuf - set wqe for rcv buffer
 * @rsrc: resource ptr
 * @wqe_idx: wqe index to use
 * @buf: puda buffer for rcv q
 * @initial: flag if during init time
 */
static void
irdma_puda_post_recvbuf(struct irdma_puda_rsrc *rsrc, u32 wqe_idx,
                        struct irdma_puda_buf *buf, bool initial)
{
    __le64 *wqe;
    struct irdma_sc_qp *qp = &rsrc->qp;
    u64 offset24 = 0;

    /* Sync buffer for use by device */
    dma_sync_single_for_device(hw_to_dev(rsrc->dev->hw), buf->mem.pa,
                               buf->mem.size, DMA_BIDIRECTIONAL);
    qp->qp_uk.rq_wrid_array[wqe_idx] = (uintptr_t)buf;
    wqe = qp->qp_uk.rq_base[wqe_idx].elem;
    if (!initial)
        get_64bit_val(wqe, IRDMA_BYTE_24, &offset24);

    offset24 = (offset24) ? 0 : FIELD_PREP(IRDMAQPSQ_VALID, 1);

    set_64bit_val(wqe, IRDMA_BYTE_16, 0);
    set_64bit_val(wqe, 0, buf->mem.pa);
    if (qp->qp_uk.uk_attrs->hw_rev == IRDMA_GEN_1) {
        set_64bit_val(wqe, IRDMA_BYTE_8,
                      FIELD_PREP(IRDMAQPSQ_GEN1_FRAG_LEN, buf->mem.size));
    } else {
        set_64bit_val(wqe, IRDMA_BYTE_8,
                      FIELD_PREP(IRDMAQPSQ_FRAG_LEN, buf->mem.size) |
                      offset24);
    }
    irdma_wmb();	/* make sure WQE is written before valid bit is set */

    set_64bit_val(wqe, IRDMA_BYTE_24, offset24);
}

/**
 * irdma_puda_replenish_rq - post rcv buffers
 * @rsrc: resource to use for buffer
 * @initial: flag if during init time
 */
static int
irdma_puda_replenish_rq(struct irdma_puda_rsrc *rsrc, bool initial)
{
    u32 i;
    u32 invalid_cnt = rsrc->rxq_invalid_cnt;
    struct irdma_puda_buf *buf = NULL;

    for (i = 0; i < invalid_cnt; i++) {
        buf = irdma_puda_get_bufpool(rsrc);
        if (!buf)
            return -ENOBUFS;
        irdma_puda_post_recvbuf(rsrc, rsrc->rx_wqe_idx, buf, initial);
        rsrc->rx_wqe_idx = ((rsrc->rx_wqe_idx + 1) % rsrc->rq_size);
        rsrc->rxq_invalid_cnt--;
    }

    return 0;
}

/**
 * irdma_puda_alloc_buf - allocate mem for buffer
 * @dev: iwarp device
 * @len: length of buffer
 */
static struct irdma_puda_buf *
irdma_puda_alloc_buf(struct irdma_sc_dev *dev,
                     u32 len)
{
    struct irdma_puda_buf *buf;
    struct irdma_virt_mem buf_mem;

    buf_mem.size = sizeof(*buf);
    buf_mem.va = kzalloc(buf_mem.size, GFP_KERNEL);
    if (!buf_mem.va)
        return NULL;

    buf = buf_mem.va;
    buf->mem.size = len;
    buf->mem.va = kzalloc(buf->mem.size, GFP_KERNEL);
    if (!buf->mem.va)
        goto free_virt;
    buf->mem.pa = dma_map_single(hw_to_dev(dev->hw), buf->mem.va,
                                 buf->mem.size, DMA_BIDIRECTIONAL);
    if (dma_mapping_error(hw_to_dev(dev->hw), buf->mem.pa)) {
        kfree(buf->mem.va);
        goto free_virt;
    }

    buf->buf_mem.va = buf_mem.va;
    buf->buf_mem.size = buf_mem.size;

    return buf;

free_virt:
    kfree(buf_mem.va);
    return NULL;
}

/**
 * irdma_puda_dele_buf - delete buffer back to system
 * @dev: iwarp device
 * @buf: buffer to free
 */
static void
irdma_puda_dele_buf(struct irdma_sc_dev *dev,
                    struct irdma_puda_buf *buf)
{
    if (!buf->virtdma) {
        irdma_free_dma_mem(dev->hw, &buf->mem);
        kfree(buf->buf_mem.va);
    }
}

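/*
 * Buffers with buf->virtdma set were carved out of one large DMA chunk
 * in irdma_puda_allocbufs() and share the chunk owner's mapping;
 * irdma_puda_dele_buf() therefore frees only buffers that own their
 * memory (virtdma == false), which covers both standalone allocations
 * from irdma_puda_alloc_buf() and the first buffer holding the chunk.
 */
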
/**
 * irdma_puda_get_next_send_wqe - return next wqe for processing
 * @qp: puda qp for wqe
 * @wqe_idx: wqe index for caller
 */
static __le64 *
irdma_puda_get_next_send_wqe(struct irdma_qp_uk *qp, u32 *wqe_idx)
{
    int ret_code = 0;

    *wqe_idx = IRDMA_RING_CURRENT_HEAD(qp->sq_ring);
    if (!*wqe_idx)
        qp->swqe_polarity = !qp->swqe_polarity;
    IRDMA_RING_MOVE_HEAD(qp->sq_ring, ret_code);
    if (ret_code)
        return NULL;

    return qp->sq_base[*wqe_idx].elem;
}

/**
 * irdma_puda_poll_info - poll cq for completion
 * @cq: cq for poll
 * @info: info return for successful completion
 */
static int
irdma_puda_poll_info(struct irdma_sc_cq *cq,
                     struct irdma_puda_cmpl_info *info)
{
    struct irdma_cq_uk *cq_uk = &cq->cq_uk;
    u64 qword0, qword2, qword3, qword6;
    __le64 *cqe;
    __le64 *ext_cqe = NULL;
    u64 qword7 = 0;
    u64 comp_ctx;
    bool valid_bit;
    bool ext_valid = false;
    u32 major_err, minor_err;
    u32 peek_head;
    bool error;
    u8 polarity;

    cqe = IRDMA_GET_CURRENT_CQ_ELEM(&cq->cq_uk);
    get_64bit_val(cqe, IRDMA_BYTE_24, &qword3);
    valid_bit = (bool)FIELD_GET(IRDMA_CQ_VALID, qword3);
    if (valid_bit != cq_uk->polarity)
        return -ENOENT;

    /* Ensure CQE contents are read after valid bit is checked */
    rmb();

    if (cq->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
        ext_valid = (bool)FIELD_GET(IRDMA_CQ_EXTCQE, qword3);

    if (ext_valid) {
        peek_head = (cq_uk->cq_ring.head + 1) % cq_uk->cq_ring.size;
        ext_cqe = cq_uk->cq_base[peek_head].buf;
        get_64bit_val(ext_cqe, IRDMA_BYTE_24, &qword7);
        polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword7);
        if (!peek_head)
            polarity ^= 1;
        if (polarity != cq_uk->polarity)
            return -ENOENT;

        /* Ensure ext CQE contents are read after ext valid bit is checked */
        rmb();

        IRDMA_RING_MOVE_HEAD_NOCHECK(cq_uk->cq_ring);
        if (!IRDMA_RING_CURRENT_HEAD(cq_uk->cq_ring))
            cq_uk->polarity = !cq_uk->polarity;
        /* update cq tail in cq shadow memory also */
        IRDMA_RING_MOVE_TAIL(cq_uk->cq_ring);
    }

    irdma_debug_buf(cq->dev, IRDMA_DEBUG_PUDA, "PUDA CQE", cqe, 32);
    if (ext_valid)
        irdma_debug_buf(cq->dev, IRDMA_DEBUG_PUDA, "PUDA EXT-CQE",
                        ext_cqe, 32);

    error = (bool)FIELD_GET(IRDMA_CQ_ERROR, qword3);
    if (error) {
        irdma_debug(cq->dev, IRDMA_DEBUG_PUDA, "receive error\n");
        major_err = (u32)(FIELD_GET(IRDMA_CQ_MAJERR, qword3));
        minor_err = (u32)(FIELD_GET(IRDMA_CQ_MINERR, qword3));
        info->compl_error = major_err << 16 | minor_err;
        return -EIO;
    }

    get_64bit_val(cqe, IRDMA_BYTE_0, &qword0);
    get_64bit_val(cqe, IRDMA_BYTE_16, &qword2);

    info->q_type = (u8)FIELD_GET(IRDMA_CQ_SQ, qword3);
    info->qp_id = (u32)FIELD_GET(IRDMACQ_QPID, qword2);
    if (cq->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
        info->ipv4 = (bool)FIELD_GET(IRDMACQ_IPV4, qword3);

    get_64bit_val(cqe, IRDMA_BYTE_8, &comp_ctx);
    info->qp = (struct irdma_qp_uk *)(irdma_uintptr) comp_ctx;
    info->wqe_idx = (u32)FIELD_GET(IRDMA_CQ_WQEIDX, qword3);

    if (info->q_type == IRDMA_CQE_QTYPE_RQ) {
        if (ext_valid) {
            info->vlan_valid = (bool)FIELD_GET(IRDMA_CQ_UDVLANVALID, qword7);
            if (info->vlan_valid) {
                get_64bit_val(ext_cqe, IRDMA_BYTE_16, &qword6);
                info->vlan = (u16)FIELD_GET(IRDMA_CQ_UDVLAN, qword6);
            }
            info->smac_valid = (bool)FIELD_GET(IRDMA_CQ_UDSMACVALID, qword7);
            if (info->smac_valid) {
                get_64bit_val(ext_cqe, IRDMA_BYTE_16, &qword6);
                info->smac[0] = (u8)((qword6 >> 40) & 0xFF);
                info->smac[1] = (u8)((qword6 >> 32) & 0xFF);
                info->smac[2] = (u8)((qword6 >> 24) & 0xFF);
                info->smac[3] = (u8)((qword6 >> 16) & 0xFF);
                info->smac[4] = (u8)((qword6 >> 8) & 0xFF);
                info->smac[5] = (u8)(qword6 & 0xFF);
            }
        }

        if (cq->dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1) {
            info->vlan_valid = (bool)FIELD_GET(IRDMA_VLAN_TAG_VALID, qword3);
            info->l4proto = (u8)FIELD_GET(IRDMA_UDA_L4PROTO, qword2);
            info->l3proto = (u8)FIELD_GET(IRDMA_UDA_L3PROTO, qword2);
        }

        info->payload_len = (u32)FIELD_GET(IRDMACQ_PAYLDLEN, qword0);
    }

    return 0;
}

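/*
 * The valid-bit/polarity convention used throughout this file: each
 * descriptor carries a valid bit that is compared against the ring's
 * polarity, and the polarity flips every time the ring head wraps back
 * to index 0. Consumed entries therefore never need to be zeroed; a
 * stale entry simply carries the wrong polarity for the current pass.
 */
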
/**
 * irdma_puda_poll_cmpl - processes completion for cq
 * @dev: iwarp device
 * @cq: cq getting interrupt
 * @compl_err: return any completion err
 */
int
irdma_puda_poll_cmpl(struct irdma_sc_dev *dev, struct irdma_sc_cq *cq,
                     u32 *compl_err)
{
    struct irdma_qp_uk *qp;
    struct irdma_cq_uk *cq_uk = &cq->cq_uk;
    struct irdma_puda_cmpl_info info = {0};
    int ret = 0;
    struct irdma_puda_buf *buf;
    struct irdma_puda_rsrc *rsrc;
    u8 cq_type = cq->cq_type;
    unsigned long flags;

    if (cq_type == IRDMA_CQ_TYPE_ILQ || cq_type == IRDMA_CQ_TYPE_IEQ) {
        rsrc = (cq_type == IRDMA_CQ_TYPE_ILQ) ? cq->vsi->ilq :
               cq->vsi->ieq;
    } else {
        irdma_debug(dev, IRDMA_DEBUG_PUDA, "qp_type error\n");
        return -EFAULT;
    }

    ret = irdma_puda_poll_info(cq, &info);
    *compl_err = info.compl_error;
    if (ret == -ENOENT)
        return ret;
    if (ret)
        goto done;

    qp = info.qp;
    if (!qp || !rsrc) {
        ret = -EFAULT;
        goto done;
    }

    if (qp->qp_id != rsrc->qp_id) {
        ret = -EFAULT;
        goto done;
    }

    if (info.q_type == IRDMA_CQE_QTYPE_RQ) {
        buf = (struct irdma_puda_buf *)(uintptr_t)
              qp->rq_wrid_array[info.wqe_idx];

        /* reusing so sync the buffer for CPU use */
        dma_sync_single_for_cpu(hw_to_dev(dev->hw), buf->mem.pa,
                                buf->mem.size, DMA_BIDIRECTIONAL);
        /* Get all the tcpip information in the buf header */
        ret = irdma_puda_get_tcpip_info(&info, buf);
        if (ret) {
            rsrc->stats_rcvd_pkt_err++;
            if (cq_type == IRDMA_CQ_TYPE_ILQ) {
                irdma_ilq_putback_rcvbuf(&rsrc->qp, buf,
                                         info.wqe_idx);
            } else {
                irdma_puda_ret_bufpool(rsrc, buf);
                irdma_puda_replenish_rq(rsrc, false);
            }
            goto done;
        }

        rsrc->stats_pkt_rcvd++;
        rsrc->compl_rxwqe_idx = info.wqe_idx;
        irdma_debug(dev, IRDMA_DEBUG_PUDA, "RQ completion\n");
        rsrc->receive(rsrc->vsi, buf);
        if (cq_type == IRDMA_CQ_TYPE_ILQ)
            irdma_ilq_putback_rcvbuf(&rsrc->qp, buf, info.wqe_idx);
        else
            irdma_puda_replenish_rq(rsrc, false);
    } else {
        irdma_debug(dev, IRDMA_DEBUG_PUDA, "SQ completion\n");
        buf = (struct irdma_puda_buf *)(uintptr_t)
              qp->sq_wrtrk_array[info.wqe_idx].wrid;

        /* reusing so sync the buffer for CPU use */
        dma_sync_single_for_cpu(hw_to_dev(dev->hw), buf->mem.pa,
                                buf->mem.size, DMA_BIDIRECTIONAL);
        IRDMA_RING_SET_TAIL(qp->sq_ring, info.wqe_idx);
        buf->queued = false;
        rsrc->xmit_complete(rsrc->vsi, buf);
        spin_lock_irqsave(&rsrc->bufpool_lock, flags);
        rsrc->tx_wqe_avail_cnt++;
        spin_unlock_irqrestore(&rsrc->bufpool_lock, flags);
        if (!list_empty(&rsrc->txpend))
            irdma_puda_send_buf(rsrc, NULL);
    }

done:
    IRDMA_RING_MOVE_HEAD_NOCHECK(cq_uk->cq_ring);
    if (!IRDMA_RING_CURRENT_HEAD(cq_uk->cq_ring))
        cq_uk->polarity = !cq_uk->polarity;
    /* update cq tail in cq shadow memory also */
    IRDMA_RING_MOVE_TAIL(cq_uk->cq_ring);
    set_64bit_val(cq_uk->shadow_area, IRDMA_BYTE_0,
                  IRDMA_RING_CURRENT_HEAD(cq_uk->cq_ring));

    return ret;
}

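/*
 * irdma_puda_send() below builds a four-quadword WQE: qw0 holds the
 * buffer physical address, qw1 the fragment length (plus the valid bit
 * on GEN_2), and qw2/qw3 carry the header fields assembled in
 * hdr[0]/hdr[1]. qw3 is written last, after a write barrier, since it
 * contains the valid bit the hardware polls on.
 */
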
/**
 * irdma_puda_send - complete send wqe for transmit
 * @qp: puda qp for send
 * @info: buffer information for transmit
 */
int
irdma_puda_send(struct irdma_sc_qp *qp, struct irdma_puda_send_info *info)
{
    __le64 *wqe;
    u32 iplen, l4len;
    u64 hdr[2];
    u32 wqe_idx;
    u8 iipt;

    /* number of 32-bit DWORDS in header */
    l4len = info->tcplen >> 2;
    if (info->ipv4) {
        iipt = 3;
        iplen = 5;
    } else {
        iipt = 1;
        iplen = 10;
    }

    wqe = irdma_puda_get_next_send_wqe(&qp->qp_uk, &wqe_idx);
    if (!wqe)
        return -ENOSPC;

    qp->qp_uk.sq_wrtrk_array[wqe_idx].wrid = (uintptr_t)info->scratch;
    /* Third line of WQE descriptor */
    /* maclen is in words */

    if (qp->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
        hdr[0] = 0;	/* Dest_QPN and Dest_QKey only for UD */
        hdr[1] = FIELD_PREP(IRDMA_UDA_QPSQ_OPCODE, IRDMA_OP_TYPE_SEND) |
                 FIELD_PREP(IRDMA_UDA_QPSQ_L4LEN, l4len) |
                 FIELD_PREP(IRDMAQPSQ_AHID, info->ah_id) |
                 FIELD_PREP(IRDMA_UDA_QPSQ_SIGCOMPL, 1) |
                 FIELD_PREP(IRDMA_UDA_QPSQ_VALID,
                            qp->qp_uk.swqe_polarity);

        /* Fourth line of WQE descriptor */

        set_64bit_val(wqe, IRDMA_BYTE_0, info->paddr);
        set_64bit_val(wqe, IRDMA_BYTE_8,
                      FIELD_PREP(IRDMAQPSQ_FRAG_LEN, info->len) |
                      FIELD_PREP(IRDMA_UDA_QPSQ_VALID, qp->qp_uk.swqe_polarity));
    } else {
        hdr[0] = FIELD_PREP(IRDMA_UDA_QPSQ_MACLEN, info->maclen >> 1) |
                 FIELD_PREP(IRDMA_UDA_QPSQ_IPLEN, iplen) |
                 FIELD_PREP(IRDMA_UDA_QPSQ_L4T, 1) |
                 FIELD_PREP(IRDMA_UDA_QPSQ_IIPT, iipt) |
                 FIELD_PREP(IRDMA_GEN1_UDA_QPSQ_L4LEN, l4len);

        hdr[1] = FIELD_PREP(IRDMA_UDA_QPSQ_OPCODE, IRDMA_OP_TYPE_SEND) |
                 FIELD_PREP(IRDMA_UDA_QPSQ_SIGCOMPL, 1) |
                 FIELD_PREP(IRDMA_UDA_QPSQ_DOLOOPBACK, info->do_lpb) |
                 FIELD_PREP(IRDMA_UDA_QPSQ_VALID, qp->qp_uk.swqe_polarity);

        /* Fourth line of WQE descriptor */

        set_64bit_val(wqe, IRDMA_BYTE_0, info->paddr);
        set_64bit_val(wqe, IRDMA_BYTE_8,
                      FIELD_PREP(IRDMAQPSQ_GEN1_FRAG_LEN, info->len));
    }

    set_64bit_val(wqe, IRDMA_BYTE_16, hdr[0]);
    irdma_wmb();	/* make sure WQE is written before valid bit is set */

    set_64bit_val(wqe, IRDMA_BYTE_24, hdr[1]);

    irdma_debug_buf(qp->dev, IRDMA_DEBUG_PUDA, "PUDA SEND WQE", wqe, 32);
    irdma_uk_qp_post_wr(&qp->qp_uk);
    return 0;
}

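/*
 * The GEN_1 header lengths above are in 32-bit words: iplen 5 vs 10
 * corresponds to a 20-byte IPv4 vs 40-byte IPv6 header, and
 * l4len = tcplen >> 2 converts the TCP header length to words. iipt
 * appears to encode the IP header type for the hardware (3 for IPv4,
 * 1 for IPv6), per the branch above.
 */
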
/**
 * irdma_puda_send_buf - transmit puda buffer
 * @rsrc: resource to use for buffer
 * @buf: puda buffer to transmit
 */
int
irdma_puda_send_buf(struct irdma_puda_rsrc *rsrc,
                    struct irdma_puda_buf *buf)
{
    struct irdma_puda_send_info info;
    int ret = 0;
    unsigned long flags;

    spin_lock_irqsave(&rsrc->bufpool_lock, flags);
    if (buf) {
        if (buf->queued) {
            irdma_debug(rsrc->dev, IRDMA_DEBUG_PUDA,
                        "PUDA: Attempting to re-send queued buf %p\n",
                        buf);
            spin_unlock_irqrestore(&rsrc->bufpool_lock, flags);
            return -EINVAL;
        }

        buf->queued = true;
    }
    /*
     * if no wqe is available, or we are not coming from a completion
     * and have pending buffers, we must queue the new buffer
     */
    if (!rsrc->tx_wqe_avail_cnt || (buf && !list_empty(&rsrc->txpend))) {
        list_add_tail(&buf->list, &rsrc->txpend);
        rsrc->stats_sent_pkt_q++;
        spin_unlock_irqrestore(&rsrc->bufpool_lock, flags);
        if (rsrc->type == IRDMA_PUDA_RSRC_TYPE_ILQ)
            irdma_debug(rsrc->dev, IRDMA_DEBUG_PUDA,
                        "adding to txpend\n");
        return 0;
    }
    rsrc->tx_wqe_avail_cnt--;
    /*
     * if we are coming from a completion and have pending buffers,
     * get one from the pending list
     */
    if (!buf) {
        buf = irdma_puda_get_listbuf(&rsrc->txpend);
        if (!buf)
            goto done;
    }

    info.scratch = buf;
    info.paddr = buf->mem.pa;
    info.len = buf->totallen;
    info.tcplen = buf->tcphlen;
    info.ipv4 = buf->ipv4;

    if (rsrc->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
        info.ah_id = buf->ah_id;
    } else {
        info.maclen = buf->maclen;
        info.do_lpb = buf->do_lpb;
    }

    /* Sync buffer for use by device */
    dma_sync_single_for_cpu(hw_to_dev(rsrc->dev->hw), buf->mem.pa,
                            buf->mem.size, DMA_BIDIRECTIONAL);
    ret = irdma_puda_send(&rsrc->qp, &info);
    if (ret) {
        rsrc->tx_wqe_avail_cnt++;
        rsrc->stats_sent_pkt_q++;
        list_add(&buf->list, &rsrc->txpend);
        if (rsrc->type == IRDMA_PUDA_RSRC_TYPE_ILQ)
            irdma_debug(rsrc->dev, IRDMA_DEBUG_PUDA,
                        "adding to puda_send\n");
    } else {
        rsrc->stats_pkt_sent++;
    }
done:
    spin_unlock_irqrestore(&rsrc->bufpool_lock, flags);
    return 0;
}

/**
 * irdma_puda_qp_setctx - during init, set qp's context
 * @rsrc: qp's resource
 */
static void
irdma_puda_qp_setctx(struct irdma_puda_rsrc *rsrc)
{
    struct irdma_sc_qp *qp = &rsrc->qp;
    __le64 *qp_ctx = qp->hw_host_ctx;

    set_64bit_val(qp_ctx, IRDMA_BYTE_8, qp->sq_pa);
    set_64bit_val(qp_ctx, IRDMA_BYTE_16, qp->rq_pa);
    set_64bit_val(qp_ctx, IRDMA_BYTE_24,
                  FIELD_PREP(IRDMAQPC_RQSIZE, qp->hw_rq_size) |
                  FIELD_PREP(IRDMAQPC_SQSIZE, qp->hw_sq_size));
    set_64bit_val(qp_ctx, IRDMA_BYTE_48,
                  FIELD_PREP(IRDMAQPC_SNDMSS, rsrc->buf_size));
    set_64bit_val(qp_ctx, IRDMA_BYTE_56, 0);
    if (qp->dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1)
        set_64bit_val(qp_ctx, IRDMA_BYTE_64, 1);
    set_64bit_val(qp_ctx, IRDMA_BYTE_136,
                  FIELD_PREP(IRDMAQPC_TXCQNUM, rsrc->cq_id) |
                  FIELD_PREP(IRDMAQPC_RXCQNUM, rsrc->cq_id));
    set_64bit_val(qp_ctx, IRDMA_BYTE_144,
                  FIELD_PREP(IRDMAQPC_STAT_INDEX, rsrc->stats_idx));
    set_64bit_val(qp_ctx, IRDMA_BYTE_160,
                  FIELD_PREP(IRDMAQPC_PRIVEN, 1) |
                  FIELD_PREP(IRDMAQPC_USESTATSINSTANCE, rsrc->stats_idx_valid));
    set_64bit_val(qp_ctx, IRDMA_BYTE_168,
                  FIELD_PREP(IRDMAQPC_QPCOMPCTX, (uintptr_t)qp));
    set_64bit_val(qp_ctx, IRDMA_BYTE_176,
                  FIELD_PREP(IRDMAQPC_SQTPHVAL, qp->sq_tph_val) |
                  FIELD_PREP(IRDMAQPC_RQTPHVAL, qp->rq_tph_val) |
                  FIELD_PREP(IRDMAQPC_QSHANDLE, qp->qs_handle));

    irdma_debug_buf(rsrc->dev, IRDMA_DEBUG_PUDA, "PUDA QP CONTEXT", qp_ctx,
                    IRDMA_QP_CTX_SIZE);
}

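/*
 * The host context written above is what the control QP consumes when
 * irdma_puda_qp_wqe()/irdma_cqp_qp_create_cmd() issue the QP create: it
 * points the hardware at the SQ/RQ rings, ties both directions to the
 * single PUDA CQ, and sets SNDMSS to the PUDA buffer size.
 */
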
/**
 * irdma_puda_qp_wqe - setup wqe for qp create
 * @dev: Device
 * @qp: Resource qp
 */
static int
irdma_puda_qp_wqe(struct irdma_sc_dev *dev, struct irdma_sc_qp *qp)
{
    struct irdma_sc_cqp *cqp;
    __le64 *wqe;
    u64 hdr;
    struct irdma_ccq_cqe_info compl_info;
    int status = 0;

    cqp = dev->cqp;
    wqe = irdma_sc_cqp_get_next_send_wqe(cqp, 0);
    if (!wqe)
        return -ENOSPC;

    set_64bit_val(wqe, IRDMA_BYTE_16, qp->hw_host_ctx_pa);
    set_64bit_val(wqe, IRDMA_BYTE_40, qp->shadow_area_pa);

    hdr = qp->qp_uk.qp_id |
          FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_CREATE_QP) |
          FIELD_PREP(IRDMA_CQPSQ_QP_QPTYPE, IRDMA_QP_TYPE_UDA) |
          FIELD_PREP(IRDMA_CQPSQ_QP_CQNUMVALID, 1) |
          FIELD_PREP(IRDMA_CQPSQ_QP_NEXTIWSTATE, 2) |
          FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
    irdma_wmb();	/* make sure WQE is written before valid bit is set */

    set_64bit_val(wqe, IRDMA_BYTE_24, hdr);

    irdma_debug_buf(cqp->dev, IRDMA_DEBUG_PUDA, "PUDA QP CREATE", wqe, 40);
    irdma_sc_cqp_post_sq(cqp);
    status = irdma_sc_poll_for_cqp_op_done(dev->cqp, IRDMA_CQP_OP_CREATE_QP,
                                           &compl_info);

    return status;
}

/**
 * irdma_puda_qp_create - create qp for resource
 * @rsrc: resource to use for buffer
 */
static int
irdma_puda_qp_create(struct irdma_puda_rsrc *rsrc)
{
    struct irdma_sc_qp *qp = &rsrc->qp;
    struct irdma_qp_uk *ukqp = &qp->qp_uk;
    int ret = 0;
    u32 sq_size, rq_size;
    struct irdma_dma_mem *mem;

    sq_size = rsrc->sq_size * IRDMA_QP_WQE_MIN_SIZE;
    rq_size = rsrc->rq_size * IRDMA_QP_WQE_MIN_SIZE;
    rsrc->qpmem.size = (sq_size + rq_size + (IRDMA_SHADOW_AREA_SIZE << 3) +
                        IRDMA_QP_CTX_SIZE);
    rsrc->qpmem.va = irdma_allocate_dma_mem(rsrc->dev->hw, &rsrc->qpmem,
                                            rsrc->qpmem.size, IRDMA_HW_PAGE_SIZE);
    if (!rsrc->qpmem.va)
        return -ENOMEM;

    mem = &rsrc->qpmem;
    memset(mem->va, 0, rsrc->qpmem.size);
    qp->hw_sq_size = irdma_get_encoded_wqe_size(rsrc->sq_size, IRDMA_QUEUE_TYPE_SQ_RQ);
    qp->hw_rq_size = irdma_get_encoded_wqe_size(rsrc->rq_size, IRDMA_QUEUE_TYPE_SQ_RQ);
    qp->pd = &rsrc->sc_pd;
    qp->qp_uk.qp_type = IRDMA_QP_TYPE_UDA;
    qp->dev = rsrc->dev;
    qp->qp_uk.back_qp = rsrc;
    qp->sq_pa = mem->pa;
    qp->rq_pa = qp->sq_pa + sq_size;
    qp->vsi = rsrc->vsi;
    ukqp->sq_base = mem->va;
    ukqp->rq_base = &ukqp->sq_base[rsrc->sq_size];
    ukqp->shadow_area = ukqp->rq_base[rsrc->rq_size].elem;
    ukqp->uk_attrs = &qp->dev->hw_attrs.uk_attrs;
    qp->shadow_area_pa = qp->rq_pa + rq_size;
    qp->hw_host_ctx = ukqp->shadow_area + IRDMA_SHADOW_AREA_SIZE;
    qp->hw_host_ctx_pa = qp->shadow_area_pa + (IRDMA_SHADOW_AREA_SIZE << 3);
    qp->push_idx = IRDMA_INVALID_PUSH_PAGE_INDEX;
    ukqp->qp_id = rsrc->qp_id;
    ukqp->sq_wrtrk_array = rsrc->sq_wrtrk_array;
    ukqp->rq_wrid_array = rsrc->rq_wrid_array;
    ukqp->sq_size = rsrc->sq_size;
    ukqp->rq_size = rsrc->rq_size;

    IRDMA_RING_INIT(ukqp->sq_ring, ukqp->sq_size);
    IRDMA_RING_INIT(ukqp->initial_ring, ukqp->sq_size);
    IRDMA_RING_INIT(ukqp->rq_ring, ukqp->rq_size);
    ukqp->wqe_alloc_db = qp->pd->dev->wqe_alloc_db;

    ret = rsrc->dev->ws_add(qp->vsi, qp->user_pri);
    if (ret) {
        irdma_free_dma_mem(rsrc->dev->hw, &rsrc->qpmem);
        return ret;
    }

    irdma_qp_add_qos(qp);
    irdma_puda_qp_setctx(rsrc);

    qp->qp_state = IRDMA_QP_STATE_RTS;

    if (rsrc->dev->ceq_valid)
        ret = irdma_cqp_qp_create_cmd(rsrc->dev, qp);
    else
        ret = irdma_puda_qp_wqe(rsrc->dev, qp);
    if (ret) {
        qp->qp_state = IRDMA_QP_STATE_INVALID;
        irdma_qp_rem_qos(qp);
        rsrc->dev->ws_remove(qp->vsi, qp->user_pri);
        irdma_free_dma_mem(rsrc->dev->hw, &rsrc->qpmem);
    }

    return ret;
}

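/*
 * irdma_puda_qp_create() above carves a single DMA allocation into
 * [SQ ring | RQ ring | shadow area | QP host context], which is why the
 * physical addresses are derived by simple offset arithmetic from
 * qpmem.pa.
 */
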
/**
 * irdma_puda_cq_wqe - setup wqe for CQ create
 * @dev: Device
 * @cq: resource for cq
 */
static int
irdma_puda_cq_wqe(struct irdma_sc_dev *dev, struct irdma_sc_cq *cq)
{
    __le64 *wqe;
    struct irdma_sc_cqp *cqp;
    u64 hdr;
    struct irdma_ccq_cqe_info compl_info;
    int status = 0;

    cqp = dev->cqp;
    wqe = irdma_sc_cqp_get_next_send_wqe(cqp, 0);
    if (!wqe)
        return -ENOSPC;

    set_64bit_val(wqe, IRDMA_BYTE_0, cq->cq_uk.cq_size);
    set_64bit_val(wqe, IRDMA_BYTE_8, RS_64_1(cq, 1));
    set_64bit_val(wqe, IRDMA_BYTE_16,
                  FIELD_PREP(IRDMA_CQPSQ_CQ_SHADOW_READ_THRESHOLD, cq->shadow_read_threshold));
    set_64bit_val(wqe, IRDMA_BYTE_32, cq->cq_pa);
    set_64bit_val(wqe, IRDMA_BYTE_40, cq->shadow_area_pa);
    set_64bit_val(wqe, IRDMA_BYTE_56,
                  FIELD_PREP(IRDMA_CQPSQ_TPHVAL, cq->tph_val) |
                  FIELD_PREP(IRDMA_CQPSQ_VSIIDX, cq->vsi->vsi_idx));

    hdr = cq->cq_uk.cq_id |
          FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_CREATE_CQ) |
          FIELD_PREP(IRDMA_CQPSQ_CQ_CHKOVERFLOW, 1) |
          FIELD_PREP(IRDMA_CQPSQ_CQ_ENCEQEMASK, 1) |
          FIELD_PREP(IRDMA_CQPSQ_CQ_CEQIDVALID, 1) |
          FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
    irdma_wmb();	/* make sure WQE is written before valid bit is set */

    set_64bit_val(wqe, IRDMA_BYTE_24, hdr);

    irdma_debug_buf(dev, IRDMA_DEBUG_PUDA, "PUDA CREATE CQ", wqe,
                    IRDMA_CQP_WQE_SIZE * 8);
    irdma_sc_cqp_post_sq(dev->cqp);
    status = irdma_sc_poll_for_cqp_op_done(dev->cqp, IRDMA_CQP_OP_CREATE_CQ,
                                           &compl_info);
    if (!status) {
        struct irdma_sc_ceq *ceq = dev->ceq[0];

        if (ceq && ceq->reg_cq)
            status = irdma_sc_add_cq_ctx(ceq, cq);
    }

    return status;
}

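/*
 * irdma_puda_cq_create() below likewise uses one DMA region holding the
 * CQE ring followed by its shadow area, and sets shadow_read_threshold
 * to a quarter of the ring size (cq_size >> 2).
 */
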
/**
 * irdma_puda_cq_create - create cq for resource
 * @rsrc: resource for which cq to create
 */
static int
irdma_puda_cq_create(struct irdma_puda_rsrc *rsrc)
{
    struct irdma_sc_dev *dev = rsrc->dev;
    struct irdma_sc_cq *cq = &rsrc->cq;
    int ret = 0;
    u32 cqsize;
    struct irdma_dma_mem *mem;
    struct irdma_cq_init_info info = {0};
    struct irdma_cq_uk_init_info *init_info = &info.cq_uk_init_info;

    cq->vsi = rsrc->vsi;
    cqsize = rsrc->cq_size * (sizeof(struct irdma_cqe));
    rsrc->cqmem.size = cqsize + sizeof(struct irdma_cq_shadow_area);
    rsrc->cqmem.va = irdma_allocate_dma_mem(dev->hw, &rsrc->cqmem,
                                            rsrc->cqmem.size,
                                            IRDMA_CQ0_ALIGNMENT);
    if (!rsrc->cqmem.va)
        return -ENOMEM;

    mem = &rsrc->cqmem;
    info.dev = dev;
    info.type = (rsrc->type == IRDMA_PUDA_RSRC_TYPE_ILQ) ?
                IRDMA_CQ_TYPE_ILQ : IRDMA_CQ_TYPE_IEQ;
    info.shadow_read_threshold = rsrc->cq_size >> 2;
    info.cq_base_pa = mem->pa;
    info.shadow_area_pa = mem->pa + cqsize;
    init_info->cq_base = mem->va;
    init_info->shadow_area = (__le64 *)((u8 *)mem->va + cqsize);
    init_info->cq_size = rsrc->cq_size;
    init_info->cq_id = rsrc->cq_id;
    info.ceqe_mask = true;
    info.ceq_id_valid = true;
    info.vsi = rsrc->vsi;

    ret = irdma_sc_cq_init(cq, &info);
    if (ret)
        goto error;

    if (rsrc->dev->ceq_valid)
        ret = irdma_cqp_cq_create_cmd(dev, cq);
    else
        ret = irdma_puda_cq_wqe(dev, cq);
error:
    if (ret)
        irdma_free_dma_mem(dev->hw, &rsrc->cqmem);

    return ret;
}

/**
 * irdma_puda_free_qp - free qp for resource
 * @rsrc: resource for which qp to free
 */
static void
irdma_puda_free_qp(struct irdma_puda_rsrc *rsrc)
{
    int ret;
    struct irdma_ccq_cqe_info compl_info;
    struct irdma_sc_dev *dev = rsrc->dev;

    if (rsrc->dev->ceq_valid) {
        irdma_cqp_qp_destroy_cmd(dev, &rsrc->qp);
        rsrc->dev->ws_remove(rsrc->qp.vsi, rsrc->qp.user_pri);
        return;
    }

    ret = irdma_sc_qp_destroy(&rsrc->qp, 0, false, true, true);
    if (ret)
        irdma_debug(dev, IRDMA_DEBUG_PUDA,
                    "error puda qp destroy wqe, status = %d\n", ret);
    if (!ret) {
        ret = irdma_sc_poll_for_cqp_op_done(dev->cqp, IRDMA_CQP_OP_DESTROY_QP,
                                            &compl_info);
        if (ret)
            irdma_debug(dev, IRDMA_DEBUG_PUDA,
                        "error puda qp destroy failed, status = %d\n",
                        ret);
    }
    rsrc->dev->ws_remove(rsrc->qp.vsi, rsrc->qp.user_pri);
}

/**
 * irdma_puda_free_cq - free cq for resource
 * @rsrc: resource for which cq to free
 */
static void
irdma_puda_free_cq(struct irdma_puda_rsrc *rsrc)
{
    int ret;
    struct irdma_ccq_cqe_info compl_info;
    struct irdma_sc_dev *dev = rsrc->dev;

    if (rsrc->dev->ceq_valid) {
        irdma_cqp_cq_destroy_cmd(dev, &rsrc->cq);
        return;
    }

    ret = irdma_sc_cq_destroy(&rsrc->cq, 0, true);
    if (ret)
        irdma_debug(dev, IRDMA_DEBUG_PUDA, "error ieq cq destroy\n");
    if (!ret) {
        ret = irdma_sc_poll_for_cqp_op_done(dev->cqp, IRDMA_CQP_OP_DESTROY_CQ,
                                            &compl_info);
        if (ret)
            irdma_debug(dev, IRDMA_DEBUG_PUDA,
                        "error ieq qp destroy done\n");
    }
}

/**
 * irdma_puda_dele_rsrc - delete all resources during close
 * @vsi: VSI structure of device
 * @type: type of resource to delete
 * @reset: true if reset chip
 */
void
irdma_puda_dele_rsrc(struct irdma_sc_vsi *vsi, enum puda_rsrc_type type,
                     bool reset)
{
    struct irdma_sc_dev *dev = vsi->dev;
    struct irdma_puda_rsrc *rsrc;
    struct irdma_puda_buf *buf = NULL;
    struct irdma_puda_buf *nextbuf = NULL;
    struct irdma_virt_mem *vmem;
    struct irdma_sc_ceq *ceq;

    ceq = vsi->dev->ceq[0];

    switch (type) {
    case IRDMA_PUDA_RSRC_TYPE_ILQ:
        rsrc = vsi->ilq;
        vmem = &vsi->ilq_mem;
        vsi->ilq = NULL;
        if (ceq && ceq->reg_cq)
            irdma_sc_remove_cq_ctx(ceq, &rsrc->cq);
        break;
    case IRDMA_PUDA_RSRC_TYPE_IEQ:
        rsrc = vsi->ieq;
        vmem = &vsi->ieq_mem;
        vsi->ieq = NULL;
        if (ceq && ceq->reg_cq)
            irdma_sc_remove_cq_ctx(ceq, &rsrc->cq);
        break;
    default:
        irdma_debug(dev, IRDMA_DEBUG_PUDA,
                    "error resource type = 0x%x\n", type);
        return;
    }

    spin_lock_destroy(&rsrc->bufpool_lock);
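    /*
     * rsrc->cmpl records how far initialization got; each case below
     * falls through so every stage that was set up is torn down in
     * reverse order.
     */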
    switch (rsrc->cmpl) {
    case PUDA_HASH_CRC_COMPLETE:
        irdma_free_hash_desc(rsrc->hash_desc);
        /* fallthrough */
    case PUDA_QP_CREATED:
        rsrc->qp.qp_state = IRDMA_QP_STATE_INVALID;
        irdma_qp_rem_qos(&rsrc->qp);

        if (!reset)
            irdma_puda_free_qp(rsrc);

        irdma_free_dma_mem(dev->hw, &rsrc->qpmem);
        /* fallthrough */
    case PUDA_CQ_CREATED:
        if (!reset)
            irdma_puda_free_cq(rsrc);

        irdma_free_dma_mem(dev->hw, &rsrc->cqmem);
        break;
    default:
        irdma_debug(rsrc->dev, IRDMA_DEBUG_PUDA,
                    "error no resources\n");
        break;
    }
    /* Free all allocated puda buffers for both tx and rx */
    buf = rsrc->alloclist;
    while (buf) {
        nextbuf = buf->next;
        irdma_puda_dele_buf(dev, buf);
        buf = nextbuf;
        rsrc->alloc_buf_count--;
    }

    kfree(vmem->va);
}

/**
 * irdma_puda_allocbufs - allocate buffers for resource
 * @rsrc: resource for buffer allocation
 * @count: number of buffers to create
 */
static int
irdma_puda_allocbufs(struct irdma_puda_rsrc *rsrc, u32 count)
{
    u32 i;
    struct irdma_puda_buf *buf;
    struct irdma_puda_buf *nextbuf;
    struct irdma_virt_mem buf_mem;
    struct irdma_dma_mem *dma_mem;
    bool virtdma = false;
    unsigned long flags;

    buf_mem.size = count * sizeof(*buf);
    buf_mem.va = kzalloc(buf_mem.size, GFP_KERNEL);
    if (!buf_mem.va) {
        irdma_debug(rsrc->dev, IRDMA_DEBUG_PUDA,
                    "error virt_mem for buf\n");
        rsrc->stats_buf_alloc_fail++;
        goto trysmall;
    }

    /*
     * Allocate the large dma chunk and setup dma attributes into
     * first puda buffer. This is required during free.
     */
    buf = (struct irdma_puda_buf *)buf_mem.va;
    buf->mem.va = irdma_allocate_dma_mem(rsrc->dev->hw, &buf->mem,
                                         rsrc->buf_size * count, 1);
    if (!buf->mem.va) {
        irdma_debug(rsrc->dev, IRDMA_DEBUG_PUDA,
                    "error dma_mem for buf\n");
        kfree(buf_mem.va);
        rsrc->stats_buf_alloc_fail++;
        goto trysmall;
    }

    /* dma_mem points to start of the large DMA chunk */
    dma_mem = &buf->mem;

    spin_lock_irqsave(&rsrc->bufpool_lock, flags);
    for (i = 0; i < count; i++) {
        buf = ((struct irdma_puda_buf *)buf_mem.va) + i;

        buf->mem.va = (char *)dma_mem->va + (i * rsrc->buf_size);
        buf->mem.pa = dma_mem->pa + (i * rsrc->buf_size);
        buf->mem.size = rsrc->buf_size;
        buf->virtdma = virtdma;
        virtdma = true;

        buf->buf_mem.va = buf_mem.va;
        buf->buf_mem.size = buf_mem.size;

        list_add(&buf->list, &rsrc->bufpool);
        rsrc->alloc_buf_count++;
        if (!rsrc->alloclist) {
            rsrc->alloclist = buf;
        } else {
            nextbuf = rsrc->alloclist;
            rsrc->alloclist = buf;
            buf->next = nextbuf;
        }
    }
    spin_unlock_irqrestore(&rsrc->bufpool_lock, flags);

    rsrc->avail_buf_count = rsrc->alloc_buf_count;
    return 0;
trysmall:
    for (i = 0; i < count; i++) {
        buf = irdma_puda_alloc_buf(rsrc->dev, rsrc->buf_size);
        if (!buf) {
            rsrc->stats_buf_alloc_fail++;
            return -ENOMEM;
        }
        irdma_puda_ret_bufpool(rsrc, buf);
        rsrc->alloc_buf_count++;
        if (!rsrc->alloclist) {
            rsrc->alloclist = buf;
        } else {
            nextbuf = rsrc->alloclist;
            rsrc->alloclist = buf;
            buf->next = nextbuf;
        }
    }

    rsrc->avail_buf_count = rsrc->alloc_buf_count;

    return 0;
}

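/*
 * irdma_puda_create_rsrc() below sizes one virtual allocation to hold
 * the rsrc struct itself, the SQ work-request tracking array, and the
 * RQ wrid array (one u64, i.e. 8 bytes, per RQ entry), then hands out
 * pointers into it.
 */
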
/**
 * irdma_puda_create_rsrc - create resource (ilq or ieq)
 * @vsi: sc VSI struct
 * @info: resource information
 */
int
irdma_puda_create_rsrc(struct irdma_sc_vsi *vsi,
                       struct irdma_puda_rsrc_info *info)
{
    struct irdma_sc_dev *dev = vsi->dev;
    int ret = 0;
    struct irdma_puda_rsrc *rsrc;
    u32 pudasize;
    u32 sqwridsize, rqwridsize;
    struct irdma_virt_mem *vmem;

    info->count = 1;
    pudasize = sizeof(*rsrc);
    sqwridsize = info->sq_size * sizeof(struct irdma_sq_uk_wr_trk_info);
    rqwridsize = info->rq_size * 8;
    switch (info->type) {
    case IRDMA_PUDA_RSRC_TYPE_ILQ:
        vmem = &vsi->ilq_mem;
        break;
    case IRDMA_PUDA_RSRC_TYPE_IEQ:
        vmem = &vsi->ieq_mem;
        break;
    default:
        return -EOPNOTSUPP;
    }
    vmem->size = pudasize + sqwridsize + rqwridsize;
    vmem->va = kzalloc(vmem->size, GFP_KERNEL);
    if (!vmem->va)
        return -ENOMEM;

    rsrc = vmem->va;
    spin_lock_init(&rsrc->bufpool_lock);
    switch (info->type) {
    case IRDMA_PUDA_RSRC_TYPE_ILQ:
        vsi->ilq = vmem->va;
        vsi->ilq_count = info->count;
        rsrc->receive = info->receive;
        rsrc->xmit_complete = info->xmit_complete;
        break;
    case IRDMA_PUDA_RSRC_TYPE_IEQ:
        vsi->ieq_count = info->count;
        vsi->ieq = vmem->va;
        rsrc->receive = irdma_ieq_receive;
        rsrc->xmit_complete = irdma_ieq_tx_compl;
        break;
    default:
        return -EOPNOTSUPP;
    }

    rsrc->type = info->type;
    rsrc->sq_wrtrk_array = (struct irdma_sq_uk_wr_trk_info *)
                           ((u8 *)vmem->va + pudasize);
    rsrc->rq_wrid_array = (u64 *)((u8 *)vmem->va + pudasize + sqwridsize);
    /* Initialize all ieq lists */
    INIT_LIST_HEAD(&rsrc->bufpool);
    INIT_LIST_HEAD(&rsrc->txpend);

    rsrc->tx_wqe_avail_cnt = info->sq_size - 1;
    irdma_sc_pd_init(dev, &rsrc->sc_pd, info->pd_id, info->abi_ver);
    rsrc->qp_id = info->qp_id;
    rsrc->cq_id = info->cq_id;
    rsrc->sq_size = info->sq_size;
    rsrc->rq_size = info->rq_size;
    rsrc->cq_size = info->rq_size + info->sq_size;
    if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
        if (rsrc->type == IRDMA_PUDA_RSRC_TYPE_ILQ)
            rsrc->cq_size += info->rq_size;
    }
    rsrc->buf_size = info->buf_size;
    rsrc->dev = dev;
    rsrc->vsi = vsi;
    rsrc->stats_idx = info->stats_idx;
    rsrc->stats_idx_valid = info->stats_idx_valid;

    ret = irdma_puda_cq_create(rsrc);
    if (!ret) {
        rsrc->cmpl = PUDA_CQ_CREATED;
        ret = irdma_puda_qp_create(rsrc);
    }
    if (ret) {
        irdma_debug(dev, IRDMA_DEBUG_PUDA,
                    "error qp_create type=%d, status=%d\n", rsrc->type,
                    ret);
        goto error;
    }
    rsrc->cmpl = PUDA_QP_CREATED;

    ret = irdma_puda_allocbufs(rsrc, info->tx_buf_cnt + info->rq_size);
    if (ret) {
        irdma_debug(dev, IRDMA_DEBUG_PUDA, "error alloc_buf\n");
        goto error;
    }

    rsrc->rxq_invalid_cnt = info->rq_size;
    ret = irdma_puda_replenish_rq(rsrc, true);
    if (ret)
        goto error;

    if (info->type == IRDMA_PUDA_RSRC_TYPE_IEQ) {
        if (!irdma_init_hash_desc(&rsrc->hash_desc)) {
            rsrc->check_crc = true;
            rsrc->cmpl = PUDA_HASH_CRC_COMPLETE;
            ret = 0;
        }
    }

    irdma_sc_ccq_arm(&rsrc->cq);
    return ret;

error:
    irdma_puda_dele_rsrc(vsi, info->type, false);

    return ret;
}

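/*
 * Receive-buffer recycling differs per resource: the ILQ reposts the
 * completed buffer in place at the same wqe_idx (below), while the IEQ
 * returns buffers to the pool and replenishes the RQ separately (see
 * irdma_puda_poll_cmpl()).
 */
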
/**
 * irdma_ilq_putback_rcvbuf - ilq buffer to put back on rq
 * @qp: ilq's qp resource
 * @buf: puda buffer for rcv q
 * @wqe_idx: wqe index of completed rcvbuf
 */
static void
irdma_ilq_putback_rcvbuf(struct irdma_sc_qp *qp,
                         struct irdma_puda_buf *buf, u32 wqe_idx)
{
    __le64 *wqe;
    u64 offset8, offset24;

    /* Sync buffer for use by device */
    dma_sync_single_for_device(hw_to_dev(qp->dev->hw), buf->mem.pa,
                               buf->mem.size, DMA_BIDIRECTIONAL);
    wqe = qp->qp_uk.rq_base[wqe_idx].elem;
    get_64bit_val(wqe, IRDMA_BYTE_24, &offset24);
    if (qp->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
        get_64bit_val(wqe, IRDMA_BYTE_8, &offset8);
        if (offset24)
            offset8 &= ~FIELD_PREP(IRDMAQPSQ_VALID, 1);
        else
            offset8 |= FIELD_PREP(IRDMAQPSQ_VALID, 1);
        set_64bit_val(wqe, IRDMA_BYTE_8, offset8);
        irdma_wmb();	/* make sure WQE is written before valid bit is set */
    }
    if (offset24)
        offset24 = 0;
    else
        offset24 = FIELD_PREP(IRDMAQPSQ_VALID, 1);

    set_64bit_val(wqe, IRDMA_BYTE_24, offset24);
}

/**
 * irdma_ieq_get_fpdu_len - get length of fpdu with or without marker
 * @pfpdu: pointer to fpdu
 * @datap: pointer to data in the buffer
 * @rcv_seq: seqnum of the data buffer
 */
static u16
irdma_ieq_get_fpdu_len(struct irdma_pfpdu *pfpdu, u8 *datap, u32 rcv_seq)
{
    u32 marker_seq, end_seq, blk_start;
    u8 marker_len = pfpdu->marker_len;
    u16 total_len = 0;
    u16 fpdu_len;

    blk_start = (pfpdu->rcv_start_seq - rcv_seq) & (IRDMA_MRK_BLK_SZ - 1);
    if (!blk_start) {
        total_len = marker_len;
        marker_seq = rcv_seq + IRDMA_MRK_BLK_SZ;
        if (marker_len && *(u32 *)datap)
            return 0;
    } else {
        marker_seq = rcv_seq + blk_start;
    }

    datap += total_len;
    fpdu_len = IRDMA_NTOHS(*(__be16 *)datap);
    fpdu_len += IRDMA_IEQ_MPA_FRAMING;
    fpdu_len = (fpdu_len + 3) & 0xfffc;

    if (fpdu_len > pfpdu->max_fpdu_data)
        return 0;

    total_len += fpdu_len;
    end_seq = rcv_seq + total_len;
    while ((int)(marker_seq - end_seq) < 0) {
        total_len += marker_len;
        end_seq += marker_len;
        marker_seq += IRDMA_MRK_BLK_SZ;
    }

    return total_len;
}

/**
 * irdma_ieq_copy_to_txbuf - copy data from rcv buf to tx buf
 * @buf: rcv buffer with partial
 * @txbuf: tx buffer for sending back
 * @buf_offset: rcv buffer offset to copy from
 * @txbuf_offset: at offset in tx buf to copy
 * @len: length of data to copy
 */
static void
irdma_ieq_copy_to_txbuf(struct irdma_puda_buf *buf,
                        struct irdma_puda_buf *txbuf,
                        u16 buf_offset, u32 txbuf_offset, u32 len)
{
    void *mem1 = (u8 *)buf->mem.va + buf_offset;
    void *mem2 = (u8 *)txbuf->mem.va + txbuf_offset;

    irdma_memcpy(mem2, mem1, len);
}

/**
 * irdma_ieq_setup_tx_buf - setup tx buffer for partial handling
 * @buf: receive buffer with partial
 * @txbuf: buffer to prepare
 */
static void
irdma_ieq_setup_tx_buf(struct irdma_puda_buf *buf,
                       struct irdma_puda_buf *txbuf)
{
    txbuf->tcphlen = buf->tcphlen;
    txbuf->ipv4 = buf->ipv4;

    if (buf->vsi->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
        txbuf->hdrlen = txbuf->tcphlen;
        irdma_ieq_copy_to_txbuf(buf, txbuf, IRDMA_TCP_OFFSET, 0,
                                txbuf->hdrlen);
    } else {
        txbuf->maclen = buf->maclen;
        txbuf->hdrlen = buf->hdrlen;
        irdma_ieq_copy_to_txbuf(buf, txbuf, 0, 0, buf->hdrlen);
    }
}

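/*
 * Worked example for irdma_ieq_get_fpdu_len(), assuming
 * IRDMA_IEQ_MPA_FRAMING accounts for the 2-byte MPA length field plus
 * the 4-byte CRC: an MPA length field of 100 bytes of ULPDU gives
 * fpdu_len = 100 + 6 = 106, and (106 + 3) & 0xfffc rounds up to the
 * 4-byte-aligned 108. When markers are negotiated, marker_len is then
 * added once per IRDMA_MRK_BLK_SZ block the FPDU spans.
 */
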
/**
 * irdma_ieq_check_first_buf - check if rcv buffer's seq is in range
 * @buf: receive exception buffer
 * @fps: first partial sequence number
 */
static void
irdma_ieq_check_first_buf(struct irdma_puda_buf *buf, u32 fps)
{
    u32 offset;

    if (buf->seqnum < fps) {
        offset = fps - buf->seqnum;
        if (offset > buf->datalen)
            return;
        buf->data += offset;
        buf->datalen -= (u16)offset;
        buf->seqnum = fps;
    }
}

/**
 * irdma_ieq_compl_pfpdu - write txbuf with full fpdu
 * @ieq: ieq resource
 * @rxlist: ieq's received buffer list
 * @pbufl: temporary list of buffers for fpdu
 * @txbuf: tx buffer for fpdu
 * @fpdu_len: total length of fpdu
 */
static void
irdma_ieq_compl_pfpdu(struct irdma_puda_rsrc *ieq,
                      struct list_head *rxlist,
                      struct list_head *pbufl,
                      struct irdma_puda_buf *txbuf, u16 fpdu_len)
{
    struct irdma_puda_buf *buf;
    u32 nextseqnum;
    u16 txoffset, bufoffset;

    buf = irdma_puda_get_listbuf(pbufl);
    if (!buf)
        return;

    nextseqnum = buf->seqnum + fpdu_len;
    irdma_ieq_setup_tx_buf(buf, txbuf);
    if (buf->vsi->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
        txoffset = txbuf->hdrlen;
        txbuf->totallen = txbuf->hdrlen + fpdu_len;
        txbuf->data = (u8 *)txbuf->mem.va + txoffset;
    } else {
        txoffset = buf->hdrlen;
        txbuf->totallen = buf->hdrlen + fpdu_len;
        txbuf->data = (u8 *)txbuf->mem.va + buf->hdrlen;
    }
    bufoffset = (u16)(buf->data - (u8 *)buf->mem.va);

    do {
        if (buf->datalen >= fpdu_len) {
            /* copied full fpdu */
            irdma_ieq_copy_to_txbuf(buf, txbuf, bufoffset, txoffset,
                                    fpdu_len);
            buf->datalen -= fpdu_len;
            buf->data += fpdu_len;
            buf->seqnum = nextseqnum;
            break;
        }
        /* copy partial fpdu */
        irdma_ieq_copy_to_txbuf(buf, txbuf, bufoffset, txoffset,
                                buf->datalen);
        txoffset += buf->datalen;
        fpdu_len -= buf->datalen;
        irdma_puda_ret_bufpool(ieq, buf);
        buf = irdma_puda_get_listbuf(pbufl);
        if (!buf)
            return;

        bufoffset = (u16)(buf->data - (u8 *)buf->mem.va);
    } while (1);

    /* last buffer on the list */
    if (buf->datalen)
        list_add(&buf->list, rxlist);
    else
        irdma_puda_ret_bufpool(ieq, buf);
}

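/*
 * Partial-FPDU handling works in two passes: irdma_ieq_create_pbufl()
 * below gathers the in-sequence receive buffers that span one FPDU onto
 * a temporary list, then irdma_ieq_compl_pfpdu() above copies them into
 * a single tx buffer that is looped back to the QP as one contiguous
 * FPDU.
 */
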
/**
 * irdma_ieq_create_pbufl - create buffer list for single fpdu
 * @pfpdu: pointer to fpdu
 * @rxlist: resource list for receive ieq buffers
 * @pbufl: temporary list of buffers for fpdu
 * @buf: first receive buffer
 * @fpdu_len: total length of fpdu
 */
static int
irdma_ieq_create_pbufl(struct irdma_pfpdu *pfpdu,
                       struct list_head *rxlist,
                       struct list_head *pbufl,
                       struct irdma_puda_buf *buf, u16 fpdu_len)
{
    int status = 0;
    struct irdma_puda_buf *nextbuf;
    u32 nextseqnum;
    u16 plen = fpdu_len - buf->datalen;
    bool done = false;

    nextseqnum = buf->seqnum + buf->datalen;
    do {
        nextbuf = irdma_puda_get_listbuf(rxlist);
        if (!nextbuf) {
            status = -ENOBUFS;
            break;
        }
        list_add_tail(&nextbuf->list, pbufl);
        if (nextbuf->seqnum != nextseqnum) {
            pfpdu->bad_seq_num++;
            status = -ERANGE;
            break;
        }
        if (nextbuf->datalen >= plen) {
            done = true;
        } else {
            plen -= nextbuf->datalen;
            nextseqnum = nextbuf->seqnum + nextbuf->datalen;
        }
    } while (!done);

    return status;
}

/**
 * irdma_ieq_handle_partial - process partial fpdu buffer
 * @ieq: ieq resource
 * @pfpdu: partial management per user qp
 * @buf: receive buffer
 * @fpdu_len: fpdu len in the buffer
 */
static int
irdma_ieq_handle_partial(struct irdma_puda_rsrc *ieq,
                         struct irdma_pfpdu *pfpdu,
                         struct irdma_puda_buf *buf, u16 fpdu_len)
{
    int status = 0;
    u8 *crcptr;
    u32 mpacrc;
    u32 seqnum = buf->seqnum;
    struct list_head pbufl;	/* partial buffer list */
    struct irdma_puda_buf *txbuf = NULL;
    struct list_head *rxlist = &pfpdu->rxlist;

    ieq->partials_handled++;

    INIT_LIST_HEAD(&pbufl);
    list_add(&buf->list, &pbufl);

    status = irdma_ieq_create_pbufl(pfpdu, rxlist, &pbufl, buf, fpdu_len);
    if (status)
        goto error;

    txbuf = irdma_puda_get_bufpool(ieq);
    if (!txbuf) {
        pfpdu->no_tx_bufs++;
        status = -ENOBUFS;
        goto error;
    }

    irdma_ieq_compl_pfpdu(ieq, rxlist, &pbufl, txbuf, fpdu_len);
    irdma_ieq_update_tcpip_info(txbuf, fpdu_len, seqnum);

    crcptr = txbuf->data + fpdu_len - 4;
    mpacrc = *(u32 *)crcptr;
    if (ieq->check_crc) {
        status = irdma_ieq_check_mpacrc(ieq->hash_desc, txbuf->data,
                                        (fpdu_len - 4), mpacrc);
        if (status) {
            irdma_debug(ieq->dev, IRDMA_DEBUG_IEQ,
                        "error bad crc\n");
            pfpdu->mpa_crc_err = true;
            goto error;
        }
    }

    irdma_debug_buf(ieq->dev, IRDMA_DEBUG_IEQ, "IEQ TX BUFFER",
                    txbuf->mem.va, txbuf->totallen);
    if (ieq->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
        txbuf->ah_id = pfpdu->ah->ah_info.ah_idx;
    txbuf->do_lpb = true;
    irdma_puda_send_buf(ieq, txbuf);
    pfpdu->rcv_nxt = seqnum + fpdu_len;
    return status;

error:
    while (!list_empty(&pbufl)) {
        buf = (struct irdma_puda_buf *)(&pbufl)->prev;
        list_move(&buf->list, rxlist);
    }
    if (txbuf)
        irdma_puda_ret_bufpool(ieq, txbuf);

    return status;
}

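/*
 * MPA CRC validation: when check_crc is set, the IEQ recomputes the CRC
 * over the reassembled FPDU (excluding the trailing 4-byte CRC itself)
 * and, on mismatch, flags pfpdu->mpa_crc_err so that the rxlist is
 * flushed and an asynchronous event is raised in
 * irdma_ieq_process_fpdus().
 */
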
/**
 * irdma_ieq_process_buf - process buffer rcvd for ieq
 * @ieq: ieq resource
 * @pfpdu: partial management per user qp
 * @buf: receive buffer
 */
static int
irdma_ieq_process_buf(struct irdma_puda_rsrc *ieq,
                      struct irdma_pfpdu *pfpdu,
                      struct irdma_puda_buf *buf)
{
    u16 fpdu_len = 0;
    u16 datalen = buf->datalen;
    u8 *datap = buf->data;
    u8 *crcptr;
    u16 ioffset = 0;
    u32 mpacrc;
    u32 seqnum = buf->seqnum;
    u16 len = 0;
    u16 full = 0;
    bool partial = false;
    struct irdma_puda_buf *txbuf;
    struct list_head *rxlist = &pfpdu->rxlist;
    int ret = 0;

    ioffset = (u16)(buf->data - (u8 *)buf->mem.va);
    while (datalen) {
        fpdu_len = irdma_ieq_get_fpdu_len(pfpdu, datap, buf->seqnum);
        if (!fpdu_len) {
            irdma_debug(ieq->dev, IRDMA_DEBUG_IEQ,
                        "error bad fpdu len\n");
            list_add(&buf->list, rxlist);
            pfpdu->mpa_crc_err = true;
            return -EINVAL;
        }

        if (datalen < fpdu_len) {
            partial = true;
            break;
        }
        crcptr = datap + fpdu_len - 4;
        mpacrc = *(u32 *)crcptr;
        if (ieq->check_crc)
            ret = irdma_ieq_check_mpacrc(ieq->hash_desc, datap,
                                         fpdu_len - 4, mpacrc);
        if (ret) {
            list_add(&buf->list, rxlist);
            irdma_debug(ieq->dev, IRDMA_DEBUG_ERR,
                        "IRDMA_ERR_MPA_CRC\n");
            pfpdu->mpa_crc_err = true;
            return ret;
        }
        full++;
        pfpdu->fpdu_processed++;
        ieq->fpdu_processed++;
        datap += fpdu_len;
        len += fpdu_len;
        datalen -= fpdu_len;
    }
    if (full) {
        /* copy full pdu's in the txbuf and send them out */
        txbuf = irdma_puda_get_bufpool(ieq);
        if (!txbuf) {
            pfpdu->no_tx_bufs++;
            list_add(&buf->list, rxlist);
            return -ENOBUFS;
        }
        /* modify txbuf's buffer header */
        irdma_ieq_setup_tx_buf(buf, txbuf);
        /* copy full fpdu's to new buffer */
        if (ieq->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
            irdma_ieq_copy_to_txbuf(buf, txbuf, ioffset,
                                    txbuf->hdrlen, len);
            txbuf->totallen = txbuf->hdrlen + len;
            txbuf->ah_id = pfpdu->ah->ah_info.ah_idx;
        } else {
            irdma_ieq_copy_to_txbuf(buf, txbuf, ioffset,
                                    buf->hdrlen, len);
            txbuf->totallen = buf->hdrlen + len;
        }
        irdma_ieq_update_tcpip_info(txbuf, len, buf->seqnum);
        irdma_debug_buf(ieq->dev, IRDMA_DEBUG_IEQ, "IEQ TX BUFFER",
                        txbuf->mem.va, txbuf->totallen);
        txbuf->do_lpb = true;
        irdma_puda_send_buf(ieq, txbuf);

        if (!datalen) {
            pfpdu->rcv_nxt = buf->seqnum + len;
            irdma_puda_ret_bufpool(ieq, buf);
            return 0;
        }
        buf->data = datap;
        buf->seqnum = seqnum + len;
        buf->datalen = datalen;
        pfpdu->rcv_nxt = buf->seqnum;
    }
    if (partial)
        return irdma_ieq_handle_partial(ieq, pfpdu, buf, fpdu_len);

    return 0;
}

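/*
 * irdma_ieq_process_fpdus() below drains the rxlist strictly in order:
 * buffers are kept sorted by sequence number (see
 * irdma_ieq_handle_exception()), and processing stops at the first gap
 * (seqnum != rcv_nxt) until the missing segment arrives.
 */
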
/**
 * irdma_ieq_process_fpdus - process fpdus on its list
 * @qp: qp for which partial fpdus
 * @ieq: ieq resource
 */
void
irdma_ieq_process_fpdus(struct irdma_sc_qp *qp,
                        struct irdma_puda_rsrc *ieq)
{
    struct irdma_pfpdu *pfpdu = &qp->pfpdu;
    struct list_head *rxlist = &pfpdu->rxlist;
    struct irdma_puda_buf *buf;
    int status;

    do {
        if (list_empty(rxlist))
            break;
        buf = irdma_puda_get_listbuf(rxlist);
        if (!buf) {
            irdma_debug(ieq->dev, IRDMA_DEBUG_IEQ,
                        "error no buf\n");
            break;
        }
        if (buf->seqnum != pfpdu->rcv_nxt) {
            /* This could be out of order or missing packet */
            pfpdu->out_of_order++;
            list_add(&buf->list, rxlist);
            break;
        }
        /* keep processing buffers from the head of the list */
        status = irdma_ieq_process_buf(ieq, pfpdu, buf);
        if (status && pfpdu->mpa_crc_err) {
            while (!list_empty(rxlist)) {
                buf = irdma_puda_get_listbuf(rxlist);
                irdma_puda_ret_bufpool(ieq, buf);
                pfpdu->crc_err++;
                ieq->crc_err++;
            }
            /* create CQP for AE */
            irdma_ieq_mpa_crc_ae(ieq->dev, qp);
        }
    } while (!status);
}

/**
 * irdma_ieq_create_ah - create an address handle for IEQ
 * @qp: qp pointer
 * @buf: buf received on IEQ used to create AH
 */
static int
irdma_ieq_create_ah(struct irdma_sc_qp *qp, struct irdma_puda_buf *buf)
{
    struct irdma_ah_info ah_info = {0};

    qp->pfpdu.ah_buf = buf;
    irdma_puda_ieq_get_ah_info(qp, &ah_info);
    return irdma_puda_create_ah(qp->vsi->dev, &ah_info, false,
                                IRDMA_PUDA_RSRC_TYPE_IEQ, qp,
                                &qp->pfpdu.ah);
}

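/*
 * Exception-path overview: FPDU segments that cannot be placed directly
 * for an iWARP QP are delivered to the IEQ instead. The handler below
 * queues such buffers sorted by sequence number and, once the stream is
 * contiguous again, reassembles complete FPDUs and loops them back to
 * the QP in order (via an AH on GEN_2 hardware).
 */
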
/**
 * irdma_ieq_handle_exception - handle qp's exception
 * @ieq: ieq resource
 * @qp: qp receiving exception
 * @buf: receive buffer
 */
static void
irdma_ieq_handle_exception(struct irdma_puda_rsrc *ieq,
                           struct irdma_sc_qp *qp,
                           struct irdma_puda_buf *buf)
{
    struct irdma_pfpdu *pfpdu = &qp->pfpdu;
    u32 *hw_host_ctx = (u32 *)qp->hw_host_ctx;
    u32 rcv_wnd = hw_host_ctx[23];
    /* first partial seq # in q2 */
    u32 fps = *(u32 *)(qp->q2_buf + Q2_FPSN_OFFSET);
    struct list_head *rxlist = &pfpdu->rxlist;
    struct list_head *plist;
    struct irdma_puda_buf *tmpbuf = NULL;
    unsigned long flags = 0;
    u8 hw_rev = qp->dev->hw_attrs.uk_attrs.hw_rev;

    irdma_debug_buf(ieq->dev, IRDMA_DEBUG_IEQ, "IEQ RX BUFFER", buf->mem.va,
                    buf->totallen);

    spin_lock_irqsave(&pfpdu->lock, flags);
    pfpdu->total_ieq_bufs++;
    if (pfpdu->mpa_crc_err) {
        pfpdu->crc_err++;
        goto error;
    }
    if (pfpdu->mode && fps != pfpdu->fps) {
        /* clean up qp as it is new partial sequence */
        irdma_ieq_cleanup_qp(ieq, qp);
        irdma_debug(ieq->dev, IRDMA_DEBUG_IEQ,
                    "restarting new partial\n");
        pfpdu->mode = false;
    }

    if (!pfpdu->mode) {
        irdma_debug_buf(ieq->dev, IRDMA_DEBUG_IEQ, "Q2 BUFFER",
                        (u64 *)qp->q2_buf, 128);
        /* First_Partial_Sequence_Number check */
        pfpdu->rcv_nxt = fps;
        pfpdu->fps = fps;
        pfpdu->mode = true;
        pfpdu->max_fpdu_data = (buf->ipv4) ?
                               (ieq->vsi->mtu - IRDMA_MTU_TO_MSS_IPV4) :
                               (ieq->vsi->mtu - IRDMA_MTU_TO_MSS_IPV6);
        pfpdu->pmode_count++;
        ieq->pmode_count++;
        INIT_LIST_HEAD(rxlist);
        irdma_ieq_check_first_buf(buf, fps);
    }

    if (!(rcv_wnd >= (buf->seqnum - pfpdu->rcv_nxt))) {
        pfpdu->bad_seq_num++;
        ieq->bad_seq_num++;
        goto error;
    }

    if (!list_empty(rxlist)) {
        tmpbuf = (struct irdma_puda_buf *)(rxlist)->next;
        while ((struct list_head *)tmpbuf != rxlist) {
            if (buf->seqnum == tmpbuf->seqnum)
                goto error;
            if ((int)(buf->seqnum - tmpbuf->seqnum) < 0)
                break;
            plist = &tmpbuf->list;
            tmpbuf = (struct irdma_puda_buf *)(plist)->next;
        }
        /* Insert buf before tmpbuf */
        list_add_tail(&buf->list, &tmpbuf->list);
    } else {
        list_add_tail(&buf->list, rxlist);
    }
    pfpdu->nextseqnum = buf->seqnum + buf->datalen;
    pfpdu->lastrcv_buf = buf;
    if (hw_rev >= IRDMA_GEN_2 && !pfpdu->ah) {
        irdma_ieq_create_ah(qp, buf);
        if (!pfpdu->ah)
            goto error;
        goto exit;
    }
    if (hw_rev == IRDMA_GEN_1)
        irdma_ieq_process_fpdus(qp, ieq);
    else if (pfpdu->ah && pfpdu->ah->ah_info.ah_valid)
        irdma_ieq_process_fpdus(qp, ieq);
exit:
    spin_unlock_irqrestore(&pfpdu->lock, flags);

    return;

error:
    irdma_puda_ret_bufpool(ieq, buf);
    spin_unlock_irqrestore(&pfpdu->lock, flags);
}

/**
 * irdma_ieq_receive - received exception buffer
 * @vsi: VSI of device
 * @buf: exception buffer received
 */
static void
irdma_ieq_receive(struct irdma_sc_vsi *vsi,
                  struct irdma_puda_buf *buf)
{
    struct irdma_puda_rsrc *ieq = vsi->ieq;
    struct irdma_sc_qp *qp = NULL;
    u32 wqe_idx = ieq->compl_rxwqe_idx;

    qp = irdma_ieq_get_qp(vsi->dev, buf);
    if (!qp) {
        ieq->stats_bad_qp_id++;
        irdma_puda_ret_bufpool(ieq, buf);
    } else {
        irdma_ieq_handle_exception(ieq, qp, buf);
    }
    /*
     * ieq->rx_wqe_idx is used by irdma_puda_replenish_rq() as the
     * wqe_idx to start the rq replenish at
     */
    if (!ieq->rxq_invalid_cnt)
        ieq->rx_wqe_idx = wqe_idx;
    ieq->rxq_invalid_cnt++;
}

/**
 * irdma_ieq_tx_compl - put back after sending completed exception buffer
 * @vsi: sc VSI struct
 * @sqwrid: pointer to puda buffer
 */
static void
irdma_ieq_tx_compl(struct irdma_sc_vsi *vsi, void *sqwrid)
{
    struct irdma_puda_rsrc *ieq = vsi->ieq;
    struct irdma_puda_buf *buf = sqwrid;

    irdma_puda_ret_bufpool(ieq, buf);
}

/**
 * irdma_ieq_cleanup_qp - qp is being destroyed
 * @ieq: ieq resource
 * @qp: all pending fpdu buffers
 */
void
irdma_ieq_cleanup_qp(struct irdma_puda_rsrc *ieq, struct irdma_sc_qp *qp)
{
    struct irdma_puda_buf *buf;
    struct irdma_pfpdu *pfpdu = &qp->pfpdu;
    struct list_head *rxlist = &pfpdu->rxlist;

    if (qp->pfpdu.ah) {
        irdma_puda_free_ah(ieq->dev, qp->pfpdu.ah);
        qp->pfpdu.ah = NULL;
        qp->pfpdu.ah_buf = NULL;
    }

    if (!pfpdu->mode)
        return;

    while (!list_empty(rxlist)) {
        buf = irdma_puda_get_listbuf(rxlist);
        irdma_puda_ret_bufpool(ieq, buf);
    }
}