/*
 * Copyright (c) 2016 Hisilicon Limited.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/pci.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_umem.h>
#include <rdma/uverbs_ioctl.h>
#include "hns_roce_common.h"
#include "hns_roce_device.h"
#include "hns_roce_hem.h"

static struct hns_roce_qp *hns_roce_qp_lookup(struct hns_roce_dev *hr_dev,
					      u32 qpn)
{
	struct device *dev = hr_dev->dev;
	struct hns_roce_qp *qp;
	unsigned long flags;

	xa_lock_irqsave(&hr_dev->qp_table_xa, flags);
	qp = __hns_roce_qp_lookup(hr_dev, qpn);
	if (qp)
		refcount_inc(&qp->refcount);
	xa_unlock_irqrestore(&hr_dev->qp_table_xa, flags);

	if (!qp)
		dev_warn(dev, "async event for bogus QP %08x\n", qpn);

	return qp;
}

static void flush_work_handle(struct work_struct *work)
{
	struct hns_roce_work *flush_work = container_of(work,
					struct hns_roce_work, work);
	struct hns_roce_qp *hr_qp = container_of(flush_work,
					struct hns_roce_qp, flush_work);
	struct device *dev = flush_work->hr_dev->dev;
	struct ib_qp_attr attr;
	int attr_mask;
	int ret;

	attr_mask = IB_QP_STATE;
	attr.qp_state = IB_QPS_ERR;

	if (test_and_clear_bit(HNS_ROCE_FLUSH_FLAG, &hr_qp->flush_flag)) {
		ret = hns_roce_modify_qp(&hr_qp->ibqp, &attr, attr_mask, NULL);
		if (ret)
			dev_err(dev, "modify QP to error state failed(%d) during CQE flush\n",
				ret);
	}

	/*
	 * Make sure we signal the QP destroy path that the flush has
	 * completed, so that it can safely proceed and destroy the QP.
	 */
	if (refcount_dec_and_test(&hr_qp->refcount))
		complete(&hr_qp->free);
}

void init_flush_work(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
{
	struct hns_roce_work *flush_work = &hr_qp->flush_work;
	unsigned long flags;

	spin_lock_irqsave(&hr_qp->flush_lock, flags);
	/* Exit directly after destroy_qp() */
	if (test_bit(HNS_ROCE_STOP_FLUSH_FLAG, &hr_qp->flush_flag)) {
		spin_unlock_irqrestore(&hr_qp->flush_lock, flags);
		return;
	}

	refcount_inc(&hr_qp->refcount);
	queue_work(hr_dev->irq_workq, &flush_work->work);
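	/*
	 * The queued flush work holds its own reference on the QP; it is
	 * dropped in flush_work_handle() once the modify-to-error is done.
	 */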
	spin_unlock_irqrestore(&hr_qp->flush_lock, flags);
}

void flush_cqe(struct hns_roce_dev *dev, struct hns_roce_qp *qp)
{
	/*
	 * Hip08 hardware cannot flush the WQEs in SQ/RQ if the QP state
	 * gets into errored mode. Hence, as a workaround to this
	 * hardware limitation, the driver needs to assist in flushing. But
	 * the flush operation uses a mailbox to convey the QP state to
	 * the hardware, which can sleep due to the mutex protection
	 * around the mailbox calls. Hence, use the deferred flush for
	 * now.
	 */
	if (!test_and_set_bit(HNS_ROCE_FLUSH_FLAG, &qp->flush_flag))
		init_flush_work(dev, qp);
}

void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type)
{
	struct hns_roce_qp *qp;

	qp = hns_roce_qp_lookup(hr_dev, qpn);
	if (!qp)
		return;

	qp->event(qp, (enum hns_roce_event)event_type);

	if (refcount_dec_and_test(&qp->refcount))
		complete(&qp->free);
}

void hns_roce_flush_cqe(struct hns_roce_dev *hr_dev, u32 qpn)
{
	struct hns_roce_qp *qp;

	qp = hns_roce_qp_lookup(hr_dev, qpn);
	if (!qp)
		return;

	qp->state = IB_QPS_ERR;
	flush_cqe(hr_dev, qp);

	if (refcount_dec_and_test(&qp->refcount))
		complete(&qp->free);
}

static void hns_roce_ib_qp_event(struct hns_roce_qp *hr_qp,
				 enum hns_roce_event type)
{
	struct ib_qp *ibqp = &hr_qp->ibqp;
	struct ib_event event;

	if (ibqp->event_handler) {
		event.device = ibqp->device;
		event.element.qp = ibqp;
		switch (type) {
		case HNS_ROCE_EVENT_TYPE_PATH_MIG:
			event.event = IB_EVENT_PATH_MIG;
			break;
		case HNS_ROCE_EVENT_TYPE_COMM_EST:
			event.event = IB_EVENT_COMM_EST;
			break;
		case HNS_ROCE_EVENT_TYPE_SQ_DRAINED:
			event.event = IB_EVENT_SQ_DRAINED;
			break;
		case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH:
			event.event = IB_EVENT_QP_LAST_WQE_REACHED;
			break;
		case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
			event.event = IB_EVENT_QP_FATAL;
			break;
		case HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED:
			event.event = IB_EVENT_PATH_MIG_ERR;
			break;
		case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
			event.event = IB_EVENT_QP_REQ_ERR;
			break;
		case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
		case HNS_ROCE_EVENT_TYPE_XRCD_VIOLATION:
		case HNS_ROCE_EVENT_TYPE_INVALID_XRCETH:
			event.event = IB_EVENT_QP_ACCESS_ERR;
			break;
		default:
			dev_dbg(ibqp->device->dev.parent, "roce_ib: Unexpected event type %d on QP %06lx\n",
				type, hr_qp->qpn);
			return;
		}
		ibqp->event_handler(&event, ibqp->qp_context);
	}
}

static u8 get_affinity_cq_bank(u8 qp_bank)
{
	return (qp_bank >> 1) & CQ_BANKID_MASK;
}

static u8 get_least_load_bankid_for_qp(struct ib_qp_init_attr *init_attr,
				       struct hns_roce_bank *bank)
{
#define INVALID_LOAD_QPNUM 0xFFFFFFFF
	struct ib_cq *scq = init_attr->send_cq;
	u32 least_load = INVALID_LOAD_QPNUM;
	unsigned long cqn = 0;
	u8 bankid = 0;
	u32 bankcnt;
	u8 i;

	if (scq)
		cqn = to_hr_cq(scq)->cqn;

	for (i = 0; i < HNS_ROCE_QP_BANK_NUM; i++) {
		if (scq && (get_affinity_cq_bank(i) != (cqn & CQ_BANKID_MASK)))
			continue;

		bankcnt = bank[i].inuse;
		if (bankcnt < least_load) {
			least_load = bankcnt;
			bankid = i;
		}
	}

	return bankid;
}

static int alloc_qpn_with_bankid(struct hns_roce_bank *bank, u8 bankid,
				 unsigned long *qpn)
{
	int id;

	id = ida_alloc_range(&bank->ida, bank->next, bank->max, GFP_KERNEL);
	if (id < 0) {
		id = ida_alloc_range(&bank->ida, bank->min, bank->max,
				     GFP_KERNEL);
		if (id < 0)
			return id;
	}

	/* The QPN should keep increasing until the max value is reached. */
	bank->next = (id + 1) > bank->max ? bank->min : id + 1;

	/* The lower 3 bits are the bankid. */
	*qpn = (id << 3) | bankid;

	return 0;
}

static int alloc_qpn(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
		     struct ib_qp_init_attr *init_attr)
{
	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
	unsigned long num = 0;
	u8 bankid;
	int ret;

	if (hr_qp->ibqp.qp_type == IB_QPT_GSI) {
		num = 1;
	} else {
		mutex_lock(&qp_table->bank_mutex);
		bankid = get_least_load_bankid_for_qp(init_attr, qp_table->bank);

		ret = alloc_qpn_with_bankid(&qp_table->bank[bankid], bankid,
					    &num);
		if (ret) {
			ibdev_err(&hr_dev->ib_dev,
				  "failed to alloc QPN, ret = %d\n", ret);
			mutex_unlock(&qp_table->bank_mutex);
			return ret;
		}

		qp_table->bank[bankid].inuse++;
		mutex_unlock(&qp_table->bank_mutex);
	}

	hr_qp->qpn = num;

	return 0;
}

static void add_qp_to_list(struct hns_roce_dev *hr_dev,
			   struct hns_roce_qp *hr_qp,
			   struct ib_cq *send_cq, struct ib_cq *recv_cq)
{
	struct hns_roce_cq *hr_send_cq, *hr_recv_cq;
	unsigned long flags;

	hr_send_cq = send_cq ? to_hr_cq(send_cq) : NULL;
	hr_recv_cq = recv_cq ? to_hr_cq(recv_cq) : NULL;

	spin_lock_irqsave(&hr_dev->qp_list_lock, flags);
	hns_roce_lock_cqs(hr_send_cq, hr_recv_cq);

	list_add_tail(&hr_qp->node, &hr_dev->qp_list);
	if (hr_send_cq)
		list_add_tail(&hr_qp->sq_node, &hr_send_cq->sq_list);
	if (hr_recv_cq)
		list_add_tail(&hr_qp->rq_node, &hr_recv_cq->rq_list);

	hns_roce_unlock_cqs(hr_send_cq, hr_recv_cq);
	spin_unlock_irqrestore(&hr_dev->qp_list_lock, flags);
}

static int hns_roce_qp_store(struct hns_roce_dev *hr_dev,
			     struct hns_roce_qp *hr_qp,
			     struct ib_qp_init_attr *init_attr)
{
	struct xarray *xa = &hr_dev->qp_table_xa;
	int ret;

	if (!hr_qp->qpn)
		return -EINVAL;

	ret = xa_err(xa_store_irq(xa, hr_qp->qpn, hr_qp, GFP_KERNEL));
	if (ret)
		dev_err(hr_dev->dev, "failed to xa store for QPC\n");
	else
		/* add QP to device's QP list for softwc */
		add_qp_to_list(hr_dev, hr_qp, init_attr->send_cq,
			       init_attr->recv_cq);

	return ret;
}

static int alloc_qpc(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
{
	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
	struct device *dev = hr_dev->dev;
	int ret;

	if (!hr_qp->qpn)
		return -EINVAL;

	/* Alloc memory for QPC */
	ret = hns_roce_table_get(hr_dev, &qp_table->qp_table, hr_qp->qpn);
	if (ret) {
		dev_err(dev, "failed to get QPC table\n");
		goto err_out;
	}

	/* Alloc memory for IRRL */
	ret = hns_roce_table_get(hr_dev, &qp_table->irrl_table, hr_qp->qpn);
	if (ret) {
		dev_err(dev, "failed to get IRRL table\n");
		goto err_put_qp;
	}

	if (hr_dev->caps.trrl_entry_sz) {
		/* Alloc memory for TRRL */
		ret = hns_roce_table_get(hr_dev, &qp_table->trrl_table,
					 hr_qp->qpn);
		if (ret) {
			dev_err(dev, "failed to get TRRL table\n");
			goto err_put_irrl;
		}
	}

	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL) {
		/* Alloc memory for SCC CTX */
		ret = hns_roce_table_get(hr_dev, &qp_table->sccc_table,
					 hr_qp->qpn);
		if (ret) {
			dev_err(dev, "failed to get SCC CTX table\n");
			goto err_put_trrl;
		}
	}

	return 0;

err_put_trrl:
	if (hr_dev->caps.trrl_entry_sz)
		hns_roce_table_put(hr_dev, &qp_table->trrl_table, hr_qp->qpn);

err_put_irrl:
	hns_roce_table_put(hr_dev, &qp_table->irrl_table, hr_qp->qpn);

err_put_qp:
	hns_roce_table_put(hr_dev, &qp_table->qp_table, hr_qp->qpn);

err_out:
	return ret;
}

static void qp_user_mmap_entry_remove(struct hns_roce_qp *hr_qp)
{
	rdma_user_mmap_entry_remove(&hr_qp->dwqe_mmap_entry->rdma_entry);
}

void hns_roce_qp_remove(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
{
	struct xarray *xa = &hr_dev->qp_table_xa;
	unsigned long flags;

	list_del(&hr_qp->node);

	if (hr_qp->ibqp.qp_type != IB_QPT_XRC_TGT)
		list_del(&hr_qp->sq_node);

	if (hr_qp->ibqp.qp_type != IB_QPT_XRC_INI &&
	    hr_qp->ibqp.qp_type != IB_QPT_XRC_TGT)
		list_del(&hr_qp->rq_node);

	xa_lock_irqsave(xa, flags);
	__xa_erase(xa, hr_qp->qpn);
	xa_unlock_irqrestore(xa, flags);
}

static void free_qpc(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
{
	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;

	if (hr_dev->caps.trrl_entry_sz)
		hns_roce_table_put(hr_dev, &qp_table->trrl_table, hr_qp->qpn);
	hns_roce_table_put(hr_dev, &qp_table->irrl_table, hr_qp->qpn);
}

static inline u8 get_qp_bankid(unsigned long qpn)
{
	/* The lower 3 bits of QPN are used to hash to different banks */
	return (u8)(qpn & GENMASK(2, 0));
}

static void free_qpn(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
{
	u8 bankid;

	if (hr_qp->ibqp.qp_type == IB_QPT_GSI)
		return;

	if (hr_qp->qpn < hr_dev->caps.reserved_qps)
		return;

	bankid = get_qp_bankid(hr_qp->qpn);

	ida_free(&hr_dev->qp_table.bank[bankid].ida,
		 hr_qp->qpn / HNS_ROCE_QP_BANK_NUM);

	mutex_lock(&hr_dev->qp_table.bank_mutex);
	hr_dev->qp_table.bank[bankid].inuse--;
	mutex_unlock(&hr_dev->qp_table.bank_mutex);
}

static u32 proc_rq_sge(struct hns_roce_dev *dev, struct hns_roce_qp *hr_qp,
		       bool user)
{
	u32 max_sge = dev->caps.max_rq_sg;

	if (dev->pci_dev->revision >= PCI_REVISION_ID_HIP09)
		return max_sge;

	/* Reserve SGEs only for HIP08 in kernel; the userspace driver will
	 * calculate the number of max_sge with reserved SGEs when allocating
	 * the WQE buf, so there is no need to do this again in the kernel.
	 * But the number may exceed the capacity of SGEs recorded in the
	 * firmware, so the kernel driver should just adapt the value
	 * accordingly.
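	 * For example, with max_rq_sg = 8 the user path allows up to
	 * roundup_pow_of_two(8 + 1) = 16 SGEs in the check below, while the
	 * kernel path keeps 8 and reserves one SGE (rq.rsv_sge = 1).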
	 */
	if (user)
		max_sge = roundup_pow_of_two(max_sge + 1);
	else
		hr_qp->rq.rsv_sge = 1;

	return max_sge;
}

static int set_rq_size(struct hns_roce_dev *hr_dev, struct ib_qp_cap *cap,
		       struct hns_roce_qp *hr_qp, int has_rq, bool user)
{
	u32 max_sge = proc_rq_sge(hr_dev, hr_qp, user);
	u32 cnt;

	/* If an SRQ exists, set the RQ-related fields to zero. */
	if (!has_rq) {
		hr_qp->rq.wqe_cnt = 0;
		hr_qp->rq.max_gs = 0;
		cap->max_recv_wr = 0;
		cap->max_recv_sge = 0;

		return 0;
	}

	/* Check the validity of QP support capacity */
	if (!cap->max_recv_wr || cap->max_recv_wr > hr_dev->caps.max_wqes ||
	    cap->max_recv_sge > max_sge) {
		ibdev_err(&hr_dev->ib_dev,
			  "RQ config error, depth = %u, sge = %u\n",
			  cap->max_recv_wr, cap->max_recv_sge);
		return -EINVAL;
	}

	cnt = roundup_pow_of_two(max(cap->max_recv_wr, hr_dev->caps.min_wqes));
	if (cnt > hr_dev->caps.max_wqes) {
		ibdev_err(&hr_dev->ib_dev, "rq depth %u too large\n",
			  cap->max_recv_wr);
		return -EINVAL;
	}

	hr_qp->rq.max_gs = roundup_pow_of_two(max(1U, cap->max_recv_sge) +
					      hr_qp->rq.rsv_sge);

	hr_qp->rq.wqe_shift = ilog2(hr_dev->caps.max_rq_desc_sz *
				    hr_qp->rq.max_gs);

	hr_qp->rq.wqe_cnt = cnt;

	cap->max_recv_wr = cnt;
	cap->max_recv_sge = hr_qp->rq.max_gs - hr_qp->rq.rsv_sge;

	return 0;
}

static u32 get_max_inline_data(struct hns_roce_dev *hr_dev,
			       struct ib_qp_cap *cap)
{
	if (cap->max_inline_data) {
		cap->max_inline_data = roundup_pow_of_two(cap->max_inline_data);
		return min(cap->max_inline_data,
			   hr_dev->caps.max_sq_inline);
	}

	return 0;
}

static void update_inline_data(struct hns_roce_qp *hr_qp,
			       struct ib_qp_cap *cap)
{
	u32 sge_num = hr_qp->sq.ext_sge_cnt;

	if (hr_qp->config & HNS_ROCE_EXSGE_FLAGS) {
		if (!(hr_qp->ibqp.qp_type == IB_QPT_GSI ||
		      hr_qp->ibqp.qp_type == IB_QPT_UD))
			sge_num = max((u32)HNS_ROCE_SGE_IN_WQE, sge_num);

		cap->max_inline_data = max(cap->max_inline_data,
					   sge_num * HNS_ROCE_SGE_SIZE);
	}

	hr_qp->max_inline_data = cap->max_inline_data;
}

static u32 get_sge_num_from_max_send_sge(bool is_ud_or_gsi,
					 u32 max_send_sge)
{
	unsigned int std_sge_num;
	unsigned int min_sge;

	std_sge_num = is_ud_or_gsi ? 0 : HNS_ROCE_SGE_IN_WQE;
	min_sge = is_ud_or_gsi ? 1 : 0;
	return max_send_sge > std_sge_num ? (max_send_sge - std_sge_num) :
				min_sge;
}

static unsigned int get_sge_num_from_max_inl_data(bool is_ud_or_gsi,
						  u32 max_inline_data)
{
	unsigned int inline_sge;

	if (!max_inline_data)
		return 0;

	/*
	 * If max_inline_data is no larger than
	 * HNS_ROCE_SGE_IN_WQE * HNS_ROCE_SGE_SIZE, the inline data already
	 * fits in the standard WQE SGEs, so no extended SGEs are needed
	 * except for UD/GSI QPs.
	 */
	inline_sge = roundup_pow_of_two(max_inline_data) / HNS_ROCE_SGE_SIZE;
	if (!is_ud_or_gsi && inline_sge <= HNS_ROCE_SGE_IN_WQE)
		inline_sge = 0;

	return inline_sge;
}

static void set_ext_sge_param(struct hns_roce_dev *hr_dev, u32 sq_wqe_cnt,
			      struct hns_roce_qp *hr_qp, struct ib_qp_cap *cap)
{
	bool is_ud_or_gsi = (hr_qp->ibqp.qp_type == IB_QPT_GSI ||
			     hr_qp->ibqp.qp_type == IB_QPT_UD);
	unsigned int std_sge_num;
	u32 inline_ext_sge = 0;
	u32 ext_wqe_sge_cnt;
	u32 total_sge_cnt;

	cap->max_inline_data = get_max_inline_data(hr_dev, cap);

	hr_qp->sge.sge_shift = HNS_ROCE_SGE_SHIFT;
	std_sge_num = is_ud_or_gsi ? 0 : HNS_ROCE_SGE_IN_WQE;
	ext_wqe_sge_cnt = get_sge_num_from_max_send_sge(is_ud_or_gsi,
							cap->max_send_sge);

	if (hr_qp->config & HNS_ROCE_EXSGE_FLAGS) {
		inline_ext_sge = max(ext_wqe_sge_cnt,
				     get_sge_num_from_max_inl_data(is_ud_or_gsi,
							 cap->max_inline_data));
		hr_qp->sq.ext_sge_cnt = inline_ext_sge ?
					roundup_pow_of_two(inline_ext_sge) : 0;

		hr_qp->sq.max_gs = max(1U, (hr_qp->sq.ext_sge_cnt + std_sge_num));
		hr_qp->sq.max_gs = min(hr_qp->sq.max_gs, hr_dev->caps.max_sq_sg);

		ext_wqe_sge_cnt = hr_qp->sq.ext_sge_cnt;
	} else {
		hr_qp->sq.max_gs = max(1U, cap->max_send_sge);
		hr_qp->sq.max_gs = min(hr_qp->sq.max_gs, hr_dev->caps.max_sq_sg);
		hr_qp->sq.ext_sge_cnt = hr_qp->sq.max_gs;
	}

	/* If the number of extended SGEs is not zero, they MUST occupy at
	 * least a space of HNS_HW_PAGE_SIZE.
	 */
	if (ext_wqe_sge_cnt) {
		total_sge_cnt = roundup_pow_of_two(sq_wqe_cnt * ext_wqe_sge_cnt);
		hr_qp->sge.sge_cnt = max(total_sge_cnt,
					 (u32)HNS_HW_PAGE_SIZE / HNS_ROCE_SGE_SIZE);
	}

	update_inline_data(hr_qp, cap);
}

static int check_sq_size_with_integrity(struct hns_roce_dev *hr_dev,
					struct ib_qp_cap *cap,
					struct hns_roce_ib_create_qp *ucmd)
{
	u32 roundup_sq_stride = roundup_pow_of_two(hr_dev->caps.max_sq_desc_sz);
	u8 max_sq_stride = ilog2(roundup_sq_stride);

	/* Sanity check SQ size before proceeding */
	if (ucmd->log_sq_stride > max_sq_stride ||
	    ucmd->log_sq_stride < HNS_ROCE_IB_MIN_SQ_STRIDE) {
		ibdev_err(&hr_dev->ib_dev, "failed to check SQ stride size.\n");
		return -EINVAL;
	}

	if (cap->max_send_sge > hr_dev->caps.max_sq_sg) {
		ibdev_err(&hr_dev->ib_dev, "failed to check SQ SGE size %u.\n",
			  cap->max_send_sge);
		return -EINVAL;
	}

	return 0;
}

static int set_user_sq_size(struct hns_roce_dev *hr_dev,
			    struct ib_qp_cap *cap, struct hns_roce_qp *hr_qp,
			    struct hns_roce_ib_create_qp *ucmd)
{
	struct ib_device *ibdev = &hr_dev->ib_dev;
	u32 cnt = 0;
	int ret;

	if (check_shl_overflow(1, ucmd->log_sq_bb_count, &cnt) ||
	    cnt > hr_dev->caps.max_wqes)
		return -EINVAL;

	ret = check_sq_size_with_integrity(hr_dev, cap, ucmd);
	if (ret) {
		ibdev_err(ibdev, "failed to check user SQ size, ret = %d.\n",
			  ret);
		return ret;
	}

	set_ext_sge_param(hr_dev, cnt, hr_qp, cap);

	hr_qp->sq.wqe_shift = ucmd->log_sq_stride;
	hr_qp->sq.wqe_cnt = cnt;
	cap->max_send_sge = hr_qp->sq.max_gs;

	return 0;
}

static int set_wqe_buf_attr(struct hns_roce_dev *hr_dev,
			    struct hns_roce_qp *hr_qp,
			    struct hns_roce_buf_attr *buf_attr)
{
	int buf_size;
	int idx = 0;

	hr_qp->buff_size = 0;

	/* SQ WQE */
	hr_qp->sq.offset = 0;
	buf_size = to_hr_hem_entries_size(hr_qp->sq.wqe_cnt,
					  hr_qp->sq.wqe_shift);
	if (buf_size > 0 && idx < ARRAY_SIZE(buf_attr->region)) {
		buf_attr->region[idx].size = buf_size;
		buf_attr->region[idx].hopnum = hr_dev->caps.wqe_sq_hop_num;
		idx++;
		hr_qp->buff_size += buf_size;
	}

	/* extend SGE WQE in SQ */
	hr_qp->sge.offset = hr_qp->buff_size;
	buf_size = to_hr_hem_entries_size(hr_qp->sge.sge_cnt,
					  hr_qp->sge.sge_shift);
	if (buf_size > 0 && idx < ARRAY_SIZE(buf_attr->region)) {
		buf_attr->region[idx].size = buf_size;
		buf_attr->region[idx].hopnum = hr_dev->caps.wqe_sge_hop_num;
		idx++;
		hr_qp->buff_size += buf_size;
	}

	/* RQ WQE */
	hr_qp->rq.offset = hr_qp->buff_size;
	buf_size = to_hr_hem_entries_size(hr_qp->rq.wqe_cnt,
					  hr_qp->rq.wqe_shift);
	if (buf_size > 0 && idx < ARRAY_SIZE(buf_attr->region)) {
		buf_attr->region[idx].size = buf_size;
		buf_attr->region[idx].hopnum = hr_dev->caps.wqe_rq_hop_num;
		idx++;
		hr_qp->buff_size += buf_size;
	}

	if (hr_qp->buff_size < 1)
		return -EINVAL;

	buf_attr->page_shift = HNS_HW_PAGE_SHIFT + hr_dev->caps.mtt_buf_pg_sz;
	buf_attr->region_count = idx;

	return 0;
}

static int set_kernel_sq_size(struct hns_roce_dev *hr_dev,
			      struct ib_qp_cap *cap, struct hns_roce_qp *hr_qp)
{
	struct ib_device *ibdev = &hr_dev->ib_dev;
	u32 cnt;

	if (!cap->max_send_wr || cap->max_send_wr > hr_dev->caps.max_wqes ||
	    cap->max_send_sge > hr_dev->caps.max_sq_sg) {
		ibdev_err(ibdev, "failed to check SQ WR or SGE num.\n");
		return -EINVAL;
	}

	cnt = roundup_pow_of_two(max(cap->max_send_wr, hr_dev->caps.min_wqes));
	if (cnt > hr_dev->caps.max_wqes) {
		ibdev_err(ibdev, "failed to check WQE num, WQE num = %u.\n",
			  cnt);
		return -EINVAL;
	}

	hr_qp->sq.wqe_shift = ilog2(hr_dev->caps.max_sq_desc_sz);
	hr_qp->sq.wqe_cnt = cnt;

	set_ext_sge_param(hr_dev, cnt, hr_qp, cap);

	/* sync the parameters of kernel QP to user's configuration */
	cap->max_send_wr = cnt;
	cap->max_send_sge = hr_qp->sq.max_gs;

	return 0;
}

static int hns_roce_qp_has_sq(struct ib_qp_init_attr *attr)
{
	if (attr->qp_type == IB_QPT_XRC_TGT || !attr->cap.max_send_wr)
		return 0;

	return 1;
}

static int hns_roce_qp_has_rq(struct ib_qp_init_attr *attr)
{
	if (attr->qp_type == IB_QPT_XRC_INI ||
	    attr->qp_type == IB_QPT_XRC_TGT || attr->srq ||
	    !attr->cap.max_recv_wr)
		return 0;

	return 1;
}

static int alloc_qp_buf(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
			struct ib_qp_init_attr *init_attr,
			struct ib_udata *udata, unsigned long addr)
{
	struct ib_device *ibdev = &hr_dev->ib_dev;
	struct hns_roce_buf_attr buf_attr = {};
	int ret;

	ret = set_wqe_buf_attr(hr_dev, hr_qp, &buf_attr);
	if (ret) {
		ibdev_err(ibdev, "failed to split WQE buf, ret = %d.\n", ret);
		goto err_inline;
	}
	ret = hns_roce_mtr_create(hr_dev, &hr_qp->mtr, &buf_attr,
				  PAGE_SHIFT + hr_dev->caps.mtt_ba_pg_sz,
				  udata, addr);
	if (ret) {
		ibdev_err(ibdev, "failed to create WQE mtr, ret = %d.\n", ret);
		goto err_inline;
	}

	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_DIRECT_WQE)
		hr_qp->en_flags |= HNS_ROCE_QP_CAP_DIRECT_WQE;

	return 0;

err_inline:

	return ret;
}

static void free_qp_buf(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
{
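	/* Release the MTR created in alloc_qp_buf() along with the WQE buffer. */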
	hns_roce_mtr_destroy(hr_dev, &hr_qp->mtr);
}

static inline bool user_qp_has_sdb(struct hns_roce_dev *hr_dev,
				   struct ib_qp_init_attr *init_attr,
				   struct ib_udata *udata,
				   struct hns_roce_ib_create_qp_resp *resp,
				   struct hns_roce_ib_create_qp *ucmd)
{
	return ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_RECORD_DB) &&
		udata->outlen >= offsetofend(typeof(*resp), cap_flags) &&
		hns_roce_qp_has_sq(init_attr) &&
		udata->inlen >= offsetofend(typeof(*ucmd), sdb_addr));
}

static inline bool user_qp_has_rdb(struct hns_roce_dev *hr_dev,
				   struct ib_qp_init_attr *init_attr,
				   struct ib_udata *udata,
				   struct hns_roce_ib_create_qp_resp *resp)
{
	return ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_RECORD_DB) &&
		udata->outlen >= offsetofend(typeof(*resp), cap_flags) &&
		hns_roce_qp_has_rq(init_attr));
}

static inline bool kernel_qp_has_rdb(struct hns_roce_dev *hr_dev,
				     struct ib_qp_init_attr *init_attr)
{
	return ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_RECORD_DB) &&
		hns_roce_qp_has_rq(init_attr));
}

static int qp_mmap_entry(struct hns_roce_qp *hr_qp,
			 struct hns_roce_dev *hr_dev,
			 struct ib_udata *udata,
			 struct hns_roce_ib_create_qp_resp *resp)
{
	struct hns_roce_ucontext *uctx =
		rdma_udata_to_drv_context(udata,
			struct hns_roce_ucontext, ibucontext);
	struct rdma_user_mmap_entry *rdma_entry;
	u64 address;

	address = hr_dev->dwqe_page + hr_qp->qpn * HNS_ROCE_DWQE_SIZE;

	hr_qp->dwqe_mmap_entry =
		hns_roce_user_mmap_entry_insert(&uctx->ibucontext, address,
						HNS_ROCE_DWQE_SIZE,
						HNS_ROCE_MMAP_TYPE_DWQE);

	if (!hr_qp->dwqe_mmap_entry) {
		ibdev_err(&hr_dev->ib_dev, "failed to get dwqe mmap entry.\n");
		return -ENOMEM;
	}

	rdma_entry = &hr_qp->dwqe_mmap_entry->rdma_entry;
	resp->dwqe_mmap_key = rdma_user_mmap_get_offset(rdma_entry);

	return 0;
}

static int alloc_user_qp_db(struct hns_roce_dev *hr_dev,
			    struct hns_roce_qp *hr_qp,
			    struct ib_qp_init_attr *init_attr,
			    struct ib_udata *udata,
			    struct hns_roce_ib_create_qp *ucmd,
			    struct hns_roce_ib_create_qp_resp *resp)
{
	bool has_sdb = user_qp_has_sdb(hr_dev, init_attr, udata, resp, ucmd);
	struct hns_roce_ucontext *uctx = rdma_udata_to_drv_context(udata,
		struct hns_roce_ucontext, ibucontext);
	bool has_rdb = user_qp_has_rdb(hr_dev, init_attr, udata, resp);
	struct ib_device *ibdev = &hr_dev->ib_dev;
	int ret;

	if (has_sdb) {
		ret = hns_roce_db_map_user(uctx, ucmd->sdb_addr, &hr_qp->sdb);
		if (ret) {
			ibdev_err(ibdev,
				  "failed to map user SQ doorbell, ret = %d.\n",
				  ret);
			goto err_out;
		}
		hr_qp->en_flags |= HNS_ROCE_QP_CAP_SQ_RECORD_DB;
	}

	if (has_rdb) {
		ret = hns_roce_db_map_user(uctx, ucmd->db_addr, &hr_qp->rdb);
		if (ret) {
			ibdev_err(ibdev,
				  "failed to map user RQ doorbell, ret = %d.\n",
				  ret);
			goto err_sdb;
		}
		hr_qp->en_flags |= HNS_ROCE_QP_CAP_RQ_RECORD_DB;
	}

	return 0;

err_sdb:
	if (has_sdb)
		hns_roce_db_unmap_user(uctx, &hr_qp->sdb);
err_out:
	return ret;
}

static int alloc_kernel_qp_db(struct hns_roce_dev *hr_dev,
			      struct hns_roce_qp *hr_qp,
			      struct ib_qp_init_attr *init_attr)
{
	struct ib_device *ibdev = &hr_dev->ib_dev;
	int ret;

	if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09)
		hr_qp->sq.db_reg = hr_dev->mem_base +
				   HNS_ROCE_DWQE_SIZE * hr_qp->qpn;
	else
		hr_qp->sq.db_reg = hr_dev->reg_base + hr_dev->sdb_offset +
				   DB_REG_OFFSET * hr_dev->priv_uar.index;

	hr_qp->rq.db_reg = hr_dev->reg_base + hr_dev->odb_offset +
			   DB_REG_OFFSET * hr_dev->priv_uar.index;

	if (kernel_qp_has_rdb(hr_dev, init_attr)) {
		ret = hns_roce_alloc_db(hr_dev, &hr_qp->rdb, 0);
		if (ret) {
			ibdev_err(ibdev,
				  "failed to alloc kernel RQ doorbell, ret = %d.\n",
				  ret);
			return ret;
		}
		*hr_qp->rdb.db_record = 0;
		hr_qp->en_flags |= HNS_ROCE_QP_CAP_RQ_RECORD_DB;
	}

	return 0;
}

static int alloc_qp_db(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
		       struct ib_qp_init_attr *init_attr,
		       struct ib_udata *udata,
		       struct hns_roce_ib_create_qp *ucmd,
		       struct hns_roce_ib_create_qp_resp *resp)
{
	int ret;

	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SDI_MODE)
		hr_qp->en_flags |= HNS_ROCE_QP_CAP_OWNER_DB;

	if (udata) {
		if (hr_qp->en_flags & HNS_ROCE_QP_CAP_DIRECT_WQE) {
			ret = qp_mmap_entry(hr_qp, hr_dev, udata, resp);
			if (ret)
				return ret;
		}

		ret = alloc_user_qp_db(hr_dev, hr_qp, init_attr, udata, ucmd,
				       resp);
		if (ret)
			goto err_remove_qp;
	} else {
		ret = alloc_kernel_qp_db(hr_dev, hr_qp, init_attr);
		if (ret)
			return ret;
	}

	return 0;

err_remove_qp:
	if (hr_qp->en_flags & HNS_ROCE_QP_CAP_DIRECT_WQE)
		qp_user_mmap_entry_remove(hr_qp);

	return ret;
}

static void free_qp_db(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
		       struct ib_udata *udata)
{
	struct hns_roce_ucontext *uctx = rdma_udata_to_drv_context(
		udata, struct hns_roce_ucontext, ibucontext);

	if (udata) {
		if (hr_qp->en_flags & HNS_ROCE_QP_CAP_RQ_RECORD_DB)
			hns_roce_db_unmap_user(uctx, &hr_qp->rdb);
		if (hr_qp->en_flags & HNS_ROCE_QP_CAP_SQ_RECORD_DB)
			hns_roce_db_unmap_user(uctx, &hr_qp->sdb);
		if (hr_qp->en_flags & HNS_ROCE_QP_CAP_DIRECT_WQE)
			qp_user_mmap_entry_remove(hr_qp);
	} else {
		if (hr_qp->en_flags & HNS_ROCE_QP_CAP_RQ_RECORD_DB)
			hns_roce_free_db(hr_dev, &hr_qp->rdb);
	}
}

static int alloc_kernel_wrid(struct hns_roce_dev *hr_dev,
			     struct hns_roce_qp *hr_qp)
{
	struct ib_device *ibdev = &hr_dev->ib_dev;
	u64 *sq_wrid = NULL;
	u64 *rq_wrid = NULL;
	int ret;

	sq_wrid = kcalloc(hr_qp->sq.wqe_cnt, sizeof(u64), GFP_KERNEL);
	if (ZERO_OR_NULL_PTR(sq_wrid)) {
		ibdev_err(ibdev, "failed to alloc SQ wrid.\n");
		return -ENOMEM;
	}

	if (hr_qp->rq.wqe_cnt) {
		rq_wrid = kcalloc(hr_qp->rq.wqe_cnt, sizeof(u64), GFP_KERNEL);
		if (ZERO_OR_NULL_PTR(rq_wrid)) {
			ibdev_err(ibdev, "failed to alloc RQ wrid.\n");
			ret = -ENOMEM;
			goto err_sq;
		}
	}

	hr_qp->sq.wrid = sq_wrid;
	hr_qp->rq.wrid = rq_wrid;
	return 0;
err_sq:
	kfree(sq_wrid);

	return ret;
}

static void free_kernel_wrid(struct hns_roce_qp *hr_qp)
{
	kfree(hr_qp->rq.wrid);
	kfree(hr_qp->sq.wrid);
}

static void default_congest_type(struct hns_roce_dev *hr_dev,
				 struct hns_roce_qp *hr_qp)
{
	if (hr_qp->ibqp.qp_type == IB_QPT_UD ||
	    hr_qp->ibqp.qp_type == IB_QPT_GSI)
		hr_qp->cong_type = CONG_TYPE_DCQCN;
	else
		hr_qp->cong_type = hr_dev->caps.default_cong_type;
}

static int set_congest_type(struct hns_roce_qp *hr_qp,
			    struct hns_roce_ib_create_qp *ucmd)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(hr_qp->ibqp.device);

	switch (ucmd->cong_type_flags) {
	case HNS_ROCE_CREATE_QP_FLAGS_DCQCN:
		hr_qp->cong_type = CONG_TYPE_DCQCN;
		break;
	case HNS_ROCE_CREATE_QP_FLAGS_LDCP:
		hr_qp->cong_type = CONG_TYPE_LDCP;
		break;
	case HNS_ROCE_CREATE_QP_FLAGS_HC3:
		hr_qp->cong_type = CONG_TYPE_HC3;
		break;
	case HNS_ROCE_CREATE_QP_FLAGS_DIP:
		hr_qp->cong_type = CONG_TYPE_DIP;
		break;
	default:
		return -EINVAL;
	}

	if (!test_bit(hr_qp->cong_type, (unsigned long *)&hr_dev->caps.cong_cap))
		return -EOPNOTSUPP;

	if (hr_qp->ibqp.qp_type == IB_QPT_UD &&
	    hr_qp->cong_type != CONG_TYPE_DCQCN)
		return -EOPNOTSUPP;

	return 0;
}

static int set_congest_param(struct hns_roce_dev *hr_dev,
			     struct hns_roce_qp *hr_qp,
			     struct hns_roce_ib_create_qp *ucmd)
{
	if (ucmd->comp_mask & HNS_ROCE_CREATE_QP_MASK_CONGEST_TYPE)
		return set_congest_type(hr_qp, ucmd);

	default_congest_type(hr_dev, hr_qp);

	return 0;
}

static int set_qp_param(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
			struct ib_qp_init_attr *init_attr,
			struct ib_udata *udata,
			struct hns_roce_ib_create_qp *ucmd)
{
	struct ib_device *ibdev = &hr_dev->ib_dev;
	struct hns_roce_ucontext *uctx;
	int ret;

	if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
		hr_qp->sq_signal_bits = IB_SIGNAL_ALL_WR;
	else
		hr_qp->sq_signal_bits = IB_SIGNAL_REQ_WR;

	ret = set_rq_size(hr_dev, &init_attr->cap, hr_qp,
			  hns_roce_qp_has_rq(init_attr), !!udata);
	if (ret) {
		ibdev_err(ibdev, "failed to set user RQ size, ret = %d.\n",
			  ret);
		return ret;
	}

	if (udata) {
		ret = ib_copy_from_udata(ucmd, udata,
					 min(udata->inlen, sizeof(*ucmd)));
		if (ret) {
			ibdev_err(ibdev,
				  "failed to copy QP ucmd, ret = %d\n", ret);
			return ret;
		}

		uctx = rdma_udata_to_drv_context(udata, struct hns_roce_ucontext,
						 ibucontext);
		hr_qp->config = uctx->config;
		ret = set_user_sq_size(hr_dev, &init_attr->cap, hr_qp, ucmd);
		if (ret) {
			ibdev_err(ibdev,
				  "failed to set user SQ size, ret = %d.\n",
				  ret);
			return ret;
		}

		ret = set_congest_param(hr_dev, hr_qp, ucmd);
	} else {
		if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09)
			hr_qp->config = HNS_ROCE_EXSGE_FLAGS;
		default_congest_type(hr_dev, hr_qp);
		ret = set_kernel_sq_size(hr_dev, &init_attr->cap, hr_qp);
		if (ret)
			ibdev_err(ibdev,
				  "failed to set kernel SQ size, ret = %d.\n",
				  ret);
	}

	return ret;
}

static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
				     struct ib_qp_init_attr *init_attr,
				     struct ib_udata *udata,
				     struct hns_roce_qp *hr_qp)
{
	struct hns_roce_work *flush_work = &hr_qp->flush_work;
	struct hns_roce_ib_create_qp_resp resp = {};
	struct ib_device *ibdev = &hr_dev->ib_dev;
	struct hns_roce_ib_create_qp ucmd = {};
	int ret;

	mutex_init(&hr_qp->mutex);
	spin_lock_init(&hr_qp->sq.lock);
	spin_lock_init(&hr_qp->rq.lock);
	spin_lock_init(&hr_qp->flush_lock);

	hr_qp->state = IB_QPS_RESET;
	hr_qp->flush_flag = 0;
	flush_work->hr_dev = hr_dev;
	INIT_WORK(&flush_work->work, flush_work_handle);

	if (init_attr->create_flags)
		return -EOPNOTSUPP;

	ret = set_qp_param(hr_dev, hr_qp, init_attr, udata, &ucmd);
	if (ret) {
		ibdev_err(ibdev, "failed to set QP param, ret = %d.\n", ret);
		goto err_out;
	}

	if (!udata) {
		ret = alloc_kernel_wrid(hr_dev, hr_qp);
		if (ret) {
			ibdev_err(ibdev, "failed to alloc wrid, ret = %d.\n",
				  ret);
			goto err_out;
		}
	}

	ret = alloc_qp_buf(hr_dev, hr_qp, init_attr, udata, ucmd.buf_addr);
	if (ret) {
		ibdev_err(ibdev, "failed to alloc QP buffer, ret = %d.\n", ret);
		goto err_buf;
	}

	ret = alloc_qpn(hr_dev, hr_qp, init_attr);
	if (ret) {
		ibdev_err(ibdev, "failed to alloc QPN, ret = %d.\n", ret);
		goto err_qpn;
	}

	ret = alloc_qp_db(hr_dev, hr_qp, init_attr, udata, &ucmd, &resp);
	if (ret) {
		ibdev_err(ibdev, "failed to alloc QP doorbell, ret = %d.\n",
			  ret);
		goto err_db;
	}

	ret = alloc_qpc(hr_dev, hr_qp);
	if (ret) {
		ibdev_err(ibdev, "failed to alloc QP context, ret = %d.\n",
			  ret);
		goto err_qpc;
	}

	ret = hns_roce_qp_store(hr_dev, hr_qp, init_attr);
	if (ret) {
		ibdev_err(ibdev, "failed to store QP, ret = %d.\n", ret);
		goto err_store;
	}

	if (udata) {
		resp.cap_flags = hr_qp->en_flags;
		ret = ib_copy_to_udata(udata, &resp,
				       min(udata->outlen, sizeof(resp)));
		if (ret) {
			ibdev_err(ibdev, "copy qp resp failed!\n");
			goto err_flow_ctrl;
		}
	}

	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL) {
		ret = hr_dev->hw->qp_flow_control_init(hr_dev, hr_qp);
		if (ret)
			goto err_flow_ctrl;
	}

	hr_qp->ibqp.qp_num = hr_qp->qpn;
	hr_qp->event = hns_roce_ib_qp_event;
	refcount_set(&hr_qp->refcount, 1);
	init_completion(&hr_qp->free);

	return 0;

err_flow_ctrl:
	hns_roce_qp_remove(hr_dev, hr_qp);
err_store:
	free_qpc(hr_dev, hr_qp);
err_qpc:
	free_qp_db(hr_dev, hr_qp, udata);
err_db:
	free_qpn(hr_dev, hr_qp);
err_qpn:
	free_qp_buf(hr_dev, hr_qp);
err_buf:
	free_kernel_wrid(hr_qp);
err_out:
	mutex_destroy(&hr_qp->mutex);
	return ret;
}

void hns_roce_qp_destroy(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
			 struct ib_udata *udata)
{
	if (refcount_dec_and_test(&hr_qp->refcount))
		complete(&hr_qp->free);
	wait_for_completion(&hr_qp->free);

	free_qpc(hr_dev, hr_qp);
	free_qpn(hr_dev, hr_qp);
	free_qp_buf(hr_dev, hr_qp);
	free_kernel_wrid(hr_qp);
	free_qp_db(hr_dev, hr_qp, udata);
	mutex_destroy(&hr_qp->mutex);
}

static int check_qp_type(struct hns_roce_dev *hr_dev, enum ib_qp_type type,
			 bool is_user)
{
	switch (type) {
	case IB_QPT_XRC_INI:
	case IB_QPT_XRC_TGT:
		if (!(hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_XRC))
			goto out;
		break;
	case IB_QPT_UD:
		if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08 &&
		    is_user)
			goto out;
		break;
	case IB_QPT_RC:
	case IB_QPT_GSI:
		break;
	default:
		goto out;
	}

	return 0;

out:
	ibdev_err(&hr_dev->ib_dev, "not support QP type %d\n", type);

	return -EOPNOTSUPP;
}

int hns_roce_create_qp(struct ib_qp *qp, struct ib_qp_init_attr *init_attr,
		       struct ib_udata *udata)
{
	struct ib_device *ibdev = qp->device;
	struct hns_roce_dev *hr_dev = to_hr_dev(ibdev);
	struct hns_roce_qp *hr_qp = to_hr_qp(qp);
	int ret;

	ret = check_qp_type(hr_dev, init_attr->qp_type, !!udata);
	if (ret)
		goto err_out;

	if (init_attr->qp_type == IB_QPT_XRC_TGT)
		hr_qp->xrcdn = to_hr_xrcd(init_attr->xrcd)->xrcdn;

	if (init_attr->qp_type == IB_QPT_GSI) {
		hr_qp->port = init_attr->port_num - 1;
		hr_qp->phy_port = hr_dev->iboe.phy_port[hr_qp->port];
	}

	ret = hns_roce_create_qp_common(hr_dev, init_attr, udata, hr_qp);
	if (ret)
		ibdev_err(ibdev, "create QP type %d failed(%d)\n",
			  init_attr->qp_type, ret);

err_out:
	if (ret)
		atomic64_inc(&hr_dev->dfx_cnt[HNS_ROCE_DFX_QP_CREATE_ERR_CNT]);

	return ret;
}

int to_hr_qp_type(int qp_type)
{
	switch (qp_type) {
	case IB_QPT_RC:
		return SERV_TYPE_RC;
	case IB_QPT_UD:
	case IB_QPT_GSI:
		return SERV_TYPE_UD;
	case IB_QPT_XRC_INI:
	case IB_QPT_XRC_TGT:
		return SERV_TYPE_XRC;
	default:
		return -1;
	}
}

static int check_mtu_validate(struct hns_roce_dev *hr_dev,
			      struct hns_roce_qp *hr_qp,
			      struct ib_qp_attr *attr, int attr_mask)
{
	enum ib_mtu active_mtu;
	int p;

	p = attr_mask & IB_QP_PORT ? (attr->port_num - 1) : hr_qp->port;
	active_mtu = iboe_get_mtu(hr_dev->iboe.netdevs[p]->mtu);

	if ((hr_dev->caps.max_mtu >= IB_MTU_2048 &&
	    attr->path_mtu > hr_dev->caps.max_mtu) ||
	    attr->path_mtu < IB_MTU_256 || attr->path_mtu > active_mtu) {
		ibdev_err(&hr_dev->ib_dev,
			  "attr path_mtu(%d)invalid while modify qp",
			  attr->path_mtu);
		return -EINVAL;
	}

	return 0;
}

static int hns_roce_check_qp_attr(struct ib_qp *ibqp, struct ib_qp_attr *attr,
				  int attr_mask)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
	int p;

	if ((attr_mask & IB_QP_PORT) &&
	    (attr->port_num == 0 || attr->port_num > hr_dev->caps.num_ports)) {
		ibdev_err(&hr_dev->ib_dev, "invalid attr, port_num = %u.\n",
			  attr->port_num);
		return -EINVAL;
	}

	if (attr_mask & IB_QP_PKEY_INDEX) {
		p = attr_mask & IB_QP_PORT ? (attr->port_num - 1) : hr_qp->port;
		if (attr->pkey_index >= hr_dev->caps.pkey_table_len[p]) {
			ibdev_err(&hr_dev->ib_dev,
				  "invalid attr, pkey_index = %u.\n",
				  attr->pkey_index);
			return -EINVAL;
		}
	}

	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
	    attr->max_rd_atomic > hr_dev->caps.max_qp_init_rdma) {
		ibdev_err(&hr_dev->ib_dev,
			  "invalid attr, max_rd_atomic = %u.\n",
			  attr->max_rd_atomic);
		return -EINVAL;
	}

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
	    attr->max_dest_rd_atomic > hr_dev->caps.max_qp_dest_rdma) {
		ibdev_err(&hr_dev->ib_dev,
			  "invalid attr, max_dest_rd_atomic = %u.\n",
			  attr->max_dest_rd_atomic);
		return -EINVAL;
	}

	if (attr_mask & IB_QP_PATH_MTU)
		return check_mtu_validate(hr_dev, hr_qp, attr, attr_mask);

	return 0;
}

int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		       int attr_mask, struct ib_udata *udata)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
	struct hns_roce_ib_modify_qp_resp resp = {};
	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
	enum ib_qp_state cur_state, new_state;
	int ret = -EINVAL;

	mutex_lock(&hr_qp->mutex);

	if (attr_mask & IB_QP_CUR_STATE && attr->cur_qp_state != hr_qp->state)
		goto out;

	cur_state = hr_qp->state;
	new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;

	if (ibqp->uobject &&
	    (attr_mask & IB_QP_STATE) && new_state == IB_QPS_ERR) {
		if (hr_qp->en_flags & HNS_ROCE_QP_CAP_SQ_RECORD_DB) {
			hr_qp->sq.head = *(int *)(hr_qp->sdb.virt_addr);

			if (hr_qp->en_flags & HNS_ROCE_QP_CAP_RQ_RECORD_DB)
				hr_qp->rq.head = *(int *)(hr_qp->rdb.virt_addr);
		} else {
			ibdev_warn(&hr_dev->ib_dev,
				   "flush cqe is not supported in userspace!\n");
			goto out;
		}
	}

	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
				attr_mask)) {
		ibdev_err(&hr_dev->ib_dev, "ib_modify_qp_is_ok failed\n");
		goto out;
	}

	ret = hns_roce_check_qp_attr(ibqp, attr, attr_mask);
	if (ret)
		goto out;

	if (cur_state == new_state && cur_state == IB_QPS_RESET)
		goto out;

	ret = hr_dev->hw->modify_qp(ibqp, attr, attr_mask, cur_state,
				    new_state, udata);
	if (ret)
		goto out;

	if (udata && udata->outlen) {
		resp.tc_mode = hr_qp->tc_mode;
		resp.priority = hr_qp->sl;
		ret = ib_copy_to_udata(udata, &resp,
				       min(udata->outlen, sizeof(resp)));
		if (ret)
			ibdev_err_ratelimited(&hr_dev->ib_dev,
					      "failed to copy modify qp resp.\n");
	}

out:
	mutex_unlock(&hr_qp->mutex);
	if (ret)
		atomic64_inc(&hr_dev->dfx_cnt[HNS_ROCE_DFX_QP_MODIFY_ERR_CNT]);

	return ret;
}

void hns_roce_lock_cqs(struct hns_roce_cq *send_cq, struct hns_roce_cq *recv_cq)
		       __acquires(&send_cq->lock) __acquires(&recv_cq->lock)
{
	if (unlikely(send_cq == NULL && recv_cq == NULL)) {
		__acquire(&send_cq->lock);
		__acquire(&recv_cq->lock);
	} else if (unlikely(send_cq != NULL && recv_cq == NULL)) {
		spin_lock(&send_cq->lock);
		__acquire(&recv_cq->lock);
	} else if (unlikely(send_cq == NULL && recv_cq != NULL)) {
		spin_lock(&recv_cq->lock);
		__acquire(&send_cq->lock);
	} else if (send_cq == recv_cq) {
		spin_lock(&send_cq->lock);
		__acquire(&recv_cq->lock);
	} else if (send_cq->cqn < recv_cq->cqn) {
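		/*
		 * Both CQs are valid and distinct: always take the lock of
		 * the CQ with the smaller CQN first so that all paths agree
		 * on the ordering and cannot deadlock against each other.
		 */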
		spin_lock(&send_cq->lock);
		spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING);
	} else {
		spin_lock(&recv_cq->lock);
		spin_lock_nested(&send_cq->lock, SINGLE_DEPTH_NESTING);
	}
}

void hns_roce_unlock_cqs(struct hns_roce_cq *send_cq,
			 struct hns_roce_cq *recv_cq) __releases(&send_cq->lock)
			 __releases(&recv_cq->lock)
{
	if (unlikely(send_cq == NULL && recv_cq == NULL)) {
		__release(&recv_cq->lock);
		__release(&send_cq->lock);
	} else if (unlikely(send_cq != NULL && recv_cq == NULL)) {
		__release(&recv_cq->lock);
		spin_unlock(&send_cq->lock);
	} else if (unlikely(send_cq == NULL && recv_cq != NULL)) {
		__release(&send_cq->lock);
		spin_unlock(&recv_cq->lock);
	} else if (send_cq == recv_cq) {
		__release(&recv_cq->lock);
		spin_unlock(&send_cq->lock);
	} else if (send_cq->cqn < recv_cq->cqn) {
		spin_unlock(&recv_cq->lock);
		spin_unlock(&send_cq->lock);
	} else {
		spin_unlock(&send_cq->lock);
		spin_unlock(&recv_cq->lock);
	}
}

static inline void *get_wqe(struct hns_roce_qp *hr_qp, u32 offset)
{
	return hns_roce_buf_offset(hr_qp->mtr.kmem, offset);
}

void *hns_roce_get_recv_wqe(struct hns_roce_qp *hr_qp, unsigned int n)
{
	return get_wqe(hr_qp, hr_qp->rq.offset + (n << hr_qp->rq.wqe_shift));
}

void *hns_roce_get_send_wqe(struct hns_roce_qp *hr_qp, unsigned int n)
{
	return get_wqe(hr_qp, hr_qp->sq.offset + (n << hr_qp->sq.wqe_shift));
}

void *hns_roce_get_extend_sge(struct hns_roce_qp *hr_qp, unsigned int n)
{
	return get_wqe(hr_qp, hr_qp->sge.offset + (n << hr_qp->sge.sge_shift));
}

bool hns_roce_wq_overflow(struct hns_roce_wq *hr_wq, u32 nreq,
			  struct ib_cq *ib_cq)
{
	struct hns_roce_cq *hr_cq;
	u32 cur;

	cur = hr_wq->head - hr_wq->tail;
	if (likely(cur + nreq < hr_wq->wqe_cnt))
		return false;

	hr_cq = to_hr_cq(ib_cq);
	spin_lock(&hr_cq->lock);
	cur = hr_wq->head - hr_wq->tail;
	spin_unlock(&hr_cq->lock);

	return cur + nreq >= hr_wq->wqe_cnt;
}

int hns_roce_init_qp_table(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
	unsigned int reserved_from_bot;
	unsigned int i;

	mutex_init(&qp_table->scc_mutex);
	mutex_init(&qp_table->bank_mutex);
	xa_init(&hr_dev->qp_table_xa);
	xa_init(&qp_table->dip_xa);

	reserved_from_bot = hr_dev->caps.reserved_qps;

	for (i = 0; i < reserved_from_bot; i++) {
		hr_dev->qp_table.bank[get_qp_bankid(i)].inuse++;
		hr_dev->qp_table.bank[get_qp_bankid(i)].min++;
	}

	for (i = 0; i < HNS_ROCE_QP_BANK_NUM; i++) {
		ida_init(&hr_dev->qp_table.bank[i].ida);
		hr_dev->qp_table.bank[i].max = hr_dev->caps.num_qps /
					       HNS_ROCE_QP_BANK_NUM - 1;
		hr_dev->qp_table.bank[i].next = hr_dev->qp_table.bank[i].min;
	}

	return 0;
}

void hns_roce_cleanup_qp_table(struct hns_roce_dev *hr_dev)
{
	int i;

	for (i = 0; i < HNS_ROCE_QP_BANK_NUM; i++)
		ida_destroy(&hr_dev->qp_table.bank[i].ida);
	xa_destroy(&hr_dev->qp_table.dip_xa);
	xa_destroy(&hr_dev->qp_table_xa);
	mutex_destroy(&hr_dev->qp_table.bank_mutex);
	mutex_destroy(&hr_dev->qp_table.scc_mutex);
}