129c8d9ebSAdit Ranadive /* 229c8d9ebSAdit Ranadive * Copyright (c) 2012-2016 VMware, Inc. All rights reserved. 329c8d9ebSAdit Ranadive * 429c8d9ebSAdit Ranadive * This program is free software; you can redistribute it and/or 529c8d9ebSAdit Ranadive * modify it under the terms of EITHER the GNU General Public License 629c8d9ebSAdit Ranadive * version 2 as published by the Free Software Foundation or the BSD 729c8d9ebSAdit Ranadive * 2-Clause License. This program is distributed in the hope that it 829c8d9ebSAdit Ranadive * will be useful, but WITHOUT ANY WARRANTY; WITHOUT EVEN THE IMPLIED 929c8d9ebSAdit Ranadive * WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. 1029c8d9ebSAdit Ranadive * See the GNU General Public License version 2 for more details at 1129c8d9ebSAdit Ranadive * http://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html. 1229c8d9ebSAdit Ranadive * 1329c8d9ebSAdit Ranadive * You should have received a copy of the GNU General Public License 1429c8d9ebSAdit Ranadive * along with this program available in the file COPYING in the main 1529c8d9ebSAdit Ranadive * directory of this source tree. 1629c8d9ebSAdit Ranadive * 1729c8d9ebSAdit Ranadive * The BSD 2-Clause License 1829c8d9ebSAdit Ranadive * 1929c8d9ebSAdit Ranadive * Redistribution and use in source and binary forms, with or 2029c8d9ebSAdit Ranadive * without modification, are permitted provided that the following 2129c8d9ebSAdit Ranadive * conditions are met: 2229c8d9ebSAdit Ranadive * 2329c8d9ebSAdit Ranadive * - Redistributions of source code must retain the above 2429c8d9ebSAdit Ranadive * copyright notice, this list of conditions and the following 2529c8d9ebSAdit Ranadive * disclaimer. 
2629c8d9ebSAdit Ranadive * 2729c8d9ebSAdit Ranadive * - Redistributions in binary form must reproduce the above 2829c8d9ebSAdit Ranadive * copyright notice, this list of conditions and the following 2929c8d9ebSAdit Ranadive * disclaimer in the documentation and/or other materials 3029c8d9ebSAdit Ranadive * provided with the distribution. 3129c8d9ebSAdit Ranadive * 3229c8d9ebSAdit Ranadive * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 3329c8d9ebSAdit Ranadive * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 3429c8d9ebSAdit Ranadive * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 3529c8d9ebSAdit Ranadive * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 3629c8d9ebSAdit Ranadive * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, 3729c8d9ebSAdit Ranadive * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 3829c8d9ebSAdit Ranadive * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 3929c8d9ebSAdit Ranadive * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 4029c8d9ebSAdit Ranadive * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, 4129c8d9ebSAdit Ranadive * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 4229c8d9ebSAdit Ranadive * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED 4329c8d9ebSAdit Ranadive * OF THE POSSIBILITY OF SUCH DAMAGE. 
 */

#include <asm/page.h>
#include <linux/io.h>
#include <linux/wait.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_user_verbs.h>

#include "pvrdma.h"

/*
 * Forward declaration: needed by pvrdma_create_qp() to tear down a QP
 * whose backend object was already created when copying the response
 * back to userspace fails.
 */
static void __pvrdma_destroy_qp(struct pvrdma_dev *dev,
				struct pvrdma_qp *qp);

/* Return the driver-private send and receive CQs attached to @qp. */
static inline void get_cqs(struct pvrdma_qp *qp, struct pvrdma_cq **send_cq,
			   struct pvrdma_cq **recv_cq)
{
	*send_cq = to_vcq(qp->ibqp.send_cq);
	*recv_cq = to_vcq(qp->ibqp.recv_cq);
}

/*
 * Lock both CQs of a QP, IRQ-safe.  A global ordering by cq_handle is
 * used so that two callers locking the same pair of CQs can never
 * deadlock, and spin_lock_irqsave_nested() with SINGLE_DEPTH_NESTING
 * tells lockdep the second (same-class) lock acquisition is intentional.
 * The saved IRQ flags are returned via *scq_flags / *rcq_flags for the
 * matching pvrdma_unlock_cqs() call.
 */
static void pvrdma_lock_cqs(struct pvrdma_cq *scq, struct pvrdma_cq *rcq,
			    unsigned long *scq_flags,
			    unsigned long *rcq_flags)
	__acquires(scq->cq_lock) __acquires(rcq->cq_lock)
{
	if (scq == rcq) {
		/* Send and recv share one CQ: take the lock once and tell
		 * sparse the second "acquisition" is fictional. */
		spin_lock_irqsave(&scq->cq_lock, *scq_flags);
		__acquire(rcq->cq_lock);
	} else if (scq->cq_handle < rcq->cq_handle) {
		spin_lock_irqsave(&scq->cq_lock, *scq_flags);
		spin_lock_irqsave_nested(&rcq->cq_lock, *rcq_flags,
					 SINGLE_DEPTH_NESTING);
	} else {
		spin_lock_irqsave(&rcq->cq_lock, *rcq_flags);
		spin_lock_irqsave_nested(&scq->cq_lock, *scq_flags,
					 SINGLE_DEPTH_NESTING);
	}
}

/*
 * Unlock the CQ pair taken by pvrdma_lock_cqs(), releasing in the
 * reverse of the acquisition order and restoring the IRQ flags saved
 * at lock time.
 */
static void pvrdma_unlock_cqs(struct pvrdma_cq *scq, struct pvrdma_cq *rcq,
			      unsigned long *scq_flags,
			      unsigned long *rcq_flags)
	__releases(scq->cq_lock) __releases(rcq->cq_lock)
{
	if (scq == rcq) {
		/* Single shared lock; balance the fictional __acquire(). */
		__release(rcq->cq_lock);
		spin_unlock_irqrestore(&scq->cq_lock, *scq_flags);
	} else if (scq->cq_handle < rcq->cq_handle) {
		spin_unlock_irqrestore(&rcq->cq_lock, *rcq_flags);
		spin_unlock_irqrestore(&scq->cq_lock, *scq_flags);
	} else {
		spin_unlock_irqrestore(&scq->cq_lock, *scq_flags);
		spin_unlock_irqrestore(&rcq->cq_lock, *rcq_flags);
	}
}

/*
 * Reset a QP to its initial state: flush any of its CQEs from both CQs
 * (under both CQ locks, so a concurrent poller cannot observe stale
 * entries) and rewind the kernel ring-state indices to zero.
 */
static void pvrdma_reset_qp(struct pvrdma_qp *qp)
{
	struct pvrdma_cq *scq, *rcq;
	unsigned long scq_flags, rcq_flags;

	/* Clean up cqes */
	get_cqs(qp, &scq, &rcq);
	pvrdma_lock_cqs(scq, rcq, &scq_flags, &rcq_flags);

	_pvrdma_flush_cqe(qp, scq);
	if (scq != rcq)
		_pvrdma_flush_cqe(qp, rcq);

	pvrdma_unlock_cqs(scq, rcq, &scq_flags, &rcq_flags);

	/*
	 * Reset queuepair. The checks are because usermode queuepairs won't
	 * have kernel ringstates.
	 */
	if (qp->rq.ring) {
		atomic_set(&qp->rq.ring->cons_head, 0);
		atomic_set(&qp->rq.ring->prod_tail, 0);
	}
	if (qp->sq.ring) {
		atomic_set(&qp->sq.ring->cons_head, 0);
		atomic_set(&qp->sq.ring->prod_tail, 0);
	}
}

/*
 * Validate the requested receive-queue capacities against device caps,
 * round WQE count / SGE count up to powers of two (minimum 1), write
 * the actual values back into @req_cap for the caller, and compute the
 * per-WQE size and number of backing pages needed for the RQ.
 *
 * Returns 0 on success or -EINVAL if the request exceeds device limits.
 */
static int pvrdma_set_rq_size(struct pvrdma_dev *dev,
			      struct ib_qp_cap *req_cap,
			      struct pvrdma_qp *qp)
{
	if (req_cap->max_recv_wr > dev->dsr->caps.max_qp_wr ||
	    req_cap->max_recv_sge > dev->dsr->caps.max_sge) {
		dev_warn(&dev->pdev->dev, "recv queue size invalid\n");
		return -EINVAL;
	}

	qp->rq.wqe_cnt = roundup_pow_of_two(max(1U, req_cap->max_recv_wr));
	qp->rq.max_sg = roundup_pow_of_two(max(1U, req_cap->max_recv_sge));

	/* Write back */
	req_cap->max_recv_wr = qp->rq.wqe_cnt;
	req_cap->max_recv_sge = qp->rq.max_sg;

	/* WQE = fixed header followed by max_sg scatter/gather entries,
	 * padded up to a power of two. */
	qp->rq.wqe_size = roundup_pow_of_two(sizeof(struct pvrdma_rq_wqe_hdr) +
					     sizeof(struct pvrdma_sge) *
					     qp->rq.max_sg);
	qp->npages_recv = (qp->rq.wqe_cnt * qp->rq.wqe_size + PAGE_SIZE - 1) /
			  PAGE_SIZE;

	return 0;
}

/*
 * Send-queue counterpart of pvrdma_set_rq_size(): validate against
 * device caps, round sizes up, write actual capacities back, and
 * compute npages_send — which additionally reserves
 * PVRDMA_QP_NUM_HEADER_PAGES up front for the header.
 *
 * Returns 0 on success or -EINVAL if the request exceeds device limits.
 */
static int pvrdma_set_sq_size(struct pvrdma_dev *dev, struct ib_qp_cap *req_cap,
			      struct pvrdma_qp *qp)
{
	if (req_cap->max_send_wr > dev->dsr->caps.max_qp_wr ||
	    req_cap->max_send_sge > dev->dsr->caps.max_sge) {
		dev_warn(&dev->pdev->dev, "send queue size invalid\n");
		return -EINVAL;
	}

	qp->sq.wqe_cnt = roundup_pow_of_two(max(1U, req_cap->max_send_wr));
	qp->sq.max_sg = roundup_pow_of_two(max(1U, req_cap->max_send_sge));

	/* Write back */
	req_cap->max_send_wr = qp->sq.wqe_cnt;
	req_cap->max_send_sge = qp->sq.max_sg;

	qp->sq.wqe_size = roundup_pow_of_two(sizeof(struct pvrdma_sq_wqe_hdr) +
					     sizeof(struct pvrdma_sge) *
					     qp->sq.max_sg);
	/* Note: one extra page for the header. */
	qp->npages_send = PVRDMA_QP_NUM_HEADER_PAGES +
			  (qp->sq.wqe_cnt * qp->sq.wqe_size + PAGE_SIZE - 1) /
			  PAGE_SIZE;

	return 0;
}

/**
 * pvrdma_create_qp - create queue pair
 * @pd: protection domain
 * @init_attr: queue pair attributes
 * @udata: user data
 *
 * @return: the ib_qp pointer on success, otherwise returns an errno.
 */
struct ib_qp *pvrdma_create_qp(struct ib_pd *pd,
			       struct ib_qp_init_attr *init_attr,
			       struct ib_udata *udata)
{
	struct pvrdma_qp *qp = NULL;
	struct pvrdma_dev *dev = to_vdev(pd->device);
	union pvrdma_cmd_req req;
	union pvrdma_cmd_resp rsp;
	struct pvrdma_cmd_create_qp *cmd = &req.create_qp;
	struct pvrdma_cmd_create_qp_resp *resp = &rsp.create_qp_resp;
	struct pvrdma_cmd_create_qp_resp_v2 *resp_v2 = &rsp.create_qp_resp_v2;
	struct pvrdma_create_qp ucmd;
	struct pvrdma_create_qp_resp qp_resp = {};
	unsigned long flags;
	int ret;
	bool is_srq = !!init_attr->srq;

	/* No create flags are supported by this driver. */
	if (init_attr->create_flags) {
		dev_warn(&dev->pdev->dev,
			 "invalid create queuepair flags %#x\n",
			 init_attr->create_flags);
		return ERR_PTR(-EOPNOTSUPP);
	}

	/* Only RC, UD and GSI QP types are supported. */
	if (init_attr->qp_type != IB_QPT_RC &&
	    init_attr->qp_type != IB_QPT_UD &&
	    init_attr->qp_type != IB_QPT_GSI) {
		dev_warn(&dev->pdev->dev, "queuepair type %d not supported\n",
			 init_attr->qp_type);
		return ERR_PTR(-EOPNOTSUPP);
	}

	if (is_srq && !dev->dsr->caps.max_srq) {
		dev_warn(&dev->pdev->dev,
			 "SRQs not supported by device\n");
		return ERR_PTR(-EINVAL);
	}

	/* Reserve a QP slot; fails once the device's max_qp is reached. */
	if (!atomic_add_unless(&dev->num_qps, 1, dev->dsr->caps.max_qp))
		return ERR_PTR(-ENOMEM);

	switch (init_attr->qp_type) {
	case IB_QPT_GSI:
		if (init_attr->port_num == 0 ||
		    init_attr->port_num > pd->device->phys_port_cnt) {
			dev_warn(&dev->pdev->dev, "invalid queuepair attrs\n");
			ret = -EINVAL;
			goto err_qp;
		}
		fallthrough;
	case IB_QPT_RC:
	case IB_QPT_UD:
		qp = kzalloc(sizeof(*qp), GFP_KERNEL);
		if (!qp) {
			ret = -ENOMEM;
			goto err_qp;
		}

		spin_lock_init(&qp->sq.lock);
		spin_lock_init(&qp->rq.lock);
		mutex_init(&qp->mutex);
		refcount_set(&qp->refcnt, 1);
		init_completion(&qp->free);

		qp->state = IB_QPS_RESET;
		qp->is_kernel = !udata;

		if (!qp->is_kernel) {
			dev_dbg(&dev->pdev->dev,
				"create queuepair from user space\n");

			if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
				ret = -EFAULT;
				goto err_qp;
			}

			/* Userspace supports qpn and qp handles? */
			if (dev->dsr_version >= PVRDMA_QPHANDLE_VERSION &&
			    udata->outlen < sizeof(qp_resp)) {
				dev_warn(&dev->pdev->dev,
					 "create queuepair not supported\n");
				ret = -EOPNOTSUPP;
				goto err_qp;
			}

			if (!is_srq) {
				/* set qp->sq.wqe_cnt, shift, buf_size.. */
				qp->rumem =
					ib_umem_get(pd->device, ucmd.rbuf_addr,
						    ucmd.rbuf_size, 0);
				if (IS_ERR(qp->rumem)) {
					ret = PTR_ERR(qp->rumem);
					goto err_qp;
				}
				qp->srq = NULL;
			} else {
				/* Receives go through the SRQ: no RQ umem. */
				qp->rumem = NULL;
				qp->srq = to_vsrq(init_attr->srq);
			}

			qp->sumem = ib_umem_get(pd->device, ucmd.sbuf_addr,
						ucmd.sbuf_size, 0);
			if (IS_ERR(qp->sumem)) {
				if (!is_srq)
					ib_umem_release(qp->rumem);
				ret = PTR_ERR(qp->sumem);
				goto err_qp;
			}

			qp->npages_send =
				ib_umem_num_dma_blocks(qp->sumem, PAGE_SIZE);
			if (!is_srq)
				qp->npages_recv = ib_umem_num_dma_blocks(
					qp->rumem, PAGE_SIZE);
			else
				qp->npages_recv = 0;
			qp->npages = qp->npages_send + qp->npages_recv;
		} else {
			/* Kernel QP: size the queues ourselves. */
			ret = pvrdma_set_sq_size(to_vdev(pd->device),
						 &init_attr->cap, qp);
			if (ret)
				goto err_qp;

			ret = pvrdma_set_rq_size(to_vdev(pd->device),
						 &init_attr->cap, qp);
			if (ret)
				goto err_qp;

			qp->npages = qp->npages_send + qp->npages_recv;

			/* Skip header page. */
			qp->sq.offset = PVRDMA_QP_NUM_HEADER_PAGES * PAGE_SIZE;

			/* Recv queue pages are after send pages. */
			qp->rq.offset = qp->npages_send * PAGE_SIZE;
		}

		/* NOTE(review): the < 0 arm is only meaningful if qp->npages
		 * is a signed type — verify its declaration in pvrdma.h. */
		if (qp->npages < 0 || qp->npages > PVRDMA_PAGE_DIR_MAX_PAGES) {
			dev_warn(&dev->pdev->dev,
				 "overflow pages in queuepair\n");
			ret = -EINVAL;
			goto err_umem;
		}

		ret = pvrdma_page_dir_init(dev, &qp->pdir, qp->npages,
					   qp->is_kernel);
		if (ret) {
			dev_warn(&dev->pdev->dev,
				 "could not allocate page directory\n");
			goto err_umem;
		}

		if (!qp->is_kernel) {
			/* Map user buffers into the page directory:
			 * send pages first, then (non-SRQ) recv pages. */
			pvrdma_page_dir_insert_umem(&qp->pdir, qp->sumem, 0);
			if (!is_srq)
				pvrdma_page_dir_insert_umem(&qp->pdir,
							    qp->rumem,
							    qp->npages_send);
		} else {
			/* Ring state is always the first page. */
			qp->sq.ring = qp->pdir.pages[0];
			qp->rq.ring = is_srq ? NULL : &qp->sq.ring[1];
		}
		break;
	default:
		ret = -EINVAL;
		goto err_qp;
	}

	/* Not supported */
	init_attr->cap.max_inline_data = 0;

	/* Build the CREATE_QP command for the device. */
	memset(cmd, 0, sizeof(*cmd));
	cmd->hdr.cmd = PVRDMA_CMD_CREATE_QP;
	cmd->pd_handle = to_vpd(pd)->pd_handle;
	cmd->send_cq_handle = to_vcq(init_attr->send_cq)->cq_handle;
	cmd->recv_cq_handle = to_vcq(init_attr->recv_cq)->cq_handle;
	if (is_srq)
		cmd->srq_handle = to_vsrq(init_attr->srq)->srq_handle;
	else
		cmd->srq_handle = 0;
	cmd->max_send_wr = init_attr->cap.max_send_wr;
	cmd->max_recv_wr = init_attr->cap.max_recv_wr;
	cmd->max_send_sge = init_attr->cap.max_send_sge;
	cmd->max_recv_sge = init_attr->cap.max_recv_sge;
	cmd->max_inline_data = init_attr->cap.max_inline_data;
	cmd->sq_sig_all = (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) ? 1 : 0;
	cmd->qp_type = ib_qp_type_to_pvrdma(init_attr->qp_type);
	cmd->is_srq = is_srq;
	cmd->lkey = 0;
	cmd->access_flags = IB_ACCESS_LOCAL_WRITE;
	cmd->total_chunks = qp->npages;
	cmd->send_chunks = qp->npages_send - PVRDMA_QP_NUM_HEADER_PAGES;
	cmd->pdir_dma = qp->pdir.dir_dma;

	dev_dbg(&dev->pdev->dev, "create queuepair with %d, %d, %d, %d\n",
		cmd->max_send_wr, cmd->max_recv_wr, cmd->max_send_sge,
		cmd->max_recv_sge);

	ret = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_CREATE_QP_RESP);
	if (ret < 0) {
		dev_warn(&dev->pdev->dev,
			 "could not create queuepair, error: %d\n", ret);
		goto err_pdir;
	}

	/* max_send_wr/_recv_wr/_send_sge/_recv_sge/_inline_data */
	qp->port = init_attr->port_num;

	/* v2 responses carry a qp_handle distinct from the qpn; older
	 * devices use the qpn as the handle. */
	if (dev->dsr_version >= PVRDMA_QPHANDLE_VERSION) {
		qp->ibqp.qp_num = resp_v2->qpn;
		qp->qp_handle = resp_v2->qp_handle;
	} else {
		qp->ibqp.qp_num = resp->qpn;
		qp->qp_handle = resp->qpn;
	}

	/* Publish the QP in the handle lookup table. */
	spin_lock_irqsave(&dev->qp_tbl_lock, flags);
	dev->qp_tbl[qp->qp_handle % dev->dsr->caps.max_qp] = qp;
	spin_unlock_irqrestore(&dev->qp_tbl_lock, flags);

	if (udata) {
		qp_resp.qpn = qp->ibqp.qp_num;
		qp_resp.qp_handle = qp->qp_handle;

		if (ib_copy_to_udata(udata, &qp_resp,
				     min(udata->outlen, sizeof(qp_resp)))) {
			dev_warn(&dev->pdev->dev,
				 "failed to copy back udata\n");
			/* QP already exists on the device: full teardown. */
			__pvrdma_destroy_qp(dev, qp);
			return ERR_PTR(-EINVAL);
		}
	}

	return &qp->ibqp;

err_pdir:
	pvrdma_page_dir_cleanup(dev, &qp->pdir);
err_umem:
	/* ib_umem_release() tolerates the pointers still being NULL. */
	ib_umem_release(qp->rumem);
	ib_umem_release(qp->sumem);
err_qp:
	kfree(qp);
	atomic_dec(&dev->num_qps);

	return ERR_PTR(ret);
}

/*
 * Final teardown of a QP's host-side state: unpublish it from the
 * handle table, wait for all outstanding references to drop (the last
 * reference holder signals qp->free), then release umems, the page
 * directory, the QP memory, and the device QP count.
 *
 * NOTE(review): the table clear indexes by qp_handle without the
 * "% dev->dsr->caps.max_qp" used when publishing in pvrdma_create_qp();
 * confirm qp_handle is always < max_qp.
 */
static void _pvrdma_free_qp(struct pvrdma_qp *qp)
{
	unsigned long flags;
	struct pvrdma_dev *dev = to_vdev(qp->ibqp.device);

	spin_lock_irqsave(&dev->qp_tbl_lock, flags);
	dev->qp_tbl[qp->qp_handle] = NULL;
	spin_unlock_irqrestore(&dev->qp_tbl_lock, flags);

	/* Drop our initial reference; whoever drops the last one
	 * completes qp->free, and we block until that happens. */
	if (refcount_dec_and_test(&qp->refcnt))
		complete(&qp->free);
	wait_for_completion(&qp->free);

	ib_umem_release(qp->rumem);
	ib_umem_release(qp->sumem);

	pvrdma_page_dir_cleanup(dev, &qp->pdir);

	kfree(qp);

	atomic_dec(&dev->num_qps);
}

/*
 * Free a QP after flushing its CQEs from both CQs.  The flush is done
 * under both CQ locks so a concurrent poller sees a consistent view.
 */
static void pvrdma_free_qp(struct pvrdma_qp *qp)
{
	struct pvrdma_cq *scq;
	struct pvrdma_cq *rcq;
	unsigned long scq_flags, rcq_flags;

	/* In case cq is polling */
	get_cqs(qp, &scq, &rcq);
	pvrdma_lock_cqs(scq, rcq, &scq_flags, &rcq_flags);

	_pvrdma_flush_cqe(qp, scq);
	if (scq != rcq)
		_pvrdma_flush_cqe(qp, rcq);

	/*
	 * We're now unlocking the CQs before clearing out the qp handle this
	 * should still be safe. We have destroyed the backend QP and flushed
	 * the CQEs so there should be no other completions for this QP.
	 */
	pvrdma_unlock_cqs(scq, rcq, &scq_flags, &rcq_flags);

	_pvrdma_free_qp(qp);
}

/*
 * Post a DESTROY_QP command for @qp_handle to the device.  Failure is
 * only logged: host-side cleanup proceeds regardless.
 */
static inline void _pvrdma_destroy_qp_work(struct pvrdma_dev *dev,
					   u32 qp_handle)
{
	union pvrdma_cmd_req req;
	struct pvrdma_cmd_destroy_qp *cmd = &req.destroy_qp;
	int ret;

	memset(cmd, 0, sizeof(*cmd));
	cmd->hdr.cmd = PVRDMA_CMD_DESTROY_QP;
	cmd->qp_handle = qp_handle;

	ret = pvrdma_cmd_post(dev, &req, NULL, 0);
	if (ret < 0)
		dev_warn(&dev->pdev->dev,
			 "destroy queuepair failed, error: %d\n", ret);
}

/**
 * pvrdma_destroy_qp - destroy a queue pair
 * @qp: the queue pair to destroy
 * @udata: user data or null for kernel object
 *
 * Destroys the backend QP on the device first, then releases all
 * host-side state (CQE flush, refcount drain, umems, page directory).
 *
 * @return: always 0.
 */
int pvrdma_destroy_qp(struct ib_qp *qp, struct ib_udata *udata)
{
	struct pvrdma_qp *vqp = to_vqp(qp);

	_pvrdma_destroy_qp_work(to_vdev(qp->device), vqp->qp_handle);
	pvrdma_free_qp(vqp);

	return 0;
}

/*
 * Internal destroy path used by pvrdma_create_qp() when unwinding a
 * fully-created QP (e.g. after ib_copy_to_udata() fails).  Unlike
 * pvrdma_destroy_qp(), this skips the CQE flush done by
 * pvrdma_free_qp() and frees host state directly.
 */
static void __pvrdma_destroy_qp(struct pvrdma_dev *dev,
				struct pvrdma_qp *qp)
{
	_pvrdma_destroy_qp_work(dev, qp->qp_handle);
	_pvrdma_free_qp(qp);
}

/**
 * pvrdma_modify_qp - modify queue pair attributes
 * @ibqp: the queue pair
 * @attr: the new queue pair's attributes
 * @attr_mask: attributes mask
 * @udata: user data
 *
 * @returns 0 on success, otherwise returns an errno.
53529c8d9ebSAdit Ranadive */ 53629c8d9ebSAdit Ranadive int pvrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, 53729c8d9ebSAdit Ranadive int attr_mask, struct ib_udata *udata) 53829c8d9ebSAdit Ranadive { 53929c8d9ebSAdit Ranadive struct pvrdma_dev *dev = to_vdev(ibqp->device); 54029c8d9ebSAdit Ranadive struct pvrdma_qp *qp = to_vqp(ibqp); 54129c8d9ebSAdit Ranadive union pvrdma_cmd_req req; 54229c8d9ebSAdit Ranadive union pvrdma_cmd_resp rsp; 54329c8d9ebSAdit Ranadive struct pvrdma_cmd_modify_qp *cmd = &req.modify_qp; 544fbf1795cSLeon Romanovsky enum ib_qp_state cur_state, next_state; 54529c8d9ebSAdit Ranadive int ret; 54629c8d9ebSAdit Ranadive 54726e990baSJason Gunthorpe if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS) 54826e990baSJason Gunthorpe return -EOPNOTSUPP; 54926e990baSJason Gunthorpe 55029c8d9ebSAdit Ranadive /* Sanity checking. Should need lock here */ 55129c8d9ebSAdit Ranadive mutex_lock(&qp->mutex); 55229c8d9ebSAdit Ranadive cur_state = (attr_mask & IB_QP_CUR_STATE) ? attr->cur_qp_state : 55329c8d9ebSAdit Ranadive qp->state; 55429c8d9ebSAdit Ranadive next_state = (attr_mask & IB_QP_STATE) ? 
attr->qp_state : cur_state; 55529c8d9ebSAdit Ranadive 55629c8d9ebSAdit Ranadive if (!ib_modify_qp_is_ok(cur_state, next_state, ibqp->qp_type, 557d31131bbSKamal Heib attr_mask)) { 55829c8d9ebSAdit Ranadive ret = -EINVAL; 55929c8d9ebSAdit Ranadive goto out; 56029c8d9ebSAdit Ranadive } 56129c8d9ebSAdit Ranadive 56229c8d9ebSAdit Ranadive if (attr_mask & IB_QP_PORT) { 56329c8d9ebSAdit Ranadive if (attr->port_num == 0 || 56429c8d9ebSAdit Ranadive attr->port_num > ibqp->device->phys_port_cnt) { 56529c8d9ebSAdit Ranadive ret = -EINVAL; 56629c8d9ebSAdit Ranadive goto out; 56729c8d9ebSAdit Ranadive } 56829c8d9ebSAdit Ranadive } 56929c8d9ebSAdit Ranadive 57029c8d9ebSAdit Ranadive if (attr_mask & IB_QP_MIN_RNR_TIMER) { 57129c8d9ebSAdit Ranadive if (attr->min_rnr_timer > 31) { 57229c8d9ebSAdit Ranadive ret = -EINVAL; 57329c8d9ebSAdit Ranadive goto out; 57429c8d9ebSAdit Ranadive } 57529c8d9ebSAdit Ranadive } 57629c8d9ebSAdit Ranadive 57729c8d9ebSAdit Ranadive if (attr_mask & IB_QP_PKEY_INDEX) { 57829c8d9ebSAdit Ranadive if (attr->pkey_index >= dev->dsr->caps.max_pkeys) { 57929c8d9ebSAdit Ranadive ret = -EINVAL; 58029c8d9ebSAdit Ranadive goto out; 58129c8d9ebSAdit Ranadive } 58229c8d9ebSAdit Ranadive } 58329c8d9ebSAdit Ranadive 58429c8d9ebSAdit Ranadive if (attr_mask & IB_QP_QKEY) 58529c8d9ebSAdit Ranadive qp->qkey = attr->qkey; 58629c8d9ebSAdit Ranadive 58729c8d9ebSAdit Ranadive if (cur_state == next_state && cur_state == IB_QPS_RESET) { 58829c8d9ebSAdit Ranadive ret = 0; 58929c8d9ebSAdit Ranadive goto out; 59029c8d9ebSAdit Ranadive } 59129c8d9ebSAdit Ranadive 59229c8d9ebSAdit Ranadive qp->state = next_state; 59329c8d9ebSAdit Ranadive memset(cmd, 0, sizeof(*cmd)); 59429c8d9ebSAdit Ranadive cmd->hdr.cmd = PVRDMA_CMD_MODIFY_QP; 59529c8d9ebSAdit Ranadive cmd->qp_handle = qp->qp_handle; 59629c8d9ebSAdit Ranadive cmd->attr_mask = ib_qp_attr_mask_to_pvrdma(attr_mask); 59729c8d9ebSAdit Ranadive cmd->attrs.qp_state = ib_qp_state_to_pvrdma(attr->qp_state); 59829c8d9ebSAdit Ranadive 
cmd->attrs.cur_qp_state = 59929c8d9ebSAdit Ranadive ib_qp_state_to_pvrdma(attr->cur_qp_state); 60029c8d9ebSAdit Ranadive cmd->attrs.path_mtu = ib_mtu_to_pvrdma(attr->path_mtu); 60129c8d9ebSAdit Ranadive cmd->attrs.path_mig_state = 60229c8d9ebSAdit Ranadive ib_mig_state_to_pvrdma(attr->path_mig_state); 60329c8d9ebSAdit Ranadive cmd->attrs.qkey = attr->qkey; 60429c8d9ebSAdit Ranadive cmd->attrs.rq_psn = attr->rq_psn; 60529c8d9ebSAdit Ranadive cmd->attrs.sq_psn = attr->sq_psn; 60629c8d9ebSAdit Ranadive cmd->attrs.dest_qp_num = attr->dest_qp_num; 60729c8d9ebSAdit Ranadive cmd->attrs.qp_access_flags = 60829c8d9ebSAdit Ranadive ib_access_flags_to_pvrdma(attr->qp_access_flags); 60929c8d9ebSAdit Ranadive cmd->attrs.pkey_index = attr->pkey_index; 61029c8d9ebSAdit Ranadive cmd->attrs.alt_pkey_index = attr->alt_pkey_index; 61129c8d9ebSAdit Ranadive cmd->attrs.en_sqd_async_notify = attr->en_sqd_async_notify; 61229c8d9ebSAdit Ranadive cmd->attrs.sq_draining = attr->sq_draining; 61329c8d9ebSAdit Ranadive cmd->attrs.max_rd_atomic = attr->max_rd_atomic; 61429c8d9ebSAdit Ranadive cmd->attrs.max_dest_rd_atomic = attr->max_dest_rd_atomic; 61529c8d9ebSAdit Ranadive cmd->attrs.min_rnr_timer = attr->min_rnr_timer; 61629c8d9ebSAdit Ranadive cmd->attrs.port_num = attr->port_num; 61729c8d9ebSAdit Ranadive cmd->attrs.timeout = attr->timeout; 61829c8d9ebSAdit Ranadive cmd->attrs.retry_cnt = attr->retry_cnt; 61929c8d9ebSAdit Ranadive cmd->attrs.rnr_retry = attr->rnr_retry; 62029c8d9ebSAdit Ranadive cmd->attrs.alt_port_num = attr->alt_port_num; 62129c8d9ebSAdit Ranadive cmd->attrs.alt_timeout = attr->alt_timeout; 62229c8d9ebSAdit Ranadive ib_qp_cap_to_pvrdma(&cmd->attrs.cap, &attr->cap); 623f988653aSDasaratharaman Chandramouli rdma_ah_attr_to_pvrdma(&cmd->attrs.ah_attr, &attr->ah_attr); 624f988653aSDasaratharaman Chandramouli rdma_ah_attr_to_pvrdma(&cmd->attrs.alt_ah_attr, &attr->alt_ah_attr); 62529c8d9ebSAdit Ranadive 62629c8d9ebSAdit Ranadive ret = pvrdma_cmd_post(dev, &req, &rsp, 
				      PVRDMA_CMD_MODIFY_QP_RESP);
	if (ret < 0) {
		dev_warn(&dev->pdev->dev,
			 "could not modify queuepair, error: %d\n", ret);
	} else if (rsp.hdr.err > 0) {
		dev_warn(&dev->pdev->dev,
			 "cannot modify queuepair, error: %d\n", rsp.hdr.err);
		ret = -EINVAL;
	}

	/*
	 * Once the device has accepted a transition to RESET, drop the
	 * software ring/CQ state for this QP as well.
	 */
	if (ret == 0 && next_state == IB_QPS_RESET)
		pvrdma_reset_qp(qp);

out:
	mutex_unlock(&qp->mutex);

	return ret;
}

/* Return a pointer to the send-queue WQE slot @n in the QP's page directory. */
static inline void *get_sq_wqe(struct pvrdma_qp *qp, unsigned int n)
{
	return pvrdma_page_dir_get_ptr(&qp->pdir,
				       qp->sq.offset + n * qp->sq.wqe_size);
}

/* Return a pointer to the receive-queue WQE slot @n in the QP's page directory. */
static inline void *get_rq_wqe(struct pvrdma_qp *qp, unsigned int n)
{
	return pvrdma_page_dir_get_ptr(&qp->pdir,
				       qp->rq.offset + n * qp->rq.wqe_size);
}

/*
 * Fill the fast-register fields of @wqe_hdr from the MR carried by @wr and
 * hand the MR's page list to the device's page directory.
 *
 * Returns 0 on success, otherwise the negative errno returned by
 * pvrdma_page_dir_insert_page_list().
 */
static int set_reg_seg(struct pvrdma_sq_wqe_hdr *wqe_hdr,
		       const struct ib_reg_wr *wr)
{
	struct pvrdma_user_mr *mr = to_vmr(wr->mr);

	wqe_hdr->wr.fast_reg.iova_start = mr->ibmr.iova;
	wqe_hdr->wr.fast_reg.pl_pdir_dma = mr->pdir.dir_dma;
	wqe_hdr->wr.fast_reg.page_shift = mr->page_shift;
	wqe_hdr->wr.fast_reg.page_list_len = mr->npages;
	wqe_hdr->wr.fast_reg.length = mr->ibmr.length;
	wqe_hdr->wr.fast_reg.access_flags = wr->access;
	wqe_hdr->wr.fast_reg.rkey = wr->key;

	return pvrdma_page_dir_insert_page_list(&mr->pdir, mr->pages,
						mr->npages);
}

/**
 * pvrdma_post_send - post send work request entries on a QP
 * @ibqp: the QP
 * @wr: work request list to post
 * @bad_wr: the first bad WR returned
 *
 * @return: 0 on success, otherwise errno returned.
 */
int pvrdma_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
		     const struct ib_send_wr **bad_wr)
{
	struct pvrdma_qp *qp = to_vqp(ibqp);
	struct pvrdma_dev *dev = to_vdev(ibqp->device);
	unsigned long flags;
	struct pvrdma_sq_wqe_hdr *wqe_hdr;
	struct pvrdma_sge *sge;
	int i, ret;

	/*
	 * In states lower than RTS, we can fail immediately. In other states,
	 * just post and let the device figure it out.
	 */
	if (qp->state < IB_QPS_RTS) {
		*bad_wr = wr;
		return -EINVAL;
	}

	spin_lock_irqsave(&qp->sq.lock, flags);

	while (wr) {
		unsigned int tail = 0;

		/* On success, @tail is the next free producer slot index. */
		if (unlikely(!pvrdma_idx_ring_has_space(
				qp->sq.ring, qp->sq.wqe_cnt, &tail))) {
			dev_warn_ratelimited(&dev->pdev->dev,
					     "send queue is full\n");
			*bad_wr = wr;
			ret = -ENOMEM;
			goto out;
		}

		if (unlikely(wr->num_sge > qp->sq.max_sg || wr->num_sge < 0)) {
			dev_warn_ratelimited(&dev->pdev->dev,
					     "send SGE overflow\n");
			*bad_wr = wr;
			ret = -EINVAL;
			goto out;
		}

		if (unlikely(wr->opcode < 0)) {
			dev_warn_ratelimited(&dev->pdev->dev,
					     "invalid send opcode\n");
			*bad_wr = wr;
			ret = -EINVAL;
			goto out;
		}

		/*
		 * Only support UD, RC.
		 * Need to check opcode table for thorough checking.
		 * opcode		_UD	_UC	_RC
		 * _SEND		x	x	x
		 * _SEND_WITH_IMM	x	x	x
		 * _RDMA_WRITE			x	x
		 * _RDMA_WRITE_WITH_IMM		x	x
		 * _LOCAL_INV			x	x
		 * _SEND_WITH_INV		x	x
		 * _RDMA_READ				x
		 * _ATOMIC_CMP_AND_SWP			x
		 * _ATOMIC_FETCH_AND_ADD		x
		 * _MASK_ATOMIC_CMP_AND_SWP		x
		 * _MASK_ATOMIC_FETCH_AND_ADD		x
		 * _REG_MR				x
		 *
		 */
		if (qp->ibqp.qp_type != IB_QPT_UD &&
		    qp->ibqp.qp_type != IB_QPT_RC &&
			wr->opcode != IB_WR_SEND) {
			dev_warn_ratelimited(&dev->pdev->dev,
					     "unsupported queuepair type\n");
			*bad_wr = wr;
			ret = -EINVAL;
			goto out;
		} else if (qp->ibqp.qp_type == IB_QPT_UD ||
			   qp->ibqp.qp_type == IB_QPT_GSI) {
			if (wr->opcode != IB_WR_SEND &&
			    wr->opcode != IB_WR_SEND_WITH_IMM) {
				dev_warn_ratelimited(&dev->pdev->dev,
						     "invalid send opcode\n");
				*bad_wr = wr;
				ret = -EINVAL;
				goto out;
			}
		}

		wqe_hdr = (struct pvrdma_sq_wqe_hdr *)get_sq_wqe(qp, tail);
		memset(wqe_hdr, 0, sizeof(*wqe_hdr));
		wqe_hdr->wr_id = wr->wr_id;
		wqe_hdr->num_sge = wr->num_sge;
		wqe_hdr->opcode = ib_wr_opcode_to_pvrdma(wr->opcode);
		wqe_hdr->send_flags = ib_send_flags_to_pvrdma(wr->send_flags);
		if (wr->opcode == IB_WR_SEND_WITH_IMM ||
		    wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM)
			wqe_hdr->ex.imm_data = wr->ex.imm_data;

		/* Opcode had no pvrdma equivalent; reject the WR. */
		if (unlikely(wqe_hdr->opcode == PVRDMA_WR_ERROR)) {
			*bad_wr = wr;
			ret = -EINVAL;
			goto out;
		}

		switch (qp->ibqp.qp_type) {
		case IB_QPT_GSI:
		case IB_QPT_UD:
			if (unlikely(!ud_wr(wr)->ah)) {
				dev_warn_ratelimited(&dev->pdev->dev,
						     "invalid address handle\n");
				*bad_wr = wr;
				ret = -EINVAL;
				goto out;
			}

			/*
			 * Use qkey from qp context if high order bit set,
			 * otherwise from work request.
			 */
			wqe_hdr->wr.ud.remote_qpn = ud_wr(wr)->remote_qpn;
			wqe_hdr->wr.ud.remote_qkey =
				ud_wr(wr)->remote_qkey & 0x80000000 ?
				qp->qkey : ud_wr(wr)->remote_qkey;
			wqe_hdr->wr.ud.av = to_vah(ud_wr(wr)->ah)->av;

			break;
		case IB_QPT_RC:
			switch (wr->opcode) {
			case IB_WR_RDMA_READ:
			case IB_WR_RDMA_WRITE:
			case IB_WR_RDMA_WRITE_WITH_IMM:
				wqe_hdr->wr.rdma.remote_addr =
					rdma_wr(wr)->remote_addr;
				wqe_hdr->wr.rdma.rkey = rdma_wr(wr)->rkey;
				break;
			case IB_WR_LOCAL_INV:
			case IB_WR_SEND_WITH_INV:
				wqe_hdr->ex.invalidate_rkey =
					wr->ex.invalidate_rkey;
				break;
			case IB_WR_ATOMIC_CMP_AND_SWP:
			case IB_WR_ATOMIC_FETCH_AND_ADD:
				wqe_hdr->wr.atomic.remote_addr =
					atomic_wr(wr)->remote_addr;
				wqe_hdr->wr.atomic.rkey = atomic_wr(wr)->rkey;
				wqe_hdr->wr.atomic.compare_add =
					atomic_wr(wr)->compare_add;
				if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP)
					wqe_hdr->wr.atomic.swap =
						atomic_wr(wr)->swap;
				break;
			case IB_WR_REG_MR:
				/* ret here is set_reg_seg()'s negative errno. */
				ret = set_reg_seg(wqe_hdr, reg_wr(wr));
				if (ret < 0) {
					dev_warn_ratelimited(&dev->pdev->dev,
							     "Failed to set fast register work request\n");
					*bad_wr = wr;
					goto out;
				}
				break;
			default:
				break;
			}

			break;
		default:
			dev_warn_ratelimited(&dev->pdev->dev,
					     "invalid queuepair type\n");
			ret = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		/* SGEs are laid out immediately after the WQE header. */
		sge = (struct pvrdma_sge *)(wqe_hdr + 1);
		for (i = 0; i < wr->num_sge; i++) {
			/* Need to check wqe_size 0 or max size */
			sge->addr = wr->sg_list[i].addr;
			sge->length = wr->sg_list[i].length;
			sge->lkey = wr->sg_list[i].lkey;
			sge++;
		}

		/* Make sure wqe is written before index update */
		smp_wmb();

		/* Update shared sq ring */
		pvrdma_idx_ring_inc(&qp->sq.ring->prod_tail,
				    qp->sq.wqe_cnt);

		wr = wr->next;
	}

	ret = 0;

out:
	spin_unlock_irqrestore(&qp->sq.lock, flags);

	/* Ring the doorbell only if every WR was posted successfully. */
	if (!ret)
		pvrdma_write_uar_qp(dev, PVRDMA_UAR_QP_SEND | qp->qp_handle);

	return ret;
}

/**
 * pvrdma_post_recv - post receive work request entries on a QP
 * @ibqp: the QP
 * @wr: the work request list to post
 *
 * @bad_wr: the first bad WR returned
 *
 * @return: 0 on success, otherwise errno returned.
 */
int pvrdma_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
		     const struct ib_recv_wr **bad_wr)
{
	struct pvrdma_dev *dev = to_vdev(ibqp->device);
	unsigned long flags;
	struct pvrdma_qp *qp = to_vqp(ibqp);
	struct pvrdma_rq_wqe_hdr *wqe_hdr;
	struct pvrdma_sge *sge;
	int ret = 0;
	int i;

	/*
	 * In the RESET state, we can fail immediately. For other states,
	 * just post and let the device figure it out.
	 */
	if (qp->state == IB_QPS_RESET) {
		*bad_wr = wr;
		return -EINVAL;
	}

	/* QPs attached to an SRQ receive through the SRQ, not here. */
	if (qp->srq) {
		dev_warn(&dev->pdev->dev, "QP associated with SRQ\n");
		*bad_wr = wr;
		return -EINVAL;
	}

	spin_lock_irqsave(&qp->rq.lock, flags);

	while (wr) {
		unsigned int tail = 0;

		if (unlikely(wr->num_sge > qp->rq.max_sg ||
			     wr->num_sge < 0)) {
			ret = -EINVAL;
			*bad_wr = wr;
			dev_warn_ratelimited(&dev->pdev->dev,
					     "recv SGE overflow\n");
			goto out;
		}

		/* On success, @tail is the next free producer slot index. */
		if (unlikely(!pvrdma_idx_ring_has_space(
				qp->rq.ring, qp->rq.wqe_cnt, &tail))) {
			ret = -ENOMEM;
			*bad_wr = wr;
			dev_warn_ratelimited(&dev->pdev->dev,
					     "recv queue full\n");
			goto out;
		}

		wqe_hdr = (struct pvrdma_rq_wqe_hdr *)get_rq_wqe(qp, tail);
		wqe_hdr->wr_id = wr->wr_id;
		wqe_hdr->num_sge = wr->num_sge;
		wqe_hdr->total_len = 0;

		/* SGEs are laid out immediately after the WQE header. */
		sge = (struct pvrdma_sge *)(wqe_hdr + 1);
		for (i = 0; i < wr->num_sge; i++) {
			sge->addr = wr->sg_list[i].addr;
			sge->length = wr->sg_list[i].length;
			sge->lkey = wr->sg_list[i].lkey;
			sge++;
		}

		/* Make sure wqe is written before index update */
		smp_wmb();

		/* Update shared rq ring */
		pvrdma_idx_ring_inc(&qp->rq.ring->prod_tail,
				    qp->rq.wqe_cnt);

		wr = wr->next;
	}

	spin_unlock_irqrestore(&qp->rq.lock, flags);

	/* Notify the device that new receive WQEs are available. */
	pvrdma_write_uar_qp(dev, PVRDMA_UAR_QP_RECV | qp->qp_handle);

	return ret;

out:
	spin_unlock_irqrestore(&qp->rq.lock, flags);

	return ret;
}
97529c8d9ebSAdit Ranadive 97629c8d9ebSAdit Ranadive /** 97729c8d9ebSAdit Ranadive * pvrdma_query_qp - query a queue pair's attributes 97829c8d9ebSAdit Ranadive * @ibqp: the queue pair to query 97929c8d9ebSAdit Ranadive * @attr: the queue pair's attributes 98029c8d9ebSAdit Ranadive * @attr_mask: attributes mask 98129c8d9ebSAdit Ranadive * @init_attr: initial queue pair attributes 98229c8d9ebSAdit Ranadive * 98329c8d9ebSAdit Ranadive * @returns 0 on success, otherwise returns an errno. 98429c8d9ebSAdit Ranadive */ 98529c8d9ebSAdit Ranadive int pvrdma_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, 98629c8d9ebSAdit Ranadive int attr_mask, struct ib_qp_init_attr *init_attr) 98729c8d9ebSAdit Ranadive { 98829c8d9ebSAdit Ranadive struct pvrdma_dev *dev = to_vdev(ibqp->device); 98929c8d9ebSAdit Ranadive struct pvrdma_qp *qp = to_vqp(ibqp); 99029c8d9ebSAdit Ranadive union pvrdma_cmd_req req; 99129c8d9ebSAdit Ranadive union pvrdma_cmd_resp rsp; 99229c8d9ebSAdit Ranadive struct pvrdma_cmd_query_qp *cmd = &req.query_qp; 99329c8d9ebSAdit Ranadive struct pvrdma_cmd_query_qp_resp *resp = &rsp.query_qp_resp; 99429c8d9ebSAdit Ranadive int ret = 0; 99529c8d9ebSAdit Ranadive 99629c8d9ebSAdit Ranadive mutex_lock(&qp->mutex); 99729c8d9ebSAdit Ranadive 99829c8d9ebSAdit Ranadive if (qp->state == IB_QPS_RESET) { 99929c8d9ebSAdit Ranadive attr->qp_state = IB_QPS_RESET; 100029c8d9ebSAdit Ranadive goto out; 100129c8d9ebSAdit Ranadive } 100229c8d9ebSAdit Ranadive 100329c8d9ebSAdit Ranadive memset(cmd, 0, sizeof(*cmd)); 100429c8d9ebSAdit Ranadive cmd->hdr.cmd = PVRDMA_CMD_QUERY_QP; 100529c8d9ebSAdit Ranadive cmd->qp_handle = qp->qp_handle; 100629c8d9ebSAdit Ranadive cmd->attr_mask = ib_qp_attr_mask_to_pvrdma(attr_mask); 100729c8d9ebSAdit Ranadive 100829c8d9ebSAdit Ranadive ret = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_QUERY_QP_RESP); 100929c8d9ebSAdit Ranadive if (ret < 0) { 101029c8d9ebSAdit Ranadive dev_warn(&dev->pdev->dev, 101129c8d9ebSAdit Ranadive "could not query queuepair, 
error: %d\n", ret); 101229c8d9ebSAdit Ranadive goto out; 101329c8d9ebSAdit Ranadive } 101429c8d9ebSAdit Ranadive 101529c8d9ebSAdit Ranadive attr->qp_state = pvrdma_qp_state_to_ib(resp->attrs.qp_state); 101629c8d9ebSAdit Ranadive attr->cur_qp_state = 101729c8d9ebSAdit Ranadive pvrdma_qp_state_to_ib(resp->attrs.cur_qp_state); 101829c8d9ebSAdit Ranadive attr->path_mtu = pvrdma_mtu_to_ib(resp->attrs.path_mtu); 101929c8d9ebSAdit Ranadive attr->path_mig_state = 102029c8d9ebSAdit Ranadive pvrdma_mig_state_to_ib(resp->attrs.path_mig_state); 102129c8d9ebSAdit Ranadive attr->qkey = resp->attrs.qkey; 102229c8d9ebSAdit Ranadive attr->rq_psn = resp->attrs.rq_psn; 102329c8d9ebSAdit Ranadive attr->sq_psn = resp->attrs.sq_psn; 102429c8d9ebSAdit Ranadive attr->dest_qp_num = resp->attrs.dest_qp_num; 102529c8d9ebSAdit Ranadive attr->qp_access_flags = 102629c8d9ebSAdit Ranadive pvrdma_access_flags_to_ib(resp->attrs.qp_access_flags); 102729c8d9ebSAdit Ranadive attr->pkey_index = resp->attrs.pkey_index; 102829c8d9ebSAdit Ranadive attr->alt_pkey_index = resp->attrs.alt_pkey_index; 102929c8d9ebSAdit Ranadive attr->en_sqd_async_notify = resp->attrs.en_sqd_async_notify; 103029c8d9ebSAdit Ranadive attr->sq_draining = resp->attrs.sq_draining; 103129c8d9ebSAdit Ranadive attr->max_rd_atomic = resp->attrs.max_rd_atomic; 103229c8d9ebSAdit Ranadive attr->max_dest_rd_atomic = resp->attrs.max_dest_rd_atomic; 103329c8d9ebSAdit Ranadive attr->min_rnr_timer = resp->attrs.min_rnr_timer; 103429c8d9ebSAdit Ranadive attr->port_num = resp->attrs.port_num; 103529c8d9ebSAdit Ranadive attr->timeout = resp->attrs.timeout; 103629c8d9ebSAdit Ranadive attr->retry_cnt = resp->attrs.retry_cnt; 103729c8d9ebSAdit Ranadive attr->rnr_retry = resp->attrs.rnr_retry; 103829c8d9ebSAdit Ranadive attr->alt_port_num = resp->attrs.alt_port_num; 103929c8d9ebSAdit Ranadive attr->alt_timeout = resp->attrs.alt_timeout; 104029c8d9ebSAdit Ranadive pvrdma_qp_cap_to_ib(&attr->cap, &resp->attrs.cap); 1041f988653aSDasaratharaman 
Chandramouli pvrdma_ah_attr_to_rdma(&attr->ah_attr, &resp->attrs.ah_attr); 1042f988653aSDasaratharaman Chandramouli pvrdma_ah_attr_to_rdma(&attr->alt_ah_attr, &resp->attrs.alt_ah_attr); 104329c8d9ebSAdit Ranadive 104429c8d9ebSAdit Ranadive qp->state = attr->qp_state; 104529c8d9ebSAdit Ranadive 104629c8d9ebSAdit Ranadive ret = 0; 104729c8d9ebSAdit Ranadive 104829c8d9ebSAdit Ranadive out: 104929c8d9ebSAdit Ranadive attr->cur_qp_state = attr->qp_state; 105029c8d9ebSAdit Ranadive 105129c8d9ebSAdit Ranadive init_attr->event_handler = qp->ibqp.event_handler; 105229c8d9ebSAdit Ranadive init_attr->qp_context = qp->ibqp.qp_context; 105329c8d9ebSAdit Ranadive init_attr->send_cq = qp->ibqp.send_cq; 105429c8d9ebSAdit Ranadive init_attr->recv_cq = qp->ibqp.recv_cq; 105529c8d9ebSAdit Ranadive init_attr->srq = qp->ibqp.srq; 105629c8d9ebSAdit Ranadive init_attr->xrcd = NULL; 105729c8d9ebSAdit Ranadive init_attr->cap = attr->cap; 105829c8d9ebSAdit Ranadive init_attr->sq_sig_type = 0; 105929c8d9ebSAdit Ranadive init_attr->qp_type = qp->ibqp.qp_type; 106029c8d9ebSAdit Ranadive init_attr->create_flags = 0; 106129c8d9ebSAdit Ranadive init_attr->port_num = qp->port; 106229c8d9ebSAdit Ranadive 106329c8d9ebSAdit Ranadive mutex_unlock(&qp->mutex); 106429c8d9ebSAdit Ranadive return ret; 106529c8d9ebSAdit Ranadive } 1066