xref: /linux/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c (revision 4f6b838c378a52ea3ae0b15f12ca8a20849072fa)
171ee6730SDevesh Sharma /* This file is part of the Emulex RoCE Device Driver for
271ee6730SDevesh Sharma  * RoCE (RDMA over Converged Ethernet) adapters.
371ee6730SDevesh Sharma  * Copyright (C) 2012-2015 Emulex. All rights reserved.
471ee6730SDevesh Sharma  * EMULEX and SLI are trademarks of Emulex.
571ee6730SDevesh Sharma  * www.emulex.com
671ee6730SDevesh Sharma  *
771ee6730SDevesh Sharma  * This software is available to you under a choice of one of two licenses.
871ee6730SDevesh Sharma  * You may choose to be licensed under the terms of the GNU General Public
971ee6730SDevesh Sharma  * License (GPL) Version 2, available from the file COPYING in the main
1071ee6730SDevesh Sharma  * directory of this source tree, or the BSD license below:
1171ee6730SDevesh Sharma  *
1271ee6730SDevesh Sharma  * Redistribution and use in source and binary forms, with or without
1371ee6730SDevesh Sharma  * modification, are permitted provided that the following conditions
1471ee6730SDevesh Sharma  * are met:
1571ee6730SDevesh Sharma  *
1671ee6730SDevesh Sharma  * - Redistributions of source code must retain the above copyright notice,
1771ee6730SDevesh Sharma  *   this list of conditions and the following disclaimer.
1871ee6730SDevesh Sharma  *
1971ee6730SDevesh Sharma  * - Redistributions in binary form must reproduce the above copyright
2071ee6730SDevesh Sharma  *   notice, this list of conditions and the following disclaimer in
2171ee6730SDevesh Sharma  *   the documentation and/or other materials provided with the distribution.
2271ee6730SDevesh Sharma  *
2371ee6730SDevesh Sharma  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
2471ee6730SDevesh Sharma  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
2571ee6730SDevesh Sharma  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
2671ee6730SDevesh Sharma  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
2771ee6730SDevesh Sharma  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
2871ee6730SDevesh Sharma  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
2971ee6730SDevesh Sharma  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
3071ee6730SDevesh Sharma  * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
3171ee6730SDevesh Sharma  * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
3271ee6730SDevesh Sharma  * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
3371ee6730SDevesh Sharma  * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
34fe2caefcSParav Pandit  *
35fe2caefcSParav Pandit  * Contact Information:
36fe2caefcSParav Pandit  * linux-drivers@emulex.com
37fe2caefcSParav Pandit  *
38fe2caefcSParav Pandit  * Emulex
39fe2caefcSParav Pandit  * 3333 Susan Street
40fe2caefcSParav Pandit  * Costa Mesa, CA 92626
4171ee6730SDevesh Sharma  */
42fe2caefcSParav Pandit 
43fe2caefcSParav Pandit #include <linux/dma-mapping.h>
44fe2caefcSParav Pandit #include <rdma/ib_verbs.h>
45fe2caefcSParav Pandit #include <rdma/ib_user_verbs.h>
46fe2caefcSParav Pandit #include <rdma/iw_cm.h>
47fe2caefcSParav Pandit #include <rdma/ib_umem.h>
48fe2caefcSParav Pandit #include <rdma/ib_addr.h>
49cc36929eSSomnath Kotur #include <rdma/ib_cache.h>
50ff23dfa1SShamir Rabinovitch #include <rdma/uverbs_ioctl.h>
51fe2caefcSParav Pandit 
52fe2caefcSParav Pandit #include "ocrdma.h"
53fe2caefcSParav Pandit #include "ocrdma_hw.h"
54fe2caefcSParav Pandit #include "ocrdma_verbs.h"
55a7fe7380SLeon Romanovsky #include <rdma/ocrdma-abi.h>
56fe2caefcSParav Pandit 
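/*
 * RoCE exposes a single P_Key table entry: index 0 holds the default
 * partition key 0xffff (full membership); any other index is rejected.
 */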
57fe2caefcSParav Pandit int ocrdma_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
58fe2caefcSParav Pandit {
59b1889407SGal Pressman 	if (index > 0)
60fe2caefcSParav Pandit 		return -EINVAL;
61fe2caefcSParav Pandit 
62fe2caefcSParav Pandit 	*pkey = 0xffff;
63fe2caefcSParav Pandit 	return 0;
64fe2caefcSParav Pandit }
65fe2caefcSParav Pandit 
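/*
 * Report the device limits cached in dev->attr.  page_size_cap is a
 * bitmask of supported registration page sizes; 0xffff000 sets bits
 * 12..27, i.e. 4 KB up to 128 MB.
 */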
662528e33eSMatan Barak int ocrdma_query_device(struct ib_device *ibdev, struct ib_device_attr *attr,
672528e33eSMatan Barak 			struct ib_udata *uhw)
68fe2caefcSParav Pandit {
69fe2caefcSParav Pandit 	struct ocrdma_dev *dev = get_ocrdma_dev(ibdev);
70fe2caefcSParav Pandit 
712528e33eSMatan Barak 	if (uhw->inlen || uhw->outlen)
722528e33eSMatan Barak 		return -EINVAL;
732528e33eSMatan Barak 
74fe2caefcSParav Pandit 	memset(attr, 0, sizeof *attr);
75fe2caefcSParav Pandit 	memcpy(&attr->fw_ver, &dev->attr.fw_ver[0],
76fe2caefcSParav Pandit 	       min(sizeof(dev->attr.fw_ver), sizeof(attr->fw_ver)));
77fe2caefcSParav Pandit 	ocrdma_get_guid(dev, (u8 *)&attr->sys_image_guid);
78033edd4dSMitesh Ahuja 	attr->max_mr_size = dev->attr.max_mr_size;
79fe2caefcSParav Pandit 	attr->page_size_cap = 0xffff000;
80fe2caefcSParav Pandit 	attr->vendor_id = dev->nic_info.pdev->vendor;
81fe2caefcSParav Pandit 	attr->vendor_part_id = dev->nic_info.pdev->device;
8296c51abeSMitesh Ahuja 	attr->hw_ver = dev->asic_id;
83fe2caefcSParav Pandit 	attr->max_qp = dev->attr.max_qp;
84d3cb6c0bSNaresh Gottumukkala 	attr->max_ah = OCRDMA_MAX_AH;
85fe2caefcSParav Pandit 	attr->max_qp_wr = dev->attr.max_wqe;
86fe2caefcSParav Pandit 
87fe2caefcSParav Pandit 	attr->device_cap_flags = IB_DEVICE_CURR_QP_STATE_MOD |
88fe2caefcSParav Pandit 					IB_DEVICE_RC_RNR_NAK_GEN |
89fe2caefcSParav Pandit 					IB_DEVICE_SHUTDOWN_PORT |
90fe2caefcSParav Pandit 					IB_DEVICE_SYS_IMAGE_GUID |
912b51a9b9SNaresh Gottumukkala 					IB_DEVICE_LOCAL_DMA_LKEY |
922b51a9b9SNaresh Gottumukkala 					IB_DEVICE_MEM_MGT_EXTENSIONS;
9333023fb8SSteve Wise 	attr->max_send_sge = dev->attr.max_send_sge;
9433023fb8SSteve Wise 	attr->max_recv_sge = dev->attr.max_recv_sge;
953c199b45SSelvin Xavier 	attr->max_sge_rd = dev->attr.max_rdma_sge;
96fe2caefcSParav Pandit 	attr->max_cq = dev->attr.max_cq;
97fe2caefcSParav Pandit 	attr->max_cqe = dev->attr.max_cqe;
98fe2caefcSParav Pandit 	attr->max_mr = dev->attr.max_mr;
99ac578aefSSelvin Xavier 	attr->max_mw = dev->attr.max_mw;
100fe2caefcSParav Pandit 	attr->max_pd = dev->attr.max_pd;
101fe2caefcSParav Pandit 	attr->atomic_cap = 0;
102fe2caefcSParav Pandit 	attr->max_qp_rd_atom =
103fe2caefcSParav Pandit 	    min(dev->attr.max_ord_per_qp, dev->attr.max_ird_per_qp);
104fe2caefcSParav Pandit 	attr->max_qp_init_rd_atom = dev->attr.max_ord_per_qp;
1057c33880cSNaresh Gottumukkala 	attr->max_srq = dev->attr.max_srq;
106d1e09ebfSRoland Dreier 	attr->max_srq_sge = dev->attr.max_srq_sge;
107fe2caefcSParav Pandit 	attr->max_srq_wr = dev->attr.max_rqe;
108fe2caefcSParav Pandit 	attr->local_ca_ack_delay = dev->attr.local_ca_ack_delay;
109d6a488f2SDevesh Sharma 	attr->max_fast_reg_page_list_len = dev->attr.max_pages_per_frmr;
110fe2caefcSParav Pandit 	attr->max_pkeys = 1;
111fe2caefcSParav Pandit 	return 0;
112fe2caefcSParav Pandit }
113fe2caefcSParav Pandit 
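/*
 * Translate the PHY link speed reported by firmware into an IB
 * (speed, width) pair whose product matches the wire rate, e.g.
 * 40 Gbps is reported as QDR x4 (4 lanes of ~10 Gbps) and 20 Gbps as
 * DDR x4 (4 lanes of ~5 Gbps).  Unknown speeds fall back to SDR x1.
 */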
114f24ceba6SNaresh Gottumukkala static inline void get_link_speed_and_width(struct ocrdma_dev *dev,
115*376ceb31SAharon Landau 					    u16 *ib_speed, u8 *ib_width)
116f24ceba6SNaresh Gottumukkala {
117f24ceba6SNaresh Gottumukkala 	int status;
118f24ceba6SNaresh Gottumukkala 	u8 speed;
119f24ceba6SNaresh Gottumukkala 
1203b1ea430SDevesh Sharma 	status = ocrdma_mbx_get_link_speed(dev, &speed, NULL);
121f24ceba6SNaresh Gottumukkala 	if (status)
122f24ceba6SNaresh Gottumukkala 		speed = OCRDMA_PHYS_LINK_SPEED_ZERO;
123f24ceba6SNaresh Gottumukkala 
124f24ceba6SNaresh Gottumukkala 	switch (speed) {
125f24ceba6SNaresh Gottumukkala 	case OCRDMA_PHYS_LINK_SPEED_1GBPS:
126f24ceba6SNaresh Gottumukkala 		*ib_speed = IB_SPEED_SDR;
127f24ceba6SNaresh Gottumukkala 		*ib_width = IB_WIDTH_1X;
128f24ceba6SNaresh Gottumukkala 		break;
129f24ceba6SNaresh Gottumukkala 
130f24ceba6SNaresh Gottumukkala 	case OCRDMA_PHYS_LINK_SPEED_10GBPS:
131f24ceba6SNaresh Gottumukkala 		*ib_speed = IB_SPEED_QDR;
132f24ceba6SNaresh Gottumukkala 		*ib_width = IB_WIDTH_1X;
133f24ceba6SNaresh Gottumukkala 		break;
134f24ceba6SNaresh Gottumukkala 
135f24ceba6SNaresh Gottumukkala 	case OCRDMA_PHYS_LINK_SPEED_20GBPS:
136f24ceba6SNaresh Gottumukkala 		*ib_speed = IB_SPEED_DDR;
137f24ceba6SNaresh Gottumukkala 		*ib_width = IB_WIDTH_4X;
138f24ceba6SNaresh Gottumukkala 		break;
139f24ceba6SNaresh Gottumukkala 
140f24ceba6SNaresh Gottumukkala 	case OCRDMA_PHYS_LINK_SPEED_40GBPS:
141f24ceba6SNaresh Gottumukkala 		*ib_speed = IB_SPEED_QDR;
142f24ceba6SNaresh Gottumukkala 		*ib_width = IB_WIDTH_4X;
143f24ceba6SNaresh Gottumukkala 		break;
144f24ceba6SNaresh Gottumukkala 
145f24ceba6SNaresh Gottumukkala 	default:
146f24ceba6SNaresh Gottumukkala 		/* Unsupported */
147f24ceba6SNaresh Gottumukkala 		*ib_speed = IB_SPEED_SDR;
148f24ceba6SNaresh Gottumukkala 		*ib_width = IB_WIDTH_1X;
1492b50176dSJoe Perches 	}
150f24ceba6SNaresh Gottumukkala }
151f24ceba6SNaresh Gottumukkala 
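/*
 * The RoCE port state mirrors the underlying Ethernet netdev: ACTIVE
 * while the interface is running and operationally up, DOWN otherwise.
 * LID/SM attributes have no meaning on RoCE and are reported as zero.
 */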
152fe2caefcSParav Pandit int ocrdma_query_port(struct ib_device *ibdev,
153fe2caefcSParav Pandit 		      u8 port, struct ib_port_attr *props)
154fe2caefcSParav Pandit {
155fe2caefcSParav Pandit 	enum ib_port_state port_state;
156fe2caefcSParav Pandit 	struct ocrdma_dev *dev;
157fe2caefcSParav Pandit 	struct net_device *netdev;
158fe2caefcSParav Pandit 
159c4550c63SOr Gerlitz 	/* props is zeroed by the caller; avoid zeroing it here */
160fe2caefcSParav Pandit 	dev = get_ocrdma_dev(ibdev);
161fe2caefcSParav Pandit 	netdev = dev->nic_info.netdev;
162fe2caefcSParav Pandit 	if (netif_running(netdev) && netif_oper_up(netdev)) {
163fe2caefcSParav Pandit 		port_state = IB_PORT_ACTIVE;
16472a7720fSKamal Heib 		props->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
165fe2caefcSParav Pandit 	} else {
166fe2caefcSParav Pandit 		port_state = IB_PORT_DOWN;
16772a7720fSKamal Heib 		props->phys_state = IB_PORT_PHYS_STATE_DISABLED;
168fe2caefcSParav Pandit 	}
169fe2caefcSParav Pandit 	props->max_mtu = IB_MTU_4096;
170fe2caefcSParav Pandit 	props->active_mtu = iboe_get_mtu(netdev->mtu);
171fe2caefcSParav Pandit 	props->lid = 0;
172fe2caefcSParav Pandit 	props->lmc = 0;
173fe2caefcSParav Pandit 	props->sm_lid = 0;
174fe2caefcSParav Pandit 	props->sm_sl = 0;
175fe2caefcSParav Pandit 	props->state = port_state;
1762f944c0fSJason Gunthorpe 	props->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_REINIT_SUP |
1772f944c0fSJason Gunthorpe 				IB_PORT_DEVICE_MGMT_SUP |
1782f944c0fSJason Gunthorpe 				IB_PORT_VENDOR_CLASS_SUP;
1792f944c0fSJason Gunthorpe 	props->ip_gids = true;
180fe2caefcSParav Pandit 	props->gid_tbl_len = OCRDMA_MAX_SGID;
181fe2caefcSParav Pandit 	props->pkey_tbl_len = 1;
182fe2caefcSParav Pandit 	props->bad_pkey_cntr = 0;
183fe2caefcSParav Pandit 	props->qkey_viol_cntr = 0;
184f24ceba6SNaresh Gottumukkala 	get_link_speed_and_width(dev, &props->active_speed,
185f24ceba6SNaresh Gottumukkala 				 &props->active_width);
186fe2caefcSParav Pandit 	props->max_msg_sz = 0x80000000;
187fe2caefcSParav Pandit 	props->max_vl_num = 4;
188fe2caefcSParav Pandit 	return 0;
189fe2caefcSParav Pandit }
190fe2caefcSParav Pandit 
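/*
 * Per-ucontext bookkeeping of the (physical address, length) regions
 * the driver has exposed to userspace (doorbell pages, DPP pages, the
 * AH table).  ocrdma_mmap() later honours an mmap request only if its
 * offset and length match one of these recorded entries.
 */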
191fe2caefcSParav Pandit static int ocrdma_add_mmap(struct ocrdma_ucontext *uctx, u64 phy_addr,
192fe2caefcSParav Pandit 			   unsigned long len)
193fe2caefcSParav Pandit {
194fe2caefcSParav Pandit 	struct ocrdma_mm *mm;
195fe2caefcSParav Pandit 
196fe2caefcSParav Pandit 	mm = kzalloc(sizeof(*mm), GFP_KERNEL);
197fe2caefcSParav Pandit 	if (mm == NULL)
198fe2caefcSParav Pandit 		return -ENOMEM;
199fe2caefcSParav Pandit 	mm->key.phy_addr = phy_addr;
200fe2caefcSParav Pandit 	mm->key.len = len;
201fe2caefcSParav Pandit 	INIT_LIST_HEAD(&mm->entry);
202fe2caefcSParav Pandit 
203fe2caefcSParav Pandit 	mutex_lock(&uctx->mm_list_lock);
204fe2caefcSParav Pandit 	list_add_tail(&mm->entry, &uctx->mm_head);
205fe2caefcSParav Pandit 	mutex_unlock(&uctx->mm_list_lock);
206fe2caefcSParav Pandit 	return 0;
207fe2caefcSParav Pandit }
208fe2caefcSParav Pandit 
209fe2caefcSParav Pandit static void ocrdma_del_mmap(struct ocrdma_ucontext *uctx, u64 phy_addr,
210fe2caefcSParav Pandit 			    unsigned long len)
211fe2caefcSParav Pandit {
212fe2caefcSParav Pandit 	struct ocrdma_mm *mm, *tmp;
213fe2caefcSParav Pandit 
214fe2caefcSParav Pandit 	mutex_lock(&uctx->mm_list_lock);
215fe2caefcSParav Pandit 	list_for_each_entry_safe(mm, tmp, &uctx->mm_head, entry) {
21643a6b402SNaresh Gottumukkala 		if (len != mm->key.len && phy_addr != mm->key.phy_addr)
217fe2caefcSParav Pandit 			continue;
218fe2caefcSParav Pandit 
219fe2caefcSParav Pandit 		list_del(&mm->entry);
220fe2caefcSParav Pandit 		kfree(mm);
221fe2caefcSParav Pandit 		break;
222fe2caefcSParav Pandit 	}
223fe2caefcSParav Pandit 	mutex_unlock(&uctx->mm_list_lock);
224fe2caefcSParav Pandit }
225fe2caefcSParav Pandit 
226fe2caefcSParav Pandit static bool ocrdma_search_mmap(struct ocrdma_ucontext *uctx, u64 phy_addr,
227fe2caefcSParav Pandit 			      unsigned long len)
228fe2caefcSParav Pandit {
229fe2caefcSParav Pandit 	bool found = false;
230fe2caefcSParav Pandit 	struct ocrdma_mm *mm;
231fe2caefcSParav Pandit 
232fe2caefcSParav Pandit 	mutex_lock(&uctx->mm_list_lock);
233fe2caefcSParav Pandit 	list_for_each_entry(mm, &uctx->mm_head, entry) {
23443a6b402SNaresh Gottumukkala 		if (len != mm->key.len && phy_addr != mm->key.phy_addr)
235fe2caefcSParav Pandit 			continue;
236fe2caefcSParav Pandit 
237fe2caefcSParav Pandit 		found = true;
238fe2caefcSParav Pandit 		break;
239fe2caefcSParav Pandit 	}
240fe2caefcSParav Pandit 	mutex_unlock(&uctx->mm_list_lock);
241fe2caefcSParav Pandit 	return found;
242fe2caefcSParav Pandit }
243fe2caefcSParav Pandit 
2449ba1377dSMitesh Ahuja 
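/*
 * When firmware pre-allocates a PD pool (pd_prealloc_valid), PD ids are
 * handed out locally from two bitmaps: one for DPP-capable PDs and one
 * for normal PDs.  The *_thrsh fields track the high-water mark of PDs
 * in use, apparently for the driver's resource statistics.
 */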
2459ba1377dSMitesh Ahuja static u16 _ocrdma_pd_mgr_get_bitmap(struct ocrdma_dev *dev, bool dpp_pool)
2469ba1377dSMitesh Ahuja {
2479ba1377dSMitesh Ahuja 	u16 pd_bitmap_idx = 0;
2489ba1377dSMitesh Ahuja 	const unsigned long *pd_bitmap;
2499ba1377dSMitesh Ahuja 
2509ba1377dSMitesh Ahuja 	if (dpp_pool) {
2519ba1377dSMitesh Ahuja 		pd_bitmap = dev->pd_mgr->pd_dpp_bitmap;
2529ba1377dSMitesh Ahuja 		pd_bitmap_idx = find_first_zero_bit(pd_bitmap,
2539ba1377dSMitesh Ahuja 						    dev->pd_mgr->max_dpp_pd);
2549ba1377dSMitesh Ahuja 		__set_bit(pd_bitmap_idx, dev->pd_mgr->pd_dpp_bitmap);
2559ba1377dSMitesh Ahuja 		dev->pd_mgr->pd_dpp_count++;
2569ba1377dSMitesh Ahuja 		if (dev->pd_mgr->pd_dpp_count > dev->pd_mgr->pd_dpp_thrsh)
2579ba1377dSMitesh Ahuja 			dev->pd_mgr->pd_dpp_thrsh = dev->pd_mgr->pd_dpp_count;
2589ba1377dSMitesh Ahuja 	} else {
2599ba1377dSMitesh Ahuja 		pd_bitmap = dev->pd_mgr->pd_norm_bitmap;
2609ba1377dSMitesh Ahuja 		pd_bitmap_idx = find_first_zero_bit(pd_bitmap,
2619ba1377dSMitesh Ahuja 						    dev->pd_mgr->max_normal_pd);
2629ba1377dSMitesh Ahuja 		__set_bit(pd_bitmap_idx, dev->pd_mgr->pd_norm_bitmap);
2639ba1377dSMitesh Ahuja 		dev->pd_mgr->pd_norm_count++;
2649ba1377dSMitesh Ahuja 		if (dev->pd_mgr->pd_norm_count > dev->pd_mgr->pd_norm_thrsh)
2659ba1377dSMitesh Ahuja 			dev->pd_mgr->pd_norm_thrsh = dev->pd_mgr->pd_norm_count;
2669ba1377dSMitesh Ahuja 	}
2679ba1377dSMitesh Ahuja 	return pd_bitmap_idx;
2689ba1377dSMitesh Ahuja }
2699ba1377dSMitesh Ahuja 
2709ba1377dSMitesh Ahuja static int _ocrdma_pd_mgr_put_bitmap(struct ocrdma_dev *dev, u16 pd_id,
2719ba1377dSMitesh Ahuja 					bool dpp_pool)
2729ba1377dSMitesh Ahuja {
2739ba1377dSMitesh Ahuja 	u16 pd_count;
2749ba1377dSMitesh Ahuja 	u16 pd_bit_index;
2759ba1377dSMitesh Ahuja 
2769ba1377dSMitesh Ahuja 	pd_count = dpp_pool ? dev->pd_mgr->pd_dpp_count :
2779ba1377dSMitesh Ahuja 			      dev->pd_mgr->pd_norm_count;
2789ba1377dSMitesh Ahuja 	if (pd_count == 0)
2799ba1377dSMitesh Ahuja 		return -EINVAL;
2809ba1377dSMitesh Ahuja 
2819ba1377dSMitesh Ahuja 	if (dpp_pool) {
2829ba1377dSMitesh Ahuja 		pd_bit_index = pd_id - dev->pd_mgr->pd_dpp_start;
2839ba1377dSMitesh Ahuja 		if (pd_bit_index >= dev->pd_mgr->max_dpp_pd) {
2849ba1377dSMitesh Ahuja 			return -EINVAL;
2859ba1377dSMitesh Ahuja 		} else {
2869ba1377dSMitesh Ahuja 			__clear_bit(pd_bit_index, dev->pd_mgr->pd_dpp_bitmap);
2879ba1377dSMitesh Ahuja 			dev->pd_mgr->pd_dpp_count--;
2889ba1377dSMitesh Ahuja 		}
2899ba1377dSMitesh Ahuja 	} else {
2909ba1377dSMitesh Ahuja 		pd_bit_index = pd_id - dev->pd_mgr->pd_norm_start;
2919ba1377dSMitesh Ahuja 		if (pd_bit_index >= dev->pd_mgr->max_normal_pd) {
2929ba1377dSMitesh Ahuja 			return -EINVAL;
2939ba1377dSMitesh Ahuja 		} else {
2949ba1377dSMitesh Ahuja 			__clear_bit(pd_bit_index, dev->pd_mgr->pd_norm_bitmap);
2959ba1377dSMitesh Ahuja 			dev->pd_mgr->pd_norm_count--;
2969ba1377dSMitesh Ahuja 		}
2979ba1377dSMitesh Ahuja 	}
2989ba1377dSMitesh Ahuja 
2999ba1377dSMitesh Ahuja 	return 0;
3009ba1377dSMitesh Ahuja }
3019ba1377dSMitesh Ahuja 
302004d18eaSDan Carpenter static int ocrdma_put_pd_num(struct ocrdma_dev *dev, u16 pd_id,
3039ba1377dSMitesh Ahuja 				   bool dpp_pool)
3049ba1377dSMitesh Ahuja {
3059ba1377dSMitesh Ahuja 	int status;
3069ba1377dSMitesh Ahuja 
3079ba1377dSMitesh Ahuja 	mutex_lock(&dev->dev_lock);
3089ba1377dSMitesh Ahuja 	status = _ocrdma_pd_mgr_put_bitmap(dev, pd_id, dpp_pool);
3099ba1377dSMitesh Ahuja 	mutex_unlock(&dev->dev_lock);
3109ba1377dSMitesh Ahuja 	return status;
3119ba1377dSMitesh Ahuja }
3129ba1377dSMitesh Ahuja 
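/*
 * Pick a PD id from the pre-allocated pool, preferring a DPP PD when
 * requested and quietly falling back to a normal PD (clearing
 * dpp_enabled) once the DPP pool is exhausted.
 */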
3139ba1377dSMitesh Ahuja static int ocrdma_get_pd_num(struct ocrdma_dev *dev, struct ocrdma_pd *pd)
3149ba1377dSMitesh Ahuja {
3159ba1377dSMitesh Ahuja 	u16 pd_idx = 0;
3169ba1377dSMitesh Ahuja 	int status = 0;
3179ba1377dSMitesh Ahuja 
3189ba1377dSMitesh Ahuja 	mutex_lock(&dev->dev_lock);
3199ba1377dSMitesh Ahuja 	if (pd->dpp_enabled) {
3209ba1377dSMitesh Ahuja 		/* try allocating a DPP PD; fall back to a normal PD if none is available */
3219ba1377dSMitesh Ahuja 		if (dev->pd_mgr->pd_dpp_count < dev->pd_mgr->max_dpp_pd) {
3229ba1377dSMitesh Ahuja 			pd_idx = _ocrdma_pd_mgr_get_bitmap(dev, true);
3239ba1377dSMitesh Ahuja 			pd->id = dev->pd_mgr->pd_dpp_start + pd_idx;
3249ba1377dSMitesh Ahuja 			pd->dpp_page = dev->pd_mgr->dpp_page_index + pd_idx;
3259ba1377dSMitesh Ahuja 		} else if (dev->pd_mgr->pd_norm_count <
3269ba1377dSMitesh Ahuja 			   dev->pd_mgr->max_normal_pd) {
3279ba1377dSMitesh Ahuja 			pd_idx = _ocrdma_pd_mgr_get_bitmap(dev, false);
3289ba1377dSMitesh Ahuja 			pd->id = dev->pd_mgr->pd_norm_start + pd_idx;
3299ba1377dSMitesh Ahuja 			pd->dpp_enabled = false;
3309ba1377dSMitesh Ahuja 		} else {
3319ba1377dSMitesh Ahuja 			status = -EINVAL;
3329ba1377dSMitesh Ahuja 		}
3339ba1377dSMitesh Ahuja 	} else {
3349ba1377dSMitesh Ahuja 		if (dev->pd_mgr->pd_norm_count < dev->pd_mgr->max_normal_pd) {
3359ba1377dSMitesh Ahuja 			pd_idx = _ocrdma_pd_mgr_get_bitmap(dev, false);
3369ba1377dSMitesh Ahuja 			pd->id = dev->pd_mgr->pd_norm_start + pd_idx;
3379ba1377dSMitesh Ahuja 		} else {
3389ba1377dSMitesh Ahuja 			status = -EINVAL;
3399ba1377dSMitesh Ahuja 		}
3409ba1377dSMitesh Ahuja 	}
3419ba1377dSMitesh Ahuja 	mutex_unlock(&dev->dev_lock);
3429ba1377dSMitesh Ahuja 	return status;
3439ba1377dSMitesh Ahuja }
3449ba1377dSMitesh Ahuja 
345ff23dfa1SShamir Rabinovitch /*
346ff23dfa1SShamir Rabinovitch  * NOTE:
347ff23dfa1SShamir Rabinovitch  *
348ff23dfa1SShamir Rabinovitch  * ocrdma_ucontext must be used here because this function is also
349ff23dfa1SShamir Rabinovitch  * called from ocrdma_alloc_ucontext, where ib_udata does not carry a
350ff23dfa1SShamir Rabinovitch  * valid ib_ucontext pointer. ib_uverbs_get_context does not call the
351ff23dfa1SShamir Rabinovitch  * uobj_{alloc|get_xxx} helpers which are used to store the
352ff23dfa1SShamir Rabinovitch  * ib_ucontext in the uverbs_attr_bundle wrapping the ib_udata, so
353ff23dfa1SShamir Rabinovitch  * ib_udata does NOT imply a valid ib_ucontext here!
354ff23dfa1SShamir Rabinovitch  */
35521a428a0SLeon Romanovsky static int _ocrdma_alloc_pd(struct ocrdma_dev *dev, struct ocrdma_pd *pd,
356cffce990SNaresh Gottumukkala 			    struct ocrdma_ucontext *uctx,
357cffce990SNaresh Gottumukkala 			    struct ib_udata *udata)
358cffce990SNaresh Gottumukkala {
3590ca4c39fSMarkus Elfring 	int status;
360cffce990SNaresh Gottumukkala 
36159582d86SMitesh Ahuja 	if (udata && uctx && dev->attr.max_dpp_pds) {
362cffce990SNaresh Gottumukkala 		pd->dpp_enabled =
36321c3391aSDevesh Sharma 			ocrdma_get_asic_type(dev) == OCRDMA_ASIC_GEN_SKH_R;
364cffce990SNaresh Gottumukkala 		pd->num_dpp_qp =
365a53d77a3SDevesh Sharma 			pd->dpp_enabled ? (dev->nic_info.db_page_size /
366a53d77a3SDevesh Sharma 					   dev->attr.wqe_size) : 0;
367cffce990SNaresh Gottumukkala 	}
368cffce990SNaresh Gottumukkala 
36921a428a0SLeon Romanovsky 	if (dev->pd_mgr->pd_prealloc_valid)
37021a428a0SLeon Romanovsky 		return ocrdma_get_pd_num(dev, pd);
3719ba1377dSMitesh Ahuja 
372cffce990SNaresh Gottumukkala retry:
373cffce990SNaresh Gottumukkala 	status = ocrdma_mbx_alloc_pd(dev, pd);
374cffce990SNaresh Gottumukkala 	if (status) {
375cffce990SNaresh Gottumukkala 		if (pd->dpp_enabled) {
376cffce990SNaresh Gottumukkala 			pd->dpp_enabled = false;
377cffce990SNaresh Gottumukkala 			pd->num_dpp_qp = 0;
378cffce990SNaresh Gottumukkala 			goto retry;
379cffce990SNaresh Gottumukkala 		}
38021a428a0SLeon Romanovsky 		return status;
381cffce990SNaresh Gottumukkala 	}
382cffce990SNaresh Gottumukkala 
38321a428a0SLeon Romanovsky 	return 0;
384cffce990SNaresh Gottumukkala }
385cffce990SNaresh Gottumukkala 
386cffce990SNaresh Gottumukkala static inline int is_ucontext_pd(struct ocrdma_ucontext *uctx,
387cffce990SNaresh Gottumukkala 				 struct ocrdma_pd *pd)
388cffce990SNaresh Gottumukkala {
3898b0c05dcSAndrew F. Davis 	return (uctx->cntxt_pd == pd);
390cffce990SNaresh Gottumukkala }
391cffce990SNaresh Gottumukkala 
39221a428a0SLeon Romanovsky static void _ocrdma_dealloc_pd(struct ocrdma_dev *dev,
393cffce990SNaresh Gottumukkala 			      struct ocrdma_pd *pd)
394cffce990SNaresh Gottumukkala {
3959ba1377dSMitesh Ahuja 	if (dev->pd_mgr->pd_prealloc_valid)
39621a428a0SLeon Romanovsky 		ocrdma_put_pd_num(dev, pd->id, pd->dpp_enabled);
3979ba1377dSMitesh Ahuja 	else
39821a428a0SLeon Romanovsky 		ocrdma_mbx_dealloc_pd(dev, pd);
399cffce990SNaresh Gottumukkala }
400cffce990SNaresh Gottumukkala 
401cffce990SNaresh Gottumukkala static int ocrdma_alloc_ucontext_pd(struct ocrdma_dev *dev,
402cffce990SNaresh Gottumukkala 				    struct ocrdma_ucontext *uctx,
403cffce990SNaresh Gottumukkala 				    struct ib_udata *udata)
404cffce990SNaresh Gottumukkala {
40521a428a0SLeon Romanovsky 	struct ib_device *ibdev = &dev->ibdev;
40621a428a0SLeon Romanovsky 	struct ib_pd *pd;
40721a428a0SLeon Romanovsky 	int status;
408cffce990SNaresh Gottumukkala 
40921a428a0SLeon Romanovsky 	pd = rdma_zalloc_drv_obj(ibdev, ib_pd);
41021a428a0SLeon Romanovsky 	if (!pd)
41121a428a0SLeon Romanovsky 		return -ENOMEM;
41221a428a0SLeon Romanovsky 
41321a428a0SLeon Romanovsky 	pd->device  = ibdev;
41421a428a0SLeon Romanovsky 	uctx->cntxt_pd = get_ocrdma_pd(pd);
41521a428a0SLeon Romanovsky 
41621a428a0SLeon Romanovsky 	status = _ocrdma_alloc_pd(dev, uctx->cntxt_pd, uctx, udata);
41721a428a0SLeon Romanovsky 	if (status) {
41821a428a0SLeon Romanovsky 		kfree(uctx->cntxt_pd);
419cffce990SNaresh Gottumukkala 		goto err;
420cffce990SNaresh Gottumukkala 	}
421cffce990SNaresh Gottumukkala 
422cffce990SNaresh Gottumukkala 	uctx->cntxt_pd->uctx = uctx;
423cffce990SNaresh Gottumukkala 	uctx->cntxt_pd->ibpd.device = &dev->ibdev;
424cffce990SNaresh Gottumukkala err:
425cffce990SNaresh Gottumukkala 	return status;
426cffce990SNaresh Gottumukkala }
427cffce990SNaresh Gottumukkala 
428a2a074efSLeon Romanovsky static void ocrdma_dealloc_ucontext_pd(struct ocrdma_ucontext *uctx)
429cffce990SNaresh Gottumukkala {
430cffce990SNaresh Gottumukkala 	struct ocrdma_pd *pd = uctx->cntxt_pd;
431cffce990SNaresh Gottumukkala 	struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device);
432cffce990SNaresh Gottumukkala 
4336dab0264SMitesh Ahuja 	if (uctx->pd_in_use) {
4346dab0264SMitesh Ahuja 		pr_err("%s(%d) Freeing in use pdid=0x%x.\n",
4356dab0264SMitesh Ahuja 		       __func__, dev->id, pd->id);
4366dab0264SMitesh Ahuja 	}
43721a428a0SLeon Romanovsky 	kfree(uctx->cntxt_pd);
438cffce990SNaresh Gottumukkala 	uctx->cntxt_pd = NULL;
439a2a074efSLeon Romanovsky 	_ocrdma_dealloc_pd(dev, pd);
440cffce990SNaresh Gottumukkala }
441cffce990SNaresh Gottumukkala 
442cffce990SNaresh Gottumukkala static struct ocrdma_pd *ocrdma_get_ucontext_pd(struct ocrdma_ucontext *uctx)
443cffce990SNaresh Gottumukkala {
444cffce990SNaresh Gottumukkala 	struct ocrdma_pd *pd = NULL;
445cffce990SNaresh Gottumukkala 
446cffce990SNaresh Gottumukkala 	mutex_lock(&uctx->mm_list_lock);
447cffce990SNaresh Gottumukkala 	if (!uctx->pd_in_use) {
448cffce990SNaresh Gottumukkala 		uctx->pd_in_use = true;
449cffce990SNaresh Gottumukkala 		pd = uctx->cntxt_pd;
450cffce990SNaresh Gottumukkala 	}
451cffce990SNaresh Gottumukkala 	mutex_unlock(&uctx->mm_list_lock);
452cffce990SNaresh Gottumukkala 
453cffce990SNaresh Gottumukkala 	return pd;
454cffce990SNaresh Gottumukkala }
455cffce990SNaresh Gottumukkala 
456cffce990SNaresh Gottumukkala static void ocrdma_release_ucontext_pd(struct ocrdma_ucontext *uctx)
457cffce990SNaresh Gottumukkala {
458cffce990SNaresh Gottumukkala 	mutex_lock(&uctx->mm_list_lock);
459cffce990SNaresh Gottumukkala 	uctx->pd_in_use = false;
460cffce990SNaresh Gottumukkala 	mutex_unlock(&uctx->mm_list_lock);
461cffce990SNaresh Gottumukkala }
462cffce990SNaresh Gottumukkala 
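/*
 * User context setup: allocate a DMA-coherent address-handle table
 * (2048 u32 slots, rounded up to a page) that userspace later mmap()s,
 * record its physical address in the mmap list, and create the
 * per-context default PD.  Queue/WQE sizing is returned through
 * ocrdma_alloc_ucontext_resp.
 */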
463a2a074efSLeon Romanovsky int ocrdma_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata)
464fe2caefcSParav Pandit {
465a2a074efSLeon Romanovsky 	struct ib_device *ibdev = uctx->device;
466fe2caefcSParav Pandit 	int status;
467a2a074efSLeon Romanovsky 	struct ocrdma_ucontext *ctx = get_ocrdma_ucontext(uctx);
468a2a074efSLeon Romanovsky 	struct ocrdma_alloc_ucontext_resp resp = {};
469fe2caefcSParav Pandit 	struct ocrdma_dev *dev = get_ocrdma_dev(ibdev);
470fe2caefcSParav Pandit 	struct pci_dev *pdev = dev->nic_info.pdev;
471fe2caefcSParav Pandit 	u32 map_len = roundup(sizeof(u32) * 2048, PAGE_SIZE);
472fe2caefcSParav Pandit 
473fe2caefcSParav Pandit 	if (!udata)
474a2a074efSLeon Romanovsky 		return -EFAULT;
475fe2caefcSParav Pandit 	INIT_LIST_HEAD(&ctx->mm_head);
476fe2caefcSParav Pandit 	mutex_init(&ctx->mm_list_lock);
477fe2caefcSParav Pandit 
478750afb08SLuis Chamberlain 	ctx->ah_tbl.va = dma_alloc_coherent(&pdev->dev, map_len,
479fe2caefcSParav Pandit 					    &ctx->ah_tbl.pa, GFP_KERNEL);
480a2a074efSLeon Romanovsky 	if (!ctx->ah_tbl.va)
481a2a074efSLeon Romanovsky 		return -ENOMEM;
482a2a074efSLeon Romanovsky 
483fe2caefcSParav Pandit 	ctx->ah_tbl.len = map_len;
484fe2caefcSParav Pandit 
485fe2caefcSParav Pandit 	resp.ah_tbl_len = ctx->ah_tbl.len;
4861b76d383SDevesh Sharma 	resp.ah_tbl_page = virt_to_phys(ctx->ah_tbl.va);
487fe2caefcSParav Pandit 
488fe2caefcSParav Pandit 	status = ocrdma_add_mmap(ctx, resp.ah_tbl_page, resp.ah_tbl_len);
489fe2caefcSParav Pandit 	if (status)
490fe2caefcSParav Pandit 		goto map_err;
491cffce990SNaresh Gottumukkala 
492cffce990SNaresh Gottumukkala 	status = ocrdma_alloc_ucontext_pd(dev, ctx, udata);
493cffce990SNaresh Gottumukkala 	if (status)
494cffce990SNaresh Gottumukkala 		goto pd_err;
495cffce990SNaresh Gottumukkala 
496fe2caefcSParav Pandit 	resp.dev_id = dev->id;
497fe2caefcSParav Pandit 	resp.max_inline_data = dev->attr.max_inline_data;
498fe2caefcSParav Pandit 	resp.wqe_size = dev->attr.wqe_size;
499fe2caefcSParav Pandit 	resp.rqe_size = dev->attr.rqe_size;
500fe2caefcSParav Pandit 	resp.dpp_wqe_size = dev->attr.wqe_size;
501fe2caefcSParav Pandit 
502fe2caefcSParav Pandit 	memcpy(resp.fw_ver, dev->attr.fw_ver, sizeof(resp.fw_ver));
503fe2caefcSParav Pandit 	status = ib_copy_to_udata(udata, &resp, sizeof(resp));
504fe2caefcSParav Pandit 	if (status)
505fe2caefcSParav Pandit 		goto cpy_err;
506a2a074efSLeon Romanovsky 	return 0;
507fe2caefcSParav Pandit 
508fe2caefcSParav Pandit cpy_err:
50921a428a0SLeon Romanovsky 	ocrdma_dealloc_ucontext_pd(ctx);
510cffce990SNaresh Gottumukkala pd_err:
511fe2caefcSParav Pandit 	ocrdma_del_mmap(ctx, ctx->ah_tbl.pa, ctx->ah_tbl.len);
512fe2caefcSParav Pandit map_err:
513fe2caefcSParav Pandit 	dma_free_coherent(&pdev->dev, ctx->ah_tbl.len, ctx->ah_tbl.va,
514fe2caefcSParav Pandit 			  ctx->ah_tbl.pa);
515a2a074efSLeon Romanovsky 	return status;
516fe2caefcSParav Pandit }
517fe2caefcSParav Pandit 
518a2a074efSLeon Romanovsky void ocrdma_dealloc_ucontext(struct ib_ucontext *ibctx)
519fe2caefcSParav Pandit {
520fe2caefcSParav Pandit 	struct ocrdma_mm *mm, *tmp;
521fe2caefcSParav Pandit 	struct ocrdma_ucontext *uctx = get_ocrdma_ucontext(ibctx);
5221afc0454SNaresh Gottumukkala 	struct ocrdma_dev *dev = get_ocrdma_dev(ibctx->device);
5231afc0454SNaresh Gottumukkala 	struct pci_dev *pdev = dev->nic_info.pdev;
524fe2caefcSParav Pandit 
525a2a074efSLeon Romanovsky 	ocrdma_dealloc_ucontext_pd(uctx);
526cffce990SNaresh Gottumukkala 
527fe2caefcSParav Pandit 	ocrdma_del_mmap(uctx, uctx->ah_tbl.pa, uctx->ah_tbl.len);
528fe2caefcSParav Pandit 	dma_free_coherent(&pdev->dev, uctx->ah_tbl.len, uctx->ah_tbl.va,
529fe2caefcSParav Pandit 			  uctx->ah_tbl.pa);
530fe2caefcSParav Pandit 
531fe2caefcSParav Pandit 	list_for_each_entry_safe(mm, tmp, &uctx->mm_head, entry) {
532fe2caefcSParav Pandit 		list_del(&mm->entry);
533fe2caefcSParav Pandit 		kfree(mm);
534fe2caefcSParav Pandit 	}
535fe2caefcSParav Pandit }
536fe2caefcSParav Pandit 
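/*
 * Back userspace mmap() requests against the regions previously
 * recorded with ocrdma_add_mmap().  The page offset selects the
 * mapping type: doorbell pages are mapped uncached, DPP pages are
 * mapped write-combined, and anything else (e.g. the AH table) gets a
 * normal mapping.  A userspace provider would drive this with roughly
 *
 *	mmap(NULL, len, PROT_WRITE, MAP_SHARED, ctx_fd, phys_addr);
 *
 * where phys_addr and len were returned in one of the *_uresp
 * structures (illustrative sketch only).
 */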
537fe2caefcSParav Pandit int ocrdma_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
538fe2caefcSParav Pandit {
539fe2caefcSParav Pandit 	struct ocrdma_ucontext *ucontext = get_ocrdma_ucontext(context);
5401afc0454SNaresh Gottumukkala 	struct ocrdma_dev *dev = get_ocrdma_dev(context->device);
541fe2caefcSParav Pandit 	unsigned long vm_page = vma->vm_pgoff << PAGE_SHIFT;
542fe2caefcSParav Pandit 	u64 unmapped_db = (u64) dev->nic_info.unmapped_db;
543fe2caefcSParav Pandit 	unsigned long len = (vma->vm_end - vma->vm_start);
5440ca4c39fSMarkus Elfring 	int status;
545fe2caefcSParav Pandit 	bool found;
546fe2caefcSParav Pandit 
547fe2caefcSParav Pandit 	if (vma->vm_start & (PAGE_SIZE - 1))
548fe2caefcSParav Pandit 		return -EINVAL;
549fe2caefcSParav Pandit 	found = ocrdma_search_mmap(ucontext, vma->vm_pgoff << PAGE_SHIFT, len);
550fe2caefcSParav Pandit 	if (!found)
551fe2caefcSParav Pandit 		return -EINVAL;
552fe2caefcSParav Pandit 
553fe2caefcSParav Pandit 	if ((vm_page >= unmapped_db) && (vm_page <= (unmapped_db +
554fe2caefcSParav Pandit 		dev->nic_info.db_total_size)) &&
555fe2caefcSParav Pandit 		(len <=	dev->nic_info.db_page_size)) {
55643a6b402SNaresh Gottumukkala 		if (vma->vm_flags & VM_READ)
55743a6b402SNaresh Gottumukkala 			return -EPERM;
55843a6b402SNaresh Gottumukkala 
55943a6b402SNaresh Gottumukkala 		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
560fe2caefcSParav Pandit 		status = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
561fe2caefcSParav Pandit 					    len, vma->vm_page_prot);
562fe2caefcSParav Pandit 	} else if (dev->nic_info.dpp_unmapped_len &&
563fe2caefcSParav Pandit 		(vm_page >= (u64) dev->nic_info.dpp_unmapped_addr) &&
564fe2caefcSParav Pandit 		(vm_page <= (u64) (dev->nic_info.dpp_unmapped_addr +
565fe2caefcSParav Pandit 			dev->nic_info.dpp_unmapped_len)) &&
566fe2caefcSParav Pandit 		(len <= dev->nic_info.dpp_unmapped_len)) {
56743a6b402SNaresh Gottumukkala 		if (vma->vm_flags & VM_READ)
56843a6b402SNaresh Gottumukkala 			return -EPERM;
56943a6b402SNaresh Gottumukkala 
570fe2caefcSParav Pandit 		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
571fe2caefcSParav Pandit 		status = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
572fe2caefcSParav Pandit 					    len, vma->vm_page_prot);
573fe2caefcSParav Pandit 	} else {
574fe2caefcSParav Pandit 		status = remap_pfn_range(vma, vma->vm_start,
575fe2caefcSParav Pandit 					 vma->vm_pgoff, len, vma->vm_page_prot);
576fe2caefcSParav Pandit 	}
577fe2caefcSParav Pandit 	return status;
578fe2caefcSParav Pandit }
579fe2caefcSParav Pandit 
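/*
 * Tell userspace where to mmap() the resources behind a new PD: the
 * doorbell page for this PD id and, when DPP is enabled, the PD's DPP
 * page.  Both ranges are added to the ucontext mmap list before the
 * response is copied out, and removed again if the copy fails.
 */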
58045e86b33SNaresh Gottumukkala static int ocrdma_copy_pd_uresp(struct ocrdma_dev *dev, struct ocrdma_pd *pd,
581fe2caefcSParav Pandit 				struct ib_udata *udata)
582fe2caefcSParav Pandit {
583fe2caefcSParav Pandit 	int status;
584fe2caefcSParav Pandit 	u64 db_page_addr;
585da496438SRoland Dreier 	u64 dpp_page_addr = 0;
586fe2caefcSParav Pandit 	u32 db_page_size;
587fe2caefcSParav Pandit 	struct ocrdma_alloc_pd_uresp rsp;
588ff23dfa1SShamir Rabinovitch 	struct ocrdma_ucontext *uctx = rdma_udata_to_drv_context(
589ff23dfa1SShamir Rabinovitch 		udata, struct ocrdma_ucontext, ibucontext);
590fe2caefcSParav Pandit 
59163ea3749SDan Carpenter 	memset(&rsp, 0, sizeof(rsp));
592fe2caefcSParav Pandit 	rsp.id = pd->id;
593fe2caefcSParav Pandit 	rsp.dpp_enabled = pd->dpp_enabled;
594cffce990SNaresh Gottumukkala 	db_page_addr = ocrdma_get_db_addr(dev, pd->id);
595f99b1649SNaresh Gottumukkala 	db_page_size = dev->nic_info.db_page_size;
596fe2caefcSParav Pandit 
597fe2caefcSParav Pandit 	status = ocrdma_add_mmap(uctx, db_page_addr, db_page_size);
598fe2caefcSParav Pandit 	if (status)
599fe2caefcSParav Pandit 		return status;
600fe2caefcSParav Pandit 
601fe2caefcSParav Pandit 	if (pd->dpp_enabled) {
602f99b1649SNaresh Gottumukkala 		dpp_page_addr = dev->nic_info.dpp_unmapped_addr +
60343a6b402SNaresh Gottumukkala 				(pd->id * PAGE_SIZE);
604fe2caefcSParav Pandit 		status = ocrdma_add_mmap(uctx, dpp_page_addr,
60543a6b402SNaresh Gottumukkala 				 PAGE_SIZE);
606fe2caefcSParav Pandit 		if (status)
607fe2caefcSParav Pandit 			goto dpp_map_err;
608fe2caefcSParav Pandit 		rsp.dpp_page_addr_hi = upper_32_bits(dpp_page_addr);
609fe2caefcSParav Pandit 		rsp.dpp_page_addr_lo = dpp_page_addr;
610fe2caefcSParav Pandit 	}
611fe2caefcSParav Pandit 
612fe2caefcSParav Pandit 	status = ib_copy_to_udata(udata, &rsp, sizeof(rsp));
613fe2caefcSParav Pandit 	if (status)
614fe2caefcSParav Pandit 		goto ucopy_err;
615fe2caefcSParav Pandit 
616fe2caefcSParav Pandit 	pd->uctx = uctx;
617fe2caefcSParav Pandit 	return 0;
618fe2caefcSParav Pandit 
619fe2caefcSParav Pandit ucopy_err:
620da496438SRoland Dreier 	if (pd->dpp_enabled)
62143a6b402SNaresh Gottumukkala 		ocrdma_del_mmap(pd->uctx, dpp_page_addr, PAGE_SIZE);
622fe2caefcSParav Pandit dpp_map_err:
623fe2caefcSParav Pandit 	ocrdma_del_mmap(pd->uctx, db_page_addr, db_page_size);
624fe2caefcSParav Pandit 	return status;
625fe2caefcSParav Pandit }
626fe2caefcSParav Pandit 
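/*
 * For userspace callers, first try to hand out the per-ucontext PD
 * created in ocrdma_alloc_ucontext(); only one consumer may hold it at
 * a time (pd_in_use).  Otherwise allocate a fresh PD via the mailbox or
 * the pre-allocated pool.
 */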
627ff23dfa1SShamir Rabinovitch int ocrdma_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
628fe2caefcSParav Pandit {
62921a428a0SLeon Romanovsky 	struct ib_device *ibdev = ibpd->device;
630fe2caefcSParav Pandit 	struct ocrdma_dev *dev = get_ocrdma_dev(ibdev);
631fe2caefcSParav Pandit 	struct ocrdma_pd *pd;
632fe2caefcSParav Pandit 	int status;
633cffce990SNaresh Gottumukkala 	u8 is_uctx_pd = false;
634ff23dfa1SShamir Rabinovitch 	struct ocrdma_ucontext *uctx = rdma_udata_to_drv_context(
635ff23dfa1SShamir Rabinovitch 		udata, struct ocrdma_ucontext, ibucontext);
636fe2caefcSParav Pandit 
637ff23dfa1SShamir Rabinovitch 	if (udata) {
638cffce990SNaresh Gottumukkala 		pd = ocrdma_get_ucontext_pd(uctx);
639cffce990SNaresh Gottumukkala 		if (pd) {
640cffce990SNaresh Gottumukkala 			is_uctx_pd = true;
641cffce990SNaresh Gottumukkala 			goto pd_mapping;
642fe2caefcSParav Pandit 		}
64343a6b402SNaresh Gottumukkala 	}
644fe2caefcSParav Pandit 
64521a428a0SLeon Romanovsky 	pd = get_ocrdma_pd(ibpd);
64621a428a0SLeon Romanovsky 	status = _ocrdma_alloc_pd(dev, pd, uctx, udata);
64721a428a0SLeon Romanovsky 	if (status)
648cffce990SNaresh Gottumukkala 		goto exit;
649cffce990SNaresh Gottumukkala 
650cffce990SNaresh Gottumukkala pd_mapping:
651ff23dfa1SShamir Rabinovitch 	if (udata) {
652ff23dfa1SShamir Rabinovitch 		status = ocrdma_copy_pd_uresp(dev, pd, udata);
653fe2caefcSParav Pandit 		if (status)
654fe2caefcSParav Pandit 			goto err;
655fe2caefcSParav Pandit 	}
65621a428a0SLeon Romanovsky 	return 0;
657fe2caefcSParav Pandit 
658fe2caefcSParav Pandit err:
65921a428a0SLeon Romanovsky 	if (is_uctx_pd)
660cffce990SNaresh Gottumukkala 		ocrdma_release_ucontext_pd(uctx);
66121a428a0SLeon Romanovsky 	else
66221a428a0SLeon Romanovsky 		_ocrdma_dealloc_pd(dev, pd);
663cffce990SNaresh Gottumukkala exit:
66421a428a0SLeon Romanovsky 	return status;
665fe2caefcSParav Pandit }
666fe2caefcSParav Pandit 
66791a7c58fSLeon Romanovsky int ocrdma_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
668fe2caefcSParav Pandit {
669fe2caefcSParav Pandit 	struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
670f99b1649SNaresh Gottumukkala 	struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
671cffce990SNaresh Gottumukkala 	struct ocrdma_ucontext *uctx = NULL;
672fe2caefcSParav Pandit 	u64 usr_db;
673fe2caefcSParav Pandit 
674cffce990SNaresh Gottumukkala 	uctx = pd->uctx;
675cffce990SNaresh Gottumukkala 	if (uctx) {
676fe2caefcSParav Pandit 		u64 dpp_db = dev->nic_info.dpp_unmapped_addr +
67743a6b402SNaresh Gottumukkala 			(pd->id * PAGE_SIZE);
678fe2caefcSParav Pandit 		if (pd->dpp_enabled)
67943a6b402SNaresh Gottumukkala 			ocrdma_del_mmap(pd->uctx, dpp_db, PAGE_SIZE);
680cffce990SNaresh Gottumukkala 		usr_db = ocrdma_get_db_addr(dev, pd->id);
681fe2caefcSParav Pandit 		ocrdma_del_mmap(pd->uctx, usr_db, dev->nic_info.db_page_size);
682cffce990SNaresh Gottumukkala 
683cffce990SNaresh Gottumukkala 		if (is_ucontext_pd(uctx, pd)) {
684cffce990SNaresh Gottumukkala 			ocrdma_release_ucontext_pd(uctx);
68591a7c58fSLeon Romanovsky 			return 0;
686fe2caefcSParav Pandit 		}
687cffce990SNaresh Gottumukkala 	}
68821a428a0SLeon Romanovsky 	_ocrdma_dealloc_pd(dev, pd);
68991a7c58fSLeon Romanovsky 	return 0;
690fe2caefcSParav Pandit }
691fe2caefcSParav Pandit 
6921afc0454SNaresh Gottumukkala static int ocrdma_alloc_lkey(struct ocrdma_dev *dev, struct ocrdma_mr *mr,
6931afc0454SNaresh Gottumukkala 			    u32 pdid, int acc, u32 num_pbls, u32 addr_check)
694fe2caefcSParav Pandit {
695fe2caefcSParav Pandit 	int status;
696fe2caefcSParav Pandit 
697fe2caefcSParav Pandit 	mr->hwmr.fr_mr = 0;
698fe2caefcSParav Pandit 	mr->hwmr.local_rd = 1;
699fe2caefcSParav Pandit 	mr->hwmr.remote_rd = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
700fe2caefcSParav Pandit 	mr->hwmr.remote_wr = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
701fe2caefcSParav Pandit 	mr->hwmr.local_wr = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
702fe2caefcSParav Pandit 	mr->hwmr.mw_bind = (acc & IB_ACCESS_MW_BIND) ? 1 : 0;
703fe2caefcSParav Pandit 	mr->hwmr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
704fe2caefcSParav Pandit 	mr->hwmr.num_pbls = num_pbls;
705fe2caefcSParav Pandit 
706f99b1649SNaresh Gottumukkala 	status = ocrdma_mbx_alloc_lkey(dev, &mr->hwmr, pdid, addr_check);
707f99b1649SNaresh Gottumukkala 	if (status)
708f99b1649SNaresh Gottumukkala 		return status;
709f99b1649SNaresh Gottumukkala 
710fe2caefcSParav Pandit 	mr->ibmr.lkey = mr->hwmr.lkey;
711fe2caefcSParav Pandit 	if (mr->hwmr.remote_wr || mr->hwmr.remote_rd)
712fe2caefcSParav Pandit 		mr->ibmr.rkey = mr->hwmr.lkey;
713f99b1649SNaresh Gottumukkala 	return 0;
714fe2caefcSParav Pandit }
715fe2caefcSParav Pandit 
716fe2caefcSParav Pandit struct ib_mr *ocrdma_get_dma_mr(struct ib_pd *ibpd, int acc)
717fe2caefcSParav Pandit {
718f99b1649SNaresh Gottumukkala 	int status;
719fe2caefcSParav Pandit 	struct ocrdma_mr *mr;
720f99b1649SNaresh Gottumukkala 	struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
721f99b1649SNaresh Gottumukkala 	struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
722fe2caefcSParav Pandit 
723f99b1649SNaresh Gottumukkala 	if (acc & IB_ACCESS_REMOTE_WRITE && !(acc & IB_ACCESS_LOCAL_WRITE)) {
724f99b1649SNaresh Gottumukkala 		pr_err("%s err, invalid access rights\n", __func__);
725f99b1649SNaresh Gottumukkala 		return ERR_PTR(-EINVAL);
726f99b1649SNaresh Gottumukkala 	}
727f99b1649SNaresh Gottumukkala 
728f99b1649SNaresh Gottumukkala 	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
729f99b1649SNaresh Gottumukkala 	if (!mr)
730f99b1649SNaresh Gottumukkala 		return ERR_PTR(-ENOMEM);
731f99b1649SNaresh Gottumukkala 
7321afc0454SNaresh Gottumukkala 	status = ocrdma_alloc_lkey(dev, mr, pd->id, acc, 0,
733f99b1649SNaresh Gottumukkala 				   OCRDMA_ADDR_CHECK_DISABLE);
734f99b1649SNaresh Gottumukkala 	if (status) {
735f99b1649SNaresh Gottumukkala 		kfree(mr);
736f99b1649SNaresh Gottumukkala 		return ERR_PTR(status);
737f99b1649SNaresh Gottumukkala 	}
738fe2caefcSParav Pandit 
739fe2caefcSParav Pandit 	return &mr->ibmr;
740fe2caefcSParav Pandit }
741fe2caefcSParav Pandit 
742fe2caefcSParav Pandit static void ocrdma_free_mr_pbl_tbl(struct ocrdma_dev *dev,
743fe2caefcSParav Pandit 				   struct ocrdma_hw_mr *mr)
744fe2caefcSParav Pandit {
745fe2caefcSParav Pandit 	struct pci_dev *pdev = dev->nic_info.pdev;
746fe2caefcSParav Pandit 	int i = 0;
747fe2caefcSParav Pandit 
748fe2caefcSParav Pandit 	if (mr->pbl_table) {
749fe2caefcSParav Pandit 		for (i = 0; i < mr->num_pbls; i++) {
750fe2caefcSParav Pandit 			if (!mr->pbl_table[i].va)
751fe2caefcSParav Pandit 				continue;
752fe2caefcSParav Pandit 			dma_free_coherent(&pdev->dev, mr->pbl_size,
753fe2caefcSParav Pandit 					  mr->pbl_table[i].va,
754fe2caefcSParav Pandit 					  mr->pbl_table[i].pa);
755fe2caefcSParav Pandit 		}
756fe2caefcSParav Pandit 		kfree(mr->pbl_table);
757fe2caefcSParav Pandit 		mr->pbl_table = NULL;
758fe2caefcSParav Pandit 	}
759fe2caefcSParav Pandit }
760fe2caefcSParav Pandit 
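/*
 * Size the PBL chain needed to hold num_pbes page entries.  The PBL
 * size starts at OCRDMA_MIN_HPAGE_SIZE and doubles until the PBL count
 * fits under the device limit.  Assuming the minimum PBL is 4 KB, each
 * PBL holds 512 64-bit entries, so e.g. 1024 PBEs need two 4 KB PBLs.
 */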
7611afc0454SNaresh Gottumukkala static int ocrdma_get_pbl_info(struct ocrdma_dev *dev, struct ocrdma_mr *mr,
7621afc0454SNaresh Gottumukkala 			      u32 num_pbes)
763fe2caefcSParav Pandit {
764fe2caefcSParav Pandit 	u32 num_pbls = 0;
765fe2caefcSParav Pandit 	u32 idx = 0;
766fe2caefcSParav Pandit 	int status = 0;
767fe2caefcSParav Pandit 	u32 pbl_size;
768fe2caefcSParav Pandit 
769fe2caefcSParav Pandit 	do {
770fe2caefcSParav Pandit 		pbl_size = OCRDMA_MIN_HPAGE_SIZE * (1 << idx);
771fe2caefcSParav Pandit 		if (pbl_size > MAX_OCRDMA_PBL_SIZE) {
772fe2caefcSParav Pandit 			status = -EFAULT;
773fe2caefcSParav Pandit 			break;
774fe2caefcSParav Pandit 		}
775fe2caefcSParav Pandit 		num_pbls = roundup(num_pbes, (pbl_size / sizeof(u64)));
776fe2caefcSParav Pandit 		num_pbls = num_pbls / (pbl_size / sizeof(u64));
777fe2caefcSParav Pandit 		idx++;
7781afc0454SNaresh Gottumukkala 	} while (num_pbls >= dev->attr.max_num_mr_pbl);
779fe2caefcSParav Pandit 
780fe2caefcSParav Pandit 	mr->hwmr.num_pbes = num_pbes;
781fe2caefcSParav Pandit 	mr->hwmr.num_pbls = num_pbls;
782fe2caefcSParav Pandit 	mr->hwmr.pbl_size = pbl_size;
783fe2caefcSParav Pandit 	return status;
784fe2caefcSParav Pandit }
785fe2caefcSParav Pandit 
786fe2caefcSParav Pandit static int ocrdma_build_pbl_tbl(struct ocrdma_dev *dev, struct ocrdma_hw_mr *mr)
787fe2caefcSParav Pandit {
788fe2caefcSParav Pandit 	int status = 0;
789fe2caefcSParav Pandit 	int i;
790fe2caefcSParav Pandit 	u32 dma_len = mr->pbl_size;
791fe2caefcSParav Pandit 	struct pci_dev *pdev = dev->nic_info.pdev;
792fe2caefcSParav Pandit 	void *va;
793fe2caefcSParav Pandit 	dma_addr_t pa;
794fe2caefcSParav Pandit 
7956396bb22SKees Cook 	mr->pbl_table = kcalloc(mr->num_pbls, sizeof(struct ocrdma_pbl),
7966396bb22SKees Cook 				GFP_KERNEL);
797fe2caefcSParav Pandit 
798fe2caefcSParav Pandit 	if (!mr->pbl_table)
799fe2caefcSParav Pandit 		return -ENOMEM;
800fe2caefcSParav Pandit 
801fe2caefcSParav Pandit 	for (i = 0; i < mr->num_pbls; i++) {
802750afb08SLuis Chamberlain 		va = dma_alloc_coherent(&pdev->dev, dma_len, &pa, GFP_KERNEL);
803fe2caefcSParav Pandit 		if (!va) {
804fe2caefcSParav Pandit 			ocrdma_free_mr_pbl_tbl(dev, mr);
805fe2caefcSParav Pandit 			status = -ENOMEM;
806fe2caefcSParav Pandit 			break;
807fe2caefcSParav Pandit 		}
808fe2caefcSParav Pandit 		mr->pbl_table[i].va = va;
809fe2caefcSParav Pandit 		mr->pbl_table[i].pa = pa;
810fe2caefcSParav Pandit 	}
811fe2caefcSParav Pandit 	return status;
812fe2caefcSParav Pandit }
813fe2caefcSParav Pandit 
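/*
 * Walk the pinned user memory in PAGE_SIZE blocks and write each
 * block's DMA address into the PBL chain as little-endian lo/hi 32-bit
 * halves, moving to the next PBL page whenever the current one fills.
 */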
814b8387f81SJason Gunthorpe static void build_user_pbes(struct ocrdma_dev *dev, struct ocrdma_mr *mr)
815fe2caefcSParav Pandit {
816fe2caefcSParav Pandit 	struct ocrdma_pbe *pbe;
81789603f7eSJason Gunthorpe 	struct ib_block_iter biter;
818fe2caefcSParav Pandit 	struct ocrdma_pbl *pbl_tbl = mr->hwmr.pbl_table;
819b8387f81SJason Gunthorpe 	int pbe_cnt;
820be8c456aSShiraz, Saleem 	u64 pg_addr;
821fe2caefcSParav Pandit 
822fe2caefcSParav Pandit 	if (!mr->hwmr.num_pbes)
823fe2caefcSParav Pandit 		return;
824fe2caefcSParav Pandit 
825fe2caefcSParav Pandit 	pbe = (struct ocrdma_pbe *)pbl_tbl->va;
826fe2caefcSParav Pandit 	pbe_cnt = 0;
827fe2caefcSParav Pandit 
82889603f7eSJason Gunthorpe 	rdma_umem_for_each_dma_block (mr->umem, &biter, PAGE_SIZE) {
829fe2caefcSParav Pandit 		/* store the page address in pbe */
83089603f7eSJason Gunthorpe 		pg_addr = rdma_block_iter_dma_address(&biter);
831be8c456aSShiraz, Saleem 		pbe->pa_lo = cpu_to_le32(pg_addr);
832be8c456aSShiraz, Saleem 		pbe->pa_hi = cpu_to_le32(upper_32_bits(pg_addr));
833fe2caefcSParav Pandit 		pbe_cnt += 1;
834fe2caefcSParav Pandit 		pbe++;
835fe2caefcSParav Pandit 
836fe2caefcSParav Pandit 		/* if the current pbl is full of pbes,
837fe2caefcSParav Pandit 		 * move on to the next pbl.
838fe2caefcSParav Pandit 		 */
839be8c456aSShiraz, Saleem 		if (pbe_cnt == (mr->hwmr.pbl_size / sizeof(u64))) {
840fe2caefcSParav Pandit 			pbl_tbl++;
841fe2caefcSParav Pandit 			pbe = (struct ocrdma_pbe *)pbl_tbl->va;
842fe2caefcSParav Pandit 			pbe_cnt = 0;
843fe2caefcSParav Pandit 		}
844fe2caefcSParav Pandit 	}
845fe2caefcSParav Pandit }
846fe2caefcSParav Pandit 
847fe2caefcSParav Pandit struct ib_mr *ocrdma_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
848fe2caefcSParav Pandit 				 u64 usr_addr, int acc, struct ib_udata *udata)
849fe2caefcSParav Pandit {
850fe2caefcSParav Pandit 	int status = -ENOMEM;
851f99b1649SNaresh Gottumukkala 	struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
852fe2caefcSParav Pandit 	struct ocrdma_mr *mr;
853fe2caefcSParav Pandit 	struct ocrdma_pd *pd;
854fe2caefcSParav Pandit 
855fe2caefcSParav Pandit 	pd = get_ocrdma_pd(ibpd);
856fe2caefcSParav Pandit 
857fe2caefcSParav Pandit 	if (acc & IB_ACCESS_REMOTE_WRITE && !(acc & IB_ACCESS_LOCAL_WRITE))
858fe2caefcSParav Pandit 		return ERR_PTR(-EINVAL);
859fe2caefcSParav Pandit 
860fe2caefcSParav Pandit 	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
861fe2caefcSParav Pandit 	if (!mr)
862fe2caefcSParav Pandit 		return ERR_PTR(status);
863c320e527SMoni Shoua 	mr->umem = ib_umem_get(ibpd->device, start, len, acc);
864fe2caefcSParav Pandit 	if (IS_ERR(mr->umem)) {
865fe2caefcSParav Pandit 		status = -EFAULT;
866fe2caefcSParav Pandit 		goto umem_err;
867fe2caefcSParav Pandit 	}
868b8387f81SJason Gunthorpe 	status = ocrdma_get_pbl_info(
869b8387f81SJason Gunthorpe 		dev, mr, ib_umem_num_dma_blocks(mr->umem, PAGE_SIZE));
870fe2caefcSParav Pandit 	if (status)
871fe2caefcSParav Pandit 		goto umem_err;
872fe2caefcSParav Pandit 
873be8c456aSShiraz, Saleem 	mr->hwmr.pbe_size = PAGE_SIZE;
874fe2caefcSParav Pandit 	mr->hwmr.va = usr_addr;
875fe2caefcSParav Pandit 	mr->hwmr.len = len;
876fe2caefcSParav Pandit 	mr->hwmr.remote_wr = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
877fe2caefcSParav Pandit 	mr->hwmr.remote_rd = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
878fe2caefcSParav Pandit 	mr->hwmr.local_wr = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
879fe2caefcSParav Pandit 	mr->hwmr.local_rd = 1;
880fe2caefcSParav Pandit 	mr->hwmr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
881fe2caefcSParav Pandit 	status = ocrdma_build_pbl_tbl(dev, &mr->hwmr);
882fe2caefcSParav Pandit 	if (status)
883fe2caefcSParav Pandit 		goto umem_err;
884b8387f81SJason Gunthorpe 	build_user_pbes(dev, mr);
885fe2caefcSParav Pandit 	status = ocrdma_reg_mr(dev, &mr->hwmr, pd->id, acc);
886fe2caefcSParav Pandit 	if (status)
887fe2caefcSParav Pandit 		goto mbx_err;
888fe2caefcSParav Pandit 	mr->ibmr.lkey = mr->hwmr.lkey;
889fe2caefcSParav Pandit 	if (mr->hwmr.remote_wr || mr->hwmr.remote_rd)
890fe2caefcSParav Pandit 		mr->ibmr.rkey = mr->hwmr.lkey;
891fe2caefcSParav Pandit 
892fe2caefcSParav Pandit 	return &mr->ibmr;
893fe2caefcSParav Pandit 
894fe2caefcSParav Pandit mbx_err:
895fe2caefcSParav Pandit 	ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr);
896fe2caefcSParav Pandit umem_err:
897fe2caefcSParav Pandit 	kfree(mr);
898fe2caefcSParav Pandit 	return ERR_PTR(status);
899fe2caefcSParav Pandit }
900fe2caefcSParav Pandit 
901c4367a26SShamir Rabinovitch int ocrdma_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata)
902fe2caefcSParav Pandit {
903fe2caefcSParav Pandit 	struct ocrdma_mr *mr = get_ocrdma_mr(ib_mr);
9041afc0454SNaresh Gottumukkala 	struct ocrdma_dev *dev = get_ocrdma_dev(ib_mr->device);
905fe2caefcSParav Pandit 
9064b8180aaSMitesh Ahuja 	(void) ocrdma_mbx_dealloc_lkey(dev, mr->hwmr.fr_mr, mr->hwmr.lkey);
907fe2caefcSParav Pandit 
9082eaa1c56SSagi Grimberg 	kfree(mr->pages);
909fe2caefcSParav Pandit 	ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr);
910fe2caefcSParav Pandit 
911fe2caefcSParav Pandit 	/* it could be user registered memory. */
912fe2caefcSParav Pandit 	ib_umem_release(mr->umem);
913fe2caefcSParav Pandit 	kfree(mr);
9146dab0264SMitesh Ahuja 
9156dab0264SMitesh Ahuja 	/* Don't stop cleanup, in case FW is unresponsive */
9166dab0264SMitesh Ahuja 	if (dev->mqe_ctx.fw_error_state) {
9176dab0264SMitesh Ahuja 		pr_err("%s(%d) fw not responding.\n",
9186dab0264SMitesh Ahuja 		       __func__, dev->id);
9196dab0264SMitesh Ahuja 	}
9204b8180aaSMitesh Ahuja 	return 0;
921fe2caefcSParav Pandit }
922fe2caefcSParav Pandit 
9231afc0454SNaresh Gottumukkala static int ocrdma_copy_cq_uresp(struct ocrdma_dev *dev, struct ocrdma_cq *cq,
924ff23dfa1SShamir Rabinovitch 				struct ib_udata *udata)
925fe2caefcSParav Pandit {
926fe2caefcSParav Pandit 	int status;
927ff23dfa1SShamir Rabinovitch 	struct ocrdma_ucontext *uctx = rdma_udata_to_drv_context(
928ff23dfa1SShamir Rabinovitch 		udata, struct ocrdma_ucontext, ibucontext);
929fe2caefcSParav Pandit 	struct ocrdma_create_cq_uresp uresp;
930fe2caefcSParav Pandit 
931ff23dfa1SShamir Rabinovitch 	/* this must be user flow! */
932ff23dfa1SShamir Rabinovitch 	if (!udata)
933ff23dfa1SShamir Rabinovitch 		return -EINVAL;
934ff23dfa1SShamir Rabinovitch 
93563ea3749SDan Carpenter 	memset(&uresp, 0, sizeof(uresp));
936fe2caefcSParav Pandit 	uresp.cq_id = cq->id;
93743a6b402SNaresh Gottumukkala 	uresp.page_size = PAGE_ALIGN(cq->len);
938fe2caefcSParav Pandit 	uresp.num_pages = 1;
939fe2caefcSParav Pandit 	uresp.max_hw_cqe = cq->max_hw_cqe;
9401b76d383SDevesh Sharma 	uresp.page_addr[0] = virt_to_phys(cq->va);
941cffce990SNaresh Gottumukkala 	uresp.db_page_addr =  ocrdma_get_db_addr(dev, uctx->cntxt_pd->id);
9421afc0454SNaresh Gottumukkala 	uresp.db_page_size = dev->nic_info.db_page_size;
943fe2caefcSParav Pandit 	uresp.phase_change = cq->phase_change ? 1 : 0;
944fe2caefcSParav Pandit 	status = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
945fe2caefcSParav Pandit 	if (status) {
946ef99c4c2SNaresh Gottumukkala 		pr_err("%s(%d) copy error cqid=0x%x.\n",
9471afc0454SNaresh Gottumukkala 		       __func__, dev->id, cq->id);
948fe2caefcSParav Pandit 		goto err;
949fe2caefcSParav Pandit 	}
950fe2caefcSParav Pandit 	status = ocrdma_add_mmap(uctx, uresp.db_page_addr, uresp.db_page_size);
951fe2caefcSParav Pandit 	if (status)
952fe2caefcSParav Pandit 		goto err;
953fe2caefcSParav Pandit 	status = ocrdma_add_mmap(uctx, uresp.page_addr[0], uresp.page_size);
954fe2caefcSParav Pandit 	if (status) {
955fe2caefcSParav Pandit 		ocrdma_del_mmap(uctx, uresp.db_page_addr, uresp.db_page_size);
956fe2caefcSParav Pandit 		goto err;
957fe2caefcSParav Pandit 	}
958fe2caefcSParav Pandit 	cq->ucontext = uctx;
959fe2caefcSParav Pandit err:
960fe2caefcSParav Pandit 	return status;
961fe2caefcSParav Pandit }
962fe2caefcSParav Pandit 
963e39afe3dSLeon Romanovsky int ocrdma_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
964fe2caefcSParav Pandit 		     struct ib_udata *udata)
965fe2caefcSParav Pandit {
966e39afe3dSLeon Romanovsky 	struct ib_device *ibdev = ibcq->device;
967bcf4c1eaSMatan Barak 	int entries = attr->cqe;
968e39afe3dSLeon Romanovsky 	struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);
969fe2caefcSParav Pandit 	struct ocrdma_dev *dev = get_ocrdma_dev(ibdev);
970ff23dfa1SShamir Rabinovitch 	struct ocrdma_ucontext *uctx = rdma_udata_to_drv_context(
971ff23dfa1SShamir Rabinovitch 		udata, struct ocrdma_ucontext, ibucontext);
972cffce990SNaresh Gottumukkala 	u16 pd_id = 0;
973fe2caefcSParav Pandit 	int status;
974fe2caefcSParav Pandit 	struct ocrdma_create_cq_ureq ureq;
975fe2caefcSParav Pandit 
976bcf4c1eaSMatan Barak 	if (attr->flags)
977e39afe3dSLeon Romanovsky 		return -EINVAL;
978bcf4c1eaSMatan Barak 
979fe2caefcSParav Pandit 	if (udata) {
980fe2caefcSParav Pandit 		if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
981e39afe3dSLeon Romanovsky 			return -EFAULT;
982fe2caefcSParav Pandit 	} else
983fe2caefcSParav Pandit 		ureq.dpp_cq = 0;
984fe2caefcSParav Pandit 
985fe2caefcSParav Pandit 	spin_lock_init(&cq->cq_lock);
986fe2caefcSParav Pandit 	spin_lock_init(&cq->comp_handler_lock);
987fe2caefcSParav Pandit 	INIT_LIST_HEAD(&cq->sq_head);
988fe2caefcSParav Pandit 	INIT_LIST_HEAD(&cq->rq_head);
989fe2caefcSParav Pandit 
990ff23dfa1SShamir Rabinovitch 	if (udata)
991cffce990SNaresh Gottumukkala 		pd_id = uctx->cntxt_pd->id;
992cffce990SNaresh Gottumukkala 
993cffce990SNaresh Gottumukkala 	status = ocrdma_mbx_create_cq(dev, cq, entries, ureq.dpp_cq, pd_id);
994e39afe3dSLeon Romanovsky 	if (status)
995e39afe3dSLeon Romanovsky 		return status;
996e39afe3dSLeon Romanovsky 
997ff23dfa1SShamir Rabinovitch 	if (udata) {
998ff23dfa1SShamir Rabinovitch 		status = ocrdma_copy_cq_uresp(dev, cq, udata);
999fe2caefcSParav Pandit 		if (status)
1000fe2caefcSParav Pandit 			goto ctx_err;
1001fe2caefcSParav Pandit 	}
1002fe2caefcSParav Pandit 	cq->phase = OCRDMA_CQE_VALID;
1003fe2caefcSParav Pandit 	dev->cq_tbl[cq->id] = cq;
1004e39afe3dSLeon Romanovsky 	return 0;
1005fe2caefcSParav Pandit 
1006fe2caefcSParav Pandit ctx_err:
1007fe2caefcSParav Pandit 	ocrdma_mbx_destroy_cq(dev, cq);
1008e39afe3dSLeon Romanovsky 	return status;
1009fe2caefcSParav Pandit }
1010fe2caefcSParav Pandit 
1011fe2caefcSParav Pandit int ocrdma_resize_cq(struct ib_cq *ibcq, int new_cnt,
1012fe2caefcSParav Pandit 		     struct ib_udata *udata)
1013fe2caefcSParav Pandit {
1014fe2caefcSParav Pandit 	int status = 0;
1015fe2caefcSParav Pandit 	struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);
1016fe2caefcSParav Pandit 
1017fe2caefcSParav Pandit 	if (new_cnt < 1 || new_cnt > cq->max_hw_cqe) {
1018fe2caefcSParav Pandit 		status = -EINVAL;
1019fe2caefcSParav Pandit 		return status;
1020fe2caefcSParav Pandit 	}
1021fe2caefcSParav Pandit 	ibcq->cqe = new_cnt;
1022fe2caefcSParav Pandit 	return status;
1023fe2caefcSParav Pandit }
1024fe2caefcSParav Pandit 
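/*
 * Count the CQEs still marked valid and ring the CQ doorbell for that
 * many entries, effectively discarding them before the CQ is torn
 * down.  Holding cq_lock also serialises against a poll that the last
 * interrupt may still have in flight.
 */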
1025ea617626SDevesh Sharma static void ocrdma_flush_cq(struct ocrdma_cq *cq)
1026ea617626SDevesh Sharma {
1027ea617626SDevesh Sharma 	int cqe_cnt;
1028ea617626SDevesh Sharma 	int valid_count = 0;
1029ea617626SDevesh Sharma 	unsigned long flags;
1030ea617626SDevesh Sharma 
1031ea617626SDevesh Sharma 	struct ocrdma_dev *dev = get_ocrdma_dev(cq->ibcq.device);
1032ea617626SDevesh Sharma 	struct ocrdma_cqe *cqe = NULL;
1033ea617626SDevesh Sharma 
1034ea617626SDevesh Sharma 	cqe = cq->va;
1035ea617626SDevesh Sharma 	cqe_cnt = cq->cqe_cnt;
1036ea617626SDevesh Sharma 
1037ea617626SDevesh Sharma 	/* The last irq might have scheduled a polling thread;
1038ea617626SDevesh Sharma 	 * sync up with it before hard flushing.
1039ea617626SDevesh Sharma 	 */
1040ea617626SDevesh Sharma 	spin_lock_irqsave(&cq->cq_lock, flags);
1041ea617626SDevesh Sharma 	while (cqe_cnt) {
1042ea617626SDevesh Sharma 		if (is_cqe_valid(cq, cqe))
1043ea617626SDevesh Sharma 			valid_count++;
1044ea617626SDevesh Sharma 		cqe++;
1045ea617626SDevesh Sharma 		cqe_cnt--;
1046ea617626SDevesh Sharma 	}
1047ea617626SDevesh Sharma 	ocrdma_ring_cq_db(dev, cq->id, false, false, valid_count);
1048ea617626SDevesh Sharma 	spin_unlock_irqrestore(&cq->cq_lock, flags);
1049ea617626SDevesh Sharma }
1050ea617626SDevesh Sharma 
105143d781b9SLeon Romanovsky int ocrdma_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
1052fe2caefcSParav Pandit {
1053fe2caefcSParav Pandit 	struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);
1054ea617626SDevesh Sharma 	struct ocrdma_eq *eq = NULL;
10551afc0454SNaresh Gottumukkala 	struct ocrdma_dev *dev = get_ocrdma_dev(ibcq->device);
1056cffce990SNaresh Gottumukkala 	int pdid = 0;
1057ea617626SDevesh Sharma 	u32 irq, indx;
1058ea617626SDevesh Sharma 
1059ea617626SDevesh Sharma 	dev->cq_tbl[cq->id] = NULL;
1060ea617626SDevesh Sharma 	indx = ocrdma_get_eq_table_index(dev, cq->eqn);
1061ea617626SDevesh Sharma 
1062ea617626SDevesh Sharma 	eq = &dev->eq_tbl[indx];
1063ea617626SDevesh Sharma 	irq = ocrdma_get_irq(dev, eq);
1064ea617626SDevesh Sharma 	synchronize_irq(irq);
1065ea617626SDevesh Sharma 	ocrdma_flush_cq(cq);
1066fe2caefcSParav Pandit 
1067a52c8e24SLeon Romanovsky 	ocrdma_mbx_destroy_cq(dev, cq);
1068fe2caefcSParav Pandit 	if (cq->ucontext) {
1069cffce990SNaresh Gottumukkala 		pdid = cq->ucontext->cntxt_pd->id;
107043a6b402SNaresh Gottumukkala 		ocrdma_del_mmap(cq->ucontext, (u64) cq->pa,
107143a6b402SNaresh Gottumukkala 				PAGE_ALIGN(cq->len));
1072cffce990SNaresh Gottumukkala 		ocrdma_del_mmap(cq->ucontext,
1073cffce990SNaresh Gottumukkala 				ocrdma_get_db_addr(dev, pdid),
1074fe2caefcSParav Pandit 				dev->nic_info.db_page_size);
1075fe2caefcSParav Pandit 	}
107643d781b9SLeon Romanovsky 	return 0;
1077fe2caefcSParav Pandit }
1078fe2caefcSParav Pandit 
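/* Register the QP in the device's qp_tbl, indexed by its QP id, so that
 * CQEs and events can be mapped back to the QP.
 */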
1079fe2caefcSParav Pandit static int ocrdma_add_qpn_map(struct ocrdma_dev *dev, struct ocrdma_qp *qp)
1080fe2caefcSParav Pandit {
1081fe2caefcSParav Pandit 	int status = -EINVAL;
1082fe2caefcSParav Pandit 
1083fe2caefcSParav Pandit 	if (qp->id < OCRDMA_MAX_QP && dev->qp_tbl[qp->id] == NULL) {
1084fe2caefcSParav Pandit 		dev->qp_tbl[qp->id] = qp;
1085fe2caefcSParav Pandit 		status = 0;
1086fe2caefcSParav Pandit 	}
1087fe2caefcSParav Pandit 	return status;
1088fe2caefcSParav Pandit }
1089fe2caefcSParav Pandit 
1090fe2caefcSParav Pandit static void ocrdma_del_qpn_map(struct ocrdma_dev *dev, struct ocrdma_qp *qp)
1091fe2caefcSParav Pandit {
1092fe2caefcSParav Pandit 	dev->qp_tbl[qp->id] = NULL;
1093fe2caefcSParav Pandit }
1094fe2caefcSParav Pandit 
1095fe2caefcSParav Pandit static int ocrdma_check_qp_params(struct ib_pd *ibpd, struct ocrdma_dev *dev,
1096e00b64f7SShamir Rabinovitch 				  struct ib_qp_init_attr *attrs,
1097e00b64f7SShamir Rabinovitch 				  struct ib_udata *udata)
1098fe2caefcSParav Pandit {
109943a6b402SNaresh Gottumukkala 	if ((attrs->qp_type != IB_QPT_GSI) &&
110043a6b402SNaresh Gottumukkala 	    (attrs->qp_type != IB_QPT_RC) &&
110143a6b402SNaresh Gottumukkala 	    (attrs->qp_type != IB_QPT_UC) &&
110243a6b402SNaresh Gottumukkala 	    (attrs->qp_type != IB_QPT_UD)) {
1103ef99c4c2SNaresh Gottumukkala 		pr_err("%s(%d) unsupported qp type=0x%x requested\n",
1104fe2caefcSParav Pandit 		       __func__, dev->id, attrs->qp_type);
1105bb8865f4SKamal Heib 		return -EOPNOTSUPP;
1106fe2caefcSParav Pandit 	}
110743a6b402SNaresh Gottumukkala 	/* Skip the check for QP1 to support CM size of 128 */
110843a6b402SNaresh Gottumukkala 	if ((attrs->qp_type != IB_QPT_GSI) &&
110943a6b402SNaresh Gottumukkala 	    (attrs->cap.max_send_wr > dev->attr.max_wqe)) {
1110ef99c4c2SNaresh Gottumukkala 		pr_err("%s(%d) unsupported send_wr=0x%x requested\n",
1111fe2caefcSParav Pandit 		       __func__, dev->id, attrs->cap.max_send_wr);
1112ef99c4c2SNaresh Gottumukkala 		pr_err("%s(%d) supported send_wr=0x%x\n",
1113fe2caefcSParav Pandit 		       __func__, dev->id, dev->attr.max_wqe);
1114fe2caefcSParav Pandit 		return -EINVAL;
1115fe2caefcSParav Pandit 	}
1116fe2caefcSParav Pandit 	if (!attrs->srq && (attrs->cap.max_recv_wr > dev->attr.max_rqe)) {
1117ef99c4c2SNaresh Gottumukkala 		pr_err("%s(%d) unsupported recv_wr=0x%x requested\n",
1118fe2caefcSParav Pandit 		       __func__, dev->id, attrs->cap.max_recv_wr);
1119ef99c4c2SNaresh Gottumukkala 		pr_err("%s(%d) supported recv_wr=0x%x\n",
1120fe2caefcSParav Pandit 		       __func__, dev->id, dev->attr.max_rqe);
1121fe2caefcSParav Pandit 		return -EINVAL;
1122fe2caefcSParav Pandit 	}
1123fe2caefcSParav Pandit 	if (attrs->cap.max_inline_data > dev->attr.max_inline_data) {
1124ef99c4c2SNaresh Gottumukkala 		pr_err("%s(%d) unsupported inline data size=0x%x requested\n",
1125ef99c4c2SNaresh Gottumukkala 		       __func__, dev->id, attrs->cap.max_inline_data);
1126ef99c4c2SNaresh Gottumukkala 		pr_err("%s(%d) supported inline data size=0x%x\n",
1127fe2caefcSParav Pandit 		       __func__, dev->id, dev->attr.max_inline_data);
1128fe2caefcSParav Pandit 		return -EINVAL;
1129fe2caefcSParav Pandit 	}
1130fe2caefcSParav Pandit 	if (attrs->cap.max_send_sge > dev->attr.max_send_sge) {
1131ef99c4c2SNaresh Gottumukkala 		pr_err("%s(%d) unsupported send_sge=0x%x requested\n",
1132fe2caefcSParav Pandit 		       __func__, dev->id, attrs->cap.max_send_sge);
1133ef99c4c2SNaresh Gottumukkala 		pr_err("%s(%d) supported send_sge=0x%x\n",
1134fe2caefcSParav Pandit 		       __func__, dev->id, dev->attr.max_send_sge);
1135fe2caefcSParav Pandit 		return -EINVAL;
1136fe2caefcSParav Pandit 	}
1137fe2caefcSParav Pandit 	if (attrs->cap.max_recv_sge > dev->attr.max_recv_sge) {
1138ef99c4c2SNaresh Gottumukkala 		pr_err("%s(%d) unsupported recv_sge=0x%x requested\n",
1139fe2caefcSParav Pandit 		       __func__, dev->id, attrs->cap.max_recv_sge);
1140ef99c4c2SNaresh Gottumukkala 		pr_err("%s(%d) supported recv_sge=0x%x\n",
1141fe2caefcSParav Pandit 		       __func__, dev->id, dev->attr.max_recv_sge);
1142fe2caefcSParav Pandit 		return -EINVAL;
1143fe2caefcSParav Pandit 	}
1144fe2caefcSParav Pandit 	/* unprivileged user space cannot create special QPs */
1145e00b64f7SShamir Rabinovitch 	if (udata && attrs->qp_type == IB_QPT_GSI) {
1146ef99c4c2SNaresh Gottumukkala 		pr_err
1147fe2caefcSParav Pandit 		    ("%s(%d) Userspace can't create special QPs of type=0x%x\n",
1148fe2caefcSParav Pandit 		     __func__, dev->id, attrs->qp_type);
1149fe2caefcSParav Pandit 		return -EINVAL;
1150fe2caefcSParav Pandit 	}
1151fe2caefcSParav Pandit 	/* allow creating only one QP of the GSI type */
1152fe2caefcSParav Pandit 	if (attrs->qp_type == IB_QPT_GSI && dev->gsi_qp_created) {
1153ef99c4c2SNaresh Gottumukkala 		pr_err("%s(%d) GSI special QPs already created.\n",
1154fe2caefcSParav Pandit 		       __func__, dev->id);
1155fe2caefcSParav Pandit 		return -EINVAL;
1156fe2caefcSParav Pandit 	}
1157fe2caefcSParav Pandit 	/* verify consumer QPs are not trying to use GSI QP's CQ */
1158fe2caefcSParav Pandit 	if ((attrs->qp_type != IB_QPT_GSI) && (dev->gsi_qp_created)) {
1159fe2caefcSParav Pandit 		if ((dev->gsi_sqcq == get_ocrdma_cq(attrs->send_cq)) ||
11609e8fa040SRoland Dreier 			(dev->gsi_rqcq == get_ocrdma_cq(attrs->recv_cq))) {
1161ef99c4c2SNaresh Gottumukkala 			pr_err("%s(%d) Consumer QP cannot use GSI CQs.\n",
1162fe2caefcSParav Pandit 				__func__, dev->id);
1163fe2caefcSParav Pandit 			return -EINVAL;
1164fe2caefcSParav Pandit 		}
1165fe2caefcSParav Pandit 	}
1166fe2caefcSParav Pandit 	return 0;
1167fe2caefcSParav Pandit }
1168fe2caefcSParav Pandit 
1169fe2caefcSParav Pandit static int ocrdma_copy_qp_uresp(struct ocrdma_qp *qp,
1170fe2caefcSParav Pandit 				struct ib_udata *udata, int dpp_offset,
1171fe2caefcSParav Pandit 				int dpp_credit_lmt, int srq)
1172fe2caefcSParav Pandit {
11730ca4c39fSMarkus Elfring 	int status;
1174fe2caefcSParav Pandit 	u64 usr_db;
1175fe2caefcSParav Pandit 	struct ocrdma_create_qp_uresp uresp;
1176fe2caefcSParav Pandit 	struct ocrdma_pd *pd = qp->pd;
1177d2b8f7b1SMitesh Ahuja 	struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device);
1178fe2caefcSParav Pandit 
1179fe2caefcSParav Pandit 	memset(&uresp, 0, sizeof(uresp));
1180fe2caefcSParav Pandit 	usr_db = dev->nic_info.unmapped_db +
1181fe2caefcSParav Pandit 			(pd->id * dev->nic_info.db_page_size);
1182fe2caefcSParav Pandit 	uresp.qp_id = qp->id;
1183fe2caefcSParav Pandit 	uresp.sq_dbid = qp->sq.dbid;
1184fe2caefcSParav Pandit 	uresp.num_sq_pages = 1;
118543a6b402SNaresh Gottumukkala 	uresp.sq_page_size = PAGE_ALIGN(qp->sq.len);
11861b76d383SDevesh Sharma 	uresp.sq_page_addr[0] = virt_to_phys(qp->sq.va);
1187fe2caefcSParav Pandit 	uresp.num_wqe_allocated = qp->sq.max_cnt;
1188fe2caefcSParav Pandit 	if (!srq) {
1189fe2caefcSParav Pandit 		uresp.rq_dbid = qp->rq.dbid;
1190fe2caefcSParav Pandit 		uresp.num_rq_pages = 1;
119143a6b402SNaresh Gottumukkala 		uresp.rq_page_size = PAGE_ALIGN(qp->rq.len);
11921b76d383SDevesh Sharma 		uresp.rq_page_addr[0] = virt_to_phys(qp->rq.va);
1193fe2caefcSParav Pandit 		uresp.num_rqe_allocated = qp->rq.max_cnt;
1194fe2caefcSParav Pandit 	}
1195fe2caefcSParav Pandit 	uresp.db_page_addr = usr_db;
1196fe2caefcSParav Pandit 	uresp.db_page_size = dev->nic_info.db_page_size;
1197fe2caefcSParav Pandit 	uresp.db_sq_offset = OCRDMA_DB_GEN2_SQ_OFFSET;
1198f11220eeSNaresh Gottumukkala 	uresp.db_rq_offset = OCRDMA_DB_GEN2_RQ_OFFSET;
11992df84fa8SDevesh Sharma 	uresp.db_shift = OCRDMA_DB_RQ_SHIFT;
1200fe2caefcSParav Pandit 
1201fe2caefcSParav Pandit 	if (qp->dpp_enabled) {
1202fe2caefcSParav Pandit 		uresp.dpp_credit = dpp_credit_lmt;
1203fe2caefcSParav Pandit 		uresp.dpp_offset = dpp_offset;
1204fe2caefcSParav Pandit 	}
1205fe2caefcSParav Pandit 	status = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
1206fe2caefcSParav Pandit 	if (status) {
1207ef99c4c2SNaresh Gottumukkala 		pr_err("%s(%d) user copy error.\n", __func__, dev->id);
1208fe2caefcSParav Pandit 		goto err;
1209fe2caefcSParav Pandit 	}
1210fe2caefcSParav Pandit 	status = ocrdma_add_mmap(pd->uctx, uresp.sq_page_addr[0],
1211fe2caefcSParav Pandit 				 uresp.sq_page_size);
1212fe2caefcSParav Pandit 	if (status)
1213fe2caefcSParav Pandit 		goto err;
1214fe2caefcSParav Pandit 
1215fe2caefcSParav Pandit 	if (!srq) {
1216fe2caefcSParav Pandit 		status = ocrdma_add_mmap(pd->uctx, uresp.rq_page_addr[0],
1217fe2caefcSParav Pandit 					 uresp.rq_page_size);
1218fe2caefcSParav Pandit 		if (status)
1219fe2caefcSParav Pandit 			goto rq_map_err;
1220fe2caefcSParav Pandit 	}
1221fe2caefcSParav Pandit 	return status;
1222fe2caefcSParav Pandit rq_map_err:
1223fe2caefcSParav Pandit 	ocrdma_del_mmap(pd->uctx, uresp.sq_page_addr[0], uresp.sq_page_size);
1224fe2caefcSParav Pandit err:
1225fe2caefcSParav Pandit 	return status;
1226fe2caefcSParav Pandit }
1227fe2caefcSParav Pandit 
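/* Set up the SQ and RQ doorbell addresses for this QP; the register
 * offsets differ between the SKH-R and earlier ASIC generations.
 */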
1228fe2caefcSParav Pandit static void ocrdma_set_qp_db(struct ocrdma_dev *dev, struct ocrdma_qp *qp,
1229fe2caefcSParav Pandit 			     struct ocrdma_pd *pd)
1230fe2caefcSParav Pandit {
123121c3391aSDevesh Sharma 	if (ocrdma_get_asic_type(dev) == OCRDMA_ASIC_GEN_SKH_R) {
1232fe2caefcSParav Pandit 		qp->sq_db = dev->nic_info.db +
1233fe2caefcSParav Pandit 			(pd->id * dev->nic_info.db_page_size) +
1234fe2caefcSParav Pandit 			OCRDMA_DB_GEN2_SQ_OFFSET;
1235fe2caefcSParav Pandit 		qp->rq_db = dev->nic_info.db +
1236fe2caefcSParav Pandit 			(pd->id * dev->nic_info.db_page_size) +
1237f11220eeSNaresh Gottumukkala 			OCRDMA_DB_GEN2_RQ_OFFSET;
1238fe2caefcSParav Pandit 	} else {
1239fe2caefcSParav Pandit 		qp->sq_db = dev->nic_info.db +
1240fe2caefcSParav Pandit 			(pd->id * dev->nic_info.db_page_size) +
1241fe2caefcSParav Pandit 			OCRDMA_DB_SQ_OFFSET;
1242fe2caefcSParav Pandit 		qp->rq_db = dev->nic_info.db +
1243fe2caefcSParav Pandit 			(pd->id * dev->nic_info.db_page_size) +
1244fe2caefcSParav Pandit 			OCRDMA_DB_RQ_OFFSET;
1245fe2caefcSParav Pandit 	}
1246fe2caefcSParav Pandit }
1247fe2caefcSParav Pandit 
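/* Allocate the wr_id tracking tables used to complete send and receive
 * work requests of kernel QPs.
 */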
1248fe2caefcSParav Pandit static int ocrdma_alloc_wr_id_tbl(struct ocrdma_qp *qp)
1249fe2caefcSParav Pandit {
1250fe2caefcSParav Pandit 	qp->wqe_wr_id_tbl =
12516396bb22SKees Cook 	    kcalloc(qp->sq.max_cnt, sizeof(*(qp->wqe_wr_id_tbl)),
1252fe2caefcSParav Pandit 		    GFP_KERNEL);
1253fe2caefcSParav Pandit 	if (qp->wqe_wr_id_tbl == NULL)
1254fe2caefcSParav Pandit 		return -ENOMEM;
1255fe2caefcSParav Pandit 	qp->rqe_wr_id_tbl =
12566396bb22SKees Cook 	    kcalloc(qp->rq.max_cnt, sizeof(u64), GFP_KERNEL);
1257fe2caefcSParav Pandit 	if (qp->rqe_wr_id_tbl == NULL)
1258fe2caefcSParav Pandit 		return -ENOMEM;
1259fe2caefcSParav Pandit 
1260fe2caefcSParav Pandit 	return 0;
1261fe2caefcSParav Pandit }
1262fe2caefcSParav Pandit 
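/* Initialize the software QP state from the create-QP attributes. */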
1263fe2caefcSParav Pandit static void ocrdma_set_qp_init_params(struct ocrdma_qp *qp,
1264fe2caefcSParav Pandit 				      struct ocrdma_pd *pd,
1265fe2caefcSParav Pandit 				      struct ib_qp_init_attr *attrs)
1266fe2caefcSParav Pandit {
1267fe2caefcSParav Pandit 	qp->pd = pd;
1268fe2caefcSParav Pandit 	spin_lock_init(&qp->q_lock);
1269fe2caefcSParav Pandit 	INIT_LIST_HEAD(&qp->sq_entry);
1270fe2caefcSParav Pandit 	INIT_LIST_HEAD(&qp->rq_entry);
1271fe2caefcSParav Pandit 
1272fe2caefcSParav Pandit 	qp->qp_type = attrs->qp_type;
1273fe2caefcSParav Pandit 	qp->cap_flags = OCRDMA_QP_INB_RD | OCRDMA_QP_INB_WR;
1274fe2caefcSParav Pandit 	qp->max_inline_data = attrs->cap.max_inline_data;
1275fe2caefcSParav Pandit 	qp->sq.max_sges = attrs->cap.max_send_sge;
1276fe2caefcSParav Pandit 	qp->rq.max_sges = attrs->cap.max_recv_sge;
1277fe2caefcSParav Pandit 	qp->state = OCRDMA_QPS_RST;
12782b51a9b9SNaresh Gottumukkala 	qp->signaled = (attrs->sq_sig_type == IB_SIGNAL_ALL_WR) ? true : false;
1279fe2caefcSParav Pandit }
1280fe2caefcSParav Pandit 
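/* Remember the CQs used by the GSI QP so that consumer QPs can be kept
 * off them (see ocrdma_check_qp_params()).
 */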
1281fe2caefcSParav Pandit static void ocrdma_store_gsi_qp_cq(struct ocrdma_dev *dev,
1282fe2caefcSParav Pandit 				   struct ib_qp_init_attr *attrs)
1283fe2caefcSParav Pandit {
1284fe2caefcSParav Pandit 	if (attrs->qp_type == IB_QPT_GSI) {
1285fe2caefcSParav Pandit 		dev->gsi_qp_created = 1;
1286fe2caefcSParav Pandit 		dev->gsi_sqcq = get_ocrdma_cq(attrs->send_cq);
1287fe2caefcSParav Pandit 		dev->gsi_rqcq = get_ocrdma_cq(attrs->recv_cq);
1288fe2caefcSParav Pandit 	}
1289fe2caefcSParav Pandit }
1290fe2caefcSParav Pandit 
1291fe2caefcSParav Pandit struct ib_qp *ocrdma_create_qp(struct ib_pd *ibpd,
1292fe2caefcSParav Pandit 			       struct ib_qp_init_attr *attrs,
1293fe2caefcSParav Pandit 			       struct ib_udata *udata)
1294fe2caefcSParav Pandit {
1295fe2caefcSParav Pandit 	int status;
1296fe2caefcSParav Pandit 	struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
1297fe2caefcSParav Pandit 	struct ocrdma_qp *qp;
1298f99b1649SNaresh Gottumukkala 	struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
1299fe2caefcSParav Pandit 	struct ocrdma_create_qp_ureq ureq;
1300fe2caefcSParav Pandit 	u16 dpp_credit_lmt, dpp_offset;
1301fe2caefcSParav Pandit 
1302e00b64f7SShamir Rabinovitch 	status = ocrdma_check_qp_params(ibpd, dev, attrs, udata);
1303fe2caefcSParav Pandit 	if (status)
1304fe2caefcSParav Pandit 		goto gen_err;
1305fe2caefcSParav Pandit 
1306fe2caefcSParav Pandit 	memset(&ureq, 0, sizeof(ureq));
1307fe2caefcSParav Pandit 	if (udata) {
1308fe2caefcSParav Pandit 		if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
1309fe2caefcSParav Pandit 			return ERR_PTR(-EFAULT);
1310fe2caefcSParav Pandit 	}
1311fe2caefcSParav Pandit 	qp = kzalloc(sizeof(*qp), GFP_KERNEL);
1312fe2caefcSParav Pandit 	if (!qp) {
1313fe2caefcSParav Pandit 		status = -ENOMEM;
1314fe2caefcSParav Pandit 		goto gen_err;
1315fe2caefcSParav Pandit 	}
1316fe2caefcSParav Pandit 	ocrdma_set_qp_init_params(qp, pd, attrs);
131743a6b402SNaresh Gottumukkala 	if (udata == NULL)
131843a6b402SNaresh Gottumukkala 		qp->cap_flags |= (OCRDMA_QP_MW_BIND | OCRDMA_QP_LKEY0 |
131943a6b402SNaresh Gottumukkala 					OCRDMA_QP_FAST_REG);
1320fe2caefcSParav Pandit 
1321fe2caefcSParav Pandit 	mutex_lock(&dev->dev_lock);
1322fe2caefcSParav Pandit 	status = ocrdma_mbx_create_qp(qp, attrs, ureq.enable_dpp_cq,
1323fe2caefcSParav Pandit 					ureq.dpp_cq_id,
1324fe2caefcSParav Pandit 					&dpp_offset, &dpp_credit_lmt);
1325fe2caefcSParav Pandit 	if (status)
1326fe2caefcSParav Pandit 		goto mbx_err;
1327fe2caefcSParav Pandit 
1328fe2caefcSParav Pandit 	/* user-space QP wr_id tables are managed in the library */
1329fe2caefcSParav Pandit 	if (udata == NULL) {
1330fe2caefcSParav Pandit 		status = ocrdma_alloc_wr_id_tbl(qp);
1331fe2caefcSParav Pandit 		if (status)
1332fe2caefcSParav Pandit 			goto map_err;
1333fe2caefcSParav Pandit 	}
1334fe2caefcSParav Pandit 
1335fe2caefcSParav Pandit 	status = ocrdma_add_qpn_map(dev, qp);
1336fe2caefcSParav Pandit 	if (status)
1337fe2caefcSParav Pandit 		goto map_err;
1338fe2caefcSParav Pandit 	ocrdma_set_qp_db(dev, qp, pd);
1339fe2caefcSParav Pandit 	if (udata) {
1340fe2caefcSParav Pandit 		status = ocrdma_copy_qp_uresp(qp, udata, dpp_offset,
1341fe2caefcSParav Pandit 					      dpp_credit_lmt,
1342fe2caefcSParav Pandit 					      (attrs->srq != NULL));
1343fe2caefcSParav Pandit 		if (status)
1344fe2caefcSParav Pandit 			goto cpy_err;
1345fe2caefcSParav Pandit 	}
1346fe2caefcSParav Pandit 	ocrdma_store_gsi_qp_cq(dev, attrs);
134727159f50SGottumukkala, Naresh 	qp->ibqp.qp_num = qp->id;
1348fe2caefcSParav Pandit 	mutex_unlock(&dev->dev_lock);
1349fe2caefcSParav Pandit 	return &qp->ibqp;
1350fe2caefcSParav Pandit 
1351fe2caefcSParav Pandit cpy_err:
1352fe2caefcSParav Pandit 	ocrdma_del_qpn_map(dev, qp);
1353fe2caefcSParav Pandit map_err:
1354fe2caefcSParav Pandit 	ocrdma_mbx_destroy_qp(dev, qp);
1355fe2caefcSParav Pandit mbx_err:
1356fe2caefcSParav Pandit 	mutex_unlock(&dev->dev_lock);
1357fe2caefcSParav Pandit 	kfree(qp->wqe_wr_id_tbl);
1358fe2caefcSParav Pandit 	kfree(qp->rqe_wr_id_tbl);
1359fe2caefcSParav Pandit 	kfree(qp);
1360ef99c4c2SNaresh Gottumukkala 	pr_err("%s(%d) error=%d\n", __func__, dev->id, status);
1361fe2caefcSParav Pandit gen_err:
1362fe2caefcSParav Pandit 	return ERR_PTR(status);
1363fe2caefcSParav Pandit }
1364fe2caefcSParav Pandit 
1365fe2caefcSParav Pandit int _ocrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
1366fe2caefcSParav Pandit 		      int attr_mask)
1367fe2caefcSParav Pandit {
1368fe2caefcSParav Pandit 	int status = 0;
1369fe2caefcSParav Pandit 	struct ocrdma_qp *qp;
1370fe2caefcSParav Pandit 	struct ocrdma_dev *dev;
1371fe2caefcSParav Pandit 	enum ib_qp_state old_qps;
1372fe2caefcSParav Pandit 
1373fe2caefcSParav Pandit 	qp = get_ocrdma_qp(ibqp);
1374d2b8f7b1SMitesh Ahuja 	dev = get_ocrdma_dev(ibqp->device);
1375fe2caefcSParav Pandit 	if (attr_mask & IB_QP_STATE)
1376057729cbSNaresh Gottumukkala 		status = ocrdma_qp_state_change(qp, attr->qp_state, &old_qps);
1377fe2caefcSParav Pandit 	/* if the new and previous states are the same, the hw doesn't
1378fe2caefcSParav Pandit 	 * need to know about it.
1379fe2caefcSParav Pandit 	 */
1380fe2caefcSParav Pandit 	if (status < 0)
1381fe2caefcSParav Pandit 		return status;
138295f60bb8SMarkus Elfring 	return ocrdma_mbx_modify_qp(dev, qp, attr, attr_mask);
1383fe2caefcSParav Pandit }
1384fe2caefcSParav Pandit 
1385fe2caefcSParav Pandit int ocrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
1386fe2caefcSParav Pandit 		     int attr_mask, struct ib_udata *udata)
1387fe2caefcSParav Pandit {
1388fe2caefcSParav Pandit 	unsigned long flags;
1389fe2caefcSParav Pandit 	int status = -EINVAL;
1390fe2caefcSParav Pandit 	struct ocrdma_qp *qp;
1391fe2caefcSParav Pandit 	struct ocrdma_dev *dev;
1392fe2caefcSParav Pandit 	enum ib_qp_state old_qps, new_qps;
1393fe2caefcSParav Pandit 
1394fe2caefcSParav Pandit 	qp = get_ocrdma_qp(ibqp);
1395d2b8f7b1SMitesh Ahuja 	dev = get_ocrdma_dev(ibqp->device);
1396fe2caefcSParav Pandit 
1397fe2caefcSParav Pandit 	/* synchronize with multiple contexts trying to change/retrieve the qp state */
1398fe2caefcSParav Pandit 	mutex_lock(&dev->dev_lock);
1399fe2caefcSParav Pandit 	/* synchronize with wqe/rqe posting and cqe processing contexts */
1400fe2caefcSParav Pandit 	spin_lock_irqsave(&qp->q_lock, flags);
1401fe2caefcSParav Pandit 	old_qps = get_ibqp_state(qp->state);
1402fe2caefcSParav Pandit 	if (attr_mask & IB_QP_STATE)
1403fe2caefcSParav Pandit 		new_qps = attr->qp_state;
1404fe2caefcSParav Pandit 	else
1405fe2caefcSParav Pandit 		new_qps = old_qps;
1406fe2caefcSParav Pandit 	spin_unlock_irqrestore(&qp->q_lock, flags);
1407fe2caefcSParav Pandit 
1408d31131bbSKamal Heib 	if (!ib_modify_qp_is_ok(old_qps, new_qps, ibqp->qp_type, attr_mask)) {
1409ef99c4c2SNaresh Gottumukkala 		pr_err("%s(%d) invalid attribute mask=0x%x specified for\n"
1410fe2caefcSParav Pandit 		       "qpn=0x%x of type=0x%x old_qps=0x%x, new_qps=0x%x\n",
1411fe2caefcSParav Pandit 		       __func__, dev->id, attr_mask, qp->id, ibqp->qp_type,
1412fe2caefcSParav Pandit 		       old_qps, new_qps);
1413fe2caefcSParav Pandit 		goto param_err;
1414fe2caefcSParav Pandit 	}
1415fe2caefcSParav Pandit 
1416fe2caefcSParav Pandit 	status = _ocrdma_modify_qp(ibqp, attr, attr_mask);
1417fe2caefcSParav Pandit 	if (status > 0)
1418fe2caefcSParav Pandit 		status = 0;
1419fe2caefcSParav Pandit param_err:
1420fe2caefcSParav Pandit 	mutex_unlock(&dev->dev_lock);
1421fe2caefcSParav Pandit 	return status;
1422fe2caefcSParav Pandit }
1423fe2caefcSParav Pandit 
1424fe2caefcSParav Pandit static enum ib_mtu ocrdma_mtu_int_to_enum(u16 mtu)
1425fe2caefcSParav Pandit {
1426fe2caefcSParav Pandit 	switch (mtu) {
1427fe2caefcSParav Pandit 	case 256:
1428fe2caefcSParav Pandit 		return IB_MTU_256;
1429fe2caefcSParav Pandit 	case 512:
1430fe2caefcSParav Pandit 		return IB_MTU_512;
1431fe2caefcSParav Pandit 	case 1024:
1432fe2caefcSParav Pandit 		return IB_MTU_1024;
1433fe2caefcSParav Pandit 	case 2048:
1434fe2caefcSParav Pandit 		return IB_MTU_2048;
1435fe2caefcSParav Pandit 	case 4096:
1436fe2caefcSParav Pandit 		return IB_MTU_4096;
1437fe2caefcSParav Pandit 	default:
1438fe2caefcSParav Pandit 		return IB_MTU_1024;
1439fe2caefcSParav Pandit 	}
1440fe2caefcSParav Pandit }
1441fe2caefcSParav Pandit 
1442fe2caefcSParav Pandit static int ocrdma_to_ib_qp_acc_flags(int qp_cap_flags)
1443fe2caefcSParav Pandit {
1444fe2caefcSParav Pandit 	int ib_qp_acc_flags = 0;
1445fe2caefcSParav Pandit 
1446fe2caefcSParav Pandit 	if (qp_cap_flags & OCRDMA_QP_INB_WR)
1447fe2caefcSParav Pandit 		ib_qp_acc_flags |= IB_ACCESS_REMOTE_WRITE;
1448fe2caefcSParav Pandit 	if (qp_cap_flags & OCRDMA_QP_INB_RD)
1449fe2caefcSParav Pandit 		ib_qp_acc_flags |= IB_ACCESS_LOCAL_WRITE;
1450fe2caefcSParav Pandit 	return ib_qp_acc_flags;
1451fe2caefcSParav Pandit }
1452fe2caefcSParav Pandit 
1453fe2caefcSParav Pandit int ocrdma_query_qp(struct ib_qp *ibqp,
1454fe2caefcSParav Pandit 		    struct ib_qp_attr *qp_attr,
1455fe2caefcSParav Pandit 		    int attr_mask, struct ib_qp_init_attr *qp_init_attr)
1456fe2caefcSParav Pandit {
1457fe2caefcSParav Pandit 	int status;
1458fe2caefcSParav Pandit 	u32 qp_state;
1459fe2caefcSParav Pandit 	struct ocrdma_qp_params params;
1460fe2caefcSParav Pandit 	struct ocrdma_qp *qp = get_ocrdma_qp(ibqp);
1461d2b8f7b1SMitesh Ahuja 	struct ocrdma_dev *dev = get_ocrdma_dev(ibqp->device);
1462fe2caefcSParav Pandit 
1463fe2caefcSParav Pandit 	memset(&params, 0, sizeof(params));
1464fe2caefcSParav Pandit 	mutex_lock(&dev->dev_lock);
1465fe2caefcSParav Pandit 	status = ocrdma_mbx_query_qp(dev, qp, &params);
1466fe2caefcSParav Pandit 	mutex_unlock(&dev->dev_lock);
1467fe2caefcSParav Pandit 	if (status)
1468fe2caefcSParav Pandit 		goto mbx_err;
146995bf0093SMitesh Ahuja 	if (qp->qp_type == IB_QPT_UD)
147095bf0093SMitesh Ahuja 		qp_attr->qkey = params.qkey;
1471fe2caefcSParav Pandit 	qp_attr->path_mtu =
1472fe2caefcSParav Pandit 		ocrdma_mtu_int_to_enum(params.path_mtu_pkey_indx &
1473fe2caefcSParav Pandit 				OCRDMA_QP_PARAMS_PATH_MTU_MASK) >>
1474fe2caefcSParav Pandit 				OCRDMA_QP_PARAMS_PATH_MTU_SHIFT;
1475fe2caefcSParav Pandit 	qp_attr->path_mig_state = IB_MIG_MIGRATED;
1476fe2caefcSParav Pandit 	qp_attr->rq_psn = params.hop_lmt_rq_psn & OCRDMA_QP_PARAMS_RQ_PSN_MASK;
1477fe2caefcSParav Pandit 	qp_attr->sq_psn = params.tclass_sq_psn & OCRDMA_QP_PARAMS_SQ_PSN_MASK;
1478fe2caefcSParav Pandit 	qp_attr->dest_qp_num =
1479fe2caefcSParav Pandit 	    params.ack_to_rnr_rtc_dest_qpn & OCRDMA_QP_PARAMS_DEST_QPN_MASK;
1480fe2caefcSParav Pandit 
1481fe2caefcSParav Pandit 	qp_attr->qp_access_flags = ocrdma_to_ib_qp_acc_flags(qp->cap_flags);
1482fe2caefcSParav Pandit 	qp_attr->cap.max_send_wr = qp->sq.max_cnt - 1;
1483fe2caefcSParav Pandit 	qp_attr->cap.max_recv_wr = qp->rq.max_cnt - 1;
1484fe2caefcSParav Pandit 	qp_attr->cap.max_send_sge = qp->sq.max_sges;
1485fe2caefcSParav Pandit 	qp_attr->cap.max_recv_sge = qp->rq.max_sges;
1486c43e9ab8SNaresh Gottumukkala 	qp_attr->cap.max_inline_data = qp->max_inline_data;
1487fe2caefcSParav Pandit 	qp_init_attr->cap = qp_attr->cap;
148844c58487SDasaratharaman Chandramouli 	qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
1489fe2caefcSParav Pandit 
1490d8966fcdSDasaratharaman Chandramouli 	rdma_ah_set_grh(&qp_attr->ah_attr, NULL,
1491d8966fcdSDasaratharaman Chandramouli 			params.rnt_rc_sl_fl &
1492d8966fcdSDasaratharaman Chandramouli 			  OCRDMA_QP_PARAMS_FLOW_LABEL_MASK,
1493d8966fcdSDasaratharaman Chandramouli 			qp->sgid_idx,
1494d8966fcdSDasaratharaman Chandramouli 			(params.hop_lmt_rq_psn &
1495d8966fcdSDasaratharaman Chandramouli 			 OCRDMA_QP_PARAMS_HOP_LMT_MASK) >>
1496d8966fcdSDasaratharaman Chandramouli 			 OCRDMA_QP_PARAMS_HOP_LMT_SHIFT,
1497d8966fcdSDasaratharaman Chandramouli 			(params.tclass_sq_psn &
1498d8966fcdSDasaratharaman Chandramouli 			 OCRDMA_QP_PARAMS_TCLASS_MASK) >>
1499d8966fcdSDasaratharaman Chandramouli 			 OCRDMA_QP_PARAMS_TCLASS_SHIFT);
1500d8966fcdSDasaratharaman Chandramouli 	rdma_ah_set_dgid_raw(&qp_attr->ah_attr, &params.dgid[0]);
1501d8966fcdSDasaratharaman Chandramouli 
1502d8966fcdSDasaratharaman Chandramouli 	rdma_ah_set_port_num(&qp_attr->ah_attr, 1);
1503d8966fcdSDasaratharaman Chandramouli 	rdma_ah_set_sl(&qp_attr->ah_attr, (params.rnt_rc_sl_fl &
1504fe2caefcSParav Pandit 					   OCRDMA_QP_PARAMS_SL_MASK) >>
1505d8966fcdSDasaratharaman Chandramouli 					   OCRDMA_QP_PARAMS_SL_SHIFT);
1506fe2caefcSParav Pandit 	qp_attr->timeout = (params.ack_to_rnr_rtc_dest_qpn &
1507fe2caefcSParav Pandit 			    OCRDMA_QP_PARAMS_ACK_TIMEOUT_MASK) >>
1508fe2caefcSParav Pandit 				OCRDMA_QP_PARAMS_ACK_TIMEOUT_SHIFT;
1509fe2caefcSParav Pandit 	qp_attr->rnr_retry = (params.ack_to_rnr_rtc_dest_qpn &
1510fe2caefcSParav Pandit 			      OCRDMA_QP_PARAMS_RNR_RETRY_CNT_MASK) >>
1511fe2caefcSParav Pandit 				OCRDMA_QP_PARAMS_RNR_RETRY_CNT_SHIFT;
1512fe2caefcSParav Pandit 	qp_attr->retry_cnt =
1513fe2caefcSParav Pandit 	    (params.rnt_rc_sl_fl & OCRDMA_QP_PARAMS_RETRY_CNT_MASK) >>
1514fe2caefcSParav Pandit 		OCRDMA_QP_PARAMS_RETRY_CNT_SHIFT;
1515fe2caefcSParav Pandit 	qp_attr->min_rnr_timer = 0;
1516fe2caefcSParav Pandit 	qp_attr->pkey_index = 0;
1517fe2caefcSParav Pandit 	qp_attr->port_num = 1;
1518d8966fcdSDasaratharaman Chandramouli 	rdma_ah_set_path_bits(&qp_attr->ah_attr, 0);
1519d8966fcdSDasaratharaman Chandramouli 	rdma_ah_set_static_rate(&qp_attr->ah_attr, 0);
1520fe2caefcSParav Pandit 	qp_attr->alt_pkey_index = 0;
1521fe2caefcSParav Pandit 	qp_attr->alt_port_num = 0;
1522fe2caefcSParav Pandit 	qp_attr->alt_timeout = 0;
1523fe2caefcSParav Pandit 	memset(&qp_attr->alt_ah_attr, 0, sizeof(qp_attr->alt_ah_attr));
1524fe2caefcSParav Pandit 	qp_state = (params.max_sge_recv_flags & OCRDMA_QP_PARAMS_STATE_MASK) >>
1525fe2caefcSParav Pandit 		    OCRDMA_QP_PARAMS_STATE_SHIFT;
152643c706b1SPadmanabh Ratnakar 	qp_attr->qp_state = get_ibqp_state(qp_state);
152743c706b1SPadmanabh Ratnakar 	qp_attr->cur_qp_state = qp_attr->qp_state;
1528fe2caefcSParav Pandit 	qp_attr->sq_draining = (qp_state == OCRDMA_QPS_SQ_DRAINING) ? 1 : 0;
1529fe2caefcSParav Pandit 	qp_attr->max_dest_rd_atomic =
1530fe2caefcSParav Pandit 	    params.max_ord_ird >> OCRDMA_QP_PARAMS_MAX_ORD_SHIFT;
1531fe2caefcSParav Pandit 	qp_attr->max_rd_atomic =
1532fe2caefcSParav Pandit 	    params.max_ord_ird & OCRDMA_QP_PARAMS_MAX_IRD_MASK;
1533fe2caefcSParav Pandit 	qp_attr->en_sqd_async_notify = (params.max_sge_recv_flags &
1534fe2caefcSParav Pandit 				OCRDMA_QP_PARAMS_FLAGS_SQD_ASYNC) ? 1 : 0;
153543c706b1SPadmanabh Ratnakar 	/* Sync driver QP state with FW */
153643c706b1SPadmanabh Ratnakar 	ocrdma_qp_state_change(qp, qp_attr->qp_state, NULL);
1537fe2caefcSParav Pandit mbx_err:
1538fe2caefcSParav Pandit 	return status;
1539fe2caefcSParav Pandit }
1540fe2caefcSParav Pandit 
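/* Toggle the allocation bit tracking the given SRQ RQE index. */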
1541f3070e7eSRasmus Villemoes static void ocrdma_srq_toggle_bit(struct ocrdma_srq *srq, unsigned int idx)
1542fe2caefcSParav Pandit {
1543f3070e7eSRasmus Villemoes 	unsigned int i = idx / 32;
1544f3070e7eSRasmus Villemoes 	u32 mask = (1U << (idx % 32));
1545fe2caefcSParav Pandit 
1546ba64fdcaSRasmus Villemoes 	srq->idx_bit_fields[i] ^= mask;
1547fe2caefcSParav Pandit }
1548fe2caefcSParav Pandit 
1549fe2caefcSParav Pandit static int ocrdma_hwq_free_cnt(struct ocrdma_qp_hwq_info *q)
1550fe2caefcSParav Pandit {
155143a6b402SNaresh Gottumukkala 	return ((q->max_wqe_idx - q->head) + q->tail) % q->max_cnt;
1552fe2caefcSParav Pandit }
1553fe2caefcSParav Pandit 
1554fe2caefcSParav Pandit static int is_hw_sq_empty(struct ocrdma_qp *qp)
1555fe2caefcSParav Pandit {
155643a6b402SNaresh Gottumukkala 	return (qp->sq.tail == qp->sq.head);
1557fe2caefcSParav Pandit }
1558fe2caefcSParav Pandit 
1559fe2caefcSParav Pandit static int is_hw_rq_empty(struct ocrdma_qp *qp)
1560fe2caefcSParav Pandit {
156143a6b402SNaresh Gottumukkala 	return (qp->rq.tail == qp->rq.head);
1562fe2caefcSParav Pandit }
1563fe2caefcSParav Pandit 
1564fe2caefcSParav Pandit static void *ocrdma_hwq_head(struct ocrdma_qp_hwq_info *q)
1565fe2caefcSParav Pandit {
1566fe2caefcSParav Pandit 	return q->va + (q->head * q->entry_size);
1567fe2caefcSParav Pandit }
1568fe2caefcSParav Pandit 
1569fe2caefcSParav Pandit static void *ocrdma_hwq_head_from_idx(struct ocrdma_qp_hwq_info *q,
1570fe2caefcSParav Pandit 				      u32 idx)
1571fe2caefcSParav Pandit {
1572fe2caefcSParav Pandit 	return q->va + (idx * q->entry_size);
1573fe2caefcSParav Pandit }
1574fe2caefcSParav Pandit 
1575fe2caefcSParav Pandit static void ocrdma_hwq_inc_head(struct ocrdma_qp_hwq_info *q)
1576fe2caefcSParav Pandit {
1577fe2caefcSParav Pandit 	q->head = (q->head + 1) & q->max_wqe_idx;
1578fe2caefcSParav Pandit }
1579fe2caefcSParav Pandit 
1580fe2caefcSParav Pandit static void ocrdma_hwq_inc_tail(struct ocrdma_qp_hwq_info *q)
1581fe2caefcSParav Pandit {
1582fe2caefcSParav Pandit 	q->tail = (q->tail + 1) & q->max_wqe_idx;
1583fe2caefcSParav Pandit }
1584fe2caefcSParav Pandit 
1585fe2caefcSParav Pandit /* discard the cqe for a given QP */
1586fe2caefcSParav Pandit static void ocrdma_discard_cqes(struct ocrdma_qp *qp, struct ocrdma_cq *cq)
1587fe2caefcSParav Pandit {
1588fe2caefcSParav Pandit 	unsigned long cq_flags;
1589fe2caefcSParav Pandit 	unsigned long flags;
1590fe2caefcSParav Pandit 	int discard_cnt = 0;
1591fe2caefcSParav Pandit 	u32 cur_getp, stop_getp;
1592fe2caefcSParav Pandit 	struct ocrdma_cqe *cqe;
1593cf5788adSSelvin Xavier 	u32 qpn = 0, wqe_idx = 0;
1594fe2caefcSParav Pandit 
1595fe2caefcSParav Pandit 	spin_lock_irqsave(&cq->cq_lock, cq_flags);
1596fe2caefcSParav Pandit 
1597fe2caefcSParav Pandit 	/* traverse the CQEs in the hw CQ,
1598fe2caefcSParav Pandit 	 * find those matching the given qp and
1599fe2caefcSParav Pandit 	 * mark them discarded by clearing the qpn.
1600fe2caefcSParav Pandit 	 * The doorbell is rung in poll_cq() since
1601fe2caefcSParav Pandit 	 * we don't complete cqes out of order.
1602fe2caefcSParav Pandit 	 */
1603fe2caefcSParav Pandit 
1604fe2caefcSParav Pandit 	cur_getp = cq->getp;
1605fe2caefcSParav Pandit 	/* find up to where we reap the cq. */
1606fe2caefcSParav Pandit 	stop_getp = cur_getp;
1607fe2caefcSParav Pandit 	do {
1608fe2caefcSParav Pandit 		if (is_hw_sq_empty(qp) && (!qp->srq && is_hw_rq_empty(qp)))
1609fe2caefcSParav Pandit 			break;
1610fe2caefcSParav Pandit 
1611fe2caefcSParav Pandit 		cqe = cq->va + cur_getp;
1612fe2caefcSParav Pandit 		/* exit if (a) the whole hw cq has been reaped, or
1613fe2caefcSParav Pandit 		 *         (b) the qp's sq/rq become empty.
1614fe2caefcSParav Pandit 		 */
1616fe2caefcSParav Pandit 		qpn = cqe->cmn.qpn & OCRDMA_CQE_QPN_MASK;
1617fe2caefcSParav Pandit 		/* if previously discarded cqe found, skip that too. */
1618fe2caefcSParav Pandit 		/* check for matching qp */
1619fe2caefcSParav Pandit 		if (qpn == 0 || qpn != qp->id)
1620fe2caefcSParav Pandit 			goto skip_cqe;
1621fe2caefcSParav Pandit 
1622f99b1649SNaresh Gottumukkala 		if (is_cqe_for_sq(cqe)) {
1623fe2caefcSParav Pandit 			ocrdma_hwq_inc_tail(&qp->sq);
1624f99b1649SNaresh Gottumukkala 		} else {
1625fe2caefcSParav Pandit 			if (qp->srq) {
1626cf5788adSSelvin Xavier 				wqe_idx = (le32_to_cpu(cqe->rq.buftag_qpn) >>
1627cf5788adSSelvin Xavier 					OCRDMA_CQE_BUFTAG_SHIFT) &
1628cf5788adSSelvin Xavier 					qp->srq->rq.max_wqe_idx;
1629db287ec5Sssh10 				BUG_ON(wqe_idx < 1);
1630fe2caefcSParav Pandit 				spin_lock_irqsave(&qp->srq->q_lock, flags);
1631fe2caefcSParav Pandit 				ocrdma_hwq_inc_tail(&qp->srq->rq);
1632cf5788adSSelvin Xavier 				ocrdma_srq_toggle_bit(qp->srq, wqe_idx - 1);
1633fe2caefcSParav Pandit 				spin_unlock_irqrestore(&qp->srq->q_lock, flags);
1634fe2caefcSParav Pandit 
1635f99b1649SNaresh Gottumukkala 			} else {
1636fe2caefcSParav Pandit 				ocrdma_hwq_inc_tail(&qp->rq);
1637fe2caefcSParav Pandit 			}
1638f99b1649SNaresh Gottumukkala 		}
1639cf5788adSSelvin Xavier 		/* mark cqe discarded so that it is not picked up later
1640cf5788adSSelvin Xavier 		 * in the poll_cq().
1641cf5788adSSelvin Xavier 		 */
1642cf5788adSSelvin Xavier 		discard_cnt += 1;
1643cf5788adSSelvin Xavier 		cqe->cmn.qpn = 0;
1644fe2caefcSParav Pandit skip_cqe:
1645fe2caefcSParav Pandit 		cur_getp = (cur_getp + 1) % cq->max_hw_cqe;
1646fe2caefcSParav Pandit 	} while (cur_getp != stop_getp);
1647fe2caefcSParav Pandit 	spin_unlock_irqrestore(&cq->cq_lock, cq_flags);
1648fe2caefcSParav Pandit }
1649fe2caefcSParav Pandit 
1650f11220eeSNaresh Gottumukkala void ocrdma_del_flush_qp(struct ocrdma_qp *qp)
1651fe2caefcSParav Pandit {
1652fe2caefcSParav Pandit 	int found = false;
1653fe2caefcSParav Pandit 	unsigned long flags;
1654d2b8f7b1SMitesh Ahuja 	struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);
1655fe2caefcSParav Pandit 	/* sync with any active CQ poll */
1656fe2caefcSParav Pandit 
1657fe2caefcSParav Pandit 	spin_lock_irqsave(&dev->flush_q_lock, flags);
1658fe2caefcSParav Pandit 	found = ocrdma_is_qp_in_sq_flushlist(qp->sq_cq, qp);
1659fe2caefcSParav Pandit 	if (found)
1660fe2caefcSParav Pandit 		list_del(&qp->sq_entry);
1661fe2caefcSParav Pandit 	if (!qp->srq) {
1662fe2caefcSParav Pandit 		found = ocrdma_is_qp_in_rq_flushlist(qp->rq_cq, qp);
1663fe2caefcSParav Pandit 		if (found)
1664fe2caefcSParav Pandit 			list_del(&qp->rq_entry);
1665fe2caefcSParav Pandit 	}
1666fe2caefcSParav Pandit 	spin_unlock_irqrestore(&dev->flush_q_lock, flags);
1667fe2caefcSParav Pandit }
1668fe2caefcSParav Pandit 
1669c4367a26SShamir Rabinovitch int ocrdma_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
1670fe2caefcSParav Pandit {
1671fe2caefcSParav Pandit 	struct ocrdma_pd *pd;
1672fe2caefcSParav Pandit 	struct ocrdma_qp *qp;
1673fe2caefcSParav Pandit 	struct ocrdma_dev *dev;
1674fe2caefcSParav Pandit 	struct ib_qp_attr attrs;
1675fe48822bSDevesh Sharma 	int attr_mask;
1676d19081e0SDan Carpenter 	unsigned long flags;
1677fe2caefcSParav Pandit 
1678fe2caefcSParav Pandit 	qp = get_ocrdma_qp(ibqp);
1679d2b8f7b1SMitesh Ahuja 	dev = get_ocrdma_dev(ibqp->device);
1680fe2caefcSParav Pandit 
1681fe2caefcSParav Pandit 	pd = qp->pd;
1682fe2caefcSParav Pandit 
1683fe2caefcSParav Pandit 	/* change the QP state to ERROR */
1684fe48822bSDevesh Sharma 	if (qp->state != OCRDMA_QPS_RST) {
1685fe48822bSDevesh Sharma 		attrs.qp_state = IB_QPS_ERR;
1686fe48822bSDevesh Sharma 		attr_mask = IB_QP_STATE;
1687fe2caefcSParav Pandit 		_ocrdma_modify_qp(ibqp, &attrs, attr_mask);
1688fe48822bSDevesh Sharma 	}
1689fe2caefcSParav Pandit 	/* ensure that CQEs for a newly created QP (whose id may be the
1690fe2caefcSParav Pandit 	 * same as that of the QP just being destroyed) don't get
1691fe2caefcSParav Pandit 	 * discarded until the old QP's CQEs are discarded.
1692fe2caefcSParav Pandit 	 */
1693fe2caefcSParav Pandit 	mutex_lock(&dev->dev_lock);
16944b8180aaSMitesh Ahuja 	(void) ocrdma_mbx_destroy_qp(dev, qp);
1695fe2caefcSParav Pandit 
1696fe2caefcSParav Pandit 	/*
1697fe2caefcSParav Pandit 	 * acquire CQ lock while destroy is in progress, in order to
1698fe2caefcSParav Pandit 	 * protect against processing in-flight CQEs for this QP.
1699fe2caefcSParav Pandit 	 */
1700d19081e0SDan Carpenter 	spin_lock_irqsave(&qp->sq_cq->cq_lock, flags);
1701beae9eb5SBart Van Assche 	if (qp->rq_cq && (qp->rq_cq != qp->sq_cq)) {
1702d19081e0SDan Carpenter 		spin_lock(&qp->rq_cq->cq_lock);
1703fe2caefcSParav Pandit 		ocrdma_del_qpn_map(dev, qp);
1704d19081e0SDan Carpenter 		spin_unlock(&qp->rq_cq->cq_lock);
1705beae9eb5SBart Van Assche 	} else {
1706beae9eb5SBart Van Assche 		ocrdma_del_qpn_map(dev, qp);
1707beae9eb5SBart Van Assche 	}
1708d19081e0SDan Carpenter 	spin_unlock_irqrestore(&qp->sq_cq->cq_lock, flags);
1709fe2caefcSParav Pandit 
1710fe2caefcSParav Pandit 	if (!pd->uctx) {
1711fe2caefcSParav Pandit 		ocrdma_discard_cqes(qp, qp->sq_cq);
1712fe2caefcSParav Pandit 		ocrdma_discard_cqes(qp, qp->rq_cq);
1713fe2caefcSParav Pandit 	}
1714fe2caefcSParav Pandit 	mutex_unlock(&dev->dev_lock);
1715fe2caefcSParav Pandit 
1716fe2caefcSParav Pandit 	if (pd->uctx) {
171743a6b402SNaresh Gottumukkala 		ocrdma_del_mmap(pd->uctx, (u64) qp->sq.pa,
171843a6b402SNaresh Gottumukkala 				PAGE_ALIGN(qp->sq.len));
1719fe2caefcSParav Pandit 		if (!qp->srq)
172043a6b402SNaresh Gottumukkala 			ocrdma_del_mmap(pd->uctx, (u64) qp->rq.pa,
172143a6b402SNaresh Gottumukkala 					PAGE_ALIGN(qp->rq.len));
1722fe2caefcSParav Pandit 	}
1723fe2caefcSParav Pandit 
1724fe2caefcSParav Pandit 	ocrdma_del_flush_qp(qp);
1725fe2caefcSParav Pandit 
1726fe2caefcSParav Pandit 	kfree(qp->wqe_wr_id_tbl);
1727fe2caefcSParav Pandit 	kfree(qp->rqe_wr_id_tbl);
1728fe2caefcSParav Pandit 	kfree(qp);
17294b8180aaSMitesh Ahuja 	return 0;
1730fe2caefcSParav Pandit }
1731fe2caefcSParav Pandit 
17321afc0454SNaresh Gottumukkala static int ocrdma_copy_srq_uresp(struct ocrdma_dev *dev, struct ocrdma_srq *srq,
17331afc0454SNaresh Gottumukkala 				struct ib_udata *udata)
1734fe2caefcSParav Pandit {
1735fe2caefcSParav Pandit 	int status;
1736fe2caefcSParav Pandit 	struct ocrdma_create_srq_uresp uresp;
1737fe2caefcSParav Pandit 
173863ea3749SDan Carpenter 	memset(&uresp, 0, sizeof(uresp));
1739fe2caefcSParav Pandit 	uresp.rq_dbid = srq->rq.dbid;
1740fe2caefcSParav Pandit 	uresp.num_rq_pages = 1;
17411b76d383SDevesh Sharma 	uresp.rq_page_addr[0] = virt_to_phys(srq->rq.va);
1742fe2caefcSParav Pandit 	uresp.rq_page_size = srq->rq.len;
17431afc0454SNaresh Gottumukkala 	uresp.db_page_addr = dev->nic_info.unmapped_db +
17441afc0454SNaresh Gottumukkala 	    (srq->pd->id * dev->nic_info.db_page_size);
17451afc0454SNaresh Gottumukkala 	uresp.db_page_size = dev->nic_info.db_page_size;
1746fe2caefcSParav Pandit 	uresp.num_rqe_allocated = srq->rq.max_cnt;
174721c3391aSDevesh Sharma 	if (ocrdma_get_asic_type(dev) == OCRDMA_ASIC_GEN_SKH_R) {
1748f11220eeSNaresh Gottumukkala 		uresp.db_rq_offset = OCRDMA_DB_GEN2_RQ_OFFSET;
1749fe2caefcSParav Pandit 		uresp.db_shift = 24;
1750fe2caefcSParav Pandit 	} else {
1751fe2caefcSParav Pandit 		uresp.db_rq_offset = OCRDMA_DB_RQ_OFFSET;
1752fe2caefcSParav Pandit 		uresp.db_shift = 16;
1753fe2caefcSParav Pandit 	}
1754fe2caefcSParav Pandit 
1755fe2caefcSParav Pandit 	status = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
1756fe2caefcSParav Pandit 	if (status)
1757fe2caefcSParav Pandit 		return status;
1758fe2caefcSParav Pandit 	status = ocrdma_add_mmap(srq->pd->uctx, uresp.rq_page_addr[0],
1759fe2caefcSParav Pandit 				 uresp.rq_page_size);
1760fe2caefcSParav Pandit 	if (status)
1761fe2caefcSParav Pandit 		return status;
1762fe2caefcSParav Pandit 	return status;
1763fe2caefcSParav Pandit }
1764fe2caefcSParav Pandit 
176568e326deSLeon Romanovsky int ocrdma_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *init_attr,
1766fe2caefcSParav Pandit 		      struct ib_udata *udata)
1767fe2caefcSParav Pandit {
176868e326deSLeon Romanovsky 	int status;
176968e326deSLeon Romanovsky 	struct ocrdma_pd *pd = get_ocrdma_pd(ibsrq->pd);
177068e326deSLeon Romanovsky 	struct ocrdma_dev *dev = get_ocrdma_dev(ibsrq->device);
177168e326deSLeon Romanovsky 	struct ocrdma_srq *srq = get_ocrdma_srq(ibsrq);
1772fe2caefcSParav Pandit 
1773fe2caefcSParav Pandit 	if (init_attr->attr.max_sge > dev->attr.max_recv_sge)
177468e326deSLeon Romanovsky 		return -EINVAL;
1775fe2caefcSParav Pandit 	if (init_attr->attr.max_wr > dev->attr.max_rqe)
177668e326deSLeon Romanovsky 		return -EINVAL;
1777fe2caefcSParav Pandit 
1778fe2caefcSParav Pandit 	spin_lock_init(&srq->q_lock);
1779fe2caefcSParav Pandit 	srq->pd = pd;
1780fe2caefcSParav Pandit 	srq->db = dev->nic_info.db + (pd->id * dev->nic_info.db_page_size);
17811afc0454SNaresh Gottumukkala 	status = ocrdma_mbx_create_srq(dev, srq, init_attr, pd);
1782fe2caefcSParav Pandit 	if (status)
178368e326deSLeon Romanovsky 		return status;
1784fe2caefcSParav Pandit 
178568e326deSLeon Romanovsky 	if (!udata) {
17866396bb22SKees Cook 		srq->rqe_wr_id_tbl = kcalloc(srq->rq.max_cnt, sizeof(u64),
1787fe2caefcSParav Pandit 					     GFP_KERNEL);
178868e326deSLeon Romanovsky 		if (!srq->rqe_wr_id_tbl) {
178968e326deSLeon Romanovsky 			status = -ENOMEM;
1790fe2caefcSParav Pandit 			goto arm_err;
179168e326deSLeon Romanovsky 		}
1792fe2caefcSParav Pandit 
1793fe2caefcSParav Pandit 		srq->bit_fields_len = (srq->rq.max_cnt / 32) +
1794fe2caefcSParav Pandit 		    (srq->rq.max_cnt % 32 ? 1 : 0);
1795fe2caefcSParav Pandit 		srq->idx_bit_fields =
17966da2ec56SKees Cook 		    kmalloc_array(srq->bit_fields_len, sizeof(u32),
17976da2ec56SKees Cook 				  GFP_KERNEL);
179868e326deSLeon Romanovsky 		if (!srq->idx_bit_fields) {
179968e326deSLeon Romanovsky 			status = -ENOMEM;
1800fe2caefcSParav Pandit 			goto arm_err;
180168e326deSLeon Romanovsky 		}
1802fe2caefcSParav Pandit 		memset(srq->idx_bit_fields, 0xff,
1803fe2caefcSParav Pandit 		       srq->bit_fields_len * sizeof(u32));
1804fe2caefcSParav Pandit 	}
1805fe2caefcSParav Pandit 
1806fe2caefcSParav Pandit 	if (init_attr->attr.srq_limit) {
1807fe2caefcSParav Pandit 		status = ocrdma_mbx_modify_srq(srq, &init_attr->attr);
1808fe2caefcSParav Pandit 		if (status)
1809fe2caefcSParav Pandit 			goto arm_err;
1810fe2caefcSParav Pandit 	}
1811fe2caefcSParav Pandit 
1812fe2caefcSParav Pandit 	if (udata) {
18131afc0454SNaresh Gottumukkala 		status = ocrdma_copy_srq_uresp(dev, srq, udata);
1814fe2caefcSParav Pandit 		if (status)
1815fe2caefcSParav Pandit 			goto arm_err;
1816fe2caefcSParav Pandit 	}
1817fe2caefcSParav Pandit 
181868e326deSLeon Romanovsky 	return 0;
1819fe2caefcSParav Pandit 
1820fe2caefcSParav Pandit arm_err:
1821fe2caefcSParav Pandit 	ocrdma_mbx_destroy_srq(dev, srq);
1822fe2caefcSParav Pandit 	kfree(srq->rqe_wr_id_tbl);
1823fe2caefcSParav Pandit 	kfree(srq->idx_bit_fields);
182468e326deSLeon Romanovsky 	return status;
1825fe2caefcSParav Pandit }
1826fe2caefcSParav Pandit 
1827fe2caefcSParav Pandit int ocrdma_modify_srq(struct ib_srq *ibsrq,
1828fe2caefcSParav Pandit 		      struct ib_srq_attr *srq_attr,
1829fe2caefcSParav Pandit 		      enum ib_srq_attr_mask srq_attr_mask,
1830fe2caefcSParav Pandit 		      struct ib_udata *udata)
1831fe2caefcSParav Pandit {
18320ca4c39fSMarkus Elfring 	int status;
1833fe2caefcSParav Pandit 	struct ocrdma_srq *srq;
1834fe2caefcSParav Pandit 
1835fe2caefcSParav Pandit 	srq = get_ocrdma_srq(ibsrq);
1836fe2caefcSParav Pandit 	if (srq_attr_mask & IB_SRQ_MAX_WR)
1837fe2caefcSParav Pandit 		status = -EINVAL;
1838fe2caefcSParav Pandit 	else
1839fe2caefcSParav Pandit 		status = ocrdma_mbx_modify_srq(srq, srq_attr);
1840fe2caefcSParav Pandit 	return status;
1841fe2caefcSParav Pandit }
1842fe2caefcSParav Pandit 
1843fe2caefcSParav Pandit int ocrdma_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr)
1844fe2caefcSParav Pandit {
1845fe2caefcSParav Pandit 	int status;
1846fe2caefcSParav Pandit 	struct ocrdma_srq *srq;
1847fe2caefcSParav Pandit 
1848fe2caefcSParav Pandit 	srq = get_ocrdma_srq(ibsrq);
1849fe2caefcSParav Pandit 	status = ocrdma_mbx_query_srq(srq, srq_attr);
1850fe2caefcSParav Pandit 	return status;
1851fe2caefcSParav Pandit }
1852fe2caefcSParav Pandit 
1853119181d1SLeon Romanovsky int ocrdma_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
1854fe2caefcSParav Pandit {
1855fe2caefcSParav Pandit 	struct ocrdma_srq *srq;
18561afc0454SNaresh Gottumukkala 	struct ocrdma_dev *dev = get_ocrdma_dev(ibsrq->device);
1857fe2caefcSParav Pandit 
1858fe2caefcSParav Pandit 	srq = get_ocrdma_srq(ibsrq);
1859fe2caefcSParav Pandit 
186068e326deSLeon Romanovsky 	ocrdma_mbx_destroy_srq(dev, srq);
1861fe2caefcSParav Pandit 
1862fe2caefcSParav Pandit 	if (srq->pd->uctx)
186343a6b402SNaresh Gottumukkala 		ocrdma_del_mmap(srq->pd->uctx, (u64) srq->rq.pa,
186443a6b402SNaresh Gottumukkala 				PAGE_ALIGN(srq->rq.len));
1865fe2caefcSParav Pandit 
1866fe2caefcSParav Pandit 	kfree(srq->idx_bit_fields);
1867fe2caefcSParav Pandit 	kfree(srq->rqe_wr_id_tbl);
1868119181d1SLeon Romanovsky 	return 0;
1869fe2caefcSParav Pandit }
1870fe2caefcSParav Pandit 
1871fe2caefcSParav Pandit /* unprivileged verbs and their support functions. */
1872fe2caefcSParav Pandit static void ocrdma_build_ud_hdr(struct ocrdma_qp *qp,
1873fe2caefcSParav Pandit 				struct ocrdma_hdr_wqe *hdr,
1874f696bf6dSBart Van Assche 				const struct ib_send_wr *wr)
1875fe2caefcSParav Pandit {
1876fe2caefcSParav Pandit 	struct ocrdma_ewqe_ud_hdr *ud_hdr =
1877fe2caefcSParav Pandit 		(struct ocrdma_ewqe_ud_hdr *)(hdr + 1);
1878e622f2f4SChristoph Hellwig 	struct ocrdma_ah *ah = get_ocrdma_ah(ud_wr(wr)->ah);
1879fe2caefcSParav Pandit 
1880e622f2f4SChristoph Hellwig 	ud_hdr->rsvd_dest_qpn = ud_wr(wr)->remote_qpn;
1881fe2caefcSParav Pandit 	if (qp->qp_type == IB_QPT_GSI)
1882fe2caefcSParav Pandit 		ud_hdr->qkey = qp->qkey;
1883fe2caefcSParav Pandit 	else
1884e622f2f4SChristoph Hellwig 		ud_hdr->qkey = ud_wr(wr)->remote_qkey;
1885fe2caefcSParav Pandit 	ud_hdr->rsvd_ahid = ah->id;
18866b062667SDevesh Sharma 	ud_hdr->hdr_type = ah->hdr_type;
188729565f2fSDevesh Sharma 	if (ah->av->valid & OCRDMA_AV_VLAN_VALID)
188829565f2fSDevesh Sharma 		hdr->cw |= (OCRDMA_FLAG_AH_VLAN_PR << OCRDMA_WQE_FLAGS_SHIFT);
1889fe2caefcSParav Pandit }
1890fe2caefcSParav Pandit 
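/* Convert an ib_sge list into the hardware SGE format and accumulate the
 * total payload length into the WQE header.
 */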
1891fe2caefcSParav Pandit static void ocrdma_build_sges(struct ocrdma_hdr_wqe *hdr,
1892fe2caefcSParav Pandit 			      struct ocrdma_sge *sge, int num_sge,
1893fe2caefcSParav Pandit 			      struct ib_sge *sg_list)
1894fe2caefcSParav Pandit {
1895fe2caefcSParav Pandit 	int i;
1896fe2caefcSParav Pandit 
1897fe2caefcSParav Pandit 	for (i = 0; i < num_sge; i++) {
1898fe2caefcSParav Pandit 		sge[i].lrkey = sg_list[i].lkey;
1899fe2caefcSParav Pandit 		sge[i].addr_lo = sg_list[i].addr;
1900fe2caefcSParav Pandit 		sge[i].addr_hi = upper_32_bits(sg_list[i].addr);
1901fe2caefcSParav Pandit 		sge[i].len = sg_list[i].length;
1902fe2caefcSParav Pandit 		hdr->total_len += sg_list[i].length;
1903fe2caefcSParav Pandit 	}
1904fe2caefcSParav Pandit 	if (num_sge == 0)
1905fe2caefcSParav Pandit 		memset(sge, 0, sizeof(*sge));
1906fe2caefcSParav Pandit }
1907fe2caefcSParav Pandit 
1908117e6dd1SNaresh Gottumukkala static inline uint32_t ocrdma_sglist_len(struct ib_sge *sg_list, int num_sge)
1909117e6dd1SNaresh Gottumukkala {
1910117e6dd1SNaresh Gottumukkala 	uint32_t total_len = 0, i;
1911117e6dd1SNaresh Gottumukkala 
1912117e6dd1SNaresh Gottumukkala 	for (i = 0; i < num_sge; i++)
1913117e6dd1SNaresh Gottumukkala 		total_len += sg_list[i].length;
1914117e6dd1SNaresh Gottumukkala 	return total_len;
1915117e6dd1SNaresh Gottumukkala }
1916117e6dd1SNaresh Gottumukkala 
1917117e6dd1SNaresh Gottumukkala 
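/* For inline (non-UD) sends, copy the payload directly into the WQE;
 * otherwise build a regular SGE list. The final WQE size is encoded
 * into hdr->cw.
 */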
1918fe2caefcSParav Pandit static int ocrdma_build_inline_sges(struct ocrdma_qp *qp,
1919fe2caefcSParav Pandit 				    struct ocrdma_hdr_wqe *hdr,
1920fe2caefcSParav Pandit 				    struct ocrdma_sge *sge,
1921f696bf6dSBart Van Assche 				    const struct ib_send_wr *wr, u32 wqe_size)
1922fe2caefcSParav Pandit {
1923117e6dd1SNaresh Gottumukkala 	int i;
1924117e6dd1SNaresh Gottumukkala 	char *dpp_addr;
1925117e6dd1SNaresh Gottumukkala 
192643a6b402SNaresh Gottumukkala 	if (wr->send_flags & IB_SEND_INLINE && qp->qp_type != IB_QPT_UD) {
1927117e6dd1SNaresh Gottumukkala 		hdr->total_len = ocrdma_sglist_len(wr->sg_list, wr->num_sge);
1928117e6dd1SNaresh Gottumukkala 		if (unlikely(hdr->total_len > qp->max_inline_data)) {
1929ef99c4c2SNaresh Gottumukkala 			pr_err("%s() supported_len=0x%x,\n"
19301a84db56SMasanari Iida 			       " unsupported len req=0x%x\n", __func__,
1931117e6dd1SNaresh Gottumukkala 				qp->max_inline_data, hdr->total_len);
1932fe2caefcSParav Pandit 			return -EINVAL;
1933fe2caefcSParav Pandit 		}
1934117e6dd1SNaresh Gottumukkala 		dpp_addr = (char *)sge;
1935117e6dd1SNaresh Gottumukkala 		for (i = 0; i < wr->num_sge; i++) {
1936117e6dd1SNaresh Gottumukkala 			memcpy(dpp_addr,
1937117e6dd1SNaresh Gottumukkala 			       (void *)(unsigned long)wr->sg_list[i].addr,
1938117e6dd1SNaresh Gottumukkala 			       wr->sg_list[i].length);
1939117e6dd1SNaresh Gottumukkala 			dpp_addr += wr->sg_list[i].length;
1940117e6dd1SNaresh Gottumukkala 		}
1941117e6dd1SNaresh Gottumukkala 
1942fe2caefcSParav Pandit 		wqe_size += roundup(hdr->total_len, OCRDMA_WQE_ALIGN_BYTES);
1943117e6dd1SNaresh Gottumukkala 		if (0 == hdr->total_len)
194443a6b402SNaresh Gottumukkala 			wqe_size += sizeof(struct ocrdma_sge);
1945fe2caefcSParav Pandit 		hdr->cw |= (OCRDMA_TYPE_INLINE << OCRDMA_WQE_TYPE_SHIFT);
1946fe2caefcSParav Pandit 	} else {
1947fe2caefcSParav Pandit 		ocrdma_build_sges(hdr, sge, wr->num_sge, wr->sg_list);
1948fe2caefcSParav Pandit 		if (wr->num_sge)
1949fe2caefcSParav Pandit 			wqe_size += (wr->num_sge * sizeof(struct ocrdma_sge));
1950fe2caefcSParav Pandit 		else
1951fe2caefcSParav Pandit 			wqe_size += sizeof(struct ocrdma_sge);
1952fe2caefcSParav Pandit 		hdr->cw |= (OCRDMA_TYPE_LKEY << OCRDMA_WQE_TYPE_SHIFT);
1953fe2caefcSParav Pandit 	}
1954fe2caefcSParav Pandit 	hdr->cw |= ((wqe_size / OCRDMA_WQE_STRIDE) << OCRDMA_WQE_SIZE_SHIFT);
1955fe2caefcSParav Pandit 	return 0;
1956fe2caefcSParav Pandit }
1957fe2caefcSParav Pandit 
1958fe2caefcSParav Pandit static int ocrdma_build_send(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
1959f696bf6dSBart Van Assche 			     const struct ib_send_wr *wr)
1960fe2caefcSParav Pandit {
1961fe2caefcSParav Pandit 	int status;
1962fe2caefcSParav Pandit 	struct ocrdma_sge *sge;
1963fe2caefcSParav Pandit 	u32 wqe_size = sizeof(*hdr);
1964fe2caefcSParav Pandit 
1965fe2caefcSParav Pandit 	if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI) {
1966fe2caefcSParav Pandit 		ocrdma_build_ud_hdr(qp, hdr, wr);
1967fe2caefcSParav Pandit 		sge = (struct ocrdma_sge *)(hdr + 2);
1968fe2caefcSParav Pandit 		wqe_size += sizeof(struct ocrdma_ewqe_ud_hdr);
1969f99b1649SNaresh Gottumukkala 	} else {
1970fe2caefcSParav Pandit 		sge = (struct ocrdma_sge *)(hdr + 1);
1971f99b1649SNaresh Gottumukkala 	}
1972fe2caefcSParav Pandit 
1973fe2caefcSParav Pandit 	status = ocrdma_build_inline_sges(qp, hdr, sge, wr, wqe_size);
1974fe2caefcSParav Pandit 	return status;
1975fe2caefcSParav Pandit }
1976fe2caefcSParav Pandit 
1977fe2caefcSParav Pandit static int ocrdma_build_write(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
1978f696bf6dSBart Van Assche 			      const struct ib_send_wr *wr)
1979fe2caefcSParav Pandit {
1980fe2caefcSParav Pandit 	int status;
1981fe2caefcSParav Pandit 	struct ocrdma_sge *ext_rw = (struct ocrdma_sge *)(hdr + 1);
1982fe2caefcSParav Pandit 	struct ocrdma_sge *sge = ext_rw + 1;
1983fe2caefcSParav Pandit 	u32 wqe_size = sizeof(*hdr) + sizeof(*ext_rw);
1984fe2caefcSParav Pandit 
1985fe2caefcSParav Pandit 	status = ocrdma_build_inline_sges(qp, hdr, sge, wr, wqe_size);
1986fe2caefcSParav Pandit 	if (status)
1987fe2caefcSParav Pandit 		return status;
1988e622f2f4SChristoph Hellwig 	ext_rw->addr_lo = rdma_wr(wr)->remote_addr;
1989e622f2f4SChristoph Hellwig 	ext_rw->addr_hi = upper_32_bits(rdma_wr(wr)->remote_addr);
1990e622f2f4SChristoph Hellwig 	ext_rw->lrkey = rdma_wr(wr)->rkey;
1991fe2caefcSParav Pandit 	ext_rw->len = hdr->total_len;
1992fe2caefcSParav Pandit 	return 0;
1993fe2caefcSParav Pandit }
1994fe2caefcSParav Pandit 
1995fe2caefcSParav Pandit static void ocrdma_build_read(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
1996f696bf6dSBart Van Assche 			      const struct ib_send_wr *wr)
1997fe2caefcSParav Pandit {
1998fe2caefcSParav Pandit 	struct ocrdma_sge *ext_rw = (struct ocrdma_sge *)(hdr + 1);
1999fe2caefcSParav Pandit 	struct ocrdma_sge *sge = ext_rw + 1;
2000fe2caefcSParav Pandit 	u32 wqe_size = ((wr->num_sge + 1) * sizeof(struct ocrdma_sge)) +
2001fe2caefcSParav Pandit 	    sizeof(struct ocrdma_hdr_wqe);
2002fe2caefcSParav Pandit 
2003fe2caefcSParav Pandit 	ocrdma_build_sges(hdr, sge, wr->num_sge, wr->sg_list);
2004fe2caefcSParav Pandit 	hdr->cw |= ((wqe_size / OCRDMA_WQE_STRIDE) << OCRDMA_WQE_SIZE_SHIFT);
2005fe2caefcSParav Pandit 	hdr->cw |= (OCRDMA_READ << OCRDMA_WQE_OPCODE_SHIFT);
2006fe2caefcSParav Pandit 	hdr->cw |= (OCRDMA_TYPE_LKEY << OCRDMA_WQE_TYPE_SHIFT);
2007fe2caefcSParav Pandit 
2008e622f2f4SChristoph Hellwig 	ext_rw->addr_lo = rdma_wr(wr)->remote_addr;
2009e622f2f4SChristoph Hellwig 	ext_rw->addr_hi = upper_32_bits(rdma_wr(wr)->remote_addr);
2010e622f2f4SChristoph Hellwig 	ext_rw->lrkey = rdma_wr(wr)->rkey;
2011fe2caefcSParav Pandit 	ext_rw->len = hdr->total_len;
2012fe2caefcSParav Pandit }
2013fe2caefcSParav Pandit 
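/* Encode a page size in bytes as log2(pg_sz / 4096) for the HW. */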
20147c33880cSNaresh Gottumukkala static int get_encoded_page_size(int pg_sz)
20157c33880cSNaresh Gottumukkala {
20167c33880cSNaresh Gottumukkala 	/* Max size is 256M (4096 << 16) */
20177c33880cSNaresh Gottumukkala 	int i = 0;
20187c33880cSNaresh Gottumukkala 	for (; i < 17; i++)
20197c33880cSNaresh Gottumukkala 		if (pg_sz == (4096 << i))
20207c33880cSNaresh Gottumukkala 			break;
20217c33880cSNaresh Gottumukkala 	return i;
20227c33880cSNaresh Gottumukkala }
20237c33880cSNaresh Gottumukkala 
20242eaa1c56SSagi Grimberg static int ocrdma_build_reg(struct ocrdma_qp *qp,
20252eaa1c56SSagi Grimberg 			    struct ocrdma_hdr_wqe *hdr,
2026f696bf6dSBart Van Assche 			    const struct ib_reg_wr *wr)
20272eaa1c56SSagi Grimberg {
20282eaa1c56SSagi Grimberg 	u64 fbo;
20292eaa1c56SSagi Grimberg 	struct ocrdma_ewqe_fr *fast_reg = (struct ocrdma_ewqe_fr *)(hdr + 1);
20302eaa1c56SSagi Grimberg 	struct ocrdma_mr *mr = get_ocrdma_mr(wr->mr);
20312eaa1c56SSagi Grimberg 	struct ocrdma_pbl *pbl_tbl = mr->hwmr.pbl_table;
20322eaa1c56SSagi Grimberg 	struct ocrdma_pbe *pbe;
20332eaa1c56SSagi Grimberg 	u32 wqe_size = sizeof(*fast_reg) + sizeof(*hdr);
20342eaa1c56SSagi Grimberg 	int num_pbes = 0, i;
20352eaa1c56SSagi Grimberg 
20362eaa1c56SSagi Grimberg 	wqe_size = roundup(wqe_size, OCRDMA_WQE_ALIGN_BYTES);
20372eaa1c56SSagi Grimberg 
20382eaa1c56SSagi Grimberg 	hdr->cw |= (OCRDMA_FR_MR << OCRDMA_WQE_OPCODE_SHIFT);
20392eaa1c56SSagi Grimberg 	hdr->cw |= ((wqe_size / OCRDMA_WQE_STRIDE) << OCRDMA_WQE_SIZE_SHIFT);
20402eaa1c56SSagi Grimberg 
20412eaa1c56SSagi Grimberg 	if (wr->access & IB_ACCESS_LOCAL_WRITE)
20422eaa1c56SSagi Grimberg 		hdr->rsvd_lkey_flags |= OCRDMA_LKEY_FLAG_LOCAL_WR;
20432eaa1c56SSagi Grimberg 	if (wr->access & IB_ACCESS_REMOTE_WRITE)
20442eaa1c56SSagi Grimberg 		hdr->rsvd_lkey_flags |= OCRDMA_LKEY_FLAG_REMOTE_WR;
20452eaa1c56SSagi Grimberg 	if (wr->access & IB_ACCESS_REMOTE_READ)
20462eaa1c56SSagi Grimberg 		hdr->rsvd_lkey_flags |= OCRDMA_LKEY_FLAG_REMOTE_RD;
20472eaa1c56SSagi Grimberg 	hdr->lkey = wr->key;
20482eaa1c56SSagi Grimberg 	hdr->total_len = mr->ibmr.length;
20492eaa1c56SSagi Grimberg 
20502eaa1c56SSagi Grimberg 	fbo = mr->ibmr.iova - mr->pages[0];
20512eaa1c56SSagi Grimberg 
20522eaa1c56SSagi Grimberg 	fast_reg->va_hi = upper_32_bits(mr->ibmr.iova);
20532eaa1c56SSagi Grimberg 	fast_reg->va_lo = (u32) (mr->ibmr.iova & 0xffffffff);
20542eaa1c56SSagi Grimberg 	fast_reg->fbo_hi = upper_32_bits(fbo);
20552eaa1c56SSagi Grimberg 	fast_reg->fbo_lo = (u32) fbo & 0xffffffff;
20562eaa1c56SSagi Grimberg 	fast_reg->num_sges = mr->npages;
20572eaa1c56SSagi Grimberg 	fast_reg->size_sge = get_encoded_page_size(mr->ibmr.page_size);
20582eaa1c56SSagi Grimberg 
20592eaa1c56SSagi Grimberg 	pbe = pbl_tbl->va;
20602eaa1c56SSagi Grimberg 	for (i = 0; i < mr->npages; i++) {
20612eaa1c56SSagi Grimberg 		u64 buf_addr = mr->pages[i];
20622eaa1c56SSagi Grimberg 
20632eaa1c56SSagi Grimberg 		pbe->pa_lo = cpu_to_le32((u32) (buf_addr & PAGE_MASK));
20642eaa1c56SSagi Grimberg 		pbe->pa_hi = cpu_to_le32((u32) upper_32_bits(buf_addr));
20652eaa1c56SSagi Grimberg 		num_pbes += 1;
20662eaa1c56SSagi Grimberg 		pbe++;
20672eaa1c56SSagi Grimberg 
20682eaa1c56SSagi Grimberg 		/* if the current pbl is full of pbes,
20692eaa1c56SSagi Grimberg 		 * move to the next pbl.
20702eaa1c56SSagi Grimberg 		 */
20712eaa1c56SSagi Grimberg 		if (num_pbes == (mr->hwmr.pbl_size/sizeof(u64))) {
20722eaa1c56SSagi Grimberg 			pbl_tbl++;
20732eaa1c56SSagi Grimberg 			pbe = (struct ocrdma_pbe *)pbl_tbl->va;
20742eaa1c56SSagi Grimberg 		}
20752eaa1c56SSagi Grimberg 	}
20762eaa1c56SSagi Grimberg 
20772eaa1c56SSagi Grimberg 	return 0;
20782eaa1c56SSagi Grimberg }
20797c33880cSNaresh Gottumukkala 
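/* Notify the HW of newly posted SQ WQEs by writing the SQ doorbell. */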
2080fe2caefcSParav Pandit static void ocrdma_ring_sq_db(struct ocrdma_qp *qp)
2081fe2caefcSParav Pandit {
20822df84fa8SDevesh Sharma 	u32 val = qp->sq.dbid | (1 << OCRDMA_DB_SQ_SHIFT);
2083fe2caefcSParav Pandit 
2084fe2caefcSParav Pandit 	iowrite32(val, qp->sq_db);
2085fe2caefcSParav Pandit }
2086fe2caefcSParav Pandit 
2087d34ac5cdSBart Van Assche int ocrdma_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
2088d34ac5cdSBart Van Assche 		     const struct ib_send_wr **bad_wr)
2089fe2caefcSParav Pandit {
2090fe2caefcSParav Pandit 	int status = 0;
2091fe2caefcSParav Pandit 	struct ocrdma_qp *qp = get_ocrdma_qp(ibqp);
2092fe2caefcSParav Pandit 	struct ocrdma_hdr_wqe *hdr;
2093fe2caefcSParav Pandit 	unsigned long flags;
2094fe2caefcSParav Pandit 
2095fe2caefcSParav Pandit 	spin_lock_irqsave(&qp->q_lock, flags);
2096fe2caefcSParav Pandit 	if (qp->state != OCRDMA_QPS_RTS && qp->state != OCRDMA_QPS_SQD) {
2097fe2caefcSParav Pandit 		spin_unlock_irqrestore(&qp->q_lock, flags);
2098f6ddcf71SNaresh Gottumukkala 		*bad_wr = wr;
2099fe2caefcSParav Pandit 		return -EINVAL;
2100fe2caefcSParav Pandit 	}
2101fe2caefcSParav Pandit 
2102fe2caefcSParav Pandit 	while (wr) {
2103f252b5dcSMitesh Ahuja 		if (qp->qp_type == IB_QPT_UD &&
2104f252b5dcSMitesh Ahuja 		    (wr->opcode != IB_WR_SEND &&
2105f252b5dcSMitesh Ahuja 		     wr->opcode != IB_WR_SEND_WITH_IMM)) {
2106f252b5dcSMitesh Ahuja 			*bad_wr = wr;
2107f252b5dcSMitesh Ahuja 			status = -EINVAL;
2108f252b5dcSMitesh Ahuja 			break;
2109f252b5dcSMitesh Ahuja 		}
2110fe2caefcSParav Pandit 		if (ocrdma_hwq_free_cnt(&qp->sq) == 0 ||
2111fe2caefcSParav Pandit 		    wr->num_sge > qp->sq.max_sges) {
2112f6ddcf71SNaresh Gottumukkala 			*bad_wr = wr;
2113fe2caefcSParav Pandit 			status = -ENOMEM;
2114fe2caefcSParav Pandit 			break;
2115fe2caefcSParav Pandit 		}
2116fe2caefcSParav Pandit 		hdr = ocrdma_hwq_head(&qp->sq);
2117fe2caefcSParav Pandit 		hdr->cw = 0;
21182b51a9b9SNaresh Gottumukkala 		if (wr->send_flags & IB_SEND_SIGNALED || qp->signaled)
2119fe2caefcSParav Pandit 			hdr->cw |= (OCRDMA_FLAG_SIG << OCRDMA_WQE_FLAGS_SHIFT);
2120fe2caefcSParav Pandit 		if (wr->send_flags & IB_SEND_FENCE)
2121fe2caefcSParav Pandit 			hdr->cw |=
2122fe2caefcSParav Pandit 			    (OCRDMA_FLAG_FENCE_L << OCRDMA_WQE_FLAGS_SHIFT);
2123fe2caefcSParav Pandit 		if (wr->send_flags & IB_SEND_SOLICITED)
2124fe2caefcSParav Pandit 			hdr->cw |=
2125fe2caefcSParav Pandit 			    (OCRDMA_FLAG_SOLICIT << OCRDMA_WQE_FLAGS_SHIFT);
2126fe2caefcSParav Pandit 		hdr->total_len = 0;
2127fe2caefcSParav Pandit 		switch (wr->opcode) {
2128fe2caefcSParav Pandit 		case IB_WR_SEND_WITH_IMM:
2129fe2caefcSParav Pandit 			hdr->cw |= (OCRDMA_FLAG_IMM << OCRDMA_WQE_FLAGS_SHIFT);
2130fe2caefcSParav Pandit 			hdr->immdt = ntohl(wr->ex.imm_data);
2131df561f66SGustavo A. R. Silva 			fallthrough;
2132fe2caefcSParav Pandit 		case IB_WR_SEND:
2133fe2caefcSParav Pandit 			hdr->cw |= (OCRDMA_SEND << OCRDMA_WQE_OPCODE_SHIFT);
2134fe2caefcSParav Pandit 			ocrdma_build_send(qp, hdr, wr);
2135fe2caefcSParav Pandit 			break;
2136fe2caefcSParav Pandit 		case IB_WR_SEND_WITH_INV:
2137fe2caefcSParav Pandit 			hdr->cw |= (OCRDMA_FLAG_INV << OCRDMA_WQE_FLAGS_SHIFT);
2138fe2caefcSParav Pandit 			hdr->cw |= (OCRDMA_SEND << OCRDMA_WQE_OPCODE_SHIFT);
2139fe2caefcSParav Pandit 			hdr->lkey = wr->ex.invalidate_rkey;
2140fe2caefcSParav Pandit 			status = ocrdma_build_send(qp, hdr, wr);
2141fe2caefcSParav Pandit 			break;
2142fe2caefcSParav Pandit 		case IB_WR_RDMA_WRITE_WITH_IMM:
2143fe2caefcSParav Pandit 			hdr->cw |= (OCRDMA_FLAG_IMM << OCRDMA_WQE_FLAGS_SHIFT);
2144fe2caefcSParav Pandit 			hdr->immdt = ntohl(wr->ex.imm_data);
2145df561f66SGustavo A. R. Silva 			fallthrough;
2146fe2caefcSParav Pandit 		case IB_WR_RDMA_WRITE:
2147fe2caefcSParav Pandit 			hdr->cw |= (OCRDMA_WRITE << OCRDMA_WQE_OPCODE_SHIFT);
2148fe2caefcSParav Pandit 			status = ocrdma_build_write(qp, hdr, wr);
2149fe2caefcSParav Pandit 			break;
2150fe2caefcSParav Pandit 		case IB_WR_RDMA_READ:
2151fe2caefcSParav Pandit 			ocrdma_build_read(qp, hdr, wr);
2152fe2caefcSParav Pandit 			break;
2153fe2caefcSParav Pandit 		case IB_WR_LOCAL_INV:
2154fe2caefcSParav Pandit 			hdr->cw |=
2155fe2caefcSParav Pandit 			    (OCRDMA_LKEY_INV << OCRDMA_WQE_OPCODE_SHIFT);
21567c33880cSNaresh Gottumukkala 			hdr->cw |= ((sizeof(struct ocrdma_hdr_wqe) +
21577c33880cSNaresh Gottumukkala 					sizeof(struct ocrdma_sge)) /
2158fe2caefcSParav Pandit 				OCRDMA_WQE_STRIDE) << OCRDMA_WQE_SIZE_SHIFT;
2159fe2caefcSParav Pandit 			hdr->lkey = wr->ex.invalidate_rkey;
2160fe2caefcSParav Pandit 			break;
21612eaa1c56SSagi Grimberg 		case IB_WR_REG_MR:
21622eaa1c56SSagi Grimberg 			status = ocrdma_build_reg(qp, hdr, reg_wr(wr));
21632eaa1c56SSagi Grimberg 			break;
2164fe2caefcSParav Pandit 		default:
2165fe2caefcSParav Pandit 			status = -EINVAL;
2166fe2caefcSParav Pandit 			break;
2167fe2caefcSParav Pandit 		}
2168fe2caefcSParav Pandit 		if (status) {
2169fe2caefcSParav Pandit 			*bad_wr = wr;
2170fe2caefcSParav Pandit 			break;
2171fe2caefcSParav Pandit 		}
21722b51a9b9SNaresh Gottumukkala 		if (wr->send_flags & IB_SEND_SIGNALED || qp->signaled)
2173fe2caefcSParav Pandit 			qp->wqe_wr_id_tbl[qp->sq.head].signaled = 1;
2174fe2caefcSParav Pandit 		else
2175fe2caefcSParav Pandit 			qp->wqe_wr_id_tbl[qp->sq.head].signaled = 0;
2176fe2caefcSParav Pandit 		qp->wqe_wr_id_tbl[qp->sq.head].wrid = wr->wr_id;
2177fe2caefcSParav Pandit 		ocrdma_cpu_to_le32(hdr, ((hdr->cw >> OCRDMA_WQE_SIZE_SHIFT) &
2178fe2caefcSParav Pandit 				   OCRDMA_WQE_SIZE_MASK) * OCRDMA_WQE_STRIDE);
2179fe2caefcSParav Pandit 		/* make sure wqe is written before adapter can access it */
2180fe2caefcSParav Pandit 		wmb();
2181fe2caefcSParav Pandit 		/* inform hw to start processing it */
2182fe2caefcSParav Pandit 		ocrdma_ring_sq_db(qp);
2183fe2caefcSParav Pandit 
2184fe2caefcSParav Pandit 		/* update pointer, counter for next wr */
2185fe2caefcSParav Pandit 		ocrdma_hwq_inc_head(&qp->sq);
2186fe2caefcSParav Pandit 		wr = wr->next;
2187fe2caefcSParav Pandit 	}
2188fe2caefcSParav Pandit 	spin_unlock_irqrestore(&qp->q_lock, flags);
2189fe2caefcSParav Pandit 	return status;
2190fe2caefcSParav Pandit }
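/* Illustrative only (not part of this file): a kernel consumer posting a
 * plain send on an ocrdma QP goes through the core verbs layer and ends
 * up in ocrdma_post_send() above, roughly as in this sketch:
 *
 *	struct ib_sge sge = {
 *		.addr = dma_addr, .length = len, .lkey = lkey,
 *	};
 *	struct ib_send_wr wr = {
 *		.wr_id = cookie, .opcode = IB_WR_SEND,
 *		.send_flags = IB_SEND_SIGNALED,
 *		.sg_list = &sge, .num_sge = 1,
 *	};
 *	const struct ib_send_wr *bad_wr;
 *	int ret = ib_post_send(qp, &wr, &bad_wr);
 *
 * dma_addr, len, lkey, cookie and qp are assumed to be set up by the
 * caller; on failure the offending WR is returned in bad_wr, matching
 * the error paths above.
 */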
2191fe2caefcSParav Pandit 
2192fe2caefcSParav Pandit static void ocrdma_ring_rq_db(struct ocrdma_qp *qp)
2193fe2caefcSParav Pandit {
21942df84fa8SDevesh Sharma 	u32 val = qp->rq.dbid | (1 << OCRDMA_DB_RQ_SHIFT);
2195fe2caefcSParav Pandit 
2196fe2caefcSParav Pandit 	iowrite32(val, qp->rq_db);
2197fe2caefcSParav Pandit }
2198fe2caefcSParav Pandit 
2199d34ac5cdSBart Van Assche static void ocrdma_build_rqe(struct ocrdma_hdr_wqe *rqe,
2200d34ac5cdSBart Van Assche 			     const struct ib_recv_wr *wr, u16 tag)
2201fe2caefcSParav Pandit {
2202fe2caefcSParav Pandit 	u32 wqe_size = 0;
2203fe2caefcSParav Pandit 	struct ocrdma_sge *sge;
2204fe2caefcSParav Pandit 	if (wr->num_sge)
2205fe2caefcSParav Pandit 		wqe_size = (wr->num_sge * sizeof(*sge)) + sizeof(*rqe);
2206fe2caefcSParav Pandit 	else
2207fe2caefcSParav Pandit 		wqe_size = sizeof(*sge) + sizeof(*rqe);
2208fe2caefcSParav Pandit 
2209fe2caefcSParav Pandit 	rqe->cw = ((wqe_size / OCRDMA_WQE_STRIDE) <<
2210fe2caefcSParav Pandit 				OCRDMA_WQE_SIZE_SHIFT);
2211fe2caefcSParav Pandit 	rqe->cw |= (OCRDMA_FLAG_SIG << OCRDMA_WQE_FLAGS_SHIFT);
2212fe2caefcSParav Pandit 	rqe->cw |= (OCRDMA_TYPE_LKEY << OCRDMA_WQE_TYPE_SHIFT);
2213fe2caefcSParav Pandit 	rqe->total_len = 0;
2214fe2caefcSParav Pandit 	rqe->rsvd_tag = tag;
2215fe2caefcSParav Pandit 	sge = (struct ocrdma_sge *)(rqe + 1);
2216fe2caefcSParav Pandit 	ocrdma_build_sges(rqe, sge, wr->num_sge, wr->sg_list);
2217fe2caefcSParav Pandit 	ocrdma_cpu_to_le32(rqe, wqe_size);
2218fe2caefcSParav Pandit }
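/* The RQE built above encodes its own size in WQE strides in the cw word,
 * is always marked OCRDMA_FLAG_SIG so the adapter generates a completion,
 * and carries the tag (the SRQ shadow-table index, or 0 for an ordinary
 * RQ) in rsvd_tag so that the resulting CQE can be matched to a wr_id.
 */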
2219fe2caefcSParav Pandit 
2220d34ac5cdSBart Van Assche int ocrdma_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
2221d34ac5cdSBart Van Assche 		     const struct ib_recv_wr **bad_wr)
2222fe2caefcSParav Pandit {
2223fe2caefcSParav Pandit 	int status = 0;
2224fe2caefcSParav Pandit 	unsigned long flags;
2225fe2caefcSParav Pandit 	struct ocrdma_qp *qp = get_ocrdma_qp(ibqp);
2226fe2caefcSParav Pandit 	struct ocrdma_hdr_wqe *rqe;
2227fe2caefcSParav Pandit 
2228fe2caefcSParav Pandit 	spin_lock_irqsave(&qp->q_lock, flags);
2229fe2caefcSParav Pandit 	if (qp->state == OCRDMA_QPS_RST || qp->state == OCRDMA_QPS_ERR) {
2230fe2caefcSParav Pandit 		spin_unlock_irqrestore(&qp->q_lock, flags);
2231fe2caefcSParav Pandit 		*bad_wr = wr;
2232fe2caefcSParav Pandit 		return -EINVAL;
2233fe2caefcSParav Pandit 	}
2234fe2caefcSParav Pandit 	while (wr) {
2235fe2caefcSParav Pandit 		if (ocrdma_hwq_free_cnt(&qp->rq) == 0 ||
2236fe2caefcSParav Pandit 		    wr->num_sge > qp->rq.max_sges) {
2237fe2caefcSParav Pandit 			*bad_wr = wr;
2238fe2caefcSParav Pandit 			status = -ENOMEM;
2239fe2caefcSParav Pandit 			break;
2240fe2caefcSParav Pandit 		}
2241fe2caefcSParav Pandit 		rqe = ocrdma_hwq_head(&qp->rq);
2242fe2caefcSParav Pandit 		ocrdma_build_rqe(rqe, wr, 0);
2243fe2caefcSParav Pandit 
2244fe2caefcSParav Pandit 		qp->rqe_wr_id_tbl[qp->rq.head] = wr->wr_id;
2245fe2caefcSParav Pandit 		/* make sure rqe is written before adapter can access it */
2246fe2caefcSParav Pandit 		wmb();
2247fe2caefcSParav Pandit 
2248fe2caefcSParav Pandit 		/* inform hw to start processing it */
2249fe2caefcSParav Pandit 		ocrdma_ring_rq_db(qp);
2250fe2caefcSParav Pandit 
2251fe2caefcSParav Pandit 		/* update pointer, counter for next wr */
2252fe2caefcSParav Pandit 		ocrdma_hwq_inc_head(&qp->rq);
2253fe2caefcSParav Pandit 		wr = wr->next;
2254fe2caefcSParav Pandit 	}
2255fe2caefcSParav Pandit 	spin_unlock_irqrestore(&qp->q_lock, flags);
2256fe2caefcSParav Pandit 	return status;
2257fe2caefcSParav Pandit }
2258fe2caefcSParav Pandit 
2259fe2caefcSParav Pandit /* cqes for an srq's rqes can potentially arrive out of order.
2260fe2caefcSParav Pandit  * index gives the entry in the shadow table in which to store
2261fe2caefcSParav Pandit  * the wr_id. the tag/index is returned in the cqe so that the
2262fe2caefcSParav Pandit  * corresponding rqe can be referenced back later.
2263fe2caefcSParav Pandit  */
2264fe2caefcSParav Pandit static int ocrdma_srq_get_idx(struct ocrdma_srq *srq)
2265fe2caefcSParav Pandit {
2266fe2caefcSParav Pandit 	int row = 0;
2267fe2caefcSParav Pandit 	int indx = 0;
2268fe2caefcSParav Pandit 
2269fe2caefcSParav Pandit 	for (row = 0; row < srq->bit_fields_len; row++) {
2270fe2caefcSParav Pandit 		if (srq->idx_bit_fields[row]) {
2271fe2caefcSParav Pandit 			indx = ffs(srq->idx_bit_fields[row]);
2272fe2caefcSParav Pandit 			indx = (row * 32) + (indx - 1);
2273db287ec5Sssh10 			BUG_ON(indx >= srq->rq.max_cnt);
2274fe2caefcSParav Pandit 			ocrdma_srq_toggle_bit(srq, indx);
2275fe2caefcSParav Pandit 			break;
2276fe2caefcSParav Pandit 		}
2277fe2caefcSParav Pandit 	}
2278fe2caefcSParav Pandit 
2279db287ec5Sssh10 	BUG_ON(row == srq->bit_fields_len);
2280cf5788adSSelvin Xavier 	return indx + 1; /* Use from index 1 */
2281fe2caefcSParav Pandit }
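/* Worked example for the index allocator above: if idx_bit_fields[0] is
 * 0x4, ffs() returns 3, so indx = (0 * 32) + (3 - 1) = 2; the bit is
 * toggled (marked in use) and tag 3 is returned, since tag 0 is reserved
 * and entries are used from index 1.  ocrdma_update_free_srq_cqe() later
 * toggles the bit back when the CQE carrying this tag is polled.
 */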
2282fe2caefcSParav Pandit 
2283fe2caefcSParav Pandit static void ocrdma_ring_srq_db(struct ocrdma_srq *srq)
2284fe2caefcSParav Pandit {
2285fe2caefcSParav Pandit 	u32 val = srq->rq.dbid | (1 << 16);
2286fe2caefcSParav Pandit 
2287fe2caefcSParav Pandit 	iowrite32(val, srq->db + OCRDMA_DB_GEN2_SRQ_OFFSET);
2288fe2caefcSParav Pandit }
2289fe2caefcSParav Pandit 
2290d34ac5cdSBart Van Assche int ocrdma_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
2291d34ac5cdSBart Van Assche 			 const struct ib_recv_wr **bad_wr)
2292fe2caefcSParav Pandit {
2293fe2caefcSParav Pandit 	int status = 0;
2294fe2caefcSParav Pandit 	unsigned long flags;
2295fe2caefcSParav Pandit 	struct ocrdma_srq *srq;
2296fe2caefcSParav Pandit 	struct ocrdma_hdr_wqe *rqe;
2297fe2caefcSParav Pandit 	u16 tag;
2298fe2caefcSParav Pandit 
2299fe2caefcSParav Pandit 	srq = get_ocrdma_srq(ibsrq);
2300fe2caefcSParav Pandit 
2301fe2caefcSParav Pandit 	spin_lock_irqsave(&srq->q_lock, flags);
2302fe2caefcSParav Pandit 	while (wr) {
2303fe2caefcSParav Pandit 		if (ocrdma_hwq_free_cnt(&srq->rq) == 0 ||
2304fe2caefcSParav Pandit 		    wr->num_sge > srq->rq.max_sges) {
2305fe2caefcSParav Pandit 			status = -ENOMEM;
2306fe2caefcSParav Pandit 			*bad_wr = wr;
2307fe2caefcSParav Pandit 			break;
2308fe2caefcSParav Pandit 		}
2309fe2caefcSParav Pandit 		tag = ocrdma_srq_get_idx(srq);
2310fe2caefcSParav Pandit 		rqe = ocrdma_hwq_head(&srq->rq);
2311fe2caefcSParav Pandit 		ocrdma_build_rqe(rqe, wr, tag);
2312fe2caefcSParav Pandit 
2313fe2caefcSParav Pandit 		srq->rqe_wr_id_tbl[tag] = wr->wr_id;
2314fe2caefcSParav Pandit 		/* make sure rqe is written before adapter can perform DMA */
2315fe2caefcSParav Pandit 		wmb();
2316fe2caefcSParav Pandit 		/* inform hw to start processing it */
2317fe2caefcSParav Pandit 		ocrdma_ring_srq_db(srq);
2318fe2caefcSParav Pandit 		/* update pointer, counter for next wr */
2319fe2caefcSParav Pandit 		ocrdma_hwq_inc_head(&srq->rq);
2320fe2caefcSParav Pandit 		wr = wr->next;
2321fe2caefcSParav Pandit 	}
2322fe2caefcSParav Pandit 	spin_unlock_irqrestore(&srq->q_lock, flags);
2323fe2caefcSParav Pandit 	return status;
2324fe2caefcSParav Pandit }
2325fe2caefcSParav Pandit 
2326fe2caefcSParav Pandit static enum ib_wc_status ocrdma_to_ibwc_err(u16 status)
2327fe2caefcSParav Pandit {
2328f99b1649SNaresh Gottumukkala 	enum ib_wc_status ibwc_status;
2329fe2caefcSParav Pandit 
2330fe2caefcSParav Pandit 	switch (status) {
2331fe2caefcSParav Pandit 	case OCRDMA_CQE_GENERAL_ERR:
2332fe2caefcSParav Pandit 		ibwc_status = IB_WC_GENERAL_ERR;
2333fe2caefcSParav Pandit 		break;
2334fe2caefcSParav Pandit 	case OCRDMA_CQE_LOC_LEN_ERR:
2335fe2caefcSParav Pandit 		ibwc_status = IB_WC_LOC_LEN_ERR;
2336fe2caefcSParav Pandit 		break;
2337fe2caefcSParav Pandit 	case OCRDMA_CQE_LOC_QP_OP_ERR:
2338fe2caefcSParav Pandit 		ibwc_status = IB_WC_LOC_QP_OP_ERR;
2339fe2caefcSParav Pandit 		break;
2340fe2caefcSParav Pandit 	case OCRDMA_CQE_LOC_EEC_OP_ERR:
2341fe2caefcSParav Pandit 		ibwc_status = IB_WC_LOC_EEC_OP_ERR;
2342fe2caefcSParav Pandit 		break;
2343fe2caefcSParav Pandit 	case OCRDMA_CQE_LOC_PROT_ERR:
2344fe2caefcSParav Pandit 		ibwc_status = IB_WC_LOC_PROT_ERR;
2345fe2caefcSParav Pandit 		break;
2346fe2caefcSParav Pandit 	case OCRDMA_CQE_WR_FLUSH_ERR:
2347fe2caefcSParav Pandit 		ibwc_status = IB_WC_WR_FLUSH_ERR;
2348fe2caefcSParav Pandit 		break;
2349fe2caefcSParav Pandit 	case OCRDMA_CQE_MW_BIND_ERR:
2350fe2caefcSParav Pandit 		ibwc_status = IB_WC_MW_BIND_ERR;
2351fe2caefcSParav Pandit 		break;
2352fe2caefcSParav Pandit 	case OCRDMA_CQE_BAD_RESP_ERR:
2353fe2caefcSParav Pandit 		ibwc_status = IB_WC_BAD_RESP_ERR;
2354fe2caefcSParav Pandit 		break;
2355fe2caefcSParav Pandit 	case OCRDMA_CQE_LOC_ACCESS_ERR:
2356fe2caefcSParav Pandit 		ibwc_status = IB_WC_LOC_ACCESS_ERR;
2357fe2caefcSParav Pandit 		break;
2358fe2caefcSParav Pandit 	case OCRDMA_CQE_REM_INV_REQ_ERR:
2359fe2caefcSParav Pandit 		ibwc_status = IB_WC_REM_INV_REQ_ERR;
2360fe2caefcSParav Pandit 		break;
2361fe2caefcSParav Pandit 	case OCRDMA_CQE_REM_ACCESS_ERR:
2362fe2caefcSParav Pandit 		ibwc_status = IB_WC_REM_ACCESS_ERR;
2363fe2caefcSParav Pandit 		break;
2364fe2caefcSParav Pandit 	case OCRDMA_CQE_REM_OP_ERR:
2365fe2caefcSParav Pandit 		ibwc_status = IB_WC_REM_OP_ERR;
2366fe2caefcSParav Pandit 		break;
2367fe2caefcSParav Pandit 	case OCRDMA_CQE_RETRY_EXC_ERR:
2368fe2caefcSParav Pandit 		ibwc_status = IB_WC_RETRY_EXC_ERR;
2369fe2caefcSParav Pandit 		break;
2370fe2caefcSParav Pandit 	case OCRDMA_CQE_RNR_RETRY_EXC_ERR:
2371fe2caefcSParav Pandit 		ibwc_status = IB_WC_RNR_RETRY_EXC_ERR;
2372fe2caefcSParav Pandit 		break;
2373fe2caefcSParav Pandit 	case OCRDMA_CQE_LOC_RDD_VIOL_ERR:
2374fe2caefcSParav Pandit 		ibwc_status = IB_WC_LOC_RDD_VIOL_ERR;
2375fe2caefcSParav Pandit 		break;
2376fe2caefcSParav Pandit 	case OCRDMA_CQE_REM_INV_RD_REQ_ERR:
2377fe2caefcSParav Pandit 		ibwc_status = IB_WC_REM_INV_RD_REQ_ERR;
2378fe2caefcSParav Pandit 		break;
2379fe2caefcSParav Pandit 	case OCRDMA_CQE_REM_ABORT_ERR:
2380fe2caefcSParav Pandit 		ibwc_status = IB_WC_REM_ABORT_ERR;
2381fe2caefcSParav Pandit 		break;
2382fe2caefcSParav Pandit 	case OCRDMA_CQE_INV_EECN_ERR:
2383fe2caefcSParav Pandit 		ibwc_status = IB_WC_INV_EECN_ERR;
2384fe2caefcSParav Pandit 		break;
2385fe2caefcSParav Pandit 	case OCRDMA_CQE_INV_EEC_STATE_ERR:
2386fe2caefcSParav Pandit 		ibwc_status = IB_WC_INV_EEC_STATE_ERR;
2387fe2caefcSParav Pandit 		break;
2388fe2caefcSParav Pandit 	case OCRDMA_CQE_FATAL_ERR:
2389fe2caefcSParav Pandit 		ibwc_status = IB_WC_FATAL_ERR;
2390fe2caefcSParav Pandit 		break;
2391fe2caefcSParav Pandit 	case OCRDMA_CQE_RESP_TIMEOUT_ERR:
2392fe2caefcSParav Pandit 		ibwc_status = IB_WC_RESP_TIMEOUT_ERR;
2393fe2caefcSParav Pandit 		break;
2394fe2caefcSParav Pandit 	default:
2395fe2caefcSParav Pandit 		ibwc_status = IB_WC_GENERAL_ERR;
2396fe2caefcSParav Pandit 		break;
23972b50176dSJoe Perches 	}
2398fe2caefcSParav Pandit 	return ibwc_status;
2399fe2caefcSParav Pandit }
2400fe2caefcSParav Pandit 
2401fe2caefcSParav Pandit static void ocrdma_update_wc(struct ocrdma_qp *qp, struct ib_wc *ibwc,
2402fe2caefcSParav Pandit 		      u32 wqe_idx)
2403fe2caefcSParav Pandit {
2404fe2caefcSParav Pandit 	struct ocrdma_hdr_wqe *hdr;
2405fe2caefcSParav Pandit 	struct ocrdma_sge *rw;
2406fe2caefcSParav Pandit 	int opcode;
2407fe2caefcSParav Pandit 
2408fe2caefcSParav Pandit 	hdr = ocrdma_hwq_head_from_idx(&qp->sq, wqe_idx);
2409fe2caefcSParav Pandit 
2410fe2caefcSParav Pandit 	ibwc->wr_id = qp->wqe_wr_id_tbl[wqe_idx].wrid;
2411fe2caefcSParav Pandit 	/* Undo the hdr->cw swap */
2412fe2caefcSParav Pandit 	opcode = le32_to_cpu(hdr->cw) & OCRDMA_WQE_OPCODE_MASK;
2413fe2caefcSParav Pandit 	switch (opcode) {
2414fe2caefcSParav Pandit 	case OCRDMA_WRITE:
2415fe2caefcSParav Pandit 		ibwc->opcode = IB_WC_RDMA_WRITE;
2416fe2caefcSParav Pandit 		break;
2417fe2caefcSParav Pandit 	case OCRDMA_READ:
2418fe2caefcSParav Pandit 		rw = (struct ocrdma_sge *)(hdr + 1);
2419fe2caefcSParav Pandit 		ibwc->opcode = IB_WC_RDMA_READ;
2420fe2caefcSParav Pandit 		ibwc->byte_len = rw->len;
2421fe2caefcSParav Pandit 		break;
2422fe2caefcSParav Pandit 	case OCRDMA_SEND:
2423fe2caefcSParav Pandit 		ibwc->opcode = IB_WC_SEND;
2424fe2caefcSParav Pandit 		break;
24257c33880cSNaresh Gottumukkala 	case OCRDMA_FR_MR:
2426191cfed5SSagi Grimberg 		ibwc->opcode = IB_WC_REG_MR;
24277c33880cSNaresh Gottumukkala 		break;
2428fe2caefcSParav Pandit 	case OCRDMA_LKEY_INV:
2429fe2caefcSParav Pandit 		ibwc->opcode = IB_WC_LOCAL_INV;
2430fe2caefcSParav Pandit 		break;
2431fe2caefcSParav Pandit 	default:
2432fe2caefcSParav Pandit 		ibwc->status = IB_WC_GENERAL_ERR;
2433ef99c4c2SNaresh Gottumukkala 		pr_err("%s() invalid opcode received = 0x%x\n",
2434fe2caefcSParav Pandit 		       __func__, hdr->cw & OCRDMA_WQE_OPCODE_MASK);
2435fe2caefcSParav Pandit 		break;
24362b50176dSJoe Perches 	}
2437fe2caefcSParav Pandit }
2438fe2caefcSParav Pandit 
2439fe2caefcSParav Pandit static void ocrdma_set_cqe_status_flushed(struct ocrdma_qp *qp,
2440fe2caefcSParav Pandit 						struct ocrdma_cqe *cqe)
2441fe2caefcSParav Pandit {
2442fe2caefcSParav Pandit 	if (is_cqe_for_sq(cqe)) {
2443fe2caefcSParav Pandit 		cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
2444fe2caefcSParav Pandit 				cqe->flags_status_srcqpn) &
2445fe2caefcSParav Pandit 					~OCRDMA_CQE_STATUS_MASK);
2446fe2caefcSParav Pandit 		cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
2447fe2caefcSParav Pandit 				cqe->flags_status_srcqpn) |
2448fe2caefcSParav Pandit 				(OCRDMA_CQE_WR_FLUSH_ERR <<
2449fe2caefcSParav Pandit 					OCRDMA_CQE_STATUS_SHIFT));
2450fe2caefcSParav Pandit 	} else {
2451fe2caefcSParav Pandit 		if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI) {
2452fe2caefcSParav Pandit 			cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
2453fe2caefcSParav Pandit 					cqe->flags_status_srcqpn) &
2454fe2caefcSParav Pandit 						~OCRDMA_CQE_UD_STATUS_MASK);
2455fe2caefcSParav Pandit 			cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
2456fe2caefcSParav Pandit 					cqe->flags_status_srcqpn) |
2457fe2caefcSParav Pandit 					(OCRDMA_CQE_WR_FLUSH_ERR <<
2458fe2caefcSParav Pandit 						OCRDMA_CQE_UD_STATUS_SHIFT));
2459fe2caefcSParav Pandit 		} else {
2460fe2caefcSParav Pandit 			cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
2461fe2caefcSParav Pandit 					cqe->flags_status_srcqpn) &
2462fe2caefcSParav Pandit 						~OCRDMA_CQE_STATUS_MASK);
2463fe2caefcSParav Pandit 			cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
2464fe2caefcSParav Pandit 					cqe->flags_status_srcqpn) |
2465fe2caefcSParav Pandit 					(OCRDMA_CQE_WR_FLUSH_ERR <<
2466fe2caefcSParav Pandit 						OCRDMA_CQE_STATUS_SHIFT));
2467fe2caefcSParav Pandit 		}
2468fe2caefcSParav Pandit 	}
2469fe2caefcSParav Pandit }
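/* The helper above rewrites the status of a hardware CQE in place to
 * OCRDMA_CQE_WR_FLUSH_ERR: receive CQEs of UD/GSI QPs use the separate
 * UD status field, all other CQEs use the common status field.  This is
 * what lets a single stale CQE be replayed as flush completions for the
 * remaining WQEs/RQEs of a QP that has moved to the error state.
 */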
2470fe2caefcSParav Pandit 
2471fe2caefcSParav Pandit static bool ocrdma_update_err_cqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe,
2472fe2caefcSParav Pandit 				  struct ocrdma_qp *qp, int status)
2473fe2caefcSParav Pandit {
2474fe2caefcSParav Pandit 	bool expand = false;
2475fe2caefcSParav Pandit 
2476fe2caefcSParav Pandit 	ibwc->byte_len = 0;
2477fe2caefcSParav Pandit 	ibwc->qp = &qp->ibqp;
2478fe2caefcSParav Pandit 	ibwc->status = ocrdma_to_ibwc_err(status);
2479fe2caefcSParav Pandit 
2480fe2caefcSParav Pandit 	ocrdma_flush_qp(qp);
2481057729cbSNaresh Gottumukkala 	ocrdma_qp_state_change(qp, IB_QPS_ERR, NULL);
2482fe2caefcSParav Pandit 
2483fe2caefcSParav Pandit 	/* if a wqe/rqe is still pending for which a cqe must be
2484fe2caefcSParav Pandit 	 * returned, trigger expansion of this cqe.
2485fe2caefcSParav Pandit 	 */
2486fe2caefcSParav Pandit 	if (!is_hw_rq_empty(qp) || !is_hw_sq_empty(qp)) {
2487fe2caefcSParav Pandit 		expand = true;
2488fe2caefcSParav Pandit 		ocrdma_set_cqe_status_flushed(qp, cqe);
2489fe2caefcSParav Pandit 	}
2490fe2caefcSParav Pandit 	return expand;
2491fe2caefcSParav Pandit }
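/* "expand" is the driver's term for re-using the current hardware CQE:
 * when ocrdma_update_err_cqe() returns true, ocrdma_poll_hwcq() does not
 * advance its get pointer, so the same flush-marked CQE is polled again
 * and drains one more pending WQE or RQE on every pass until both
 * hardware queues are empty.
 */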
2492fe2caefcSParav Pandit 
2493fe2caefcSParav Pandit static int ocrdma_update_err_rcqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe,
2494fe2caefcSParav Pandit 				  struct ocrdma_qp *qp, int status)
2495fe2caefcSParav Pandit {
2496fe2caefcSParav Pandit 	ibwc->opcode = IB_WC_RECV;
2497fe2caefcSParav Pandit 	ibwc->wr_id = qp->rqe_wr_id_tbl[qp->rq.tail];
2498fe2caefcSParav Pandit 	ocrdma_hwq_inc_tail(&qp->rq);
2499fe2caefcSParav Pandit 
2500fe2caefcSParav Pandit 	return ocrdma_update_err_cqe(ibwc, cqe, qp, status);
2501fe2caefcSParav Pandit }
2502fe2caefcSParav Pandit 
2503fe2caefcSParav Pandit static int ocrdma_update_err_scqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe,
2504fe2caefcSParav Pandit 				  struct ocrdma_qp *qp, int status)
2505fe2caefcSParav Pandit {
2506fe2caefcSParav Pandit 	ocrdma_update_wc(qp, ibwc, qp->sq.tail);
2507fe2caefcSParav Pandit 	ocrdma_hwq_inc_tail(&qp->sq);
2508fe2caefcSParav Pandit 
2509fe2caefcSParav Pandit 	return ocrdma_update_err_cqe(ibwc, cqe, qp, status);
2510fe2caefcSParav Pandit }
2511fe2caefcSParav Pandit 
2512fe2caefcSParav Pandit 
2513fe2caefcSParav Pandit static bool ocrdma_poll_err_scqe(struct ocrdma_qp *qp,
2514fe2caefcSParav Pandit 				 struct ocrdma_cqe *cqe, struct ib_wc *ibwc,
2515fe2caefcSParav Pandit 				 bool *polled, bool *stop)
2516fe2caefcSParav Pandit {
2517fe2caefcSParav Pandit 	bool expand;
2518ad56ebb4SSelvin Xavier 	struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);
2519fe2caefcSParav Pandit 	int status = (le32_to_cpu(cqe->flags_status_srcqpn) &
2520fe2caefcSParav Pandit 		OCRDMA_CQE_STATUS_MASK) >> OCRDMA_CQE_STATUS_SHIFT;
2521ad56ebb4SSelvin Xavier 	if (status < OCRDMA_MAX_CQE_ERR)
2522ad56ebb4SSelvin Xavier 		atomic_inc(&dev->cqe_err_stats[status]);
2523fe2caefcSParav Pandit 
2524fe2caefcSParav Pandit 	/* when the hw sq is empty but the rq is not, keep the cqe
2525fe2caefcSParav Pandit 	 * so that the cq event is raised again.
2526fe2caefcSParav Pandit 	 */
2527fe2caefcSParav Pandit 	if (is_hw_sq_empty(qp) && !is_hw_rq_empty(qp)) {
2528fe2caefcSParav Pandit 		/* when the rq and sq share the same cq, it is safe to
2529fe2caefcSParav Pandit 		 * return flush cqes for the RQEs.
2530fe2caefcSParav Pandit 		 */
2531fe2caefcSParav Pandit 		if (!qp->srq && (qp->sq_cq == qp->rq_cq)) {
2532fe2caefcSParav Pandit 			*polled = true;
2533fe2caefcSParav Pandit 			status = OCRDMA_CQE_WR_FLUSH_ERR;
2534fe2caefcSParav Pandit 			expand = ocrdma_update_err_rcqe(ibwc, cqe, qp, status);
2535fe2caefcSParav Pandit 		} else {
2536fe2caefcSParav Pandit 			/* stop processing further cqe as this cqe is used for
2537fe2caefcSParav Pandit 			 * triggering cq event on buddy cq of RQ.
2538fe2caefcSParav Pandit 			 * When QP is destroyed, this cqe will be removed
2539fe2caefcSParav Pandit 			 * from the cq's hardware q.
2540fe2caefcSParav Pandit 			 */
2541fe2caefcSParav Pandit 			*polled = false;
2542fe2caefcSParav Pandit 			*stop = true;
2543fe2caefcSParav Pandit 			expand = false;
2544fe2caefcSParav Pandit 		}
2545a96ffb1dSSelvin Xavier 	} else if (is_hw_sq_empty(qp)) {
2546a96ffb1dSSelvin Xavier 		/* Do nothing */
2547a96ffb1dSSelvin Xavier 		expand = false;
2548a96ffb1dSSelvin Xavier 		*polled = false;
2549a96ffb1dSSelvin Xavier 		*stop = false;
2550fe2caefcSParav Pandit 	} else {
2551fe2caefcSParav Pandit 		*polled = true;
2552fe2caefcSParav Pandit 		expand = ocrdma_update_err_scqe(ibwc, cqe, qp, status);
2553fe2caefcSParav Pandit 	}
2554fe2caefcSParav Pandit 	return expand;
2555fe2caefcSParav Pandit }
2556fe2caefcSParav Pandit 
2557fe2caefcSParav Pandit static bool ocrdma_poll_success_scqe(struct ocrdma_qp *qp,
2558fe2caefcSParav Pandit 				     struct ocrdma_cqe *cqe,
2559fe2caefcSParav Pandit 				     struct ib_wc *ibwc, bool *polled)
2560fe2caefcSParav Pandit {
2561fe2caefcSParav Pandit 	bool expand = false;
2562fe2caefcSParav Pandit 	int tail = qp->sq.tail;
2563fe2caefcSParav Pandit 	u32 wqe_idx;
2564fe2caefcSParav Pandit 
2565fe2caefcSParav Pandit 	if (!qp->wqe_wr_id_tbl[tail].signaled) {
2566fe2caefcSParav Pandit 		*polled = false;    /* WC cannot be consumed yet */
2567fe2caefcSParav Pandit 	} else {
2568fe2caefcSParav Pandit 		ibwc->status = IB_WC_SUCCESS;
2569fe2caefcSParav Pandit 		ibwc->wc_flags = 0;
2570fe2caefcSParav Pandit 		ibwc->qp = &qp->ibqp;
2571fe2caefcSParav Pandit 		ocrdma_update_wc(qp, ibwc, tail);
2572fe2caefcSParav Pandit 		*polled = true;
2573ae3bca90SParav Pandit 	}
257443a6b402SNaresh Gottumukkala 	wqe_idx = (le32_to_cpu(cqe->wq.wqeidx) &
257543a6b402SNaresh Gottumukkala 			OCRDMA_CQE_WQEIDX_MASK) & qp->sq.max_wqe_idx;
2576fe2caefcSParav Pandit 	if (tail != wqe_idx)
2577fe2caefcSParav Pandit 		expand = true; /* Coalesced CQE can't be consumed yet */
2578ae3bca90SParav Pandit 
2579fe2caefcSParav Pandit 	ocrdma_hwq_inc_tail(&qp->sq);
2580fe2caefcSParav Pandit 	return expand;
2581fe2caefcSParav Pandit }
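/* Two details of the success path above: an unsignaled WQE produces no
 * work completion (*polled stays false) but its slot is still retired by
 * advancing sq.tail, and a coalesced CQE (wqeidx ahead of the current
 * tail) sets expand so the same CQE keeps being polled until every WQE
 * it covers has been consumed.
 */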
2582fe2caefcSParav Pandit 
2583fe2caefcSParav Pandit static bool ocrdma_poll_scqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe,
2584fe2caefcSParav Pandit 			     struct ib_wc *ibwc, bool *polled, bool *stop)
2585fe2caefcSParav Pandit {
2586fe2caefcSParav Pandit 	int status;
2587fe2caefcSParav Pandit 	bool expand;
2588fe2caefcSParav Pandit 
2589fe2caefcSParav Pandit 	status = (le32_to_cpu(cqe->flags_status_srcqpn) &
2590fe2caefcSParav Pandit 		OCRDMA_CQE_STATUS_MASK) >> OCRDMA_CQE_STATUS_SHIFT;
2591fe2caefcSParav Pandit 
2592fe2caefcSParav Pandit 	if (status == OCRDMA_CQE_SUCCESS)
2593fe2caefcSParav Pandit 		expand = ocrdma_poll_success_scqe(qp, cqe, ibwc, polled);
2594fe2caefcSParav Pandit 	else
2595fe2caefcSParav Pandit 		expand = ocrdma_poll_err_scqe(qp, cqe, ibwc, polled, stop);
2596fe2caefcSParav Pandit 	return expand;
2597fe2caefcSParav Pandit }
2598fe2caefcSParav Pandit 
25996b062667SDevesh Sharma static int ocrdma_update_ud_rcqe(struct ocrdma_dev *dev, struct ib_wc *ibwc,
26006b062667SDevesh Sharma 				 struct ocrdma_cqe *cqe)
2601fe2caefcSParav Pandit {
2602fe2caefcSParav Pandit 	int status;
26036b062667SDevesh Sharma 	u16 hdr_type = 0;
2604fe2caefcSParav Pandit 
2605fe2caefcSParav Pandit 	status = (le32_to_cpu(cqe->flags_status_srcqpn) &
2606fe2caefcSParav Pandit 		OCRDMA_CQE_UD_STATUS_MASK) >> OCRDMA_CQE_UD_STATUS_SHIFT;
2607fe2caefcSParav Pandit 	ibwc->src_qp = le32_to_cpu(cqe->flags_status_srcqpn) &
2608fe2caefcSParav Pandit 						OCRDMA_CQE_SRCQP_MASK;
2609aff3ead9SSelvin Xavier 	ibwc->pkey_index = 0;
2610fe2caefcSParav Pandit 	ibwc->wc_flags = IB_WC_GRH;
2611fe2caefcSParav Pandit 	ibwc->byte_len = (le32_to_cpu(cqe->ud.rxlen_pkey) >>
26126b062667SDevesh Sharma 			  OCRDMA_CQE_UD_XFER_LEN_SHIFT) &
26136b062667SDevesh Sharma 			  OCRDMA_CQE_UD_XFER_LEN_MASK;
26146b062667SDevesh Sharma 
26156b062667SDevesh Sharma 	if (ocrdma_is_udp_encap_supported(dev)) {
26166b062667SDevesh Sharma 		hdr_type = (le32_to_cpu(cqe->ud.rxlen_pkey) >>
26176b062667SDevesh Sharma 			    OCRDMA_CQE_UD_L3TYPE_SHIFT) &
26186b062667SDevesh Sharma 			    OCRDMA_CQE_UD_L3TYPE_MASK;
26196b062667SDevesh Sharma 		ibwc->wc_flags |= IB_WC_WITH_NETWORK_HDR_TYPE;
26206b062667SDevesh Sharma 		ibwc->network_hdr_type = hdr_type;
26216b062667SDevesh Sharma 	}
26226b062667SDevesh Sharma 
2623fe2caefcSParav Pandit 	return status;
2624fe2caefcSParav Pandit }
2625fe2caefcSParav Pandit 
2626fe2caefcSParav Pandit static void ocrdma_update_free_srq_cqe(struct ib_wc *ibwc,
2627fe2caefcSParav Pandit 				       struct ocrdma_cqe *cqe,
2628fe2caefcSParav Pandit 				       struct ocrdma_qp *qp)
2629fe2caefcSParav Pandit {
2630fe2caefcSParav Pandit 	unsigned long flags;
2631fe2caefcSParav Pandit 	struct ocrdma_srq *srq;
2632fe2caefcSParav Pandit 	u32 wqe_idx;
2633fe2caefcSParav Pandit 
2634fe2caefcSParav Pandit 	srq = get_ocrdma_srq(qp->ibqp.srq);
263543a6b402SNaresh Gottumukkala 	wqe_idx = (le32_to_cpu(cqe->rq.buftag_qpn) >>
263643a6b402SNaresh Gottumukkala 		OCRDMA_CQE_BUFTAG_SHIFT) & srq->rq.max_wqe_idx;
2637db287ec5Sssh10 	BUG_ON(wqe_idx < 1);
2638cf5788adSSelvin Xavier 
2639fe2caefcSParav Pandit 	ibwc->wr_id = srq->rqe_wr_id_tbl[wqe_idx];
2640fe2caefcSParav Pandit 	spin_lock_irqsave(&srq->q_lock, flags);
2641cf5788adSSelvin Xavier 	ocrdma_srq_toggle_bit(srq, wqe_idx - 1);
2642fe2caefcSParav Pandit 	spin_unlock_irqrestore(&srq->q_lock, flags);
2643fe2caefcSParav Pandit 	ocrdma_hwq_inc_tail(&srq->rq);
2644fe2caefcSParav Pandit }
2645fe2caefcSParav Pandit 
2646fe2caefcSParav Pandit static bool ocrdma_poll_err_rcqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe,
2647fe2caefcSParav Pandit 				struct ib_wc *ibwc, bool *polled, bool *stop,
2648fe2caefcSParav Pandit 				int status)
2649fe2caefcSParav Pandit {
2650fe2caefcSParav Pandit 	bool expand;
2651ad56ebb4SSelvin Xavier 	struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);
2652ad56ebb4SSelvin Xavier 
2653ad56ebb4SSelvin Xavier 	if (status < OCRDMA_MAX_CQE_ERR)
2654ad56ebb4SSelvin Xavier 		atomic_inc(&dev->cqe_err_stats[status]);
2655fe2caefcSParav Pandit 
2656fe2caefcSParav Pandit 	/* when the hw rq is empty but the sq is not, keep the cqe
2657fe2caefcSParav Pandit 	 * so that the cq event is raised again.
2658fe2caefcSParav Pandit 	 */
2659fe2caefcSParav Pandit 	if (is_hw_rq_empty(qp) && !is_hw_sq_empty(qp)) {
2660fe2caefcSParav Pandit 		if (!qp->srq && (qp->sq_cq == qp->rq_cq)) {
2661fe2caefcSParav Pandit 			*polled = true;
2662fe2caefcSParav Pandit 			status = OCRDMA_CQE_WR_FLUSH_ERR;
2663fe2caefcSParav Pandit 			expand = ocrdma_update_err_scqe(ibwc, cqe, qp, status);
2664fe2caefcSParav Pandit 		} else {
2665fe2caefcSParav Pandit 			*polled = false;
2666fe2caefcSParav Pandit 			*stop = true;
2667fe2caefcSParav Pandit 			expand = false;
2668fe2caefcSParav Pandit 		}
2669a96ffb1dSSelvin Xavier 	} else if (is_hw_rq_empty(qp)) {
2670a96ffb1dSSelvin Xavier 		/* Do nothing */
2671a96ffb1dSSelvin Xavier 		expand = false;
2672a96ffb1dSSelvin Xavier 		*polled = false;
2673a96ffb1dSSelvin Xavier 		*stop = false;
2674a3698a9bSParav Pandit 	} else {
2675a3698a9bSParav Pandit 		*polled = true;
2676fe2caefcSParav Pandit 		expand = ocrdma_update_err_rcqe(ibwc, cqe, qp, status);
2677a3698a9bSParav Pandit 	}
2678fe2caefcSParav Pandit 	return expand;
2679fe2caefcSParav Pandit }
2680fe2caefcSParav Pandit 
2681fe2caefcSParav Pandit static void ocrdma_poll_success_rcqe(struct ocrdma_qp *qp,
2682fe2caefcSParav Pandit 				     struct ocrdma_cqe *cqe, struct ib_wc *ibwc)
2683fe2caefcSParav Pandit {
26846b062667SDevesh Sharma 	struct ocrdma_dev *dev;
26856b062667SDevesh Sharma 
26866b062667SDevesh Sharma 	dev = get_ocrdma_dev(qp->ibqp.device);
2687fe2caefcSParav Pandit 	ibwc->opcode = IB_WC_RECV;
2688fe2caefcSParav Pandit 	ibwc->qp = &qp->ibqp;
2689fe2caefcSParav Pandit 	ibwc->status = IB_WC_SUCCESS;
2690fe2caefcSParav Pandit 
2691fe2caefcSParav Pandit 	if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI)
26926b062667SDevesh Sharma 		ocrdma_update_ud_rcqe(dev, ibwc, cqe);
2693fe2caefcSParav Pandit 	else
2694fe2caefcSParav Pandit 		ibwc->byte_len = le32_to_cpu(cqe->rq.rxlen);
2695fe2caefcSParav Pandit 
2696fe2caefcSParav Pandit 	if (is_cqe_imm(cqe)) {
2697fe2caefcSParav Pandit 		ibwc->ex.imm_data = htonl(le32_to_cpu(cqe->rq.lkey_immdt));
2698fe2caefcSParav Pandit 		ibwc->wc_flags |= IB_WC_WITH_IMM;
2699fe2caefcSParav Pandit 	} else if (is_cqe_wr_imm(cqe)) {
2700fe2caefcSParav Pandit 		ibwc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
2701fe2caefcSParav Pandit 		ibwc->ex.imm_data = htonl(le32_to_cpu(cqe->rq.lkey_immdt));
2702fe2caefcSParav Pandit 		ibwc->wc_flags |= IB_WC_WITH_IMM;
2703fe2caefcSParav Pandit 	} else if (is_cqe_invalidated(cqe)) {
2704fe2caefcSParav Pandit 		ibwc->ex.invalidate_rkey = le32_to_cpu(cqe->rq.lkey_immdt);
2705fe2caefcSParav Pandit 		ibwc->wc_flags |= IB_WC_WITH_INVALIDATE;
2706fe2caefcSParav Pandit 	}
2707f99b1649SNaresh Gottumukkala 	if (qp->ibqp.srq) {
2708fe2caefcSParav Pandit 		ocrdma_update_free_srq_cqe(ibwc, cqe, qp);
2709f99b1649SNaresh Gottumukkala 	} else {
2710fe2caefcSParav Pandit 		ibwc->wr_id = qp->rqe_wr_id_tbl[qp->rq.tail];
2711fe2caefcSParav Pandit 		ocrdma_hwq_inc_tail(&qp->rq);
2712fe2caefcSParav Pandit 	}
2713fe2caefcSParav Pandit }
2714fe2caefcSParav Pandit 
2715fe2caefcSParav Pandit static bool ocrdma_poll_rcqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe,
2716fe2caefcSParav Pandit 			     struct ib_wc *ibwc, bool *polled, bool *stop)
2717fe2caefcSParav Pandit {
2718fe2caefcSParav Pandit 	int status;
2719fe2caefcSParav Pandit 	bool expand = false;
2720fe2caefcSParav Pandit 
2721fe2caefcSParav Pandit 	ibwc->wc_flags = 0;
2722f99b1649SNaresh Gottumukkala 	if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI) {
2723fe2caefcSParav Pandit 		status = (le32_to_cpu(cqe->flags_status_srcqpn) &
2724fe2caefcSParav Pandit 					OCRDMA_CQE_UD_STATUS_MASK) >>
2725fe2caefcSParav Pandit 					OCRDMA_CQE_UD_STATUS_SHIFT;
2726f99b1649SNaresh Gottumukkala 	} else {
2727fe2caefcSParav Pandit 		status = (le32_to_cpu(cqe->flags_status_srcqpn) &
2728fe2caefcSParav Pandit 			     OCRDMA_CQE_STATUS_MASK) >> OCRDMA_CQE_STATUS_SHIFT;
2729f99b1649SNaresh Gottumukkala 	}
2730fe2caefcSParav Pandit 
2731fe2caefcSParav Pandit 	if (status == OCRDMA_CQE_SUCCESS) {
2732fe2caefcSParav Pandit 		*polled = true;
2733fe2caefcSParav Pandit 		ocrdma_poll_success_rcqe(qp, cqe, ibwc);
2734fe2caefcSParav Pandit 	} else {
2735fe2caefcSParav Pandit 		expand = ocrdma_poll_err_rcqe(qp, cqe, ibwc, polled, stop,
2736fe2caefcSParav Pandit 					      status);
2737fe2caefcSParav Pandit 	}
2738fe2caefcSParav Pandit 	return expand;
2739fe2caefcSParav Pandit }
2740fe2caefcSParav Pandit 
2741fe2caefcSParav Pandit static void ocrdma_change_cq_phase(struct ocrdma_cq *cq, struct ocrdma_cqe *cqe,
2742fe2caefcSParav Pandit 				   u16 cur_getp)
2743fe2caefcSParav Pandit {
2744fe2caefcSParav Pandit 	if (cq->phase_change) {
2745fe2caefcSParav Pandit 		if (cur_getp == 0)
2746fe2caefcSParav Pandit 			cq->phase = (~cq->phase & OCRDMA_CQE_VALID);
2747f99b1649SNaresh Gottumukkala 	} else {
2748fe2caefcSParav Pandit 		/* clear valid bit */
2749fe2caefcSParav Pandit 		cqe->flags_status_srcqpn = 0;
2750fe2caefcSParav Pandit 	}
2751f99b1649SNaresh Gottumukkala }
2752fe2caefcSParav Pandit 
2753fe2caefcSParav Pandit static int ocrdma_poll_hwcq(struct ocrdma_cq *cq, int num_entries,
2754fe2caefcSParav Pandit 			    struct ib_wc *ibwc)
2755fe2caefcSParav Pandit {
2756fe2caefcSParav Pandit 	u16 qpn = 0;
2757fe2caefcSParav Pandit 	int i = 0;
2758fe2caefcSParav Pandit 	bool expand = false;
2759fe2caefcSParav Pandit 	int polled_hw_cqes = 0;
2760fe2caefcSParav Pandit 	struct ocrdma_qp *qp = NULL;
27611afc0454SNaresh Gottumukkala 	struct ocrdma_dev *dev = get_ocrdma_dev(cq->ibcq.device);
2762fe2caefcSParav Pandit 	struct ocrdma_cqe *cqe;
2763fe2caefcSParav Pandit 	u16 cur_getp; bool polled = false; bool stop = false;
2764fe2caefcSParav Pandit 
2765fe2caefcSParav Pandit 	cur_getp = cq->getp;
2766fe2caefcSParav Pandit 	while (num_entries) {
2767fe2caefcSParav Pandit 		cqe = cq->va + cur_getp;
2768fe2caefcSParav Pandit 		/* check whether valid cqe or not */
2769fe2caefcSParav Pandit 		if (!is_cqe_valid(cq, cqe))
2770fe2caefcSParav Pandit 			break;
2771fe2caefcSParav Pandit 		qpn = (le32_to_cpu(cqe->cmn.qpn) & OCRDMA_CQE_QPN_MASK);
2772fe2caefcSParav Pandit 		/* ignore discarded cqe */
2773fe2caefcSParav Pandit 		if (qpn == 0)
2774fe2caefcSParav Pandit 			goto skip_cqe;
2775fe2caefcSParav Pandit 		qp = dev->qp_tbl[qpn];
2776fe2caefcSParav Pandit 		BUG_ON(qp == NULL);
2777fe2caefcSParav Pandit 
2778fe2caefcSParav Pandit 		if (is_cqe_for_sq(cqe)) {
2779fe2caefcSParav Pandit 			expand = ocrdma_poll_scqe(qp, cqe, ibwc, &polled,
2780fe2caefcSParav Pandit 						  &stop);
2781fe2caefcSParav Pandit 		} else {
2782fe2caefcSParav Pandit 			expand = ocrdma_poll_rcqe(qp, cqe, ibwc, &polled,
2783fe2caefcSParav Pandit 						  &stop);
2784fe2caefcSParav Pandit 		}
2785fe2caefcSParav Pandit 		if (expand)
2786fe2caefcSParav Pandit 			goto expand_cqe;
2787fe2caefcSParav Pandit 		if (stop)
2788fe2caefcSParav Pandit 			goto stop_cqe;
2789fe2caefcSParav Pandit 		/* clear qpn to avoid duplicate processing by discard_cqe() */
2790fe2caefcSParav Pandit 		cqe->cmn.qpn = 0;
2791fe2caefcSParav Pandit skip_cqe:
2792fe2caefcSParav Pandit 		polled_hw_cqes += 1;
2793fe2caefcSParav Pandit 		cur_getp = (cur_getp + 1) % cq->max_hw_cqe;
2794fe2caefcSParav Pandit 		ocrdma_change_cq_phase(cq, cqe, cur_getp);
2795fe2caefcSParav Pandit expand_cqe:
2796fe2caefcSParav Pandit 		if (polled) {
2797fe2caefcSParav Pandit 			num_entries -= 1;
2798fe2caefcSParav Pandit 			i += 1;
2799fe2caefcSParav Pandit 			ibwc = ibwc + 1;
2800fe2caefcSParav Pandit 			polled = false;
2801fe2caefcSParav Pandit 		}
2802fe2caefcSParav Pandit 	}
2803fe2caefcSParav Pandit stop_cqe:
2804fe2caefcSParav Pandit 	cq->getp = cur_getp;
2805b41f7852SDevesh Sharma 
2806b41f7852SDevesh Sharma 	if (polled_hw_cqes)
2807b41f7852SDevesh Sharma 		ocrdma_ring_cq_db(dev, cq->id, false, false, polled_hw_cqes);
2808ea617626SDevesh Sharma 
2809fe2caefcSParav Pandit 	return i;
2810fe2caefcSParav Pandit }
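/* ocrdma_poll_hwcq() above is the core poll loop: a CQE is live when its
 * valid bit matches the CQ's current phase, a qpn of 0 marks an entry
 * that was already discarded, "expand" re-polls the same CQE (see the
 * error helpers above) and "stop" leaves the CQE in place so it can ring
 * the buddy CQ later.  The get pointer wraps modulo max_hw_cqe, flipping
 * the phase on wrap (or clearing each CQE's valid bit when phase_change
 * is not used), and the CQ doorbell is finally rung with the number of
 * hardware CQEs consumed.
 */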
2811fe2caefcSParav Pandit 
2812fe2caefcSParav Pandit /* insert error cqe if the QP's SQ or RQ's CQ matches the CQ under poll. */
2813fe2caefcSParav Pandit static int ocrdma_add_err_cqe(struct ocrdma_cq *cq, int num_entries,
2814fe2caefcSParav Pandit 			      struct ocrdma_qp *qp, struct ib_wc *ibwc)
2815fe2caefcSParav Pandit {
2816fe2caefcSParav Pandit 	int err_cqes = 0;
2817fe2caefcSParav Pandit 
2818fe2caefcSParav Pandit 	while (num_entries) {
2819fe2caefcSParav Pandit 		if (is_hw_sq_empty(qp) && is_hw_rq_empty(qp))
2820fe2caefcSParav Pandit 			break;
2821fe2caefcSParav Pandit 		if (!is_hw_sq_empty(qp) && qp->sq_cq == cq) {
2822fe2caefcSParav Pandit 			ocrdma_update_wc(qp, ibwc, qp->sq.tail);
2823fe2caefcSParav Pandit 			ocrdma_hwq_inc_tail(&qp->sq);
2824fe2caefcSParav Pandit 		} else if (!is_hw_rq_empty(qp) && qp->rq_cq == cq) {
2825fe2caefcSParav Pandit 			ibwc->wr_id = qp->rqe_wr_id_tbl[qp->rq.tail];
2826fe2caefcSParav Pandit 			ocrdma_hwq_inc_tail(&qp->rq);
2827f99b1649SNaresh Gottumukkala 		} else {
2828fe2caefcSParav Pandit 			return err_cqes;
2829f99b1649SNaresh Gottumukkala 		}
2830fe2caefcSParav Pandit 		ibwc->byte_len = 0;
2831fe2caefcSParav Pandit 		ibwc->status = IB_WC_WR_FLUSH_ERR;
2832fe2caefcSParav Pandit 		ibwc = ibwc + 1;
2833fe2caefcSParav Pandit 		err_cqes += 1;
2834fe2caefcSParav Pandit 		num_entries -= 1;
2835fe2caefcSParav Pandit 	}
2836fe2caefcSParav Pandit 	return err_cqes;
2837fe2caefcSParav Pandit }
2838fe2caefcSParav Pandit 
2839fe2caefcSParav Pandit int ocrdma_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
2840fe2caefcSParav Pandit {
2841fe2caefcSParav Pandit 	int cqes_to_poll = num_entries;
28421afc0454SNaresh Gottumukkala 	struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);
28431afc0454SNaresh Gottumukkala 	struct ocrdma_dev *dev = get_ocrdma_dev(ibcq->device);
2844fe2caefcSParav Pandit 	int num_os_cqe = 0, err_cqes = 0;
2845fe2caefcSParav Pandit 	struct ocrdma_qp *qp;
28461afc0454SNaresh Gottumukkala 	unsigned long flags;
2847fe2caefcSParav Pandit 
2848fe2caefcSParav Pandit 	/* poll cqes from adapter CQ */
2849fe2caefcSParav Pandit 	spin_lock_irqsave(&cq->cq_lock, flags);
2850fe2caefcSParav Pandit 	num_os_cqe = ocrdma_poll_hwcq(cq, cqes_to_poll, wc);
2851fe2caefcSParav Pandit 	spin_unlock_irqrestore(&cq->cq_lock, flags);
2852fe2caefcSParav Pandit 	cqes_to_poll -= num_os_cqe;
2853fe2caefcSParav Pandit 
2854fe2caefcSParav Pandit 	if (cqes_to_poll) {
2855fe2caefcSParav Pandit 		wc = wc + num_os_cqe;
2856fe2caefcSParav Pandit 		/* the adapter returns a single error cqe when a qp moves
2857fe2caefcSParav Pandit 		 * to the error state, so insert error cqes with wc_status
2858fe2caefcSParav Pandit 		 * FLUSHED for the pending WQEs and RQEs of each QP whose
2859fe2caefcSParav Pandit 		 * SQ or RQ uses this CQ.
2860fe2caefcSParav Pandit 		 */
2861fe2caefcSParav Pandit 		spin_lock_irqsave(&dev->flush_q_lock, flags);
2862fe2caefcSParav Pandit 		list_for_each_entry(qp, &cq->sq_head, sq_entry) {
2863fe2caefcSParav Pandit 			if (cqes_to_poll == 0)
2864fe2caefcSParav Pandit 				break;
2865fe2caefcSParav Pandit 			err_cqes = ocrdma_add_err_cqe(cq, cqes_to_poll, qp, wc);
2866fe2caefcSParav Pandit 			cqes_to_poll -= err_cqes;
2867fe2caefcSParav Pandit 			num_os_cqe += err_cqes;
2868fe2caefcSParav Pandit 			wc = wc + err_cqes;
2869fe2caefcSParav Pandit 		}
2870fe2caefcSParav Pandit 		spin_unlock_irqrestore(&dev->flush_q_lock, flags);
2871fe2caefcSParav Pandit 	}
2872fe2caefcSParav Pandit 	return num_os_cqe;
2873fe2caefcSParav Pandit }
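/* Because the adapter reports only one error CQE when a QP enters the
 * error state, ocrdma_poll_cq() tops up the caller's array with software
 * generated IB_WC_WR_FLUSH_ERR completions for the QPs queued on this
 * CQ's flush list.  A consumer calling ib_poll_cq(cq, budget, wc_array)
 * therefore still sees one completion per outstanding WQE/RQE, even
 * though the hardware queue held a single CQE.
 */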
2874fe2caefcSParav Pandit 
2875fe2caefcSParav Pandit int ocrdma_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags cq_flags)
2876fe2caefcSParav Pandit {
28771afc0454SNaresh Gottumukkala 	struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);
28781afc0454SNaresh Gottumukkala 	struct ocrdma_dev *dev = get_ocrdma_dev(ibcq->device);
2879fe2caefcSParav Pandit 	u16 cq_id;
28801afc0454SNaresh Gottumukkala 	unsigned long flags;
2881ea617626SDevesh Sharma 	bool arm_needed = false, sol_needed = false;
2882fe2caefcSParav Pandit 
2883fe2caefcSParav Pandit 	cq_id = cq->id;
2884fe2caefcSParav Pandit 
2885fe2caefcSParav Pandit 	spin_lock_irqsave(&cq->cq_lock, flags);
2886fe2caefcSParav Pandit 	if (cq_flags & IB_CQ_NEXT_COMP || cq_flags & IB_CQ_SOLICITED)
2887ea617626SDevesh Sharma 		arm_needed = true;
2888fe2caefcSParav Pandit 	if (cq_flags & IB_CQ_SOLICITED)
2889ea617626SDevesh Sharma 		sol_needed = true;
2890fe2caefcSParav Pandit 
2891ea617626SDevesh Sharma 	ocrdma_ring_cq_db(dev, cq_id, arm_needed, sol_needed, 0);
2892fe2caefcSParav Pandit 	spin_unlock_irqrestore(&cq->cq_lock, flags);
2893ea617626SDevesh Sharma 
2894fe2caefcSParav Pandit 	return 0;
2895fe2caefcSParav Pandit }
28967c33880cSNaresh Gottumukkala 
2897c4367a26SShamir Rabinovitch struct ib_mr *ocrdma_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
289842a3b153SGal Pressman 			      u32 max_num_sg)
28997c33880cSNaresh Gottumukkala {
29007c33880cSNaresh Gottumukkala 	int status;
29017c33880cSNaresh Gottumukkala 	struct ocrdma_mr *mr;
29027c33880cSNaresh Gottumukkala 	struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
29037c33880cSNaresh Gottumukkala 	struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
29047c33880cSNaresh Gottumukkala 
2905cacb7d59SSagi Grimberg 	if (mr_type != IB_MR_TYPE_MEM_REG)
2906cacb7d59SSagi Grimberg 		return ERR_PTR(-EINVAL);
2907cacb7d59SSagi Grimberg 
2908cacb7d59SSagi Grimberg 	if (max_num_sg > dev->attr.max_pages_per_frmr)
29097c33880cSNaresh Gottumukkala 		return ERR_PTR(-EINVAL);
29107c33880cSNaresh Gottumukkala 
29117c33880cSNaresh Gottumukkala 	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
29127c33880cSNaresh Gottumukkala 	if (!mr)
29137c33880cSNaresh Gottumukkala 		return ERR_PTR(-ENOMEM);
29147c33880cSNaresh Gottumukkala 
29152eaa1c56SSagi Grimberg 	mr->pages = kcalloc(max_num_sg, sizeof(u64), GFP_KERNEL);
29162eaa1c56SSagi Grimberg 	if (!mr->pages) {
29172eaa1c56SSagi Grimberg 		status = -ENOMEM;
29182eaa1c56SSagi Grimberg 		goto pl_err;
29192eaa1c56SSagi Grimberg 	}
29202eaa1c56SSagi Grimberg 
2921cacb7d59SSagi Grimberg 	status = ocrdma_get_pbl_info(dev, mr, max_num_sg);
29227c33880cSNaresh Gottumukkala 	if (status)
29237c33880cSNaresh Gottumukkala 		goto pbl_err;
29247c33880cSNaresh Gottumukkala 	mr->hwmr.fr_mr = 1;
29257c33880cSNaresh Gottumukkala 	mr->hwmr.remote_rd = 0;
29267c33880cSNaresh Gottumukkala 	mr->hwmr.remote_wr = 0;
29277c33880cSNaresh Gottumukkala 	mr->hwmr.local_rd = 0;
29287c33880cSNaresh Gottumukkala 	mr->hwmr.local_wr = 0;
29297c33880cSNaresh Gottumukkala 	mr->hwmr.mw_bind = 0;
29307c33880cSNaresh Gottumukkala 	status = ocrdma_build_pbl_tbl(dev, &mr->hwmr);
29317c33880cSNaresh Gottumukkala 	if (status)
29327c33880cSNaresh Gottumukkala 		goto pbl_err;
29337c33880cSNaresh Gottumukkala 	status = ocrdma_reg_mr(dev, &mr->hwmr, pd->id, 0);
29347c33880cSNaresh Gottumukkala 	if (status)
29357c33880cSNaresh Gottumukkala 		goto mbx_err;
29367c33880cSNaresh Gottumukkala 	mr->ibmr.rkey = mr->hwmr.lkey;
29377c33880cSNaresh Gottumukkala 	mr->ibmr.lkey = mr->hwmr.lkey;
29387a1e89d8SRoland Dreier 	dev->stag_arr[(mr->hwmr.lkey >> 8) & (OCRDMA_MAX_STAG - 1)] =
29397a1e89d8SRoland Dreier 		(unsigned long) mr;
29407c33880cSNaresh Gottumukkala 	return &mr->ibmr;
29417c33880cSNaresh Gottumukkala mbx_err:
29427c33880cSNaresh Gottumukkala 	ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr);
29437c33880cSNaresh Gottumukkala pbl_err:
29442eaa1c56SSagi Grimberg 	kfree(mr->pages);
29452eaa1c56SSagi Grimberg pl_err:
29467c33880cSNaresh Gottumukkala 	kfree(mr);
29477c33880cSNaresh Gottumukkala 	return ERR_PTR(-ENOMEM);
29487c33880cSNaresh Gottumukkala }
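/* ocrdma_alloc_mr() above supports only IB_MR_TYPE_MEM_REG and caps the
 * page count at the adapter's max_pages_per_frmr.  Besides the hardware
 * PBL table it keeps a plain u64 page array (mr->pages) that is filled
 * by ocrdma_map_mr_sg() below and consumed by ocrdma_build_reg() when
 * the IB_WR_REG_MR work request is posted.
 */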
29497c33880cSNaresh Gottumukkala 
29502eaa1c56SSagi Grimberg static int ocrdma_set_page(struct ib_mr *ibmr, u64 addr)
29512eaa1c56SSagi Grimberg {
29522eaa1c56SSagi Grimberg 	struct ocrdma_mr *mr = get_ocrdma_mr(ibmr);
29532eaa1c56SSagi Grimberg 
29542eaa1c56SSagi Grimberg 	if (unlikely(mr->npages == mr->hwmr.num_pbes))
29552eaa1c56SSagi Grimberg 		return -ENOMEM;
29562eaa1c56SSagi Grimberg 
29572eaa1c56SSagi Grimberg 	mr->pages[mr->npages++] = addr;
29582eaa1c56SSagi Grimberg 
29592eaa1c56SSagi Grimberg 	return 0;
29602eaa1c56SSagi Grimberg }
29612eaa1c56SSagi Grimberg 
2962ff2ba993SChristoph Hellwig int ocrdma_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
29639aa8b321SBart Van Assche 		     unsigned int *sg_offset)
29642eaa1c56SSagi Grimberg {
29652eaa1c56SSagi Grimberg 	struct ocrdma_mr *mr = get_ocrdma_mr(ibmr);
29662eaa1c56SSagi Grimberg 
29672eaa1c56SSagi Grimberg 	mr->npages = 0;
29682eaa1c56SSagi Grimberg 
2969ff2ba993SChristoph Hellwig 	return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, ocrdma_set_page);
29702eaa1c56SSagi Grimberg }
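/* Illustrative fast-registration flow from a consumer's point of view
 * (a sketch only, not code from this driver):
 *
 *	mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, nents);
 *	n = ib_map_mr_sg(mr, sgl, nents, NULL, PAGE_SIZE);
 *	reg_wr.wr.opcode = IB_WR_REG_MR;
 *	reg_wr.mr = mr;
 *	reg_wr.key = mr->rkey;
 *	reg_wr.access = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE;
 *	ret = ib_post_send(qp, &reg_wr.wr, &bad_wr);
 *
 * ib_map_mr_sg() lands in ocrdma_map_mr_sg() above, which records the
 * page addresses via ocrdma_set_page(); posting the IB_WR_REG_MR work
 * request then reaches ocrdma_build_reg() to turn that page list into
 * PBEs for the adapter.
 */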
2971