// SPDX-License-Identifier: GPL-2.0-only
/*
 * drivers/infiniband/hw/mana/main.c
 * (revision ab93e0dd72c37d378dd936f031ffb83ff2bd87ce)
 *
 * Copyright (c) 2022, Microsoft Corporation. All rights reserved.
 */

#include "mana_ib.h"
#include <linux/pci.h>

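/**
 * mana_ib_uncfg_vport() - Drop a PD's reference on the port's vPort.
 * @dev: mana RDMA device
 * @pd: protection domain previously bound to the vPort
 * @port: 1-based port number used to look up the netdev
 *
 * Decrements pd->vport_use_count under vport_mutex and, once the count
 * reaches zero, releases the underlying Ethernet vPort via
 * mana_uncfg_vport().
 */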
void mana_ib_uncfg_vport(struct mana_ib_dev *dev, struct mana_ib_pd *pd,
			 u32 port)
{
	struct mana_port_context *mpc;
	struct net_device *ndev;

	ndev = mana_ib_get_netdev(&dev->ib_dev, port);
	mpc = netdev_priv(ndev);

	mutex_lock(&pd->vport_mutex);

	pd->vport_use_count--;
	WARN_ON(pd->vport_use_count < 0);

	if (!pd->vport_use_count)
		mana_uncfg_vport(mpc);

	mutex_unlock(&pd->vport_mutex);
}

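/**
 * mana_ib_cfg_vport() - Bind a PD and doorbell to the port's vPort.
 * @dev: mana RDMA device
 * @port: 1-based port number used to look up the netdev
 * @pd: protection domain to associate with the vPort
 * @doorbell_id: doorbell page to program into the vPort
 *
 * Only the first user actually configures the vPort; later callers just
 * bump vport_use_count. On success, the vPort's tx_shortform_allowed and
 * tx_vp_offset are cached in the PD.
 *
 * Return: 0 on success, negative errno otherwise.
 */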
int mana_ib_cfg_vport(struct mana_ib_dev *dev, u32 port, struct mana_ib_pd *pd,
		      u32 doorbell_id)
{
	struct mana_port_context *mpc;
	struct net_device *ndev;
	int err;

	ndev = mana_ib_get_netdev(&dev->ib_dev, port);
	mpc = netdev_priv(ndev);

	mutex_lock(&pd->vport_mutex);

	pd->vport_use_count++;
	if (pd->vport_use_count > 1) {
		ibdev_dbg(&dev->ib_dev,
			  "Skip: this PD is already configured for a vport\n");
		mutex_unlock(&pd->vport_mutex);
		return 0;
	}

	err = mana_cfg_vport(mpc, pd->pdn, doorbell_id);
	if (err) {
		pd->vport_use_count--;
		mutex_unlock(&pd->vport_mutex);

		ibdev_dbg(&dev->ib_dev, "Failed to configure vPort %d\n", err);
		return err;
	}

	mutex_unlock(&pd->vport_mutex);

	pd->tx_shortform_allowed = mpc->tx_shortform_allowed;
	pd->tx_vp_offset = mpc->tx_vp_offset;

	ibdev_dbg(&dev->ib_dev, "vport handle %llx pdid %x doorbell_id %x\n",
		  mpc->port_handle, pd->pdn, doorbell_id);

	return 0;
}

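/*
 * Allocate a protection domain with a GDMA_CREATE_PD request over the HW
 * channel. Kernel consumers (no udata) request GDMA_PD_FLAG_ALLOW_GPA_MR,
 * which presumably permits MRs built over guest physical addresses.
 */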
int mana_ib_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
	struct mana_ib_pd *pd = container_of(ibpd, struct mana_ib_pd, ibpd);
	struct ib_device *ibdev = ibpd->device;
	struct gdma_create_pd_resp resp = {};
	struct gdma_create_pd_req req = {};
	enum gdma_pd_flags flags = 0;
	struct mana_ib_dev *dev;
	struct gdma_context *gc;
	int err;

	dev = container_of(ibdev, struct mana_ib_dev, ib_dev);
	gc = mdev_to_gc(dev);

	mana_gd_init_req_hdr(&req.hdr, GDMA_CREATE_PD, sizeof(req),
			     sizeof(resp));

	if (!udata)
		flags |= GDMA_PD_FLAG_ALLOW_GPA_MR;

	req.flags = flags;
	err = mana_gd_send_request(gc, sizeof(req), &req,
				   sizeof(resp), &resp);

	if (err || resp.hdr.status) {
		ibdev_dbg(&dev->ib_dev,
			  "Failed to get pd_id err %d status %u\n", err,
			  resp.hdr.status);
		if (!err)
			err = -EPROTO;

		return err;
	}

	pd->pd_handle = resp.pd_handle;
	pd->pdn = resp.pd_id;
	ibdev_dbg(&dev->ib_dev, "pd_handle 0x%llx pd_id %d\n",
		  pd->pd_handle, pd->pdn);

	mutex_init(&pd->vport_mutex);
	pd->vport_use_count = 0;
	return 0;
}

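/*
 * Free a protection domain on the adapter. A nonzero response status with
 * a successful transport send is mapped to -EPROTO, mirroring the other
 * HW channel wrappers in this file.
 */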
int mana_ib_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
	struct mana_ib_pd *pd = container_of(ibpd, struct mana_ib_pd, ibpd);
	struct ib_device *ibdev = ibpd->device;
	/* "destory" matches the struct's spelling in the GDMA header */
	struct gdma_destory_pd_resp resp = {};
	struct gdma_destroy_pd_req req = {};
	struct mana_ib_dev *dev;
	struct gdma_context *gc;
	int err;

	dev = container_of(ibdev, struct mana_ib_dev, ib_dev);
	gc = mdev_to_gc(dev);

	mana_gd_init_req_hdr(&req.hdr, GDMA_DESTROY_PD, sizeof(req),
			     sizeof(resp));

	req.pd_handle = pd->pd_handle;
	err = mana_gd_send_request(gc, sizeof(req), &req,
				   sizeof(resp), &resp);

	if (err || resp.hdr.status) {
		ibdev_dbg(&dev->ib_dev,
			  "Failed to destroy pd_handle 0x%llx err %d status %u\n",
			  pd->pd_handle, err, resp.hdr.status);
		if (!err)
			err = -EPROTO;
	}

	return err;
}

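/*
 * Doorbell pages are managed through the GDMA resource-range allocator.
 * The two helpers below free and allocate a single
 * GDMA_RESOURCE_DOORBELL_PAGE entry; the alignment request scales the
 * index so a doorbell page stays PAGE_SIZE-aligned even when the CPU page
 * size differs from MANA_PAGE_SIZE.
 */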
static int mana_gd_destroy_doorbell_page(struct gdma_context *gc,
					 int doorbell_page)
{
	struct gdma_destroy_resource_range_req req = {};
	struct gdma_resp_hdr resp = {};
	int err;

	mana_gd_init_req_hdr(&req.hdr, GDMA_DESTROY_RESOURCE_RANGE,
			     sizeof(req), sizeof(resp));

	req.resource_type = GDMA_RESOURCE_DOORBELL_PAGE;
	req.num_resources = 1;
	req.allocated_resources = doorbell_page;

	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
	if (err || resp.status) {
		dev_err(gc->dev,
			"Failed to destroy doorbell page: ret %d, 0x%x\n",
			err, resp.status);
		return err ?: -EPROTO;
	}

	return 0;
}

static int mana_gd_allocate_doorbell_page(struct gdma_context *gc,
					  int *doorbell_page)
{
	struct gdma_allocate_resource_range_req req = {};
	struct gdma_allocate_resource_range_resp resp = {};
	int err;

	mana_gd_init_req_hdr(&req.hdr, GDMA_ALLOCATE_RESOURCE_RANGE,
			     sizeof(req), sizeof(resp));

	req.resource_type = GDMA_RESOURCE_DOORBELL_PAGE;
	req.num_resources = 1;
	req.alignment = PAGE_SIZE / MANA_PAGE_SIZE;

	/* Have GDMA start searching from 0 */
	req.allocated_resources = 0;

	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
	if (err || resp.hdr.status) {
		dev_err(gc->dev,
			"Failed to allocate doorbell page: ret %d, 0x%x\n",
			err, resp.hdr.status);
		return err ?: -EPROTO;
	}

	*doorbell_page = resp.allocated_resources;

	return 0;
}

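/*
 * Each user context owns exactly one doorbell page. Its index is kept in
 * ucontext->doorbell and is later mapped into user space by
 * mana_ib_mmap().
 */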
int mana_ib_alloc_ucontext(struct ib_ucontext *ibcontext,
			   struct ib_udata *udata)
{
	struct mana_ib_ucontext *ucontext =
		container_of(ibcontext, struct mana_ib_ucontext, ibucontext);
	struct ib_device *ibdev = ibcontext->device;
	struct mana_ib_dev *mdev;
	struct gdma_context *gc;
	int doorbell_page;
	int ret;

	mdev = container_of(ibdev, struct mana_ib_dev, ib_dev);
	gc = mdev_to_gc(mdev);

	/* Allocate a doorbell page index */
	ret = mana_gd_allocate_doorbell_page(gc, &doorbell_page);
	if (ret) {
		ibdev_dbg(ibdev, "Failed to allocate doorbell page %d\n", ret);
		return ret;
	}

	ibdev_dbg(ibdev, "Doorbell page allocated %d\n", doorbell_page);

	ucontext->doorbell = doorbell_page;

	return 0;
}

void mana_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
{
	struct mana_ib_ucontext *mana_ucontext =
		container_of(ibcontext, struct mana_ib_ucontext, ibucontext);
	struct ib_device *ibdev = ibcontext->device;
	struct mana_ib_dev *mdev;
	struct gdma_context *gc;
	int ret;

	mdev = container_of(ibdev, struct mana_ib_dev, ib_dev);
	gc = mdev_to_gc(mdev);

	ret = mana_gd_destroy_doorbell_page(gc, mana_ucontext->doorbell);
	if (ret)
		ibdev_dbg(ibdev, "Failed to destroy doorbell page %d\n", ret);
}

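/*
 * Create a kernel-owned GDMA queue and take over its DMA region handle
 * from the Ethernet driver, so that mana_ib_destroy_queue() becomes
 * responsible for tearing the region down.
 */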
int mana_ib_create_kernel_queue(struct mana_ib_dev *mdev, u32 size, enum gdma_queue_type type,
				struct mana_ib_queue *queue)
{
	struct gdma_queue_spec spec = {};
	int err;

	queue->id = INVALID_QUEUE_ID;
	queue->gdma_region = GDMA_INVALID_DMA_REGION;
	spec.type = type;
	spec.monitor_avl_buf = false;
	spec.queue_size = size;
	err = mana_gd_create_mana_wq_cq(mdev->gdma_dev, &spec, &queue->kmem);
	if (err)
		return err;
	/* Transfer ownership of the DMA region from mana to mana_ib */
	queue->gdma_region = queue->kmem->mem_info.dma_region_handle;
	queue->kmem->mem_info.dma_region_handle = GDMA_INVALID_DMA_REGION;
	return 0;
}

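/**
 * mana_ib_create_queue() - Pin a user buffer and register it as a queue.
 * @mdev: mana RDMA device
 * @addr: userspace address of the queue buffer
 * @size: size of the queue buffer in bytes
 * @queue: queue bookkeeping structure to fill in
 *
 * A rough caller-side sketch (illustrative only; the ucmd field names are
 * placeholders, not taken from this file):
 *
 *	struct mana_ib_queue q;
 *	int err;
 *
 *	err = mana_ib_create_queue(mdev, ucmd.buf_addr, ucmd.buf_size, &q);
 *	if (err)
 *		return err;
 *	...
 *	mana_ib_destroy_queue(mdev, &q);
 *
 * Return: 0 on success, negative errno otherwise.
 */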
int mana_ib_create_queue(struct mana_ib_dev *mdev, u64 addr, u32 size,
			 struct mana_ib_queue *queue)
{
	struct ib_umem *umem;
	int err;

	queue->umem = NULL;
	queue->id = INVALID_QUEUE_ID;
	queue->gdma_region = GDMA_INVALID_DMA_REGION;

	umem = ib_umem_get(&mdev->ib_dev, addr, size, IB_ACCESS_LOCAL_WRITE);
	if (IS_ERR(umem)) {
		err = PTR_ERR(umem);
		ibdev_dbg(&mdev->ib_dev, "Failed to get umem, %d\n", err);
		return err;
	}

	err = mana_ib_create_zero_offset_dma_region(mdev, umem, &queue->gdma_region);
	if (err) {
		ibdev_dbg(&mdev->ib_dev, "Failed to create dma region, %d\n", err);
		goto free_umem;
	}
	queue->umem = umem;

	ibdev_dbg(&mdev->ib_dev, "created dma region 0x%llx\n", queue->gdma_region);

	return 0;
free_umem:
	ib_umem_release(umem);
	return err;
}

void mana_ib_destroy_queue(struct mana_ib_dev *mdev, struct mana_ib_queue *queue)
{
	/* Ignore return code as there is not much we can do about it.
	 * The error message is printed inside.
	 */
	mana_ib_gd_destroy_dma_region(mdev, queue->gdma_region);
	ib_umem_release(queue->umem);
	if (queue->kmem)
		mana_gd_destroy_queue(mdev_to_gc(mdev), queue->kmem);
}

static int
mana_ib_gd_first_dma_region(struct mana_ib_dev *dev,
			    struct gdma_context *gc,
			    struct gdma_create_dma_region_req *create_req,
			    size_t num_pages, mana_handle_t *gdma_region,
			    u32 expected_status)
{
	struct gdma_create_dma_region_resp create_resp = {};
	unsigned int create_req_msg_size;
	int err;

	create_req_msg_size =
		struct_size(create_req, page_addr_list, num_pages);
	create_req->page_addr_list_len = num_pages;

	err = mana_gd_send_request(gc, create_req_msg_size, create_req,
				   sizeof(create_resp), &create_resp);
	if (err || create_resp.hdr.status != expected_status) {
		ibdev_dbg(&dev->ib_dev,
			  "Failed to create DMA region: %d, 0x%x\n",
			  err, create_resp.hdr.status);
		if (!err)
			err = -EPROTO;

		return err;
	}

	*gdma_region = create_resp.dma_region_handle;
	ibdev_dbg(&dev->ib_dev, "Created DMA region handle 0x%llx\n",
		  *gdma_region);

	return 0;
}

static int
mana_ib_gd_add_dma_region(struct mana_ib_dev *dev, struct gdma_context *gc,
			  struct gdma_dma_region_add_pages_req *add_req,
			  unsigned int num_pages, u32 expected_status)
{
	unsigned int add_req_msg_size =
		struct_size(add_req, page_addr_list, num_pages);
	struct gdma_general_resp add_resp = {};
	int err;

	mana_gd_init_req_hdr(&add_req->hdr, GDMA_DMA_REGION_ADD_PAGES,
			     add_req_msg_size, sizeof(add_resp));
	add_req->page_addr_list_len = num_pages;

	err = mana_gd_send_request(gc, add_req_msg_size, add_req,
				   sizeof(add_resp), &add_resp);
	if (err || add_resp.hdr.status != expected_status) {
		ibdev_dbg(&dev->ib_dev,
			  "Failed to add pages to DMA region: %d, 0x%x\n",
			  err, add_resp.hdr.status);

		if (!err)
			err = -EPROTO;

		return err;
	}

	return 0;
}

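/*
 * Creating a DMA region can take several HW channel messages when the page
 * list does not fit into one request: the initial GDMA_CREATE_DMA_REGION
 * message carries as many page addresses as fit in hwc->max_req_msg_size,
 * and the rest are streamed in GDMA_DMA_REGION_ADD_PAGES messages against
 * the returned region handle. Every message except the last is expected to
 * complete with GDMA_STATUS_MORE_ENTRIES.
 */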
static int mana_ib_gd_create_dma_region(struct mana_ib_dev *dev, struct ib_umem *umem,
					mana_handle_t *gdma_region, unsigned long page_sz)
{
	struct gdma_dma_region_add_pages_req *add_req = NULL;
	size_t num_pages_processed = 0, num_pages_to_handle;
	struct gdma_create_dma_region_req *create_req;
	unsigned int create_req_msg_size;
	struct hw_channel_context *hwc;
	struct ib_block_iter biter;
	size_t max_pgs_add_cmd = 0;
	size_t max_pgs_create_cmd;
	struct gdma_context *gc;
	size_t num_pages_total;
	unsigned int tail = 0;
	u64 *page_addr_list;
	void *request_buf;
	int err = 0;

	gc = mdev_to_gc(dev);
	hwc = gc->hwc.driver_data;

	num_pages_total = ib_umem_num_dma_blocks(umem, page_sz);

	max_pgs_create_cmd =
		(hwc->max_req_msg_size - sizeof(*create_req)) / sizeof(u64);
	num_pages_to_handle =
		min_t(size_t, num_pages_total, max_pgs_create_cmd);
	create_req_msg_size =
		struct_size(create_req, page_addr_list, num_pages_to_handle);

	request_buf = kzalloc(hwc->max_req_msg_size, GFP_KERNEL);
	if (!request_buf)
		return -ENOMEM;

	create_req = request_buf;
	mana_gd_init_req_hdr(&create_req->hdr, GDMA_CREATE_DMA_REGION,
			     create_req_msg_size,
			     sizeof(struct gdma_create_dma_region_resp));

	create_req->length = umem->length;
	create_req->offset_in_page = ib_umem_dma_offset(umem, page_sz);
	create_req->gdma_page_type = order_base_2(page_sz) - MANA_PAGE_SHIFT;
	create_req->page_count = num_pages_total;

	ibdev_dbg(&dev->ib_dev, "size_dma_region %lu num_pages_total %lu\n",
		  umem->length, num_pages_total);

	ibdev_dbg(&dev->ib_dev, "page_sz %lu offset_in_page %u\n",
		  page_sz, create_req->offset_in_page);

	ibdev_dbg(&dev->ib_dev, "num_pages_to_handle %lu, gdma_page_type %u\n",
		  num_pages_to_handle, create_req->gdma_page_type);

	page_addr_list = create_req->page_addr_list;
	rdma_umem_for_each_dma_block(umem, &biter, page_sz) {
		u32 expected_status = 0;

		page_addr_list[tail++] = rdma_block_iter_dma_address(&biter);
		if (tail < num_pages_to_handle)
			continue;

		if (num_pages_processed + num_pages_to_handle <
		    num_pages_total)
			expected_status = GDMA_STATUS_MORE_ENTRIES;

		if (!num_pages_processed) {
			/* First create message */
			err = mana_ib_gd_first_dma_region(dev, gc, create_req,
							  tail, gdma_region,
							  expected_status);
			if (err)
				goto out;

			max_pgs_add_cmd = (hwc->max_req_msg_size -
				sizeof(*add_req)) / sizeof(u64);

			add_req = request_buf;
			add_req->dma_region_handle = *gdma_region;
			add_req->reserved3 = 0;
			page_addr_list = add_req->page_addr_list;
		} else {
			/* Subsequent add-pages messages */
			err = mana_ib_gd_add_dma_region(dev, gc, add_req, tail,
							expected_status);
			if (err)
				break;
		}

		num_pages_processed += tail;
		tail = 0;

		/* The remaining pages to create */
		num_pages_to_handle =
			min_t(size_t,
			      num_pages_total - num_pages_processed,
			      max_pgs_add_cmd);
	}

	if (err)
		mana_ib_gd_destroy_dma_region(dev, *gdma_region);

out:
	kfree(request_buf);
	return err;
}

int mana_ib_create_dma_region(struct mana_ib_dev *dev, struct ib_umem *umem,
			      mana_handle_t *gdma_region, u64 virt)
{
	unsigned long page_sz;

	page_sz = ib_umem_find_best_pgsz(umem, dev->adapter_caps.page_size_cap, virt);
	if (!page_sz) {
		ibdev_dbg(&dev->ib_dev, "Failed to find page size.\n");
		return -EINVAL;
	}

	return mana_ib_gd_create_dma_region(dev, umem, gdma_region, page_sz);
}

int mana_ib_create_zero_offset_dma_region(struct mana_ib_dev *dev, struct ib_umem *umem,
					  mana_handle_t *gdma_region)
{
	unsigned long page_sz;

	/* Hardware requires dma region to align to chosen page size */
	page_sz = ib_umem_find_best_pgoff(umem, dev->adapter_caps.page_size_cap, 0);
	if (!page_sz) {
		ibdev_dbg(&dev->ib_dev, "Failed to find page size.\n");
		return -EINVAL;
	}

	return mana_ib_gd_create_dma_region(dev, umem, gdma_region, page_sz);
}

int mana_ib_gd_destroy_dma_region(struct mana_ib_dev *dev, u64 gdma_region)
{
	struct gdma_context *gc = mdev_to_gc(dev);

	ibdev_dbg(&dev->ib_dev, "destroy dma region 0x%llx\n", gdma_region);

	return mana_gd_destroy_dma_region(gc, gdma_region);
}

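/*
 * Map the context's doorbell page into user space. Only vm_pgoff 0 is
 * accepted; the pfn is computed from the per-context doorbell index and
 * the mapping is write-combined.
 */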
int mana_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vma)
{
	struct mana_ib_ucontext *mana_ucontext =
		container_of(ibcontext, struct mana_ib_ucontext, ibucontext);
	struct ib_device *ibdev = ibcontext->device;
	struct mana_ib_dev *mdev;
	struct gdma_context *gc;
	phys_addr_t pfn;
	pgprot_t prot;
	int ret;

	mdev = container_of(ibdev, struct mana_ib_dev, ib_dev);
	gc = mdev_to_gc(mdev);

	if (vma->vm_pgoff != 0) {
		ibdev_dbg(ibdev, "Unexpected vm_pgoff %lu\n", vma->vm_pgoff);
		return -EINVAL;
	}

	/* Map to the page indexed by ucontext->doorbell */
	pfn = (gc->phys_db_page_base +
	       gc->db_page_size * mana_ucontext->doorbell) >>
	      PAGE_SHIFT;
	prot = pgprot_writecombine(vma->vm_page_prot);

	ret = rdma_user_mmap_io(ibcontext, vma, pfn, PAGE_SIZE, prot,
				NULL);
	if (ret)
		ibdev_dbg(ibdev, "rdma_user_mmap_io failed, ret %d\n", ret);
	else
		ibdev_dbg(ibdev, "mapped I/O pfn 0x%llx page_size %lu, ret %d\n",
			  pfn, PAGE_SIZE, ret);

	return ret;
}

int mana_ib_get_port_immutable(struct ib_device *ibdev, u32 port_num,
			       struct ib_port_immutable *immutable)
{
	struct mana_ib_dev *dev = container_of(ibdev, struct mana_ib_dev, ib_dev);
	struct ib_port_attr attr;
	int err;

	err = ib_query_port(ibdev, port_num, &attr);
	if (err)
		return err;

	immutable->pkey_tbl_len = attr.pkey_tbl_len;
	immutable->gid_tbl_len = attr.gid_tbl_len;

	if (mana_ib_is_rnic(dev)) {
		if (port_num == 1) {
			immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
			immutable->max_mad_size = IB_MGMT_MAD_SIZE;
		} else {
			immutable->core_cap_flags = RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP
						    | RDMA_CORE_CAP_ETH_AH;
			immutable->max_mad_size = 0;
		}
	} else {
		immutable->core_cap_flags = RDMA_CORE_PORT_RAW_PACKET;
	}

	return 0;
}

int mana_ib_query_device(struct ib_device *ibdev, struct ib_device_attr *props,
			 struct ib_udata *uhw)
{
	struct mana_ib_dev *dev = container_of(ibdev, struct mana_ib_dev, ib_dev);
	struct pci_dev *pdev = to_pci_dev(mdev_to_gc(dev)->dev);

	memset(props, 0, sizeof(*props));
	props->vendor_id = pdev->vendor;
	props->vendor_part_id = dev->gdma_dev->dev_id.type;
	props->max_mr_size = MANA_IB_MAX_MR_SIZE;
	props->page_size_cap = dev->adapter_caps.page_size_cap;
	props->max_qp = dev->adapter_caps.max_qp_count;
	props->max_qp_wr = dev->adapter_caps.max_qp_wr;
	props->device_cap_flags = IB_DEVICE_RC_RNR_NAK_GEN;
	props->max_send_sge = dev->adapter_caps.max_send_sge_count;
	props->max_recv_sge = dev->adapter_caps.max_recv_sge_count;
	props->max_sge_rd = dev->adapter_caps.max_recv_sge_count;
	props->max_cq = dev->adapter_caps.max_cq_count;
	props->max_cqe = dev->adapter_caps.max_qp_wr;
	props->max_mr = dev->adapter_caps.max_mr_count;
	props->max_pd = dev->adapter_caps.max_pd_count;
	props->max_qp_rd_atom = dev->adapter_caps.max_inbound_read_limit;
	props->max_res_rd_atom = props->max_qp_rd_atom * props->max_qp;
	props->max_qp_init_rd_atom = dev->adapter_caps.max_outbound_read_limit;
	props->atomic_cap = IB_ATOMIC_NONE;
	props->masked_atomic_cap = IB_ATOMIC_NONE;
	props->max_ah = INT_MAX;
	props->max_pkeys = 1;
	props->local_ca_ack_delay = MANA_CA_ACK_DELAY;
	if (!mana_ib_is_rnic(dev))
		props->raw_packet_caps = IB_RAW_PACKET_CAP_IP_CSUM;

	return 0;
}

int mana_ib_query_port(struct ib_device *ibdev, u32 port,
		       struct ib_port_attr *props)
{
	struct mana_ib_dev *dev = container_of(ibdev, struct mana_ib_dev, ib_dev);
	struct net_device *ndev = mana_ib_get_netdev(ibdev, port);

	if (!ndev)
		return -EINVAL;

	memset(props, 0, sizeof(*props));
	props->max_mtu = IB_MTU_4096;
	props->active_mtu = ib_mtu_int_to_enum(ndev->mtu);

	if (netif_carrier_ok(ndev) && netif_running(ndev)) {
		props->state = IB_PORT_ACTIVE;
		props->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
	} else {
		props->state = IB_PORT_DOWN;
		props->phys_state = IB_PORT_PHYS_STATE_DISABLED;
	}

	props->active_width = IB_WIDTH_4X;
	props->active_speed = IB_SPEED_EDR;
	props->pkey_tbl_len = 1;
	if (mana_ib_is_rnic(dev)) {
		props->gid_tbl_len = 16;
		props->ip_gids = true;
		if (port == 1)
			props->port_cap_flags = IB_PORT_CM_SUP;
	}

	return 0;
}

enum rdma_link_layer mana_ib_get_link_layer(struct ib_device *device, u32 port_num)
{
	return IB_LINK_LAYER_ETHERNET;
}

int mana_ib_query_pkey(struct ib_device *ibdev, u32 port, u16 index, u16 *pkey)
{
	if (index != 0)
		return -EINVAL;
	*pkey = IB_DEFAULT_PKEY_FULL;
	return 0;
}

int mana_ib_query_gid(struct ib_device *ibdev, u32 port, int index,
		      union ib_gid *gid)
{
	/* This version doesn't return GID properties */
	return 0;
}

void mana_ib_disassociate_ucontext(struct ib_ucontext *ibcontext)
{
}

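/*
 * Query the RNIC adapter limits over the HW channel. max_qp_wr is derived
 * from the reported SQ/RQ sizes divided by the maximum WQE sizes, and the
 * MR page-size capability is extended to 4MB/1GB/2GB when the PF
 * advertises GDMA_DRV_CAP_FLAG_1_GDMA_PAGES_4MB_1GB_2GB.
 */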
int mana_ib_gd_query_adapter_caps(struct mana_ib_dev *dev)
{
	struct mana_ib_adapter_caps *caps = &dev->adapter_caps;
	struct mana_ib_query_adapter_caps_resp resp = {};
	struct mana_ib_query_adapter_caps_req req = {};
	int err;

	mana_gd_init_req_hdr(&req.hdr, MANA_IB_GET_ADAPTER_CAP, sizeof(req),
			     sizeof(resp));
	req.hdr.resp.msg_version = GDMA_MESSAGE_V4;
	req.hdr.dev_id = dev->gdma_dev->dev_id;

	err = mana_gd_send_request(mdev_to_gc(dev), sizeof(req),
				   &req, sizeof(resp), &resp);

	if (err) {
		ibdev_err(&dev->ib_dev,
			  "Failed to query adapter caps err %d\n", err);
		return err;
	}

	caps->max_sq_id = resp.max_sq_id;
	caps->max_rq_id = resp.max_rq_id;
	caps->max_cq_id = resp.max_cq_id;
	caps->max_qp_count = resp.max_qp_count;
	caps->max_cq_count = resp.max_cq_count;
	caps->max_mr_count = resp.max_mr_count;
	caps->max_pd_count = resp.max_pd_count;
	caps->max_inbound_read_limit = resp.max_inbound_read_limit;
	caps->max_outbound_read_limit = resp.max_outbound_read_limit;
	caps->mw_count = resp.mw_count;
	caps->max_srq_count = resp.max_srq_count;
	caps->max_qp_wr = min_t(u32,
				resp.max_requester_sq_size / GDMA_MAX_SQE_SIZE,
				resp.max_requester_rq_size / GDMA_MAX_RQE_SIZE);
	caps->max_inline_data_size = resp.max_inline_data_size;
	caps->max_send_sge_count = resp.max_send_sge_count;
	caps->max_recv_sge_count = resp.max_recv_sge_count;
	caps->feature_flags = resp.feature_flags;

	caps->page_size_cap = PAGE_SZ_BM;
	if (mdev_to_gc(dev)->pf_cap_flags1 & GDMA_DRV_CAP_FLAG_1_GDMA_PAGES_4MB_1GB_2GB)
		caps->page_size_cap |= (SZ_4M | SZ_1G | SZ_2G);

	return 0;
}

int mana_eth_query_adapter_caps(struct mana_ib_dev *dev)
{
	struct mana_ib_adapter_caps *caps = &dev->adapter_caps;
	struct gdma_query_max_resources_resp resp = {};
	struct gdma_general_req req = {};
	int err;

	mana_gd_init_req_hdr(&req.hdr, GDMA_QUERY_MAX_RESOURCES,
			     sizeof(req), sizeof(resp));

	err = mana_gd_send_request(mdev_to_gc(dev), sizeof(req), &req, sizeof(resp), &resp);
	if (err) {
		ibdev_err(&dev->ib_dev,
			  "Failed to query adapter caps err %d\n", err);
		return err;
	}

	caps->max_qp_count = min_t(u32, resp.max_sq, resp.max_rq);
	caps->max_cq_count = resp.max_cq;
	caps->max_mr_count = resp.max_mst;
	caps->max_pd_count = 0x6000;
	caps->max_qp_wr = min_t(u32,
				0x100000 / GDMA_MAX_SQE_SIZE,
				0x100000 / GDMA_MAX_RQE_SIZE);
	caps->max_send_sge_count = 30;
	caps->max_recv_sge_count = 15;
	caps->page_size_cap = PAGE_SZ_BM;

	return 0;
}

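/*
 * Callback for the fatal-error EQ: on GDMA_EQE_RNIC_QP_FATAL, look up the
 * QP by number and deliver IB_EVENT_QP_FATAL to the consumer's event
 * handler, if one is registered. All other event types are ignored.
 */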
static void
mana_ib_event_handler(void *ctx, struct gdma_queue *q, struct gdma_event *event)
{
	struct mana_ib_dev *mdev = (struct mana_ib_dev *)ctx;
	struct mana_ib_qp *qp;
	struct ib_event ev;
	u32 qpn;

	switch (event->type) {
	case GDMA_EQE_RNIC_QP_FATAL:
		qpn = event->details[0];
		qp = mana_get_qp_ref(mdev, qpn, false);
		if (!qp)
			break;
		if (qp->ibqp.event_handler) {
			ev.device = qp->ibqp.device;
			ev.element.qp = &qp->ibqp;
			ev.event = IB_EVENT_QP_FATAL;
			qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
		}
		mana_put_qp_ref(qp);
		break;
	default:
		break;
	}
}

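/*
 * Create one EQ dedicated to fatal-error notifications (with
 * mana_ib_event_handler attached) plus one callback-less EQ per completion
 * vector for CQs. The per-vector EQs are spread over the usable MSI-X
 * vectors starting at index 1, wrapping modulo gc->num_msix_usable.
 */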
int mana_ib_create_eqs(struct mana_ib_dev *mdev)
{
	struct gdma_context *gc = mdev_to_gc(mdev);
	struct gdma_queue_spec spec = {};
	int err, i;

	spec.type = GDMA_EQ;
	spec.monitor_avl_buf = false;
	spec.queue_size = EQ_SIZE;
	spec.eq.callback = mana_ib_event_handler;
	spec.eq.context = mdev;
	spec.eq.log2_throttle_limit = LOG2_EQ_THROTTLE;
	spec.eq.msix_index = 0;

	err = mana_gd_create_mana_eq(mdev->gdma_dev, &spec, &mdev->fatal_err_eq);
	if (err)
		return err;

	mdev->eqs = kcalloc(mdev->ib_dev.num_comp_vectors, sizeof(struct gdma_queue *),
			    GFP_KERNEL);
	if (!mdev->eqs) {
		err = -ENOMEM;
		goto destroy_fatal_eq;
	}
	spec.eq.callback = NULL;
	for (i = 0; i < mdev->ib_dev.num_comp_vectors; i++) {
		spec.eq.msix_index = (i + 1) % gc->num_msix_usable;
		err = mana_gd_create_mana_eq(mdev->gdma_dev, &spec, &mdev->eqs[i]);
		if (err)
			goto destroy_eqs;
	}

	return 0;

destroy_eqs:
	while (i-- > 0)
		mana_gd_destroy_queue(gc, mdev->eqs[i]);
	kfree(mdev->eqs);
destroy_fatal_eq:
	mana_gd_destroy_queue(gc, mdev->fatal_err_eq);
	return err;
}

void mana_ib_destroy_eqs(struct mana_ib_dev *mdev)
{
	struct gdma_context *gc = mdev_to_gc(mdev);
	int i;

	mana_gd_destroy_queue(gc, mdev->fatal_err_eq);

	for (i = 0; i < mdev->ib_dev.num_comp_vectors; i++)
		mana_gd_destroy_queue(gc, mdev->eqs[i]);

	kfree(mdev->eqs);
}

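/*
 * Create the RNIC adapter object, pointing its notification EQ at the
 * fatal-error EQ created above. Error-CQE generation is requested when the
 * adapter advertises MANA_IB_FEATURE_CLIENT_ERROR_CQE_SUPPORT.
 */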
int mana_ib_gd_create_rnic_adapter(struct mana_ib_dev *mdev)
{
	struct mana_rnic_create_adapter_resp resp = {};
	struct mana_rnic_create_adapter_req req = {};
	struct gdma_context *gc = mdev_to_gc(mdev);
	int err;

	mana_gd_init_req_hdr(&req.hdr, MANA_IB_CREATE_ADAPTER, sizeof(req), sizeof(resp));
	req.hdr.req.msg_version = GDMA_MESSAGE_V2;
	req.hdr.dev_id = mdev->gdma_dev->dev_id;
	req.notify_eq_id = mdev->fatal_err_eq->id;

	if (mdev->adapter_caps.feature_flags & MANA_IB_FEATURE_CLIENT_ERROR_CQE_SUPPORT)
		req.feature_flags |= MANA_IB_FEATURE_CLIENT_ERROR_CQE_REQUEST;

	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
	if (err) {
		ibdev_err(&mdev->ib_dev, "Failed to create RNIC adapter err %d\n", err);
		return err;
	}
	mdev->adapter_handle = resp.adapter;

	return 0;
}

int mana_ib_gd_destroy_rnic_adapter(struct mana_ib_dev *mdev)
{
	struct mana_rnic_destroy_adapter_resp resp = {};
	struct mana_rnic_destroy_adapter_req req = {};
	struct gdma_context *gc;
	int err;

	gc = mdev_to_gc(mdev);
	mana_gd_init_req_hdr(&req.hdr, MANA_IB_DESTROY_ADAPTER, sizeof(req), sizeof(resp));
	req.hdr.dev_id = mdev->gdma_dev->dev_id;
	req.adapter = mdev->adapter_handle;

	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
	if (err) {
		ibdev_err(&mdev->ib_dev, "Failed to destroy RNIC adapter err %d\n", err);
		return err;
	}

	return 0;
}

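/*
 * GID table changes are pushed to the adapter as IP-address configuration
 * requests; only RoCE v2 (IPv4/IPv6) GIDs are supported. The GID and MAC
 * bytes are copied in reverse order, which appears to be the byte order
 * the device expects.
 */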
int mana_ib_gd_add_gid(const struct ib_gid_attr *attr, void **context)
{
	struct mana_ib_dev *mdev = container_of(attr->device, struct mana_ib_dev, ib_dev);
	enum rdma_network_type ntype = rdma_gid_attr_network_type(attr);
	struct mana_rnic_config_addr_resp resp = {};
	struct gdma_context *gc = mdev_to_gc(mdev);
	struct mana_rnic_config_addr_req req = {};
	int err;

	if (ntype != RDMA_NETWORK_IPV4 && ntype != RDMA_NETWORK_IPV6) {
		ibdev_dbg(&mdev->ib_dev, "Unsupported rdma network type %d\n", ntype);
		return -EINVAL;
	}

	mana_gd_init_req_hdr(&req.hdr, MANA_IB_CONFIG_IP_ADDR, sizeof(req), sizeof(resp));
	req.hdr.dev_id = mdev->gdma_dev->dev_id;
	req.adapter = mdev->adapter_handle;
	req.op = ADDR_OP_ADD;
	req.sgid_type = (ntype == RDMA_NETWORK_IPV6) ? SGID_TYPE_IPV6 : SGID_TYPE_IPV4;
	copy_in_reverse(req.ip_addr, attr->gid.raw, sizeof(union ib_gid));

	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
	if (err) {
		ibdev_err(&mdev->ib_dev, "Failed to config IP addr err %d\n", err);
		return err;
	}

	return 0;
}

int mana_ib_gd_del_gid(const struct ib_gid_attr *attr, void **context)
{
	struct mana_ib_dev *mdev = container_of(attr->device, struct mana_ib_dev, ib_dev);
	enum rdma_network_type ntype = rdma_gid_attr_network_type(attr);
	struct mana_rnic_config_addr_resp resp = {};
	struct gdma_context *gc = mdev_to_gc(mdev);
	struct mana_rnic_config_addr_req req = {};
	int err;

	if (ntype != RDMA_NETWORK_IPV4 && ntype != RDMA_NETWORK_IPV6) {
		ibdev_dbg(&mdev->ib_dev, "Unsupported rdma network type %d\n", ntype);
		return -EINVAL;
	}

	mana_gd_init_req_hdr(&req.hdr, MANA_IB_CONFIG_IP_ADDR, sizeof(req), sizeof(resp));
	req.hdr.dev_id = mdev->gdma_dev->dev_id;
	req.adapter = mdev->adapter_handle;
	req.op = ADDR_OP_REMOVE;
	req.sgid_type = (ntype == RDMA_NETWORK_IPV6) ? SGID_TYPE_IPV6 : SGID_TYPE_IPV4;
	copy_in_reverse(req.ip_addr, attr->gid.raw, sizeof(union ib_gid));

	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
	if (err) {
		ibdev_err(&mdev->ib_dev, "Failed to config IP addr err %d\n", err);
		return err;
	}

	return 0;
}

int mana_ib_gd_config_mac(struct mana_ib_dev *mdev, enum mana_ib_addr_op op, u8 *mac)
{
	struct mana_rnic_config_mac_addr_resp resp = {};
	struct mana_rnic_config_mac_addr_req req = {};
	struct gdma_context *gc = mdev_to_gc(mdev);
	int err;

	mana_gd_init_req_hdr(&req.hdr, MANA_IB_CONFIG_MAC_ADDR, sizeof(req), sizeof(resp));
	req.hdr.dev_id = mdev->gdma_dev->dev_id;
	req.adapter = mdev->adapter_handle;
	req.op = op;
	copy_in_reverse(req.mac_addr, mac, ETH_ALEN);

	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
	if (err) {
		ibdev_err(&mdev->ib_dev, "Failed to config MAC addr err %d\n", err);
		return err;
	}

	return 0;
}

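/*
 * Create a completion queue on the RNIC adapter, attaching it to the EQ of
 * its completion vector. On success ownership of the ring's DMA region
 * passes to the returned CQ handle, so the local handle is invalidated.
 */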
int mana_ib_gd_create_cq(struct mana_ib_dev *mdev, struct mana_ib_cq *cq, u32 doorbell)
{
	struct gdma_context *gc = mdev_to_gc(mdev);
	struct mana_rnic_create_cq_resp resp = {};
	struct mana_rnic_create_cq_req req = {};
	int err;

	if (!mdev->eqs)
		return -EINVAL;

	mana_gd_init_req_hdr(&req.hdr, MANA_IB_CREATE_CQ, sizeof(req), sizeof(resp));
	req.hdr.dev_id = mdev->gdma_dev->dev_id;
	req.adapter = mdev->adapter_handle;
	req.gdma_region = cq->queue.gdma_region;
	req.eq_id = mdev->eqs[cq->comp_vector]->id;
	req.doorbell_page = doorbell;

	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);

	if (err) {
		ibdev_err(&mdev->ib_dev, "Failed to create cq err %d\n", err);
		return err;
	}

	cq->queue.id = resp.cq_id;
	cq->cq_handle = resp.cq_handle;
	/* The GDMA region is now owned by the CQ handle */
	cq->queue.gdma_region = GDMA_INVALID_DMA_REGION;

	return 0;
}

int mana_ib_gd_destroy_cq(struct mana_ib_dev *mdev, struct mana_ib_cq *cq)
{
	struct gdma_context *gc = mdev_to_gc(mdev);
	struct mana_rnic_destroy_cq_resp resp = {};
	struct mana_rnic_destroy_cq_req req = {};
	int err;

	if (cq->cq_handle == INVALID_MANA_HANDLE)
		return 0;

	mana_gd_init_req_hdr(&req.hdr, MANA_IB_DESTROY_CQ, sizeof(req), sizeof(resp));
	req.hdr.dev_id = mdev->gdma_dev->dev_id;
	req.adapter = mdev->adapter_handle;
	req.cq_handle = cq->cq_handle;

	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);

	if (err) {
		ibdev_err(&mdev->ib_dev, "Failed to destroy cq err %d\n", err);
		return err;
	}

	return 0;
}

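/*
 * The QP create calls below hand all of a QP's queues to the adapter as
 * DMA regions in a single request; on success the adapter owns those
 * regions and returns the hardware queue ids. The UD/GSI variants follow
 * the same pattern as the RC ones.
 */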
int mana_ib_gd_create_rc_qp(struct mana_ib_dev *mdev, struct mana_ib_qp *qp,
			    struct ib_qp_init_attr *attr, u32 doorbell, u64 flags)
{
	struct mana_ib_cq *send_cq = container_of(qp->ibqp.send_cq, struct mana_ib_cq, ibcq);
	struct mana_ib_cq *recv_cq = container_of(qp->ibqp.recv_cq, struct mana_ib_cq, ibcq);
	struct mana_ib_pd *pd = container_of(qp->ibqp.pd, struct mana_ib_pd, ibpd);
	struct gdma_context *gc = mdev_to_gc(mdev);
	struct mana_rnic_create_qp_resp resp = {};
	struct mana_rnic_create_qp_req req = {};
	int err, i;

	mana_gd_init_req_hdr(&req.hdr, MANA_IB_CREATE_RC_QP, sizeof(req), sizeof(resp));
	req.hdr.dev_id = mdev->gdma_dev->dev_id;
	req.adapter = mdev->adapter_handle;
	req.pd_handle = pd->pd_handle;
	req.send_cq_handle = send_cq->cq_handle;
	req.recv_cq_handle = recv_cq->cq_handle;
	for (i = 0; i < MANA_RC_QUEUE_TYPE_MAX; i++)
		req.dma_region[i] = qp->rc_qp.queues[i].gdma_region;
	req.doorbell_page = doorbell;
	req.max_send_wr = attr->cap.max_send_wr;
	req.max_recv_wr = attr->cap.max_recv_wr;
	req.max_send_sge = attr->cap.max_send_sge;
	req.max_recv_sge = attr->cap.max_recv_sge;
	req.flags = flags;

	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
	if (err) {
		ibdev_err(&mdev->ib_dev, "Failed to create rc qp err %d\n", err);
		return err;
	}
	qp->qp_handle = resp.rc_qp_handle;
	for (i = 0; i < MANA_RC_QUEUE_TYPE_MAX; i++) {
		qp->rc_qp.queues[i].id = resp.queue_ids[i];
		/* The GDMA regions are now owned by the RNIC QP handle */
		qp->rc_qp.queues[i].gdma_region = GDMA_INVALID_DMA_REGION;
	}
	return 0;
}

int mana_ib_gd_destroy_rc_qp(struct mana_ib_dev *mdev, struct mana_ib_qp *qp)
{
	struct mana_rnic_destroy_rc_qp_resp resp = {0};
	struct mana_rnic_destroy_rc_qp_req req = {0};
	struct gdma_context *gc = mdev_to_gc(mdev);
	int err;

	mana_gd_init_req_hdr(&req.hdr, MANA_IB_DESTROY_RC_QP, sizeof(req), sizeof(resp));
	req.hdr.dev_id = mdev->gdma_dev->dev_id;
	req.adapter = mdev->adapter_handle;
	req.rc_qp_handle = qp->qp_handle;
	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
	if (err) {
		ibdev_err(&mdev->ib_dev, "Failed to destroy rc qp err %d\n", err);
		return err;
	}
	return 0;
}

int mana_ib_gd_create_ud_qp(struct mana_ib_dev *mdev, struct mana_ib_qp *qp,
			    struct ib_qp_init_attr *attr, u32 doorbell, u32 type)
{
	struct mana_ib_cq *send_cq = container_of(qp->ibqp.send_cq, struct mana_ib_cq, ibcq);
	struct mana_ib_cq *recv_cq = container_of(qp->ibqp.recv_cq, struct mana_ib_cq, ibcq);
	struct mana_ib_pd *pd = container_of(qp->ibqp.pd, struct mana_ib_pd, ibpd);
	struct gdma_context *gc = mdev_to_gc(mdev);
	struct mana_rnic_create_udqp_resp resp = {};
	struct mana_rnic_create_udqp_req req = {};
	int err, i;

	mana_gd_init_req_hdr(&req.hdr, MANA_IB_CREATE_UD_QP, sizeof(req), sizeof(resp));
	req.hdr.dev_id = mdev->gdma_dev->dev_id;
	req.adapter = mdev->adapter_handle;
	req.pd_handle = pd->pd_handle;
	req.send_cq_handle = send_cq->cq_handle;
	req.recv_cq_handle = recv_cq->cq_handle;
	for (i = 0; i < MANA_UD_QUEUE_TYPE_MAX; i++)
		req.dma_region[i] = qp->ud_qp.queues[i].gdma_region;
	req.doorbell_page = doorbell;
	req.max_send_wr = attr->cap.max_send_wr;
	req.max_recv_wr = attr->cap.max_recv_wr;
	req.max_send_sge = attr->cap.max_send_sge;
	req.max_recv_sge = attr->cap.max_recv_sge;
	req.qp_type = type;
	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
	if (err) {
		ibdev_err(&mdev->ib_dev, "Failed to create ud qp err %d\n", err);
		return err;
	}
	qp->qp_handle = resp.qp_handle;
	for (i = 0; i < MANA_UD_QUEUE_TYPE_MAX; i++) {
		qp->ud_qp.queues[i].id = resp.queue_ids[i];
		/* The GDMA regions are now owned by the RNIC QP handle */
		qp->ud_qp.queues[i].gdma_region = GDMA_INVALID_DMA_REGION;
	}
	return 0;
}

int mana_ib_gd_destroy_ud_qp(struct mana_ib_dev *mdev, struct mana_ib_qp *qp)
{
	struct mana_rnic_destroy_udqp_resp resp = {0};
	struct mana_rnic_destroy_udqp_req req = {0};
	struct gdma_context *gc = mdev_to_gc(mdev);
	int err;

	mana_gd_init_req_hdr(&req.hdr, MANA_IB_DESTROY_UD_QP, sizeof(req), sizeof(resp));
	req.hdr.dev_id = mdev->gdma_dev->dev_id;
	req.adapter = mdev->adapter_handle;
	req.qp_handle = qp->qp_handle;
	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
	if (err) {
		ibdev_err(&mdev->ib_dev, "Failed to destroy ud qp err %d\n", err);
		return err;
	}
	return 0;
}