// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2022, Microsoft Corporation. All rights reserved.
 */

#include "mana_ib.h"

void mana_ib_uncfg_vport(struct mana_ib_dev *dev, struct mana_ib_pd *pd,
                         u32 port)
{
        struct mana_port_context *mpc;
        struct net_device *ndev;

        ndev = mana_ib_get_netdev(&dev->ib_dev, port);
        mpc = netdev_priv(ndev);

        mutex_lock(&pd->vport_mutex);

        pd->vport_use_count--;
        WARN_ON(pd->vport_use_count < 0);

        if (!pd->vport_use_count)
                mana_uncfg_vport(mpc);

        mutex_unlock(&pd->vport_mutex);
}

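/*
 * Configure the vPort for use by this PD. The vPort is refcounted per PD:
 * only the first caller issues mana_cfg_vport(); later callers just bump
 * the count. A sketch of a typical caller (hypothetical QP-create path,
 * assuming a PD and doorbell id are already in hand) pairs it with
 * mana_ib_uncfg_vport():
 *
 *	err = mana_ib_cfg_vport(mdev, port, pd, doorbell_id);
 *	if (err)
 *		return err;
 *	...
 *	mana_ib_uncfg_vport(mdev, pd, port);
 */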
int mana_ib_cfg_vport(struct mana_ib_dev *dev, u32 port, struct mana_ib_pd *pd,
                      u32 doorbell_id)
{
        struct mana_port_context *mpc;
        struct net_device *ndev;
        int err;

        ndev = mana_ib_get_netdev(&dev->ib_dev, port);
        mpc = netdev_priv(ndev);

        mutex_lock(&pd->vport_mutex);

        pd->vport_use_count++;
        if (pd->vport_use_count > 1) {
                ibdev_dbg(&dev->ib_dev,
                          "Skip: vport is already configured for this PD\n");
                mutex_unlock(&pd->vport_mutex);
                return 0;
        }

        err = mana_cfg_vport(mpc, pd->pdn, doorbell_id);
        if (err) {
                pd->vport_use_count--;
                mutex_unlock(&pd->vport_mutex);

                ibdev_dbg(&dev->ib_dev, "Failed to configure vPort, err %d\n",
                          err);
                return err;
        }

        mutex_unlock(&pd->vport_mutex);

        pd->tx_shortform_allowed = mpc->tx_shortform_allowed;
        pd->tx_vp_offset = mpc->tx_vp_offset;

        ibdev_dbg(&dev->ib_dev, "vport handle %llx pdid %x doorbell_id %x\n",
                  mpc->port_handle, pd->pdn, doorbell_id);

        return 0;
}

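/*
 * Allocate a protection domain by sending GDMA_CREATE_PD over the hardware
 * channel. For kernel-space PDs (no udata), GPA-based memory registration
 * is permitted via GDMA_PD_FLAG_ALLOW_GPA_MR. A minimal kernel consumer
 * would go through the IB core rather than call this directly, e.g.
 * (sketch):
 *
 *	struct ib_pd *pd = ib_alloc_pd(ibdev, 0);
 *	if (IS_ERR(pd))
 *		return PTR_ERR(pd);
 */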
int mana_ib_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
        struct mana_ib_pd *pd = container_of(ibpd, struct mana_ib_pd, ibpd);
        struct ib_device *ibdev = ibpd->device;
        struct gdma_create_pd_resp resp = {};
        struct gdma_create_pd_req req = {};
        enum gdma_pd_flags flags = 0;
        struct mana_ib_dev *dev;
        struct gdma_context *gc;
        int err;

        dev = container_of(ibdev, struct mana_ib_dev, ib_dev);
        gc = mdev_to_gc(dev);

        mana_gd_init_req_hdr(&req.hdr, GDMA_CREATE_PD, sizeof(req),
                             sizeof(resp));

        if (!udata)
                flags |= GDMA_PD_FLAG_ALLOW_GPA_MR;

        req.flags = flags;
        err = mana_gd_send_request(gc, sizeof(req), &req,
                                   sizeof(resp), &resp);

        if (err || resp.hdr.status) {
                ibdev_dbg(&dev->ib_dev,
                          "Failed to get pd_id err %d status %u\n", err,
                          resp.hdr.status);
                if (!err)
                        err = -EPROTO;

                return err;
        }

        pd->pd_handle = resp.pd_handle;
        pd->pdn = resp.pd_id;
        ibdev_dbg(&dev->ib_dev, "pd_handle 0x%llx pd_id %d\n",
                  pd->pd_handle, pd->pdn);

        mutex_init(&pd->vport_mutex);
        pd->vport_use_count = 0;
        return 0;
}

int mana_ib_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
        struct mana_ib_pd *pd = container_of(ibpd, struct mana_ib_pd, ibpd);
        struct ib_device *ibdev = ibpd->device;
        struct gdma_destory_pd_resp resp = {};
        struct gdma_destroy_pd_req req = {};
        struct mana_ib_dev *dev;
        struct gdma_context *gc;
        int err;

        dev = container_of(ibdev, struct mana_ib_dev, ib_dev);
        gc = mdev_to_gc(dev);

        mana_gd_init_req_hdr(&req.hdr, GDMA_DESTROY_PD, sizeof(req),
                             sizeof(resp));

        req.pd_handle = pd->pd_handle;
        err = mana_gd_send_request(gc, sizeof(req), &req,
                                   sizeof(resp), &resp);

        if (err || resp.hdr.status) {
                ibdev_dbg(&dev->ib_dev,
                          "Failed to destroy pd_handle 0x%llx err %d status %u\n",
                          pd->pd_handle, err, resp.hdr.status);
                if (!err)
                        err = -EPROTO;
        }

        return err;
}

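/*
 * Doorbell pages come from the GDMA resource ranges. The two helpers below
 * reserve and release a single GDMA_RESOURCE_DOORBELL_PAGE entry; in this
 * file they are used only by mana_ib_alloc_ucontext() and
 * mana_ib_dealloc_ucontext(), giving each user context its own page.
 */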
static int mana_gd_destroy_doorbell_page(struct gdma_context *gc,
                                         int doorbell_page)
{
        struct gdma_destroy_resource_range_req req = {};
        struct gdma_resp_hdr resp = {};
        int err;

        mana_gd_init_req_hdr(&req.hdr, GDMA_DESTROY_RESOURCE_RANGE,
                             sizeof(req), sizeof(resp));

        req.resource_type = GDMA_RESOURCE_DOORBELL_PAGE;
        req.num_resources = 1;
        req.allocated_resources = doorbell_page;

        err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
        if (err || resp.status) {
                dev_err(gc->dev,
                        "Failed to destroy doorbell page: ret %d, 0x%x\n",
                        err, resp.status);
                return err ?: -EPROTO;
        }

        return 0;
}

static int mana_gd_allocate_doorbell_page(struct gdma_context *gc,
                                          int *doorbell_page)
{
        struct gdma_allocate_resource_range_req req = {};
        struct gdma_allocate_resource_range_resp resp = {};
        int err;

        mana_gd_init_req_hdr(&req.hdr, GDMA_ALLOCATE_RESOURCE_RANGE,
                             sizeof(req), sizeof(resp));

        req.resource_type = GDMA_RESOURCE_DOORBELL_PAGE;
        req.num_resources = 1;
        req.alignment = PAGE_SIZE / MANA_PAGE_SIZE;

        /* Have GDMA start searching from 0 */
        req.allocated_resources = 0;

        err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
        if (err || resp.hdr.status) {
                dev_err(gc->dev,
                        "Failed to allocate doorbell page: ret %d, 0x%x\n",
                        err, resp.hdr.status);
                return err ?: -EPROTO;
        }

        *doorbell_page = resp.allocated_resources;

        return 0;
}

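/*
 * Each user context owns one doorbell page. The index stored in
 * ucontext->doorbell is what mana_ib_mmap() later turns into a physical
 * address for the userspace mapping.
 */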
int mana_ib_alloc_ucontext(struct ib_ucontext *ibcontext,
                           struct ib_udata *udata)
{
        struct mana_ib_ucontext *ucontext =
                container_of(ibcontext, struct mana_ib_ucontext, ibucontext);
        struct ib_device *ibdev = ibcontext->device;
        struct mana_ib_dev *mdev;
        struct gdma_context *gc;
        int doorbell_page;
        int ret;

        mdev = container_of(ibdev, struct mana_ib_dev, ib_dev);
        gc = mdev_to_gc(mdev);

        /* Allocate a doorbell page index */
        ret = mana_gd_allocate_doorbell_page(gc, &doorbell_page);
        if (ret) {
                ibdev_dbg(ibdev, "Failed to allocate doorbell page, ret %d\n",
                          ret);
                return ret;
        }

        ibdev_dbg(ibdev, "Doorbell page allocated %d\n", doorbell_page);

        ucontext->doorbell = doorbell_page;

        return 0;
}

void mana_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
{
        struct mana_ib_ucontext *mana_ucontext =
                container_of(ibcontext, struct mana_ib_ucontext, ibucontext);
        struct ib_device *ibdev = ibcontext->device;
        struct mana_ib_dev *mdev;
        struct gdma_context *gc;
        int ret;

        mdev = container_of(ibdev, struct mana_ib_dev, ib_dev);
        gc = mdev_to_gc(mdev);

        ret = mana_gd_destroy_doorbell_page(gc, mana_ucontext->doorbell);
        if (ret)
                ibdev_dbg(ibdev, "Failed to destroy doorbell page, ret %d\n",
                          ret);
}

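/*
 * Create a kernel-owned queue and take over its DMA region. A sketch of the
 * expected pairing with mana_ib_destroy_queue(), assuming the caller
 * zero-initializes the queue struct:
 *
 *	struct mana_ib_queue queue = {};
 *
 *	err = mana_ib_create_kernel_queue(mdev, size, GDMA_SQ, &queue);
 *	if (err)
 *		return err;
 *	...
 *	mana_ib_destroy_queue(mdev, &queue);
 */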
int mana_ib_create_kernel_queue(struct mana_ib_dev *mdev, u32 size, enum gdma_queue_type type,
                                struct mana_ib_queue *queue)
{
        struct gdma_context *gc = mdev_to_gc(mdev);
        struct gdma_queue_spec spec = {};
        int err;

        queue->id = INVALID_QUEUE_ID;
        queue->gdma_region = GDMA_INVALID_DMA_REGION;
        spec.type = type;
        spec.monitor_avl_buf = false;
        spec.queue_size = size;
        err = mana_gd_create_mana_wq_cq(&gc->mana_ib, &spec, &queue->kmem);
        if (err)
                return err;
        /* Take ownership of the DMA region from mana into mana_ib */
        queue->gdma_region = queue->kmem->mem_info.dma_region_handle;
        queue->kmem->mem_info.dma_region_handle = GDMA_INVALID_DMA_REGION;
        return 0;
}

int mana_ib_create_queue(struct mana_ib_dev *mdev, u64 addr, u32 size,
                         struct mana_ib_queue *queue)
{
        struct ib_umem *umem;
        int err;

        queue->umem = NULL;
        queue->id = INVALID_QUEUE_ID;
        queue->gdma_region = GDMA_INVALID_DMA_REGION;

        umem = ib_umem_get(&mdev->ib_dev, addr, size, IB_ACCESS_LOCAL_WRITE);
        if (IS_ERR(umem)) {
                err = PTR_ERR(umem);
                ibdev_dbg(&mdev->ib_dev, "Failed to get umem, %d\n", err);
                return err;
        }

        err = mana_ib_create_zero_offset_dma_region(mdev, umem, &queue->gdma_region);
        if (err) {
                ibdev_dbg(&mdev->ib_dev, "Failed to create dma region, %d\n", err);
                goto free_umem;
        }
        queue->umem = umem;

        ibdev_dbg(&mdev->ib_dev, "created dma region 0x%llx\n", queue->gdma_region);

        return 0;
free_umem:
        ib_umem_release(umem);
        return err;
}

void mana_ib_destroy_queue(struct mana_ib_dev *mdev, struct mana_ib_queue *queue)
{
        /* Ignore the return code: there is not much we can do about an
         * error here, and the callee already logs it.
         */
        mana_ib_gd_destroy_dma_region(mdev, queue->gdma_region);
        ib_umem_release(queue->umem);
        if (queue->kmem)
                mana_gd_destroy_queue(mdev_to_gc(mdev), queue->kmem);
}

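/*
 * Creating a DMA region can take more pages than fit in one HWC message.
 * The flow is: one GDMA_CREATE_DMA_REGION request carrying the first batch
 * of page addresses, then zero or more GDMA_DMA_REGION_ADD_PAGES requests
 * for the rest. Every message except the last is expected to return
 * GDMA_STATUS_MORE_ENTRIES; the last must return 0. As a rough, hypothetical
 * example: with an 8 KB max request size, about (8192 - sizeof(req)) / 8
 * page addresses fit per message, so a 10000-page region needs one create
 * message plus several add_pages messages.
 */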
static int
mana_ib_gd_first_dma_region(struct mana_ib_dev *dev,
                            struct gdma_context *gc,
                            struct gdma_create_dma_region_req *create_req,
                            size_t num_pages, mana_handle_t *gdma_region,
                            u32 expected_status)
{
        struct gdma_create_dma_region_resp create_resp = {};
        unsigned int create_req_msg_size;
        int err;

        create_req_msg_size =
                struct_size(create_req, page_addr_list, num_pages);
        create_req->page_addr_list_len = num_pages;

        err = mana_gd_send_request(gc, create_req_msg_size, create_req,
                                   sizeof(create_resp), &create_resp);
        if (err || create_resp.hdr.status != expected_status) {
                ibdev_dbg(&dev->ib_dev,
                          "Failed to create DMA region: %d, 0x%x\n",
                          err, create_resp.hdr.status);
                if (!err)
                        err = -EPROTO;

                return err;
        }

        *gdma_region = create_resp.dma_region_handle;
        ibdev_dbg(&dev->ib_dev, "Created DMA region handle 0x%llx\n",
                  *gdma_region);

        return 0;
}

static int
mana_ib_gd_add_dma_region(struct mana_ib_dev *dev, struct gdma_context *gc,
                          struct gdma_dma_region_add_pages_req *add_req,
                          unsigned int num_pages, u32 expected_status)
{
        unsigned int add_req_msg_size =
                struct_size(add_req, page_addr_list, num_pages);
        struct gdma_general_resp add_resp = {};
        int err;

        mana_gd_init_req_hdr(&add_req->hdr, GDMA_DMA_REGION_ADD_PAGES,
                             add_req_msg_size, sizeof(add_resp));
        add_req->page_addr_list_len = num_pages;

        err = mana_gd_send_request(gc, add_req_msg_size, add_req,
                                   sizeof(add_resp), &add_resp);
        if (err || add_resp.hdr.status != expected_status) {
                ibdev_dbg(&dev->ib_dev,
                          "Failed to add pages to DMA region: %d, 0x%x\n",
                          err, add_resp.hdr.status);

                if (!err)
                        err = -EPROTO;

                return err;
        }

        return 0;
}

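/*
 * Walk the umem in page_sz blocks, batching page addresses into the request
 * buffer and flushing a message whenever the batch is full. The same buffer
 * is reused: first as the create request, then as add_pages requests.
 */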
static int mana_ib_gd_create_dma_region(struct mana_ib_dev *dev, struct ib_umem *umem,
                                        mana_handle_t *gdma_region, unsigned long page_sz)
{
        struct gdma_dma_region_add_pages_req *add_req = NULL;
        size_t num_pages_processed = 0, num_pages_to_handle;
        struct gdma_create_dma_region_req *create_req;
        unsigned int create_req_msg_size;
        struct hw_channel_context *hwc;
        struct ib_block_iter biter;
        size_t max_pgs_add_cmd = 0;
        size_t max_pgs_create_cmd;
        struct gdma_context *gc;
        size_t num_pages_total;
        unsigned int tail = 0;
        u64 *page_addr_list;
        void *request_buf;
        int err = 0;

        gc = mdev_to_gc(dev);
        hwc = gc->hwc.driver_data;

        num_pages_total = ib_umem_num_dma_blocks(umem, page_sz);

        max_pgs_create_cmd =
                (hwc->max_req_msg_size - sizeof(*create_req)) / sizeof(u64);
        num_pages_to_handle =
                min_t(size_t, num_pages_total, max_pgs_create_cmd);
        create_req_msg_size =
                struct_size(create_req, page_addr_list, num_pages_to_handle);

        request_buf = kzalloc(hwc->max_req_msg_size, GFP_KERNEL);
        if (!request_buf)
                return -ENOMEM;

        create_req = request_buf;
        mana_gd_init_req_hdr(&create_req->hdr, GDMA_CREATE_DMA_REGION,
                             create_req_msg_size,
                             sizeof(struct gdma_create_dma_region_resp));

        create_req->length = umem->length;
        create_req->offset_in_page = ib_umem_dma_offset(umem, page_sz);
        create_req->gdma_page_type = order_base_2(page_sz) - MANA_PAGE_SHIFT;
        create_req->page_count = num_pages_total;

        ibdev_dbg(&dev->ib_dev, "size_dma_region %lu num_pages_total %lu\n",
                  umem->length, num_pages_total);

        ibdev_dbg(&dev->ib_dev, "page_sz %lu offset_in_page %u\n",
                  page_sz, create_req->offset_in_page);

        ibdev_dbg(&dev->ib_dev, "num_pages_to_handle %lu, gdma_page_type %u\n",
                  num_pages_to_handle, create_req->gdma_page_type);

        page_addr_list = create_req->page_addr_list;
        rdma_umem_for_each_dma_block(umem, &biter, page_sz) {
                u32 expected_status = 0;

                page_addr_list[tail++] = rdma_block_iter_dma_address(&biter);
                if (tail < num_pages_to_handle)
                        continue;

                if (num_pages_processed + num_pages_to_handle <
                    num_pages_total)
                        expected_status = GDMA_STATUS_MORE_ENTRIES;

                if (!num_pages_processed) {
                        /* First create message */
                        err = mana_ib_gd_first_dma_region(dev, gc, create_req,
                                                          tail, gdma_region,
                                                          expected_status);
                        if (err)
                                goto out;

                        max_pgs_add_cmd = (hwc->max_req_msg_size -
                                sizeof(*add_req)) / sizeof(u64);

                        add_req = request_buf;
                        add_req->dma_region_handle = *gdma_region;
                        add_req->reserved3 = 0;
                        page_addr_list = add_req->page_addr_list;
                } else {
                        /* Subsequent add_pages messages */
                        err = mana_ib_gd_add_dma_region(dev, gc, add_req, tail,
                                                        expected_status);
                        if (err)
                                break;
                }

                num_pages_processed += tail;
                tail = 0;

                /* The remaining pages to create */
                num_pages_to_handle =
                        min_t(size_t,
                              num_pages_total - num_pages_processed,
                              max_pgs_add_cmd);
        }

        if (err)
                mana_ib_gd_destroy_dma_region(dev, *gdma_region);

out:
        kfree(request_buf);
        return err;
}

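/*
 * The two wrappers below differ only in how they pick the hardware page
 * size: mana_ib_create_dma_region() allows any offset consistent with the
 * MR's iova, while the zero-offset variant demands a region that starts on
 * a page boundary, as the hardware requires for queue buffers. Both draw
 * candidate page sizes from the PAGE_SZ_BM bitmap.
 */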
int mana_ib_create_dma_region(struct mana_ib_dev *dev, struct ib_umem *umem,
                              mana_handle_t *gdma_region, u64 virt)
{
        unsigned long page_sz;

        page_sz = ib_umem_find_best_pgsz(umem, PAGE_SZ_BM, virt);
        if (!page_sz) {
                ibdev_dbg(&dev->ib_dev, "Failed to find page size.\n");
                return -EINVAL;
        }

        return mana_ib_gd_create_dma_region(dev, umem, gdma_region, page_sz);
}

int mana_ib_create_zero_offset_dma_region(struct mana_ib_dev *dev, struct ib_umem *umem,
                                          mana_handle_t *gdma_region)
{
        unsigned long page_sz;

        /* Hardware requires dma region to align to chosen page size */
        page_sz = ib_umem_find_best_pgoff(umem, PAGE_SZ_BM, 0);
        if (!page_sz) {
                ibdev_dbg(&dev->ib_dev, "Failed to find page size.\n");
                return -EINVAL;
        }

        return mana_ib_gd_create_dma_region(dev, umem, gdma_region, page_sz);
}

int mana_ib_gd_destroy_dma_region(struct mana_ib_dev *dev, u64 gdma_region)
{
        struct gdma_context *gc = mdev_to_gc(dev);

        ibdev_dbg(&dev->ib_dev, "destroy dma region 0x%llx\n", gdma_region);

        return mana_gd_destroy_dma_region(gc, gdma_region);
}

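/*
 * Map the user context's doorbell page. The physical address is
 * phys_db_page_base + db_page_size * doorbell_index; only vm_pgoff 0 is
 * accepted, so each context can map exactly its own page, write-combined.
 */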
int mana_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vma)
{
        struct mana_ib_ucontext *mana_ucontext =
                container_of(ibcontext, struct mana_ib_ucontext, ibucontext);
        struct ib_device *ibdev = ibcontext->device;
        struct mana_ib_dev *mdev;
        struct gdma_context *gc;
        phys_addr_t pfn;
        pgprot_t prot;
        int ret;

        mdev = container_of(ibdev, struct mana_ib_dev, ib_dev);
        gc = mdev_to_gc(mdev);

        if (vma->vm_pgoff != 0) {
                ibdev_dbg(ibdev, "Unexpected vm_pgoff %lu\n", vma->vm_pgoff);
                return -EINVAL;
        }

        /* Map to the page indexed by ucontext->doorbell */
        pfn = (gc->phys_db_page_base +
               gc->db_page_size * mana_ucontext->doorbell) >>
              PAGE_SHIFT;
        prot = pgprot_writecombine(vma->vm_page_prot);

        ret = rdma_user_mmap_io(ibcontext, vma, pfn, PAGE_SIZE, prot,
                                NULL);
        if (ret)
                ibdev_dbg(ibdev, "can't rdma_user_mmap_io ret %d\n", ret);
        else
                ibdev_dbg(ibdev, "mapped I/O pfn 0x%llx page_size %lu, ret %d\n",
                          pfn, PAGE_SIZE, ret);

        return ret;
}

int mana_ib_get_port_immutable(struct ib_device *ibdev, u32 port_num,
                               struct ib_port_immutable *immutable)
{
        struct ib_port_attr attr;
        int err;

        err = ib_query_port(ibdev, port_num, &attr);
        if (err)
                return err;

        immutable->pkey_tbl_len = attr.pkey_tbl_len;
        immutable->gid_tbl_len = attr.gid_tbl_len;
        immutable->core_cap_flags = RDMA_CORE_PORT_RAW_PACKET;
        if (port_num == 1) {
                immutable->core_cap_flags |= RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
                immutable->max_mad_size = IB_MGMT_MAD_SIZE;
        }

        return 0;
}

int mana_ib_query_device(struct ib_device *ibdev, struct ib_device_attr *props,
                         struct ib_udata *uhw)
{
        struct mana_ib_dev *dev = container_of(ibdev,
                        struct mana_ib_dev, ib_dev);

        memset(props, 0, sizeof(*props));
        props->max_mr_size = MANA_IB_MAX_MR_SIZE;
        props->page_size_cap = PAGE_SZ_BM;
        props->max_qp = dev->adapter_caps.max_qp_count;
        props->max_qp_wr = dev->adapter_caps.max_qp_wr;
        props->device_cap_flags = IB_DEVICE_RC_RNR_NAK_GEN;
        props->max_send_sge = dev->adapter_caps.max_send_sge_count;
        props->max_recv_sge = dev->adapter_caps.max_recv_sge_count;
        props->max_sge_rd = dev->adapter_caps.max_recv_sge_count;
        props->max_cq = dev->adapter_caps.max_cq_count;
        props->max_cqe = dev->adapter_caps.max_qp_wr;
        props->max_mr = dev->adapter_caps.max_mr_count;
        props->max_pd = dev->adapter_caps.max_pd_count;
        props->max_qp_rd_atom = dev->adapter_caps.max_inbound_read_limit;
        props->max_res_rd_atom = props->max_qp_rd_atom * props->max_qp;
        props->max_qp_init_rd_atom = dev->adapter_caps.max_outbound_read_limit;
        props->atomic_cap = IB_ATOMIC_NONE;
        props->masked_atomic_cap = IB_ATOMIC_NONE;
        props->max_ah = INT_MAX;
        props->max_pkeys = 1;
        props->local_ca_ack_delay = MANA_CA_ACK_DELAY;

        return 0;
}

int mana_ib_query_port(struct ib_device *ibdev, u32 port,
                       struct ib_port_attr *props)
{
        struct net_device *ndev = mana_ib_get_netdev(ibdev, port);

        if (!ndev)
                return -EINVAL;

        memset(props, 0, sizeof(*props));
        props->max_mtu = IB_MTU_4096;
        props->active_mtu = ib_mtu_int_to_enum(ndev->mtu);

        if (netif_carrier_ok(ndev) && netif_running(ndev)) {
                props->state = IB_PORT_ACTIVE;
                props->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
        } else {
                props->state = IB_PORT_DOWN;
                props->phys_state = IB_PORT_PHYS_STATE_DISABLED;
        }

        props->active_width = IB_WIDTH_4X;
        props->active_speed = IB_SPEED_EDR;
        props->pkey_tbl_len = 1;
        if (port == 1) {
                props->gid_tbl_len = 16;
                props->port_cap_flags = IB_PORT_CM_SUP;
                props->ip_gids = true;
        }

        return 0;
}

enum rdma_link_layer mana_ib_get_link_layer(struct ib_device *device, u32 port_num)
{
        return IB_LINK_LAYER_ETHERNET;
}

int mana_ib_query_pkey(struct ib_device *ibdev, u32 port, u16 index, u16 *pkey)
{
        if (index != 0)
                return -EINVAL;
        *pkey = IB_DEFAULT_PKEY_FULL;
        return 0;
}

int mana_ib_query_gid(struct ib_device *ibdev, u32 port, int index,
                      union ib_gid *gid)
{
        /* This version doesn't return GID properties */
        return 0;
}

void mana_ib_disassociate_ucontext(struct ib_ucontext *ibcontext)
{
}

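/*
 * Query adapter limits once at probe time. Note that max_qp_wr is derived
 * rather than reported directly: it is the smaller of how many maximum-size
 * SQEs fit in the largest requester SQ and how many maximum-size RQEs fit
 * in the largest requester RQ.
 */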
int mana_ib_gd_query_adapter_caps(struct mana_ib_dev *dev)
{
        struct mana_ib_adapter_caps *caps = &dev->adapter_caps;
        struct mana_ib_query_adapter_caps_resp resp = {};
        struct mana_ib_query_adapter_caps_req req = {};
        int err;

        mana_gd_init_req_hdr(&req.hdr, MANA_IB_GET_ADAPTER_CAP, sizeof(req),
                             sizeof(resp));
        req.hdr.resp.msg_version = GDMA_MESSAGE_V4;
        req.hdr.dev_id = dev->gdma_dev->dev_id;

        err = mana_gd_send_request(mdev_to_gc(dev), sizeof(req),
                                   &req, sizeof(resp), &resp);

        if (err) {
                ibdev_err(&dev->ib_dev,
                          "Failed to query adapter caps err %d\n", err);
                return err;
        }

        caps->max_sq_id = resp.max_sq_id;
        caps->max_rq_id = resp.max_rq_id;
        caps->max_cq_id = resp.max_cq_id;
        caps->max_qp_count = resp.max_qp_count;
        caps->max_cq_count = resp.max_cq_count;
        caps->max_mr_count = resp.max_mr_count;
        caps->max_pd_count = resp.max_pd_count;
        caps->max_inbound_read_limit = resp.max_inbound_read_limit;
        caps->max_outbound_read_limit = resp.max_outbound_read_limit;
        caps->mw_count = resp.mw_count;
        caps->max_srq_count = resp.max_srq_count;
        caps->max_qp_wr = min_t(u32,
                                resp.max_requester_sq_size / GDMA_MAX_SQE_SIZE,
                                resp.max_requester_rq_size / GDMA_MAX_RQE_SIZE);
        caps->max_inline_data_size = resp.max_inline_data_size;
        caps->max_send_sge_count = resp.max_send_sge_count;
        caps->max_recv_sge_count = resp.max_recv_sge_count;
        caps->feature_flags = resp.feature_flags;

        return 0;
}

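/*
 * EQ callback for the fatal-error EQ. On a GDMA_EQE_RNIC_QP_FATAL event the
 * affected QP number is in details[0]; the handler looks the QP up, holds a
 * reference across the upcall, and forwards IB_EVENT_QP_FATAL to the
 * consumer's event handler if one is registered.
 */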
static void
mana_ib_event_handler(void *ctx, struct gdma_queue *q, struct gdma_event *event)
{
        struct mana_ib_dev *mdev = (struct mana_ib_dev *)ctx;
        struct mana_ib_qp *qp;
        struct ib_event ev;
        u32 qpn;

        switch (event->type) {
        case GDMA_EQE_RNIC_QP_FATAL:
                qpn = event->details[0];
                qp = mana_get_qp_ref(mdev, qpn, false);
                if (!qp)
                        break;
                if (qp->ibqp.event_handler) {
                        ev.device = qp->ibqp.device;
                        ev.element.qp = &qp->ibqp;
                        ev.event = IB_EVENT_QP_FATAL;
                        qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
                }
                mana_put_qp_ref(qp);
                break;
        default:
                break;
        }
}

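/*
 * Create the fatal-error EQ plus one completion EQ per comp vector. The
 * completion EQs are spread over the usable MSI-X vectors starting at
 * index 1, wrapping modulo num_msix_usable, so vector 0 (used by the
 * fatal-error EQ) is shared only when the comp vectors wrap around.
 */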
int mana_ib_create_eqs(struct mana_ib_dev *mdev)
{
        struct gdma_context *gc = mdev_to_gc(mdev);
        struct gdma_queue_spec spec = {};
        int err, i;

        spec.type = GDMA_EQ;
        spec.monitor_avl_buf = false;
        spec.queue_size = EQ_SIZE;
        spec.eq.callback = mana_ib_event_handler;
        spec.eq.context = mdev;
        spec.eq.log2_throttle_limit = LOG2_EQ_THROTTLE;
        spec.eq.msix_index = 0;

        err = mana_gd_create_mana_eq(&gc->mana_ib, &spec, &mdev->fatal_err_eq);
        if (err)
                return err;

        mdev->eqs = kcalloc(mdev->ib_dev.num_comp_vectors, sizeof(struct gdma_queue *),
                            GFP_KERNEL);
        if (!mdev->eqs) {
                err = -ENOMEM;
                goto destroy_fatal_eq;
        }
        spec.eq.callback = NULL;
        for (i = 0; i < mdev->ib_dev.num_comp_vectors; i++) {
                spec.eq.msix_index = (i + 1) % gc->num_msix_usable;
                err = mana_gd_create_mana_eq(mdev->gdma_dev, &spec, &mdev->eqs[i]);
                if (err)
                        goto destroy_eqs;
        }

        return 0;

destroy_eqs:
        while (i-- > 0)
                mana_gd_destroy_queue(gc, mdev->eqs[i]);
        kfree(mdev->eqs);
destroy_fatal_eq:
        mana_gd_destroy_queue(gc, mdev->fatal_err_eq);
        return err;
}

void mana_ib_destroy_eqs(struct mana_ib_dev *mdev)
{
        struct gdma_context *gc = mdev_to_gc(mdev);
        int i;

        mana_gd_destroy_queue(gc, mdev->fatal_err_eq);

        for (i = 0; i < mdev->ib_dev.num_comp_vectors; i++)
                mana_gd_destroy_queue(gc, mdev->eqs[i]);

        kfree(mdev->eqs);
}

int mana_ib_gd_create_rnic_adapter(struct mana_ib_dev *mdev)
{
        struct mana_rnic_create_adapter_resp resp = {};
        struct mana_rnic_create_adapter_req req = {};
        struct gdma_context *gc = mdev_to_gc(mdev);
        int err;

        mana_gd_init_req_hdr(&req.hdr, MANA_IB_CREATE_ADAPTER, sizeof(req), sizeof(resp));
        req.hdr.req.msg_version = GDMA_MESSAGE_V2;
        req.hdr.dev_id = gc->mana_ib.dev_id;
        req.notify_eq_id = mdev->fatal_err_eq->id;

        if (mdev->adapter_caps.feature_flags & MANA_IB_FEATURE_CLIENT_ERROR_CQE_SUPPORT)
                req.feature_flags |= MANA_IB_FEATURE_CLIENT_ERROR_CQE_REQUEST;

        err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
        if (err) {
                ibdev_err(&mdev->ib_dev, "Failed to create RNIC adapter err %d\n", err);
                return err;
        }
        mdev->adapter_handle = resp.adapter;

        return 0;
}

int mana_ib_gd_destroy_rnic_adapter(struct mana_ib_dev *mdev)
{
        struct mana_rnic_destroy_adapter_resp resp = {};
        struct mana_rnic_destroy_adapter_req req = {};
        struct gdma_context *gc;
        int err;

        gc = mdev_to_gc(mdev);
        mana_gd_init_req_hdr(&req.hdr, MANA_IB_DESTROY_ADAPTER, sizeof(req), sizeof(resp));
        req.hdr.dev_id = gc->mana_ib.dev_id;
        req.adapter = mdev->adapter_handle;

        err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
        if (err) {
                ibdev_err(&mdev->ib_dev, "Failed to destroy RNIC adapter err %d\n", err);
                return err;
        }

        return 0;
}

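/*
 * Add or remove a source IP address on the RNIC. The IB core stores GIDs in
 * network byte order; the request carries the address byte-reversed via
 * copy_in_reverse(), and mana_ib_gd_config_mac() below does the same for
 * the MAC address.
 */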
int mana_ib_gd_add_gid(const struct ib_gid_attr *attr, void **context)
{
        struct mana_ib_dev *mdev = container_of(attr->device, struct mana_ib_dev, ib_dev);
        enum rdma_network_type ntype = rdma_gid_attr_network_type(attr);
        struct mana_rnic_config_addr_resp resp = {};
        struct gdma_context *gc = mdev_to_gc(mdev);
        struct mana_rnic_config_addr_req req = {};
        int err;

        if (ntype != RDMA_NETWORK_IPV4 && ntype != RDMA_NETWORK_IPV6) {
                ibdev_dbg(&mdev->ib_dev, "Unsupported rdma network type %d\n", ntype);
                return -EINVAL;
        }

        mana_gd_init_req_hdr(&req.hdr, MANA_IB_CONFIG_IP_ADDR, sizeof(req), sizeof(resp));
        req.hdr.dev_id = gc->mana_ib.dev_id;
        req.adapter = mdev->adapter_handle;
        req.op = ADDR_OP_ADD;
        req.sgid_type = (ntype == RDMA_NETWORK_IPV6) ? SGID_TYPE_IPV6 : SGID_TYPE_IPV4;
        copy_in_reverse(req.ip_addr, attr->gid.raw, sizeof(union ib_gid));

        err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
        if (err) {
                ibdev_err(&mdev->ib_dev, "Failed to config IP addr err %d\n", err);
                return err;
        }

        return 0;
}

int mana_ib_gd_del_gid(const struct ib_gid_attr *attr, void **context)
{
        struct mana_ib_dev *mdev = container_of(attr->device, struct mana_ib_dev, ib_dev);
        enum rdma_network_type ntype = rdma_gid_attr_network_type(attr);
        struct mana_rnic_config_addr_resp resp = {};
        struct gdma_context *gc = mdev_to_gc(mdev);
        struct mana_rnic_config_addr_req req = {};
        int err;

        if (ntype != RDMA_NETWORK_IPV4 && ntype != RDMA_NETWORK_IPV6) {
                ibdev_dbg(&mdev->ib_dev, "Unsupported rdma network type %d\n", ntype);
                return -EINVAL;
        }

        mana_gd_init_req_hdr(&req.hdr, MANA_IB_CONFIG_IP_ADDR, sizeof(req), sizeof(resp));
        req.hdr.dev_id = gc->mana_ib.dev_id;
        req.adapter = mdev->adapter_handle;
        req.op = ADDR_OP_REMOVE;
        req.sgid_type = (ntype == RDMA_NETWORK_IPV6) ? SGID_TYPE_IPV6 : SGID_TYPE_IPV4;
        copy_in_reverse(req.ip_addr, attr->gid.raw, sizeof(union ib_gid));

        err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
        if (err) {
                ibdev_err(&mdev->ib_dev, "Failed to config IP addr err %d\n", err);
                return err;
        }

        return 0;
}

int mana_ib_gd_config_mac(struct mana_ib_dev *mdev, enum mana_ib_addr_op op, u8 *mac)
{
        struct mana_rnic_config_mac_addr_resp resp = {};
        struct mana_rnic_config_mac_addr_req req = {};
        struct gdma_context *gc = mdev_to_gc(mdev);
        int err;

        mana_gd_init_req_hdr(&req.hdr, MANA_IB_CONFIG_MAC_ADDR, sizeof(req), sizeof(resp));
        req.hdr.dev_id = gc->mana_ib.dev_id;
        req.adapter = mdev->adapter_handle;
        req.op = op;
        copy_in_reverse(req.mac_addr, mac, ETH_ALEN);

        err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
        if (err) {
                ibdev_err(&mdev->ib_dev, "Failed to config MAC addr err %d\n", err);
                return err;
        }

        return 0;
}

int mana_ib_gd_create_cq(struct mana_ib_dev *mdev, struct mana_ib_cq *cq, u32 doorbell)
{
        struct gdma_context *gc = mdev_to_gc(mdev);
        struct mana_rnic_create_cq_resp resp = {};
        struct mana_rnic_create_cq_req req = {};
        int err;

        mana_gd_init_req_hdr(&req.hdr, MANA_IB_CREATE_CQ, sizeof(req), sizeof(resp));
        req.hdr.dev_id = gc->mana_ib.dev_id;
        req.adapter = mdev->adapter_handle;
        req.gdma_region = cq->queue.gdma_region;
        req.eq_id = mdev->eqs[cq->comp_vector]->id;
        req.doorbell_page = doorbell;

        err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);

        if (err) {
                ibdev_err(&mdev->ib_dev, "Failed to create cq err %d\n", err);
                return err;
        }

        cq->queue.id = resp.cq_id;
        cq->cq_handle = resp.cq_handle;
        /* The GDMA region is now owned by the CQ handle */
        cq->queue.gdma_region = GDMA_INVALID_DMA_REGION;

        return 0;
}

int mana_ib_gd_destroy_cq(struct mana_ib_dev *mdev, struct mana_ib_cq *cq)
{
        struct gdma_context *gc = mdev_to_gc(mdev);
        struct mana_rnic_destroy_cq_resp resp = {};
        struct mana_rnic_destroy_cq_req req = {};
        int err;

        if (cq->cq_handle == INVALID_MANA_HANDLE)
                return 0;

        mana_gd_init_req_hdr(&req.hdr, MANA_IB_DESTROY_CQ, sizeof(req), sizeof(resp));
        req.hdr.dev_id = gc->mana_ib.dev_id;
        req.adapter = mdev->adapter_handle;
        req.cq_handle = cq->cq_handle;

        err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);

        if (err) {
                ibdev_err(&mdev->ib_dev, "Failed to destroy cq err %d\n", err);
                return err;
        }

        return 0;
}

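/*
 * Create an RC QP from the queues staged in qp->rc_qp.queues[]. On success
 * the RNIC owns the per-queue DMA regions, so the handles are cleared here
 * to keep the teardown path from freeing them a second time; the UD/GSI
 * variant below follows the same pattern.
 */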
int mana_ib_gd_create_rc_qp(struct mana_ib_dev *mdev, struct mana_ib_qp *qp,
                            struct ib_qp_init_attr *attr, u32 doorbell, u64 flags)
{
        struct mana_ib_cq *send_cq = container_of(qp->ibqp.send_cq, struct mana_ib_cq, ibcq);
        struct mana_ib_cq *recv_cq = container_of(qp->ibqp.recv_cq, struct mana_ib_cq, ibcq);
        struct mana_ib_pd *pd = container_of(qp->ibqp.pd, struct mana_ib_pd, ibpd);
        struct gdma_context *gc = mdev_to_gc(mdev);
        struct mana_rnic_create_qp_resp resp = {};
        struct mana_rnic_create_qp_req req = {};
        int err, i;

        mana_gd_init_req_hdr(&req.hdr, MANA_IB_CREATE_RC_QP, sizeof(req), sizeof(resp));
        req.hdr.dev_id = gc->mana_ib.dev_id;
        req.adapter = mdev->adapter_handle;
        req.pd_handle = pd->pd_handle;
        req.send_cq_handle = send_cq->cq_handle;
        req.recv_cq_handle = recv_cq->cq_handle;
        for (i = 0; i < MANA_RC_QUEUE_TYPE_MAX; i++)
                req.dma_region[i] = qp->rc_qp.queues[i].gdma_region;
        req.doorbell_page = doorbell;
        req.max_send_wr = attr->cap.max_send_wr;
        req.max_recv_wr = attr->cap.max_recv_wr;
        req.max_send_sge = attr->cap.max_send_sge;
        req.max_recv_sge = attr->cap.max_recv_sge;
        req.flags = flags;

        err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
        if (err) {
                ibdev_err(&mdev->ib_dev, "Failed to create rc qp err %d\n", err);
                return err;
        }
        qp->qp_handle = resp.rc_qp_handle;
        for (i = 0; i < MANA_RC_QUEUE_TYPE_MAX; i++) {
                qp->rc_qp.queues[i].id = resp.queue_ids[i];
                /* The GDMA regions are now owned by the RNIC QP handle */
                qp->rc_qp.queues[i].gdma_region = GDMA_INVALID_DMA_REGION;
        }
        return 0;
}

int mana_ib_gd_destroy_rc_qp(struct mana_ib_dev *mdev, struct mana_ib_qp *qp)
{
        struct mana_rnic_destroy_rc_qp_resp resp = {};
        struct mana_rnic_destroy_rc_qp_req req = {};
        struct gdma_context *gc = mdev_to_gc(mdev);
        int err;

        mana_gd_init_req_hdr(&req.hdr, MANA_IB_DESTROY_RC_QP, sizeof(req), sizeof(resp));
        req.hdr.dev_id = gc->mana_ib.dev_id;
        req.adapter = mdev->adapter_handle;
        req.rc_qp_handle = qp->qp_handle;
        err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
        if (err) {
                ibdev_err(&mdev->ib_dev, "Failed to destroy rc qp err %d\n", err);
                return err;
        }
        return 0;
}

int mana_ib_gd_create_ud_qp(struct mana_ib_dev *mdev, struct mana_ib_qp *qp,
                            struct ib_qp_init_attr *attr, u32 doorbell, u32 type)
{
        struct mana_ib_cq *send_cq = container_of(qp->ibqp.send_cq, struct mana_ib_cq, ibcq);
        struct mana_ib_cq *recv_cq = container_of(qp->ibqp.recv_cq, struct mana_ib_cq, ibcq);
        struct mana_ib_pd *pd = container_of(qp->ibqp.pd, struct mana_ib_pd, ibpd);
        struct gdma_context *gc = mdev_to_gc(mdev);
        struct mana_rnic_create_udqp_resp resp = {};
        struct mana_rnic_create_udqp_req req = {};
        int err, i;

        mana_gd_init_req_hdr(&req.hdr, MANA_IB_CREATE_UD_QP, sizeof(req), sizeof(resp));
        req.hdr.dev_id = gc->mana_ib.dev_id;
        req.adapter = mdev->adapter_handle;
        req.pd_handle = pd->pd_handle;
        req.send_cq_handle = send_cq->cq_handle;
        req.recv_cq_handle = recv_cq->cq_handle;
        for (i = 0; i < MANA_UD_QUEUE_TYPE_MAX; i++)
                req.dma_region[i] = qp->ud_qp.queues[i].gdma_region;
        req.doorbell_page = doorbell;
        req.max_send_wr = attr->cap.max_send_wr;
        req.max_recv_wr = attr->cap.max_recv_wr;
        req.max_send_sge = attr->cap.max_send_sge;
        req.max_recv_sge = attr->cap.max_recv_sge;
        req.qp_type = type;
        err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
        if (err) {
                ibdev_err(&mdev->ib_dev, "Failed to create ud qp err %d\n", err);
                return err;
        }
        qp->qp_handle = resp.qp_handle;
        for (i = 0; i < MANA_UD_QUEUE_TYPE_MAX; i++) {
                qp->ud_qp.queues[i].id = resp.queue_ids[i];
                /* The GDMA regions are now owned by the RNIC QP handle */
                qp->ud_qp.queues[i].gdma_region = GDMA_INVALID_DMA_REGION;
        }
        return 0;
}

int mana_ib_gd_destroy_ud_qp(struct mana_ib_dev *mdev, struct mana_ib_qp *qp)
{
        struct mana_rnic_destroy_udqp_resp resp = {};
        struct mana_rnic_destroy_udqp_req req = {};
        struct gdma_context *gc = mdev_to_gc(mdev);
        int err;

        mana_gd_init_req_hdr(&req.hdr, MANA_IB_DESTROY_UD_QP, sizeof(req), sizeof(resp));
        req.hdr.dev_id = gc->mana_ib.dev_id;
        req.adapter = mdev->adapter_handle;
        req.qp_handle = qp->qp_handle;
        err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
        if (err) {
                ibdev_err(&mdev->ib_dev, "Failed to destroy ud qp err %d\n", err);
                return err;
        }
        return 0;
}