1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Copyright (c) 2022, Microsoft Corporation. All rights reserved.
4 */
5
6 #include "mana_ib.h"
7
/* Program RX steering on a vPort: default RX object, RSS indirection table
 * and Toeplitz hash key, via a MANA_CONFIG_VPORT_RX (v2) GDMA request.
 *
 * @dev:              mana RDMA device
 * @ndev:             net device backing the vPort
 * @default_rxobj:    WQ object that receives traffic when RSS is not used
 * @ind_table:        caller's indirection table, 1 << @log_ind_tbl_size entries
 * @log_ind_tbl_size: log2 of the number of entries in @ind_table
 * @rx_hash_key_len:  bytes to copy from @rx_hash_key; 0 = use a random key
 * @rx_hash_key:      Toeplitz hash key to program
 *
 * Return: 0 on success, -ENOMEM on allocation failure, -EPROTO if the
 * device reports a non-zero status, or the error from sending the request.
 */
static int mana_ib_cfg_vport_steering(struct mana_ib_dev *dev,
				      struct net_device *ndev,
				      mana_handle_t default_rxobj,
				      mana_handle_t ind_table[],
				      u32 log_ind_tbl_size, u32 rx_hash_key_len,
				      u8 *rx_hash_key)
{
	struct mana_port_context *mpc = netdev_priv(ndev);
	struct mana_cfg_rx_steer_req_v2 *req;
	struct mana_cfg_rx_steer_resp resp = {};
	struct gdma_context *gc;
	u32 req_buf_size;
	int i, err;

	gc = mdev_to_gc(dev);

	/* Request carries the full hardware-sized indirection table inline */
	req_buf_size = struct_size(req, indir_tab, MANA_INDIRECT_TABLE_DEF_SIZE);
	req = kzalloc(req_buf_size, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	mana_gd_init_req_hdr(&req->hdr, MANA_CONFIG_VPORT_RX, req_buf_size,
			     sizeof(resp));

	/* v2 of the message is required for the fields used below */
	req->hdr.req.msg_version = GDMA_MESSAGE_V2;

	req->vport = mpc->port_handle;
	req->rx_enable = 1;
	req->update_default_rxobj = 1;
	req->default_rxobj = default_rxobj;
	req->hdr.dev_id = gc->mana.dev_id;

	/* If there are more than 1 entries in indirection table, enable RSS */
	if (log_ind_tbl_size)
		req->rss_enable = true;

	req->num_indir_entries = MANA_INDIRECT_TABLE_DEF_SIZE;
	req->indir_tab_offset = offsetof(struct mana_cfg_rx_steer_req_v2,
					 indir_tab);
	req->update_indir_tab = true;
	req->cqe_coalescing_enable = 1;

	/* The ind table passed to the hardware must have
	 * MANA_INDIRECT_TABLE_DEF_SIZE entries. Adjust the verb
	 * ind_table to MANA_INDIRECT_TABLE_SIZE if required
	 */
	ibdev_dbg(&dev->ib_dev, "ind table size %u\n", 1 << log_ind_tbl_size);
	for (i = 0; i < MANA_INDIRECT_TABLE_DEF_SIZE; i++) {
		/* Replicate the (power-of-two sized) caller table cyclically */
		req->indir_tab[i] = ind_table[i % (1 << log_ind_tbl_size)];
		ibdev_dbg(&dev->ib_dev, "index %u handle 0x%llx\n", i,
			  req->indir_tab[i]);
	}

	/* Use the caller's hash key if given, otherwise a random one */
	req->update_hashkey = true;
	if (rx_hash_key_len)
		memcpy(req->hashkey, rx_hash_key, rx_hash_key_len);
	else
		netdev_rss_key_fill(req->hashkey, MANA_HASH_KEY_SIZE);

	ibdev_dbg(&dev->ib_dev, "vport handle %llu default_rxobj 0x%llx\n",
		  req->vport, default_rxobj);

	err = mana_gd_send_request(gc, req_buf_size, req, sizeof(resp), &resp);
	if (err) {
		netdev_err(ndev, "Failed to configure vPort RX: %d\n", err);
		goto out;
	}

	/* Transport succeeded; the device may still report a failure status */
	if (resp.hdr.status) {
		netdev_err(ndev, "vPort RX configuration failed: 0x%x\n",
			   resp.hdr.status);
		err = -EPROTO;
		goto out;
	}

	netdev_info(ndev, "Configured steering vPort %llu log_entries %u\n",
		    mpc->port_handle, log_ind_tbl_size);

out:
	kfree(req);
	return err;
}
90
mana_ib_create_qp_rss(struct ib_qp * ibqp,struct ib_pd * pd,struct ib_qp_init_attr * attr,struct ib_udata * udata)91 static int mana_ib_create_qp_rss(struct ib_qp *ibqp, struct ib_pd *pd,
92 struct ib_qp_init_attr *attr,
93 struct ib_udata *udata)
94 {
95 struct mana_ib_qp *qp = container_of(ibqp, struct mana_ib_qp, ibqp);
96 struct mana_ib_dev *mdev =
97 container_of(pd->device, struct mana_ib_dev, ib_dev);
98 struct ib_rwq_ind_table *ind_tbl = attr->rwq_ind_tbl;
99 struct mana_ib_create_qp_rss_resp resp = {};
100 struct mana_ib_create_qp_rss ucmd = {};
101 mana_handle_t *mana_ind_table;
102 struct mana_port_context *mpc;
103 unsigned int ind_tbl_size;
104 struct net_device *ndev;
105 struct mana_ib_cq *cq;
106 struct mana_ib_wq *wq;
107 struct mana_eq *eq;
108 struct ib_cq *ibcq;
109 struct ib_wq *ibwq;
110 int i = 0;
111 u32 port;
112 int ret;
113
114 if (!udata || udata->inlen < sizeof(ucmd))
115 return -EINVAL;
116
117 ret = ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen));
118 if (ret) {
119 ibdev_dbg(&mdev->ib_dev,
120 "Failed copy from udata for create rss-qp, err %d\n",
121 ret);
122 return ret;
123 }
124
125 if (attr->cap.max_recv_wr > mdev->adapter_caps.max_qp_wr) {
126 ibdev_dbg(&mdev->ib_dev,
127 "Requested max_recv_wr %d exceeding limit\n",
128 attr->cap.max_recv_wr);
129 return -EINVAL;
130 }
131
132 if (attr->cap.max_recv_sge > MAX_RX_WQE_SGL_ENTRIES) {
133 ibdev_dbg(&mdev->ib_dev,
134 "Requested max_recv_sge %d exceeding limit\n",
135 attr->cap.max_recv_sge);
136 return -EINVAL;
137 }
138
139 ind_tbl_size = 1 << ind_tbl->log_ind_tbl_size;
140 if (ind_tbl_size > MANA_INDIRECT_TABLE_DEF_SIZE) {
141 ibdev_dbg(&mdev->ib_dev,
142 "Indirect table size %d exceeding limit\n",
143 ind_tbl_size);
144 return -EINVAL;
145 }
146
147 if (ucmd.rx_hash_function != MANA_IB_RX_HASH_FUNC_TOEPLITZ) {
148 ibdev_dbg(&mdev->ib_dev,
149 "RX Hash function is not supported, %d\n",
150 ucmd.rx_hash_function);
151 return -EINVAL;
152 }
153
154 /* IB ports start with 1, MANA start with 0 */
155 port = ucmd.port;
156 ndev = mana_ib_get_netdev(pd->device, port);
157 if (!ndev) {
158 ibdev_dbg(&mdev->ib_dev, "Invalid port %u in creating qp\n",
159 port);
160 return -EINVAL;
161 }
162 mpc = netdev_priv(ndev);
163
164 ibdev_dbg(&mdev->ib_dev, "rx_hash_function %d port %d\n",
165 ucmd.rx_hash_function, port);
166
167 mana_ind_table = kcalloc(ind_tbl_size, sizeof(mana_handle_t),
168 GFP_KERNEL);
169 if (!mana_ind_table) {
170 ret = -ENOMEM;
171 goto fail;
172 }
173
174 qp->port = port;
175
176 for (i = 0; i < ind_tbl_size; i++) {
177 struct mana_obj_spec wq_spec = {};
178 struct mana_obj_spec cq_spec = {};
179
180 ibwq = ind_tbl->ind_tbl[i];
181 wq = container_of(ibwq, struct mana_ib_wq, ibwq);
182
183 ibcq = ibwq->cq;
184 cq = container_of(ibcq, struct mana_ib_cq, ibcq);
185
186 wq_spec.gdma_region = wq->queue.gdma_region;
187 wq_spec.queue_size = wq->wq_buf_size;
188
189 cq_spec.gdma_region = cq->queue.gdma_region;
190 cq_spec.queue_size = cq->cqe * COMP_ENTRY_SIZE;
191 cq_spec.modr_ctx_id = 0;
192 eq = &mpc->ac->eqs[cq->comp_vector];
193 cq_spec.attached_eq = eq->eq->id;
194
195 ret = mana_create_wq_obj(mpc, mpc->port_handle, GDMA_RQ,
196 &wq_spec, &cq_spec, &wq->rx_object);
197 if (ret) {
198 /* Do cleanup starting with index i-1 */
199 i--;
200 goto fail;
201 }
202
203 /* The GDMA regions are now owned by the WQ object */
204 wq->queue.gdma_region = GDMA_INVALID_DMA_REGION;
205 cq->queue.gdma_region = GDMA_INVALID_DMA_REGION;
206
207 wq->queue.id = wq_spec.queue_index;
208 cq->queue.id = cq_spec.queue_index;
209
210 ibdev_dbg(&mdev->ib_dev,
211 "rx_object 0x%llx wq id %llu cq id %llu\n",
212 wq->rx_object, wq->queue.id, cq->queue.id);
213
214 resp.entries[i].cqid = cq->queue.id;
215 resp.entries[i].wqid = wq->queue.id;
216
217 mana_ind_table[i] = wq->rx_object;
218
219 /* Create CQ table entry */
220 ret = mana_ib_install_cq_cb(mdev, cq);
221 if (ret)
222 goto fail;
223 }
224 resp.num_entries = i;
225
226 ret = mana_ib_cfg_vport_steering(mdev, ndev, wq->rx_object,
227 mana_ind_table,
228 ind_tbl->log_ind_tbl_size,
229 ucmd.rx_hash_key_len,
230 ucmd.rx_hash_key);
231 if (ret)
232 goto fail;
233
234 ret = ib_copy_to_udata(udata, &resp, sizeof(resp));
235 if (ret) {
236 ibdev_dbg(&mdev->ib_dev,
237 "Failed to copy to udata create rss-qp, %d\n",
238 ret);
239 goto fail;
240 }
241
242 kfree(mana_ind_table);
243
244 return 0;
245
246 fail:
247 while (i-- > 0) {
248 ibwq = ind_tbl->ind_tbl[i];
249 ibcq = ibwq->cq;
250 wq = container_of(ibwq, struct mana_ib_wq, ibwq);
251 cq = container_of(ibcq, struct mana_ib_cq, ibcq);
252
253 mana_ib_remove_cq_cb(mdev, cq);
254 mana_destroy_wq_obj(mpc, GDMA_RQ, wq->rx_object);
255 }
256
257 kfree(mana_ind_table);
258
259 return ret;
260 }
261
/* Create a RAW_PACKET send QP: configure the vPort for the caller's
 * doorbell, create the user-supplied SQ buffer as a GDMA queue, bind it
 * to a hardware WQ object on the Ethernet port handle, install the send
 * CQ callback, and report queue ids back to userspace.
 *
 * Resources are released in reverse order on failure via goto labels.
 *
 * Return: 0 on success or a negative errno.
 */
static int mana_ib_create_qp_raw(struct ib_qp *ibqp, struct ib_pd *ibpd,
				 struct ib_qp_init_attr *attr,
				 struct ib_udata *udata)
{
	struct mana_ib_pd *pd = container_of(ibpd, struct mana_ib_pd, ibpd);
	struct mana_ib_qp *qp = container_of(ibqp, struct mana_ib_qp, ibqp);
	struct mana_ib_dev *mdev =
		container_of(ibpd->device, struct mana_ib_dev, ib_dev);
	struct mana_ib_cq *send_cq =
		container_of(attr->send_cq, struct mana_ib_cq, ibcq);
	struct mana_ib_ucontext *mana_ucontext =
		rdma_udata_to_drv_context(udata, struct mana_ib_ucontext,
					  ibucontext);
	struct mana_ib_create_qp_resp resp = {};
	struct mana_ib_create_qp ucmd = {};
	struct mana_obj_spec wq_spec = {};
	struct mana_obj_spec cq_spec = {};
	struct mana_port_context *mpc;
	struct net_device *ndev;
	struct mana_eq *eq;
	int eq_vec;
	u32 port;
	int err;

	/* Userspace context is mandatory: raw QPs are user-level only here */
	if (!mana_ucontext || udata->inlen < sizeof(ucmd))
		return -EINVAL;

	err = ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen));
	if (err) {
		ibdev_dbg(&mdev->ib_dev,
			  "Failed to copy from udata create qp-raw, %d\n", err);
		return err;
	}

	if (attr->cap.max_send_wr > mdev->adapter_caps.max_qp_wr) {
		ibdev_dbg(&mdev->ib_dev,
			  "Requested max_send_wr %d exceeding limit\n",
			  attr->cap.max_send_wr);
		return -EINVAL;
	}

	if (attr->cap.max_send_sge > MAX_TX_WQE_SGL_ENTRIES) {
		ibdev_dbg(&mdev->ib_dev,
			  "Requested max_send_sge %d exceeding limit\n",
			  attr->cap.max_send_sge);
		return -EINVAL;
	}

	port = ucmd.port;
	ndev = mana_ib_get_netdev(ibpd->device, port);
	if (!ndev) {
		ibdev_dbg(&mdev->ib_dev, "Invalid port %u in creating qp\n",
			  port);
		return -EINVAL;
	}
	mpc = netdev_priv(ndev);
	ibdev_dbg(&mdev->ib_dev, "port %u ndev %p mpc %p\n", port, ndev, mpc);

	/* NOTE(review): the underlying error code from mana_ib_cfg_vport is
	 * masked to -ENODEV here — confirm this is intentional.
	 */
	err = mana_ib_cfg_vport(mdev, port, pd, mana_ucontext->doorbell);
	if (err)
		return -ENODEV;

	qp->port = port;

	ibdev_dbg(&mdev->ib_dev, "ucmd sq_buf_addr 0x%llx port %u\n",
		  ucmd.sq_buf_addr, ucmd.port);

	err = mana_ib_create_queue(mdev, ucmd.sq_buf_addr, ucmd.sq_buf_size, &qp->raw_sq);
	if (err) {
		ibdev_dbg(&mdev->ib_dev,
			  "Failed to create queue for create qp-raw, err %d\n", err);
		goto err_free_vport;
	}

	/* Create a WQ on the same port handle used by the Ethernet */
	wq_spec.gdma_region = qp->raw_sq.gdma_region;
	wq_spec.queue_size = ucmd.sq_buf_size;

	cq_spec.gdma_region = send_cq->queue.gdma_region;
	cq_spec.queue_size = send_cq->cqe * COMP_ENTRY_SIZE;
	cq_spec.modr_ctx_id = 0;
	eq_vec = send_cq->comp_vector;
	eq = &mpc->ac->eqs[eq_vec];
	cq_spec.attached_eq = eq->eq->id;

	err = mana_create_wq_obj(mpc, mpc->port_handle, GDMA_SQ, &wq_spec,
				 &cq_spec, &qp->qp_handle);
	if (err) {
		ibdev_dbg(&mdev->ib_dev,
			  "Failed to create wq for create raw-qp, err %d\n",
			  err);
		goto err_destroy_queue;
	}

	/* The GDMA regions are now owned by the WQ object */
	qp->raw_sq.gdma_region = GDMA_INVALID_DMA_REGION;
	send_cq->queue.gdma_region = GDMA_INVALID_DMA_REGION;

	qp->raw_sq.id = wq_spec.queue_index;
	send_cq->queue.id = cq_spec.queue_index;

	/* Create CQ table entry */
	err = mana_ib_install_cq_cb(mdev, send_cq);
	if (err)
		goto err_destroy_wq_obj;

	ibdev_dbg(&mdev->ib_dev,
		  "qp->qp_handle 0x%llx sq id %llu cq id %llu\n",
		  qp->qp_handle, qp->raw_sq.id, send_cq->queue.id);

	/* Tell userspace the hardware queue ids and TX vPort offset */
	resp.sqid = qp->raw_sq.id;
	resp.cqid = send_cq->queue.id;
	resp.tx_vp_offset = pd->tx_vp_offset;

	err = ib_copy_to_udata(udata, &resp, sizeof(resp));
	if (err) {
		ibdev_dbg(&mdev->ib_dev,
			  "Failed copy udata for create qp-raw, %d\n",
			  err);
		goto err_remove_cq_cb;
	}

	return 0;

err_remove_cq_cb:
	mana_ib_remove_cq_cb(mdev, send_cq);

err_destroy_wq_obj:
	mana_destroy_wq_obj(mpc, GDMA_SQ, qp->qp_handle);

err_destroy_queue:
	mana_ib_destroy_queue(mdev, &qp->raw_sq);

err_free_vport:
	mana_ib_uncfg_vport(mdev, pd, port);

	return err;
}
400
/* Bytes needed for one WQE with @sge scatter/gather entries and an inline
 * OOB of @oob_size bytes, rounded up to the WQE basic unit.
 */
static u32 mana_ib_wqe_size(u32 sge, u32 oob_size)
{
	u32 raw_size;

	raw_size = sizeof(struct gdma_wqe) + oob_size +
		   sge * sizeof(struct gdma_sge);
	return ALIGN(raw_size, GDMA_WQE_BU_SIZE);
}
407
/* Buffer size for a UD/GSI send or receive queue: worst-case WQE size times
 * the requested WR count, rounded to a power of two and page-aligned.
 * Returns 0 for QP types this helper does not handle.
 */
static u32 mana_ib_queue_size(struct ib_qp_init_attr *attr, u32 queue_type)
{
	u32 wqe_sz, num_wqes;

	switch (attr->qp_type) {
	case IB_QPT_UD:
	case IB_QPT_GSI:
		break;
	default:
		return 0;
	}

	if (queue_type == MANA_UD_SEND_QUEUE) {
		num_wqes = attr->cap.max_send_wr;
		wqe_sz = mana_ib_wqe_size(attr->cap.max_send_sge,
					  INLINE_OOB_LARGE_SIZE);
	} else {
		num_wqes = attr->cap.max_recv_wr;
		wqe_sz = mana_ib_wqe_size(attr->cap.max_recv_sge,
					  INLINE_OOB_SMALL_SIZE);
	}

	return MANA_PAGE_ALIGN(roundup_pow_of_two(num_wqes * wqe_sz));
}
428
/* Map a UD/GSI queue index to its GDMA queue type; other QP types map to
 * GDMA_INVALID_QUEUE.
 */
static enum gdma_queue_type mana_ib_queue_type(struct ib_qp_init_attr *attr, u32 queue_type)
{
	switch (attr->qp_type) {
	case IB_QPT_UD:
	case IB_QPT_GSI:
		return queue_type == MANA_UD_SEND_QUEUE ? GDMA_SQ : GDMA_RQ;
	default:
		return GDMA_INVALID_QUEUE;
	}
}
446
mana_table_store_rc_qp(struct mana_ib_dev * mdev,struct mana_ib_qp * qp)447 static int mana_table_store_rc_qp(struct mana_ib_dev *mdev, struct mana_ib_qp *qp)
448 {
449 return xa_insert_irq(&mdev->qp_table_wq, qp->ibqp.qp_num, qp,
450 GFP_KERNEL);
451 }
452
mana_table_remove_rc_qp(struct mana_ib_dev * mdev,struct mana_ib_qp * qp)453 static void mana_table_remove_rc_qp(struct mana_ib_dev *mdev, struct mana_ib_qp *qp)
454 {
455 xa_erase_irq(&mdev->qp_table_wq, qp->ibqp.qp_num);
456 }
457
mana_table_store_ud_qp(struct mana_ib_dev * mdev,struct mana_ib_qp * qp)458 static int mana_table_store_ud_qp(struct mana_ib_dev *mdev, struct mana_ib_qp *qp)
459 {
460 u32 qids = qp->ud_qp.queues[MANA_UD_SEND_QUEUE].id | MANA_SENDQ_MASK;
461 u32 qidr = qp->ud_qp.queues[MANA_UD_RECV_QUEUE].id;
462 int err;
463
464 err = xa_insert_irq(&mdev->qp_table_wq, qids, qp, GFP_KERNEL);
465 if (err)
466 return err;
467
468 err = xa_insert_irq(&mdev->qp_table_wq, qidr, qp, GFP_KERNEL);
469 if (err)
470 goto remove_sq;
471
472 return 0;
473
474 remove_sq:
475 xa_erase_irq(&mdev->qp_table_wq, qids);
476 return err;
477 }
478
mana_table_remove_ud_qp(struct mana_ib_dev * mdev,struct mana_ib_qp * qp)479 static void mana_table_remove_ud_qp(struct mana_ib_dev *mdev, struct mana_ib_qp *qp)
480 {
481 u32 qids = qp->ud_qp.queues[MANA_UD_SEND_QUEUE].id | MANA_SENDQ_MASK;
482 u32 qidr = qp->ud_qp.queues[MANA_UD_RECV_QUEUE].id;
483
484 xa_erase_irq(&mdev->qp_table_wq, qids);
485 xa_erase_irq(&mdev->qp_table_wq, qidr);
486 }
487
mana_table_store_qp(struct mana_ib_dev * mdev,struct mana_ib_qp * qp)488 static int mana_table_store_qp(struct mana_ib_dev *mdev, struct mana_ib_qp *qp)
489 {
490 refcount_set(&qp->refcount, 1);
491 init_completion(&qp->free);
492
493 switch (qp->ibqp.qp_type) {
494 case IB_QPT_RC:
495 return mana_table_store_rc_qp(mdev, qp);
496 case IB_QPT_UD:
497 case IB_QPT_GSI:
498 return mana_table_store_ud_qp(mdev, qp);
499 default:
500 ibdev_dbg(&mdev->ib_dev, "Unknown QP type for storing in mana table, %d\n",
501 qp->ibqp.qp_type);
502 }
503
504 return -EINVAL;
505 }
506
mana_table_remove_qp(struct mana_ib_dev * mdev,struct mana_ib_qp * qp)507 static void mana_table_remove_qp(struct mana_ib_dev *mdev,
508 struct mana_ib_qp *qp)
509 {
510 switch (qp->ibqp.qp_type) {
511 case IB_QPT_RC:
512 mana_table_remove_rc_qp(mdev, qp);
513 break;
514 case IB_QPT_UD:
515 case IB_QPT_GSI:
516 mana_table_remove_ud_qp(mdev, qp);
517 break;
518 default:
519 ibdev_dbg(&mdev->ib_dev, "Unknown QP type for removing from mana table, %d\n",
520 qp->ibqp.qp_type);
521 return;
522 }
523 mana_put_qp_ref(qp);
524 wait_for_completion(&qp->free);
525 }
526
/* Create a user-level RC QP: create the per-type GDMA queues from the
 * user-supplied buffers (skipping the FMR queue, which user-level RC QPs
 * do not use), issue the hardware create, report queue ids to userspace,
 * and register the QP in the device table.
 *
 * Index bookkeeping: i walks the hardware queue types, j walks the ucmd
 * buffers / response slots, which do not include the FMR entry.
 *
 * Return: 0 on success or a negative errno.
 */
static int mana_ib_create_rc_qp(struct ib_qp *ibqp, struct ib_pd *ibpd,
				struct ib_qp_init_attr *attr, struct ib_udata *udata)
{
	struct mana_ib_dev *mdev = container_of(ibpd->device, struct mana_ib_dev, ib_dev);
	struct mana_ib_qp *qp = container_of(ibqp, struct mana_ib_qp, ibqp);
	struct mana_ib_create_rc_qp_resp resp = {};
	struct mana_ib_ucontext *mana_ucontext;
	struct mana_ib_create_rc_qp ucmd = {};
	int i, err, j;
	u64 flags = 0;
	u32 doorbell;

	if (!udata || udata->inlen < sizeof(ucmd))
		return -EINVAL;

	mana_ucontext = rdma_udata_to_drv_context(udata, struct mana_ib_ucontext, ibucontext);
	doorbell = mana_ucontext->doorbell;
	/* user-level RC QPs never use FMR */
	flags = MANA_RC_FLAG_NO_FMR;
	err = ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen));
	if (err) {
		ibdev_dbg(&mdev->ib_dev, "Failed to copy from udata, %d\n", err);
		return err;
	}

	for (i = 0, j = 0; i < MANA_RC_QUEUE_TYPE_MAX; ++i) {
		/* skip FMR for user-level RC QPs */
		if (i == MANA_RC_SEND_QUEUE_FMR) {
			/* Mark the slot invalid so teardown is a no-op for it */
			qp->rc_qp.queues[i].id = INVALID_QUEUE_ID;
			qp->rc_qp.queues[i].gdma_region = GDMA_INVALID_DMA_REGION;
			continue;
		}
		err = mana_ib_create_queue(mdev, ucmd.queue_buf[j], ucmd.queue_size[j],
					   &qp->rc_qp.queues[i]);
		if (err) {
			ibdev_err(&mdev->ib_dev, "Failed to create queue %d, err %d\n", i, err);
			goto destroy_queues;
		}
		j++;
	}

	err = mana_ib_gd_create_rc_qp(mdev, qp, attr, doorbell, flags);
	if (err) {
		ibdev_err(&mdev->ib_dev, "Failed to create rc qp %d\n", err);
		goto destroy_queues;
	}
	/* QP number is the responder receive-queue id */
	qp->ibqp.qp_num = qp->rc_qp.queues[MANA_RC_RECV_QUEUE_RESPONDER].id;
	qp->port = attr->port_num;

	/* NOTE(review): udata was already checked non-NULL above, so this
	 * condition is always true here — presumably kept for symmetry.
	 */
	if (udata) {
		for (i = 0, j = 0; i < MANA_RC_QUEUE_TYPE_MAX; ++i) {
			if (i == MANA_RC_SEND_QUEUE_FMR)
				continue;
			resp.queue_id[j] = qp->rc_qp.queues[i].id;
			j++;
		}
		err = ib_copy_to_udata(udata, &resp, min(sizeof(resp), udata->outlen));
		if (err) {
			ibdev_dbg(&mdev->ib_dev, "Failed to copy to udata, %d\n", err);
			goto destroy_qp;
		}
	}

	err = mana_table_store_qp(mdev, qp);
	if (err)
		goto destroy_qp;

	return 0;

destroy_qp:
	mana_ib_gd_destroy_rc_qp(mdev, qp);
destroy_queues:
	/* Destroy queues [0, i); the FMR slot was marked invalid above */
	while (i-- > 0)
		mana_ib_destroy_queue(mdev, &qp->rc_qp.queues[i]);
	return err;
}
602
mana_add_qp_to_cqs(struct mana_ib_qp * qp)603 static void mana_add_qp_to_cqs(struct mana_ib_qp *qp)
604 {
605 struct mana_ib_cq *send_cq = container_of(qp->ibqp.send_cq, struct mana_ib_cq, ibcq);
606 struct mana_ib_cq *recv_cq = container_of(qp->ibqp.recv_cq, struct mana_ib_cq, ibcq);
607 unsigned long flags;
608
609 spin_lock_irqsave(&send_cq->cq_lock, flags);
610 list_add_tail(&qp->cq_send_list, &send_cq->list_send_qp);
611 spin_unlock_irqrestore(&send_cq->cq_lock, flags);
612
613 spin_lock_irqsave(&recv_cq->cq_lock, flags);
614 list_add_tail(&qp->cq_recv_list, &recv_cq->list_recv_qp);
615 spin_unlock_irqrestore(&recv_cq->cq_lock, flags);
616 }
617
mana_remove_qp_from_cqs(struct mana_ib_qp * qp)618 static void mana_remove_qp_from_cqs(struct mana_ib_qp *qp)
619 {
620 struct mana_ib_cq *send_cq = container_of(qp->ibqp.send_cq, struct mana_ib_cq, ibcq);
621 struct mana_ib_cq *recv_cq = container_of(qp->ibqp.recv_cq, struct mana_ib_cq, ibcq);
622 unsigned long flags;
623
624 spin_lock_irqsave(&send_cq->cq_lock, flags);
625 list_del(&qp->cq_send_list);
626 spin_unlock_irqrestore(&send_cq->cq_lock, flags);
627
628 spin_lock_irqsave(&recv_cq->cq_lock, flags);
629 list_del(&qp->cq_recv_list);
630 spin_unlock_irqrestore(&recv_cq->cq_lock, flags);
631 }
632
/* Create a kernel-level UD/GSI QP: allocate kernel GDMA queues and shadow
 * queues, issue the hardware create, register the QP in the device table
 * and attach it to its CQs. User-level UD QPs are rejected.
 *
 * Resources are unwound in reverse order via goto labels on failure.
 *
 * Return: 0 on success or a negative errno.
 */
static int mana_ib_create_ud_qp(struct ib_qp *ibqp, struct ib_pd *ibpd,
				struct ib_qp_init_attr *attr, struct ib_udata *udata)
{
	struct mana_ib_dev *mdev = container_of(ibpd->device, struct mana_ib_dev, ib_dev);
	struct mana_ib_qp *qp = container_of(ibqp, struct mana_ib_qp, ibqp);
	struct gdma_context *gc = mdev_to_gc(mdev);
	u32 doorbell, queue_size;
	int i, err;

	/* udata non-NULL means a userspace caller, which is not supported */
	if (udata) {
		ibdev_dbg(&mdev->ib_dev, "User-level UD QPs are not supported\n");
		return -EOPNOTSUPP;
	}

	for (i = 0; i < MANA_UD_QUEUE_TYPE_MAX; ++i) {
		queue_size = mana_ib_queue_size(attr, i);
		err = mana_ib_create_kernel_queue(mdev, queue_size, mana_ib_queue_type(attr, i),
						  &qp->ud_qp.queues[i]);
		if (err) {
			ibdev_err(&mdev->ib_dev, "Failed to create queue %d, err %d\n",
				  i, err);
			goto destroy_queues;
		}
	}
	/* Kernel QPs ring the adapter's own doorbell page */
	doorbell = gc->mana_ib.doorbell;

	err = create_shadow_queue(&qp->shadow_rq, attr->cap.max_recv_wr,
				  sizeof(struct ud_rq_shadow_wqe));
	if (err) {
		ibdev_err(&mdev->ib_dev, "Failed to create shadow rq err %d\n", err);
		goto destroy_queues;
	}
	err = create_shadow_queue(&qp->shadow_sq, attr->cap.max_send_wr,
				  sizeof(struct ud_sq_shadow_wqe));
	if (err) {
		ibdev_err(&mdev->ib_dev, "Failed to create shadow sq err %d\n", err);
		/* NOTE(review): this path destroys shadow_sq too even though
		 * its creation just failed — presumably destroy_shadow_queue
		 * is safe on a failed/zeroed queue; confirm.
		 */
		goto destroy_shadow_queues;
	}

	err = mana_ib_gd_create_ud_qp(mdev, qp, attr, doorbell, attr->qp_type);
	if (err) {
		ibdev_err(&mdev->ib_dev, "Failed to create ud qp %d\n", err);
		goto destroy_shadow_queues;
	}
	/* QP number is the receive-queue id */
	qp->ibqp.qp_num = qp->ud_qp.queues[MANA_UD_RECV_QUEUE].id;
	qp->port = attr->port_num;

	/* Propagate the hardware-assigned ids to the kernel queue memory */
	for (i = 0; i < MANA_UD_QUEUE_TYPE_MAX; ++i)
		qp->ud_qp.queues[i].kmem->id = qp->ud_qp.queues[i].id;

	err = mana_table_store_qp(mdev, qp);
	if (err)
		goto destroy_qp;

	mana_add_qp_to_cqs(qp);

	return 0;

destroy_qp:
	mana_ib_gd_destroy_ud_qp(mdev, qp);
destroy_shadow_queues:
	destroy_shadow_queue(&qp->shadow_rq);
	destroy_shadow_queue(&qp->shadow_sq);
destroy_queues:
	/* Destroy the kernel queues created so far, indices [0, i) */
	while (i-- > 0)
		mana_ib_destroy_queue(mdev, &qp->ud_qp.queues[i]);
	return err;
}
701
mana_ib_create_qp(struct ib_qp * ibqp,struct ib_qp_init_attr * attr,struct ib_udata * udata)702 int mana_ib_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *attr,
703 struct ib_udata *udata)
704 {
705 switch (attr->qp_type) {
706 case IB_QPT_RAW_PACKET:
707 /* When rwq_ind_tbl is used, it's for creating WQs for RSS */
708 if (attr->rwq_ind_tbl)
709 return mana_ib_create_qp_rss(ibqp, ibqp->pd, attr,
710 udata);
711
712 return mana_ib_create_qp_raw(ibqp, ibqp->pd, attr, udata);
713 case IB_QPT_RC:
714 return mana_ib_create_rc_qp(ibqp, ibqp->pd, attr, udata);
715 case IB_QPT_UD:
716 case IB_QPT_GSI:
717 return mana_ib_create_ud_qp(ibqp, ibqp->pd, attr, udata);
718 default:
719 ibdev_dbg(ibqp->device, "Creating QP type %u not supported\n",
720 attr->qp_type);
721 }
722
723 return -EINVAL;
724 }
725
/* Issue a MANA_IB_SET_QP_STATE request to move the QP through its state
 * machine. QP attributes are marshaled into the request; when IB_QP_AV is
 * set, the address vector (MACs, GIDs, UDP ports, traffic class, hop limit)
 * is filled in from the netdev and the AH attributes.
 *
 * Return: 0 on success or a negative errno.
 */
static int mana_ib_gd_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
				int attr_mask, struct ib_udata *udata)
{
	struct mana_ib_dev *mdev = container_of(ibqp->device, struct mana_ib_dev, ib_dev);
	struct mana_ib_qp *qp = container_of(ibqp, struct mana_ib_qp, ibqp);
	struct mana_rnic_set_qp_state_resp resp = {};
	struct mana_rnic_set_qp_state_req req = {};
	struct gdma_context *gc = mdev_to_gc(mdev);
	struct mana_port_context *mpc;
	struct net_device *ndev;
	int err;

	mana_gd_init_req_hdr(&req.hdr, MANA_IB_SET_QP_STATE, sizeof(req), sizeof(resp));
	req.hdr.dev_id = gc->mana_ib.dev_id;
	req.adapter = mdev->adapter_handle;
	req.qp_handle = qp->qp_handle;
	req.qp_state = attr->qp_state;
	req.attr_mask = attr_mask;
	req.path_mtu = attr->path_mtu;
	req.rq_psn = attr->rq_psn;
	req.sq_psn = attr->sq_psn;
	req.dest_qpn = attr->dest_qp_num;
	req.max_dest_rd_atomic = attr->max_dest_rd_atomic;
	req.retry_cnt = attr->retry_cnt;
	req.rnr_retry = attr->rnr_retry;
	req.min_rnr_timer = attr->min_rnr_timer;
	if (attr_mask & IB_QP_AV) {
		ndev = mana_ib_get_netdev(&mdev->ib_dev, ibqp->port);
		if (!ndev) {
			ibdev_dbg(&mdev->ib_dev, "Invalid port %u in QP %u\n",
				  ibqp->port, ibqp->qp_num);
			return -EINVAL;
		}
		mpc = netdev_priv(ndev);
		/* Addresses are copied byte-reversed for the hardware format */
		copy_in_reverse(req.ah_attr.src_mac, mpc->mac_addr, ETH_ALEN);
		copy_in_reverse(req.ah_attr.dest_mac, attr->ah_attr.roce.dmac, ETH_ALEN);
		copy_in_reverse(req.ah_attr.src_addr, attr->ah_attr.grh.sgid_attr->gid.raw,
				sizeof(union ib_gid));
		copy_in_reverse(req.ah_attr.dest_addr, attr->ah_attr.grh.dgid.raw,
				sizeof(union ib_gid));
		if (rdma_gid_attr_network_type(attr->ah_attr.grh.sgid_attr) == RDMA_NETWORK_IPV4) {
			req.ah_attr.src_addr_type = SGID_TYPE_IPV4;
			req.ah_attr.dest_addr_type = SGID_TYPE_IPV4;
		} else {
			req.ah_attr.src_addr_type = SGID_TYPE_IPV6;
			req.ah_attr.dest_addr_type = SGID_TYPE_IPV6;
		}
		req.ah_attr.dest_port = ROCE_V2_UDP_DPORT;
		/* Source port derived from flow label / QPNs per RoCEv2 */
		req.ah_attr.src_port = rdma_get_udp_sport(attr->ah_attr.grh.flow_label,
							  ibqp->qp_num, attr->dest_qp_num);
		req.ah_attr.traffic_class = attr->ah_attr.grh.traffic_class;
		req.ah_attr.hop_limit = attr->ah_attr.grh.hop_limit;
	}

	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
	if (err) {
		ibdev_err(&mdev->ib_dev, "Failed modify qp err %d", err);
		return err;
	}

	return 0;
}
788
mana_ib_modify_qp(struct ib_qp * ibqp,struct ib_qp_attr * attr,int attr_mask,struct ib_udata * udata)789 int mana_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
790 int attr_mask, struct ib_udata *udata)
791 {
792 switch (ibqp->qp_type) {
793 case IB_QPT_RC:
794 case IB_QPT_UD:
795 case IB_QPT_GSI:
796 return mana_ib_gd_modify_qp(ibqp, attr, attr_mask, udata);
797 default:
798 ibdev_dbg(ibqp->device, "Modify QP type %u not supported", ibqp->qp_type);
799 return -EOPNOTSUPP;
800 }
801 }
802
mana_ib_destroy_qp_rss(struct mana_ib_qp * qp,struct ib_rwq_ind_table * ind_tbl,struct ib_udata * udata)803 static int mana_ib_destroy_qp_rss(struct mana_ib_qp *qp,
804 struct ib_rwq_ind_table *ind_tbl,
805 struct ib_udata *udata)
806 {
807 struct mana_ib_dev *mdev =
808 container_of(qp->ibqp.device, struct mana_ib_dev, ib_dev);
809 struct mana_port_context *mpc;
810 struct net_device *ndev;
811 struct mana_ib_wq *wq;
812 struct ib_wq *ibwq;
813 int i;
814
815 ndev = mana_ib_get_netdev(qp->ibqp.device, qp->port);
816 mpc = netdev_priv(ndev);
817
818 for (i = 0; i < (1 << ind_tbl->log_ind_tbl_size); i++) {
819 ibwq = ind_tbl->ind_tbl[i];
820 wq = container_of(ibwq, struct mana_ib_wq, ibwq);
821 ibdev_dbg(&mdev->ib_dev, "destroying wq->rx_object %llu\n",
822 wq->rx_object);
823 mana_destroy_wq_obj(mpc, GDMA_RQ, wq->rx_object);
824 }
825
826 return 0;
827 }
828
mana_ib_destroy_qp_raw(struct mana_ib_qp * qp,struct ib_udata * udata)829 static int mana_ib_destroy_qp_raw(struct mana_ib_qp *qp, struct ib_udata *udata)
830 {
831 struct mana_ib_dev *mdev =
832 container_of(qp->ibqp.device, struct mana_ib_dev, ib_dev);
833 struct ib_pd *ibpd = qp->ibqp.pd;
834 struct mana_port_context *mpc;
835 struct net_device *ndev;
836 struct mana_ib_pd *pd;
837
838 ndev = mana_ib_get_netdev(qp->ibqp.device, qp->port);
839 mpc = netdev_priv(ndev);
840 pd = container_of(ibpd, struct mana_ib_pd, ibpd);
841
842 mana_destroy_wq_obj(mpc, GDMA_SQ, qp->qp_handle);
843
844 mana_ib_destroy_queue(mdev, &qp->raw_sq);
845
846 mana_ib_uncfg_vport(mdev, pd, qp->port);
847
848 return 0;
849 }
850
mana_ib_destroy_rc_qp(struct mana_ib_qp * qp,struct ib_udata * udata)851 static int mana_ib_destroy_rc_qp(struct mana_ib_qp *qp, struct ib_udata *udata)
852 {
853 struct mana_ib_dev *mdev =
854 container_of(qp->ibqp.device, struct mana_ib_dev, ib_dev);
855 int i;
856
857 mana_table_remove_qp(mdev, qp);
858
859 /* Ignore return code as there is not much we can do about it.
860 * The error message is printed inside.
861 */
862 mana_ib_gd_destroy_rc_qp(mdev, qp);
863 for (i = 0; i < MANA_RC_QUEUE_TYPE_MAX; ++i)
864 mana_ib_destroy_queue(mdev, &qp->rc_qp.queues[i]);
865
866 return 0;
867 }
868
mana_ib_destroy_ud_qp(struct mana_ib_qp * qp,struct ib_udata * udata)869 static int mana_ib_destroy_ud_qp(struct mana_ib_qp *qp, struct ib_udata *udata)
870 {
871 struct mana_ib_dev *mdev =
872 container_of(qp->ibqp.device, struct mana_ib_dev, ib_dev);
873 int i;
874
875 mana_remove_qp_from_cqs(qp);
876 mana_table_remove_qp(mdev, qp);
877
878 destroy_shadow_queue(&qp->shadow_rq);
879 destroy_shadow_queue(&qp->shadow_sq);
880
881 /* Ignore return code as there is not much we can do about it.
882 * The error message is printed inside.
883 */
884 mana_ib_gd_destroy_ud_qp(mdev, qp);
885 for (i = 0; i < MANA_UD_QUEUE_TYPE_MAX; ++i)
886 mana_ib_destroy_queue(mdev, &qp->ud_qp.queues[i]);
887
888 return 0;
889 }
890
mana_ib_destroy_qp(struct ib_qp * ibqp,struct ib_udata * udata)891 int mana_ib_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
892 {
893 struct mana_ib_qp *qp = container_of(ibqp, struct mana_ib_qp, ibqp);
894
895 switch (ibqp->qp_type) {
896 case IB_QPT_RAW_PACKET:
897 if (ibqp->rwq_ind_tbl)
898 return mana_ib_destroy_qp_rss(qp, ibqp->rwq_ind_tbl,
899 udata);
900
901 return mana_ib_destroy_qp_raw(qp, udata);
902 case IB_QPT_RC:
903 return mana_ib_destroy_rc_qp(qp, udata);
904 case IB_QPT_UD:
905 case IB_QPT_GSI:
906 return mana_ib_destroy_ud_qp(qp, udata);
907 default:
908 ibdev_dbg(ibqp->device, "Unexpected QP type %u\n",
909 ibqp->qp_type);
910 }
911
912 return -ENOENT;
913 }
914