// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2024, Microsoft Corporation. All rights reserved.
 */

#include "mana_ib.h"

#define MAX_WR_SGL_NUM (2)

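/*
 * Post one receive WR on a UD/GSI QP: build the GDMA SGL, post the WQE to
 * the hardware receive queue, and record a shadow WQE so the completion
 * path can report it with the caller's wr_id.
 */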
static int mana_ib_post_recv_ud(struct mana_ib_qp *qp, const struct ib_recv_wr *wr)
{
	struct mana_ib_dev *mdev = container_of(qp->ibqp.device, struct mana_ib_dev, ib_dev);
	struct gdma_queue *queue = qp->ud_qp.queues[MANA_UD_RECV_QUEUE].kmem;
	struct gdma_posted_wqe_info wqe_info = {0};
	struct gdma_sge gdma_sgl[MAX_WR_SGL_NUM];
	struct gdma_wqe_request wqe_req = {0};
	struct ud_rq_shadow_wqe *shadow_wqe;
	int err, i;

	if (shadow_queue_full(&qp->shadow_rq))
		return -EINVAL;

	if (wr->num_sge > MAX_WR_SGL_NUM)
		return -EINVAL;

	/* Translate the caller's SGEs into GDMA SGEs. */
	for (i = 0; i < wr->num_sge; ++i) {
		gdma_sgl[i].address = wr->sg_list[i].addr;
		gdma_sgl[i].mem_key = wr->sg_list[i].lkey;
		gdma_sgl[i].size = wr->sg_list[i].length;
	}
	wqe_req.num_sge = wr->num_sge;
	wqe_req.sgl = gdma_sgl;

	err = mana_gd_post_work_request(queue, &wqe_req, &wqe_info);
	if (err)
		return err;

	/* Track the posted WQE so the CQ handler can complete it later. */
	shadow_wqe = shadow_queue_producer_entry(&qp->shadow_rq);
	memset(shadow_wqe, 0, sizeof(*shadow_wqe));
	shadow_wqe->header.opcode = IB_WC_RECV;
	shadow_wqe->header.wr_id = wr->wr_id;
	shadow_wqe->header.posted_wqe_size = wqe_info.wqe_size_in_bu;
	shadow_queue_advance_producer(&qp->shadow_rq);

	mana_gd_wq_ring_doorbell(mdev_to_gc(mdev), queue);
	return 0;
}

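/* Post a chain of receive WRs; only UD and GSI QP types are supported. */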
int mana_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
		      const struct ib_recv_wr **bad_wr)
{
	struct mana_ib_qp *qp = container_of(ibqp, struct mana_ib_qp, ibqp);
	int err = 0;

	for (; wr; wr = wr->next) {
		switch (ibqp->qp_type) {
		case IB_QPT_UD:
		case IB_QPT_GSI:
			err = mana_ib_post_recv_ud(qp, wr);
			if (unlikely(err)) {
				*bad_wr = wr;
				return err;
			}
			break;
		default:
			ibdev_dbg(ibqp->device, "Posting recv wr on qp type %u is not supported\n",
				  ibqp->qp_type);
			*bad_wr = wr;
			return -EINVAL;
		}
	}

	return err;
}

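/*
 * Post one send WR on a UD/GSI QP. SGE 0 always references the address
 * handle (AV), which is why the SGL reserves one slot beyond MAX_WR_SGL_NUM;
 * the caller's SGEs follow it.
 */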
static int mana_ib_post_send_ud(struct mana_ib_qp *qp, const struct ib_ud_wr *wr)
{
	struct mana_ib_dev *mdev = container_of(qp->ibqp.device, struct mana_ib_dev, ib_dev);
	struct mana_ib_ah *ah = container_of(wr->ah, struct mana_ib_ah, ibah);
	struct net_device *ndev = mana_ib_get_netdev(&mdev->ib_dev, qp->port);
	struct gdma_queue *queue = qp->ud_qp.queues[MANA_UD_SEND_QUEUE].kmem;
	struct gdma_sge gdma_sgl[MAX_WR_SGL_NUM + 1];
	struct gdma_posted_wqe_info wqe_info = {0};
	struct gdma_wqe_request wqe_req = {0};
	struct rdma_send_oob send_oob = {0};
	struct ud_sq_shadow_wqe *shadow_wqe;
	int err, i;

	if (!ndev) {
		ibdev_dbg(&mdev->ib_dev, "Invalid port %u in QP %u\n",
			  qp->port, qp->ibqp.qp_num);
		return -EINVAL;
	}

	if (wr->wr.opcode != IB_WR_SEND)
		return -EINVAL;

	if (shadow_queue_full(&qp->shadow_sq))
		return -EINVAL;

	if (wr->wr.num_sge > MAX_WR_SGL_NUM)
		return -EINVAL;

	/* SGE 0 carries the address handle (AV); the caller's SGEs follow it. */
	gdma_sgl[0].address = ah->dma_handle;
	gdma_sgl[0].mem_key = qp->ibqp.pd->local_dma_lkey;
	gdma_sgl[0].size = sizeof(struct mana_ib_av);
	for (i = 0; i < wr->wr.num_sge; ++i) {
		gdma_sgl[i + 1].address = wr->wr.sg_list[i].addr;
		gdma_sgl[i + 1].mem_key = wr->wr.sg_list[i].lkey;
		gdma_sgl[i + 1].size = wr->wr.sg_list[i].length;
	}

	wqe_req.num_sge = wr->wr.num_sge + 1;
	wqe_req.sgl = gdma_sgl;
	wqe_req.inline_oob_size = sizeof(struct rdma_send_oob);
	wqe_req.inline_oob_data = &send_oob;
	wqe_req.flags = GDMA_WR_OOB_IN_SGL;
	/* Normalize the netdev MTU to a valid IB MTU value. */
	wqe_req.client_data_unit = ib_mtu_enum_to_int(ib_mtu_int_to_enum(ndev->mtu));

	send_oob.wqe_type = WQE_TYPE_UD_SEND;
	send_oob.fence = !!(wr->wr.send_flags & IB_SEND_FENCE);
	send_oob.signaled = !!(wr->wr.send_flags & IB_SEND_SIGNALED);
	send_oob.solicited = !!(wr->wr.send_flags & IB_SEND_SOLICITED);
	send_oob.psn = qp->ud_qp.sq_psn;
	send_oob.ssn_or_rqpn = wr->remote_qpn;
	send_oob.ud_send.remote_qkey =
		qp->ibqp.qp_type == IB_QPT_GSI ? IB_QP1_QKEY : wr->remote_qkey;

	err = mana_gd_post_work_request(queue, &wqe_req, &wqe_info);
	if (err)
		return err;

	qp->ud_qp.sq_psn++;
	/* Track the posted WQE so the CQ handler can complete it later. */
	shadow_wqe = shadow_queue_producer_entry(&qp->shadow_sq);
	memset(shadow_wqe, 0, sizeof(*shadow_wqe));
	shadow_wqe->header.opcode = IB_WC_SEND;
	shadow_wqe->header.wr_id = wr->wr.wr_id;
	shadow_wqe->header.posted_wqe_size = wqe_info.wqe_size_in_bu;
	shadow_queue_advance_producer(&qp->shadow_sq);

	mana_gd_wq_ring_doorbell(mdev_to_gc(mdev), queue);
	return 0;
}

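/* Post a chain of send WRs; only UD and GSI QP types are supported. */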
int mana_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
		      const struct ib_send_wr **bad_wr)
{
	struct mana_ib_qp *qp = container_of(ibqp, struct mana_ib_qp, ibqp);
	int err = 0;

	for (; wr; wr = wr->next) {
		switch (ibqp->qp_type) {
		case IB_QPT_UD:
		case IB_QPT_GSI:
			err = mana_ib_post_send_ud(qp, ud_wr(wr));
			if (unlikely(err)) {
				*bad_wr = wr;
				return err;
			}
			break;
		default:
			ibdev_dbg(ibqp->device, "Posting send wr on qp type %u is not supported\n",
				  ibqp->qp_type);
			*bad_wr = wr;
			return -EINVAL;
		}
	}

	return err;
}