// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2022, Microsoft Corporation. All rights reserved.
 */

#include "mana_ib.h"

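/*
 * Access flags accepted from the ibverbs layer. Anything outside these
 * masks is rejected before a request is sent to the hardware.
 */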
#define VALID_MR_FLAGS (IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE | IB_ACCESS_REMOTE_READ |\
			IB_ACCESS_REMOTE_ATOMIC | IB_ZERO_BASED)

#define VALID_DMA_MR_FLAGS (IB_ACCESS_LOCAL_WRITE)

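/*
 * Translate ibverbs MR access flags to their GDMA hardware equivalents.
 * Local read access is always implied.
 */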
static enum gdma_mr_access_flags
mana_ib_verbs_to_gdma_access_flags(int access_flags)
{
	enum gdma_mr_access_flags flags = GDMA_ACCESS_FLAG_LOCAL_READ;

	if (access_flags & IB_ACCESS_LOCAL_WRITE)
		flags |= GDMA_ACCESS_FLAG_LOCAL_WRITE;

	if (access_flags & IB_ACCESS_REMOTE_WRITE)
		flags |= GDMA_ACCESS_FLAG_REMOTE_WRITE;

	if (access_flags & IB_ACCESS_REMOTE_READ)
		flags |= GDMA_ACCESS_FLAG_REMOTE_READ;

	if (access_flags & IB_ACCESS_REMOTE_ATOMIC)
		flags |= GDMA_ACCESS_FLAG_REMOTE_ATOMIC;

	return flags;
}

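/*
 * Issue a GDMA_CREATE_MR request for the given MR type and, on success,
 * store the returned lkey/rkey and MR handle in @mr.
 */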
static int mana_ib_gd_create_mr(struct mana_ib_dev *dev, struct mana_ib_mr *mr,
				struct gdma_create_mr_params *mr_params)
{
	struct gdma_create_mr_response resp = {};
	struct gdma_create_mr_request req = {};
	struct gdma_context *gc = mdev_to_gc(dev);
	int err;

	mana_gd_init_req_hdr(&req.hdr, GDMA_CREATE_MR, sizeof(req),
			     sizeof(resp));
	req.pd_handle = mr_params->pd_handle;
	req.mr_type = mr_params->mr_type;

	switch (mr_params->mr_type) {
	case GDMA_MR_TYPE_GPA:
		break;
	case GDMA_MR_TYPE_GVA:
		req.gva.dma_region_handle = mr_params->gva.dma_region_handle;
		req.gva.virtual_address = mr_params->gva.virtual_address;
		req.gva.access_flags = mr_params->gva.access_flags;
		break;
	case GDMA_MR_TYPE_ZBVA:
		req.zbva.dma_region_handle = mr_params->zbva.dma_region_handle;
		req.zbva.access_flags = mr_params->zbva.access_flags;
		break;
	default:
		ibdev_dbg(&dev->ib_dev,
			  "invalid param (GDMA_MR_TYPE) passed, type %d\n",
			  req.mr_type);
		return -EINVAL;
	}

	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);

	if (err || resp.hdr.status) {
		ibdev_dbg(&dev->ib_dev, "Failed to create mr %d, %u", err,
			  resp.hdr.status);
		if (!err)
			err = -EPROTO;

		return err;
	}

	mr->ibmr.lkey = resp.lkey;
	mr->ibmr.rkey = resp.rkey;
	mr->mr_handle = resp.mr_handle;

	return 0;
}

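/*
 * Issue a GDMA_DESTROY_MR request for a previously created MR handle.
 */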
static int mana_ib_gd_destroy_mr(struct mana_ib_dev *dev, u64 mr_handle)
{
	struct gdma_destroy_mr_response resp = {};
	struct gdma_destroy_mr_request req = {};
	struct gdma_context *gc = mdev_to_gc(dev);
	int err;

	mana_gd_init_req_hdr(&req.hdr, GDMA_DESTROY_MR, sizeof(req),
			     sizeof(resp));

	req.mr_handle = mr_handle;

	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
	if (err || resp.hdr.status) {
		dev_err(gc->dev, "Failed to destroy MR: %d, 0x%x\n", err,
			resp.hdr.status);
		if (!err)
			err = -EPROTO;
		return err;
	}

	return 0;
}

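/*
 * Register a user memory region: pin the user pages, create a GDMA DMA
 * region describing them, then create a hardware MR on top of that
 * region. IB_ZERO_BASED registrations map to the ZBVA MR type, all
 * others to GVA.
 */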
struct ib_mr *mana_ib_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 length,
				  u64 iova, int access_flags,
				  struct ib_dmah *dmah,
				  struct ib_udata *udata)
{
	struct mana_ib_pd *pd = container_of(ibpd, struct mana_ib_pd, ibpd);
	struct gdma_create_mr_params mr_params = {};
	struct ib_device *ibdev = ibpd->device;
	struct mana_ib_dev *dev;
	struct mana_ib_mr *mr;
	u64 dma_region_handle;
	int err;

	if (dmah)
		return ERR_PTR(-EOPNOTSUPP);

	dev = container_of(ibdev, struct mana_ib_dev, ib_dev);

	ibdev_dbg(ibdev,
		  "start 0x%llx, iova 0x%llx length 0x%llx access_flags 0x%x",
		  start, iova, length, access_flags);

	access_flags &= ~IB_ACCESS_OPTIONAL;
	if (access_flags & ~VALID_MR_FLAGS)
		return ERR_PTR(-EINVAL);

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	mr->umem = ib_umem_get(ibdev, start, length, access_flags);
	if (IS_ERR(mr->umem)) {
		err = PTR_ERR(mr->umem);
		ibdev_dbg(ibdev,
			  "Failed to get umem for register user-mr, %d\n", err);
		goto err_free;
	}

	err = mana_ib_create_dma_region(dev, mr->umem, &dma_region_handle, iova);
	if (err) {
		ibdev_dbg(ibdev, "Failed create dma region for user-mr, %d\n",
			  err);
		goto err_umem;
	}

	ibdev_dbg(ibdev,
		  "created dma region for user-mr 0x%llx\n",
		  dma_region_handle);

	mr_params.pd_handle = pd->pd_handle;
	if (access_flags & IB_ZERO_BASED) {
		mr_params.mr_type = GDMA_MR_TYPE_ZBVA;
		mr_params.zbva.dma_region_handle = dma_region_handle;
		mr_params.zbva.access_flags =
			mana_ib_verbs_to_gdma_access_flags(access_flags);
	} else {
		mr_params.mr_type = GDMA_MR_TYPE_GVA;
		mr_params.gva.dma_region_handle = dma_region_handle;
		mr_params.gva.virtual_address = iova;
		mr_params.gva.access_flags =
			mana_ib_verbs_to_gdma_access_flags(access_flags);
	}

	err = mana_ib_gd_create_mr(dev, mr, &mr_params);
	if (err)
		goto err_dma_region;

	/*
	 * There is no need to keep track of dma_region_handle after MR is
	 * successfully created. The dma_region_handle is tracked in the PF
	 * as part of the lifecycle of this MR.
	 */

	return &mr->ibmr;

err_dma_region:
	mana_gd_destroy_dma_region(mdev_to_gc(dev), dma_region_handle);

err_umem:
	ib_umem_release(mr->umem);

err_free:
	kfree(mr);
	return ERR_PTR(err);
}

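/*
 * Register a dma-buf backed memory region. The dma-buf pages are pinned
 * via ib_umem_dmabuf_get_pinned() and then registered the same way as a
 * regular user MR, always as a GVA-type MR.
 */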
struct ib_mr *mana_ib_reg_user_mr_dmabuf(struct ib_pd *ibpd, u64 start, u64 length,
					 u64 iova, int fd, int access_flags,
					 struct ib_dmah *dmah,
					 struct uverbs_attr_bundle *attrs)
{
	struct mana_ib_pd *pd = container_of(ibpd, struct mana_ib_pd, ibpd);
	struct gdma_create_mr_params mr_params = {};
	struct ib_device *ibdev = ibpd->device;
	struct ib_umem_dmabuf *umem_dmabuf;
	struct mana_ib_dev *dev;
	struct mana_ib_mr *mr;
	u64 dma_region_handle;
	int err;

	if (dmah)
		return ERR_PTR(-EOPNOTSUPP);

	dev = container_of(ibdev, struct mana_ib_dev, ib_dev);

	access_flags &= ~IB_ACCESS_OPTIONAL;
	if (access_flags & ~VALID_MR_FLAGS)
		return ERR_PTR(-EOPNOTSUPP);

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	umem_dmabuf = ib_umem_dmabuf_get_pinned(ibdev, start, length, fd, access_flags);
	if (IS_ERR(umem_dmabuf)) {
		err = PTR_ERR(umem_dmabuf);
		ibdev_dbg(ibdev, "Failed to get dmabuf umem, %d\n", err);
		goto err_free;
	}

	mr->umem = &umem_dmabuf->umem;

	err = mana_ib_create_dma_region(dev, mr->umem, &dma_region_handle, iova);
	if (err) {
		ibdev_dbg(ibdev, "Failed create dma region for user-mr, %d\n",
			  err);
		goto err_umem;
	}

	mr_params.pd_handle = pd->pd_handle;
	mr_params.mr_type = GDMA_MR_TYPE_GVA;
	mr_params.gva.dma_region_handle = dma_region_handle;
	mr_params.gva.virtual_address = iova;
	mr_params.gva.access_flags =
		mana_ib_verbs_to_gdma_access_flags(access_flags);

	err = mana_ib_gd_create_mr(dev, mr, &mr_params);
	if (err)
		goto err_dma_region;

	/*
	 * There is no need to keep track of dma_region_handle after MR is
	 * successfully created. The dma_region_handle is tracked in the PF
	 * as part of the lifecycle of this MR.
	 */

	return &mr->ibmr;

err_dma_region:
	mana_gd_destroy_dma_region(mdev_to_gc(dev), dma_region_handle);

err_umem:
	ib_umem_release(mr->umem);

err_free:
	kfree(mr);
	return ERR_PTR(err);
}

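/*
 * Create a DMA MR covering physical memory (GPA type); no DMA region or
 * umem is needed. Only IB_ACCESS_LOCAL_WRITE is accepted here.
 */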
struct ib_mr *mana_ib_get_dma_mr(struct ib_pd *ibpd, int access_flags)
{
	struct mana_ib_pd *pd = container_of(ibpd, struct mana_ib_pd, ibpd);
	struct gdma_create_mr_params mr_params = {};
	struct ib_device *ibdev = ibpd->device;
	struct mana_ib_dev *dev;
	struct mana_ib_mr *mr;
	int err;

	dev = container_of(ibdev, struct mana_ib_dev, ib_dev);

	if (access_flags & ~VALID_DMA_MR_FLAGS)
		return ERR_PTR(-EINVAL);

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	mr_params.pd_handle = pd->pd_handle;
	mr_params.mr_type = GDMA_MR_TYPE_GPA;

	err = mana_ib_gd_create_mr(dev, mr, &mr_params);
	if (err)
		goto err_free;

	return &mr->ibmr;

err_free:
	kfree(mr);
	return ERR_PTR(err);
}

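/*
 * Deregister an MR: destroy the hardware MR first, then release the
 * umem (if any) and free the wrapper. The PF releases the associated
 * DMA region as part of destroying the MR.
 */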
int mana_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
{
	struct mana_ib_mr *mr = container_of(ibmr, struct mana_ib_mr, ibmr);
	struct ib_device *ibdev = ibmr->device;
	struct mana_ib_dev *dev;
	int err;

	dev = container_of(ibdev, struct mana_ib_dev, ib_dev);

	err = mana_ib_gd_destroy_mr(dev, mr->mr_handle);
	if (err)
		return err;

	if (mr->umem)
		ib_umem_release(mr->umem);

	kfree(mr);

	return 0;
}