1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 /*
3 * Copyright 2018-2024 Amazon.com, Inc. or its affiliates. All rights reserved.
4 */
5
6 #include <linux/dma-buf.h>
7 #include <linux/dma-resv.h>
8 #include <linux/vmalloc.h>
9 #include <linux/log2.h>
10
11 #include <rdma/ib_addr.h>
12 #include <rdma/ib_umem.h>
13 #include <rdma/ib_user_verbs.h>
14 #include <rdma/ib_verbs.h>
15 #include <rdma/uverbs_ioctl.h>
16 #define UVERBS_MODULE_NAME efa_ib
17 #include <rdma/uverbs_named_ioctl.h>
18 #include <rdma/ib_user_ioctl_cmds.h>
19
20 #include "efa.h"
21 #include "efa_io_defs.h"
22
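/*
 * Kind of mapping handed out to userspace: driver-allocated kernel pages
 * (queue buffers), write-combined device memory (LLQ descriptors), or
 * non-cached device memory (doorbell registers).
 */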
23 enum {
24 EFA_MMAP_DMA_PAGE = 0,
25 EFA_MMAP_IO_WC,
26 EFA_MMAP_IO_NC,
27 };
28
29 struct efa_user_mmap_entry {
30 struct rdma_user_mmap_entry rdma_entry;
31 u64 address;
32 u8 mmap_flag;
33 };
34
35 #define EFA_DEFINE_DEVICE_STATS(op) \
36 op(EFA_SUBMITTED_CMDS, "submitted_cmds") \
37 op(EFA_COMPLETED_CMDS, "completed_cmds") \
38 op(EFA_CMDS_ERR, "cmds_err") \
39 op(EFA_NO_COMPLETION_CMDS, "no_completion_cmds") \
40 op(EFA_KEEP_ALIVE_RCVD, "keep_alive_rcvd") \
41 op(EFA_ALLOC_PD_ERR, "alloc_pd_err") \
42 op(EFA_CREATE_QP_ERR, "create_qp_err") \
43 op(EFA_CREATE_CQ_ERR, "create_cq_err") \
44 op(EFA_REG_MR_ERR, "reg_mr_err") \
45 op(EFA_ALLOC_UCONTEXT_ERR, "alloc_ucontext_err") \
46 op(EFA_CREATE_AH_ERR, "create_ah_err") \
47 op(EFA_MMAP_ERR, "mmap_err")
48
49 #define EFA_DEFINE_PORT_STATS(op) \
50 op(EFA_TX_BYTES, "tx_bytes") \
51 op(EFA_TX_PKTS, "tx_pkts") \
52 op(EFA_RX_BYTES, "rx_bytes") \
53 op(EFA_RX_PKTS, "rx_pkts") \
54 op(EFA_RX_DROPS, "rx_drops") \
55 op(EFA_SEND_BYTES, "send_bytes") \
56 op(EFA_SEND_WRS, "send_wrs") \
57 op(EFA_RECV_BYTES, "recv_bytes") \
58 op(EFA_RECV_WRS, "recv_wrs") \
59 op(EFA_RDMA_READ_WRS, "rdma_read_wrs") \
60 op(EFA_RDMA_READ_BYTES, "rdma_read_bytes") \
61 op(EFA_RDMA_READ_WR_ERR, "rdma_read_wr_err") \
62 op(EFA_RDMA_READ_RESP_BYTES, "rdma_read_resp_bytes") \
63 op(EFA_RDMA_WRITE_WRS, "rdma_write_wrs") \
64 op(EFA_RDMA_WRITE_BYTES, "rdma_write_bytes") \
65 op(EFA_RDMA_WRITE_WR_ERR, "rdma_write_wr_err") \
66 op(EFA_RDMA_WRITE_RECV_BYTES, "rdma_write_recv_bytes") \
67 op(EFA_RETRANS_BYTES, "retrans_bytes") \
68 op(EFA_RETRANS_PKTS, "retrans_pkts") \
69 op(EFA_RETRANS_TIMEOUT_EVENS, "retrans_timeout_events") \
70 op(EFA_UNRESPONSIVE_REMOTE_EVENTS, "unresponsive_remote_events") \
71 op(EFA_IMPAIRED_REMOTE_CONN_EVENTS, "impaired_remote_conn_events") \
72
73 #define EFA_STATS_ENUM(ename, name) ename,
74 #define EFA_STATS_STR(ename, nam) \
75 [ename].name = nam,
76
77 enum efa_hw_device_stats {
78 EFA_DEFINE_DEVICE_STATS(EFA_STATS_ENUM)
79 };
80
81 static const struct rdma_stat_desc efa_device_stats_descs[] = {
82 EFA_DEFINE_DEVICE_STATS(EFA_STATS_STR)
83 };
84
85 enum efa_hw_port_stats {
86 EFA_DEFINE_PORT_STATS(EFA_STATS_ENUM)
87 };
88
89 static const struct rdma_stat_desc efa_port_stats_descs[] = {
90 EFA_DEFINE_PORT_STATS(EFA_STATS_STR)
91 };
92
93 #define EFA_DEFAULT_LINK_SPEED_GBPS 100
94
95 #define EFA_CHUNK_PAYLOAD_SHIFT 12
96 #define EFA_CHUNK_PAYLOAD_SIZE BIT(EFA_CHUNK_PAYLOAD_SHIFT)
97 #define EFA_CHUNK_PAYLOAD_PTR_SIZE 8
98
99 #define EFA_CHUNK_SHIFT 12
100 #define EFA_CHUNK_SIZE BIT(EFA_CHUNK_SHIFT)
101 #define EFA_CHUNK_PTR_SIZE sizeof(struct efa_com_ctrl_buff_info)
102
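/*
 * Each PBL chunk is an EFA_CHUNK_SIZE buffer whose leading slots hold page
 * DMA addresses; the tail is reserved for an efa_com_ctrl_buff_info that
 * describes the next chunk in the list.
 */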
103 #define EFA_PTRS_PER_CHUNK \
104 ((EFA_CHUNK_SIZE - EFA_CHUNK_PTR_SIZE) / EFA_CHUNK_PAYLOAD_PTR_SIZE)
105
106 #define EFA_CHUNK_USED_SIZE \
107 ((EFA_PTRS_PER_CHUNK * EFA_CHUNK_PAYLOAD_PTR_SIZE) + EFA_CHUNK_PTR_SIZE)
108
109 struct pbl_chunk {
110 dma_addr_t dma_addr;
111 u64 *buf;
112 u32 length;
113 };
114
115 struct pbl_chunk_list {
116 struct pbl_chunk *chunks;
117 unsigned int size;
118 };
119
120 struct pbl_context {
121 union {
122 struct {
123 dma_addr_t dma_addr;
124 } continuous;
125 struct {
126 u32 pbl_buf_size_in_pages;
127 struct scatterlist *sgl;
128 int sg_dma_cnt;
129 struct pbl_chunk_list chunk_list;
130 } indirect;
131 } phys;
132 u64 *pbl_buf;
133 u32 pbl_buf_size_in_bytes;
134 u8 physically_continuous;
135 };
136
137 static inline struct efa_dev *to_edev(struct ib_device *ibdev)
138 {
139 return container_of(ibdev, struct efa_dev, ibdev);
140 }
141
142 static inline struct efa_ucontext *to_eucontext(struct ib_ucontext *ibucontext)
143 {
144 return container_of(ibucontext, struct efa_ucontext, ibucontext);
145 }
146
147 static inline struct efa_pd *to_epd(struct ib_pd *ibpd)
148 {
149 return container_of(ibpd, struct efa_pd, ibpd);
150 }
151
152 static inline struct efa_mr *to_emr(struct ib_mr *ibmr)
153 {
154 return container_of(ibmr, struct efa_mr, ibmr);
155 }
156
157 static inline struct efa_qp *to_eqp(struct ib_qp *ibqp)
158 {
159 return container_of(ibqp, struct efa_qp, ibqp);
160 }
161
162 static inline struct efa_cq *to_ecq(struct ib_cq *ibcq)
163 {
164 return container_of(ibcq, struct efa_cq, ibcq);
165 }
166
167 static inline struct efa_ah *to_eah(struct ib_ah *ibah)
168 {
169 return container_of(ibah, struct efa_ah, ibah);
170 }
171
172 static inline struct efa_user_mmap_entry *
173 to_emmap(struct rdma_user_mmap_entry *rdma_entry)
174 {
175 return container_of(rdma_entry, struct efa_user_mmap_entry, rdma_entry);
176 }
177
178 #define EFA_DEV_CAP(dev, cap) \
179 ((dev)->dev_attr.device_caps & \
180 EFA_ADMIN_FEATURE_DEVICE_ATTR_DESC_##cap##_MASK)
181
182 #define is_reserved_cleared(reserved) \
183 !memchr_inv(reserved, 0, sizeof(reserved))
184
185 static void *efa_zalloc_mapped(struct efa_dev *dev, dma_addr_t *dma_addr,
186 size_t size, enum dma_data_direction dir)
187 {
188 void *addr;
189
190 addr = alloc_pages_exact(size, GFP_KERNEL | __GFP_ZERO);
191 if (!addr)
192 return NULL;
193
194 *dma_addr = dma_map_single(&dev->pdev->dev, addr, size, dir);
195 if (dma_mapping_error(&dev->pdev->dev, *dma_addr)) {
196 ibdev_err(&dev->ibdev, "Failed to map DMA address\n");
197 free_pages_exact(addr, size);
198 return NULL;
199 }
200
201 return addr;
202 }
203
204 static void efa_free_mapped(struct efa_dev *dev, void *cpu_addr,
205 dma_addr_t dma_addr,
206 size_t size, enum dma_data_direction dir)
207 {
208 dma_unmap_single(&dev->pdev->dev, dma_addr, size, dir);
209 free_pages_exact(cpu_addr, size);
210 }
211
212 int efa_query_device(struct ib_device *ibdev,
213 struct ib_device_attr *props,
214 struct ib_udata *udata)
215 {
216 struct efa_com_get_device_attr_result *dev_attr;
217 struct efa_ibv_ex_query_device_resp resp = {};
218 struct efa_dev *dev = to_edev(ibdev);
219 int err;
220
221 if (udata && udata->inlen &&
222 !ib_is_udata_cleared(udata, 0, udata->inlen)) {
223 ibdev_dbg(ibdev,
224 "Incompatible ABI params, udata not cleared\n");
225 return -EINVAL;
226 }
227
228 dev_attr = &dev->dev_attr;
229
230 memset(props, 0, sizeof(*props));
231 props->max_mr_size = dev_attr->max_mr_pages * PAGE_SIZE;
232 props->page_size_cap = dev_attr->page_size_cap;
233 props->vendor_id = dev->pdev->vendor;
234 props->vendor_part_id = dev->pdev->device;
235 props->hw_ver = dev->pdev->subsystem_device;
236 props->max_qp = dev_attr->max_qp;
237 props->max_cq = dev_attr->max_cq;
238 props->max_pd = dev_attr->max_pd;
239 props->max_mr = dev_attr->max_mr;
240 props->max_ah = dev_attr->max_ah;
241 props->max_cqe = dev_attr->max_cq_depth;
242 props->max_qp_wr = min_t(u32, dev_attr->max_sq_depth,
243 dev_attr->max_rq_depth);
244 props->max_send_sge = dev_attr->max_sq_sge;
245 props->max_recv_sge = dev_attr->max_rq_sge;
246 props->max_sge_rd = dev_attr->max_wr_rdma_sge;
247 props->max_pkeys = 1;
248
249 if (udata && udata->outlen) {
250 resp.max_sq_sge = dev_attr->max_sq_sge;
251 resp.max_rq_sge = dev_attr->max_rq_sge;
252 resp.max_sq_wr = dev_attr->max_sq_depth;
253 resp.max_rq_wr = dev_attr->max_rq_depth;
254 resp.max_rdma_size = dev_attr->max_rdma_size;
255
256 resp.device_caps |= EFA_QUERY_DEVICE_CAPS_CQ_WITH_SGID;
257 resp.device_caps |= EFA_QUERY_DEVICE_CAPS_CQ_WITH_EXT_MEM;
258 if (EFA_DEV_CAP(dev, RDMA_READ))
259 resp.device_caps |= EFA_QUERY_DEVICE_CAPS_RDMA_READ;
260
261 if (EFA_DEV_CAP(dev, RNR_RETRY))
262 resp.device_caps |= EFA_QUERY_DEVICE_CAPS_RNR_RETRY;
263
264 if (EFA_DEV_CAP(dev, DATA_POLLING_128))
265 resp.device_caps |= EFA_QUERY_DEVICE_CAPS_DATA_POLLING_128;
266
267 if (EFA_DEV_CAP(dev, RDMA_WRITE))
268 resp.device_caps |= EFA_QUERY_DEVICE_CAPS_RDMA_WRITE;
269
270 if (EFA_DEV_CAP(dev, UNSOLICITED_WRITE_RECV))
271 resp.device_caps |= EFA_QUERY_DEVICE_CAPS_UNSOLICITED_WRITE_RECV;
272
273 if (dev->neqs)
274 resp.device_caps |= EFA_QUERY_DEVICE_CAPS_CQ_NOTIFICATIONS;
275
276 err = ib_copy_to_udata(udata, &resp,
277 min(sizeof(resp), udata->outlen));
278 if (err) {
279 ibdev_dbg(ibdev,
280 "Failed to copy udata for query_device\n");
281 return err;
282 }
283 }
284
285 return 0;
286 }
287
288 static void efa_link_gbps_to_speed_and_width(u16 gbps,
289 enum ib_port_speed *speed,
290 enum ib_port_width *width)
291 {
292 if (gbps >= 400) {
293 *width = IB_WIDTH_8X;
294 *speed = IB_SPEED_HDR;
295 } else if (gbps >= 200) {
296 *width = IB_WIDTH_4X;
297 *speed = IB_SPEED_HDR;
298 } else if (gbps >= 120) {
299 *width = IB_WIDTH_12X;
300 *speed = IB_SPEED_FDR10;
301 } else if (gbps >= 100) {
302 *width = IB_WIDTH_4X;
303 *speed = IB_SPEED_EDR;
304 } else if (gbps >= 60) {
305 *width = IB_WIDTH_12X;
306 *speed = IB_SPEED_DDR;
307 } else if (gbps >= 50) {
308 *width = IB_WIDTH_1X;
309 *speed = IB_SPEED_HDR;
310 } else if (gbps >= 40) {
311 *width = IB_WIDTH_4X;
312 *speed = IB_SPEED_FDR10;
313 } else if (gbps >= 30) {
314 *width = IB_WIDTH_12X;
315 *speed = IB_SPEED_SDR;
316 } else {
317 *width = IB_WIDTH_1X;
318 *speed = IB_SPEED_EDR;
319 }
320 }
321
322 int efa_query_port(struct ib_device *ibdev, u32 port,
323 struct ib_port_attr *props)
324 {
325 struct efa_dev *dev = to_edev(ibdev);
326 enum ib_port_speed link_speed;
327 enum ib_port_width link_width;
328 u16 link_gbps;
329
330 props->lmc = 1;
331
332 props->state = IB_PORT_ACTIVE;
333 props->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
334 props->gid_tbl_len = 1;
335 props->pkey_tbl_len = 1;
336 link_gbps = dev->dev_attr.max_link_speed_gbps ?: EFA_DEFAULT_LINK_SPEED_GBPS;
337 efa_link_gbps_to_speed_and_width(link_gbps, &link_speed, &link_width);
338 props->active_speed = link_speed;
339 props->active_width = link_width;
340 props->max_mtu = ib_mtu_int_to_enum(dev->dev_attr.mtu);
341 props->active_mtu = ib_mtu_int_to_enum(dev->dev_attr.mtu);
342 props->max_msg_sz = dev->dev_attr.mtu;
343 props->max_vl_num = 1;
344
345 return 0;
346 }
347
348 int efa_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
349 int qp_attr_mask,
350 struct ib_qp_init_attr *qp_init_attr)
351 {
352 struct efa_dev *dev = to_edev(ibqp->device);
353 struct efa_com_query_qp_params params = {};
354 struct efa_com_query_qp_result result;
355 struct efa_qp *qp = to_eqp(ibqp);
356 int err;
357
358 #define EFA_QUERY_QP_SUPP_MASK \
359 (IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT | \
360 IB_QP_QKEY | IB_QP_SQ_PSN | IB_QP_CAP | IB_QP_RNR_RETRY)
361
362 if (qp_attr_mask & ~EFA_QUERY_QP_SUPP_MASK) {
363 ibdev_dbg(&dev->ibdev,
364 "Unsupported qp_attr_mask[%#x] supported[%#x]\n",
365 qp_attr_mask, EFA_QUERY_QP_SUPP_MASK);
366 return -EOPNOTSUPP;
367 }
368
369 memset(qp_attr, 0, sizeof(*qp_attr));
370 memset(qp_init_attr, 0, sizeof(*qp_init_attr));
371
372 params.qp_handle = qp->qp_handle;
373 err = efa_com_query_qp(&dev->edev, &params, &result);
374 if (err)
375 return err;
376
377 qp_attr->qp_state = result.qp_state;
378 qp_attr->qkey = result.qkey;
379 qp_attr->sq_psn = result.sq_psn;
380 qp_attr->sq_draining = result.sq_draining;
381 qp_attr->port_num = 1;
382 qp_attr->rnr_retry = result.rnr_retry;
383
384 qp_attr->cap.max_send_wr = qp->max_send_wr;
385 qp_attr->cap.max_recv_wr = qp->max_recv_wr;
386 qp_attr->cap.max_send_sge = qp->max_send_sge;
387 qp_attr->cap.max_recv_sge = qp->max_recv_sge;
388 qp_attr->cap.max_inline_data = qp->max_inline_data;
389
390 qp_init_attr->qp_type = ibqp->qp_type;
391 qp_init_attr->recv_cq = ibqp->recv_cq;
392 qp_init_attr->send_cq = ibqp->send_cq;
393 qp_init_attr->qp_context = ibqp->qp_context;
394 qp_init_attr->cap = qp_attr->cap;
395
396 return 0;
397 }
398
399 int efa_query_gid(struct ib_device *ibdev, u32 port, int index,
400 union ib_gid *gid)
401 {
402 struct efa_dev *dev = to_edev(ibdev);
403
404 memcpy(gid->raw, dev->dev_attr.addr, sizeof(dev->dev_attr.addr));
405
406 return 0;
407 }
408
409 int efa_query_pkey(struct ib_device *ibdev, u32 port, u16 index,
410 u16 *pkey)
411 {
412 if (index > 0)
413 return -EINVAL;
414
415 *pkey = 0xffff;
416 return 0;
417 }
418
419 static int efa_pd_dealloc(struct efa_dev *dev, u16 pdn)
420 {
421 struct efa_com_dealloc_pd_params params = {
422 .pdn = pdn,
423 };
424
425 return efa_com_dealloc_pd(&dev->edev, &params);
426 }
427
428 int efa_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
429 {
430 struct efa_dev *dev = to_edev(ibpd->device);
431 struct efa_ibv_alloc_pd_resp resp = {};
432 struct efa_com_alloc_pd_result result;
433 struct efa_pd *pd = to_epd(ibpd);
434 int err;
435
436 if (udata->inlen &&
437 !ib_is_udata_cleared(udata, 0, udata->inlen)) {
438 ibdev_dbg(&dev->ibdev,
439 "Incompatible ABI params, udata not cleared\n");
440 err = -EINVAL;
441 goto err_out;
442 }
443
444 err = efa_com_alloc_pd(&dev->edev, &result);
445 if (err)
446 goto err_out;
447
448 pd->pdn = result.pdn;
449 resp.pdn = result.pdn;
450
451 if (udata->outlen) {
452 err = ib_copy_to_udata(udata, &resp,
453 min(sizeof(resp), udata->outlen));
454 if (err) {
455 ibdev_dbg(&dev->ibdev,
456 "Failed to copy udata for alloc_pd\n");
457 goto err_dealloc_pd;
458 }
459 }
460
461 ibdev_dbg(&dev->ibdev, "Allocated pd[%d]\n", pd->pdn);
462
463 return 0;
464
465 err_dealloc_pd:
466 efa_pd_dealloc(dev, result.pdn);
467 err_out:
468 atomic64_inc(&dev->stats.alloc_pd_err);
469 return err;
470 }
471
472 int efa_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
473 {
474 struct efa_dev *dev = to_edev(ibpd->device);
475 struct efa_pd *pd = to_epd(ibpd);
476
477 ibdev_dbg(&dev->ibdev, "Dealloc pd[%d]\n", pd->pdn);
478 efa_pd_dealloc(dev, pd->pdn);
479 return 0;
480 }
481
482 static int efa_destroy_qp_handle(struct efa_dev *dev, u32 qp_handle)
483 {
484 struct efa_com_destroy_qp_params params = { .qp_handle = qp_handle };
485
486 return efa_com_destroy_qp(&dev->edev, &params);
487 }
488
489 static void efa_qp_user_mmap_entries_remove(struct efa_qp *qp)
490 {
491 rdma_user_mmap_entry_remove(qp->rq_mmap_entry);
492 rdma_user_mmap_entry_remove(qp->rq_db_mmap_entry);
493 rdma_user_mmap_entry_remove(qp->llq_desc_mmap_entry);
494 rdma_user_mmap_entry_remove(qp->sq_db_mmap_entry);
495 }
496
497 int efa_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
498 {
499 struct efa_dev *dev = to_edev(ibqp->pd->device);
500 struct efa_qp *qp = to_eqp(ibqp);
501 int err;
502
503 ibdev_dbg(&dev->ibdev, "Destroy qp[%u]\n", ibqp->qp_num);
504
505 err = efa_destroy_qp_handle(dev, qp->qp_handle);
506 if (err)
507 return err;
508
509 efa_qp_user_mmap_entries_remove(qp);
510
511 if (qp->rq_cpu_addr) {
512 ibdev_dbg(&dev->ibdev,
513 "qp->cpu_addr[0x%p] freed: size[%lu], dma[%pad]\n",
514 qp->rq_cpu_addr, qp->rq_size,
515 &qp->rq_dma_addr);
516 efa_free_mapped(dev, qp->rq_cpu_addr, qp->rq_dma_addr,
517 qp->rq_size, DMA_TO_DEVICE);
518 }
519
520 return 0;
521 }
522
523 static struct rdma_user_mmap_entry*
524 efa_user_mmap_entry_insert(struct ib_ucontext *ucontext,
525 u64 address, size_t length,
526 u8 mmap_flag, u64 *offset)
527 {
528 struct efa_user_mmap_entry *entry = kzalloc(sizeof(*entry), GFP_KERNEL);
529 int err;
530
531 if (!entry)
532 return NULL;
533
534 entry->address = address;
535 entry->mmap_flag = mmap_flag;
536
537 err = rdma_user_mmap_entry_insert(ucontext, &entry->rdma_entry,
538 length);
539 if (err) {
540 kfree(entry);
541 return NULL;
542 }
543 *offset = rdma_user_mmap_get_offset(&entry->rdma_entry);
544
545 return &entry->rdma_entry;
546 }
547
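/*
 * Expose the SQ doorbell, LLQ descriptor space, RQ doorbell and RQ buffer
 * to userspace as mmap entries and return their keys in the create_qp
 * response.
 */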
548 static int qp_mmap_entries_setup(struct efa_qp *qp,
549 struct efa_dev *dev,
550 struct efa_ucontext *ucontext,
551 struct efa_com_create_qp_params *params,
552 struct efa_ibv_create_qp_resp *resp)
553 {
554 size_t length;
555 u64 address;
556
557 address = dev->db_bar_addr + resp->sq_db_offset;
558 qp->sq_db_mmap_entry =
559 efa_user_mmap_entry_insert(&ucontext->ibucontext,
560 address,
561 PAGE_SIZE, EFA_MMAP_IO_NC,
562 &resp->sq_db_mmap_key);
563 if (!qp->sq_db_mmap_entry)
564 return -ENOMEM;
565
566 resp->sq_db_offset &= ~PAGE_MASK;
567
568 address = dev->mem_bar_addr + resp->llq_desc_offset;
569 length = PAGE_ALIGN(params->sq_ring_size_in_bytes +
570 offset_in_page(resp->llq_desc_offset));
571
572 qp->llq_desc_mmap_entry =
573 efa_user_mmap_entry_insert(&ucontext->ibucontext,
574 address, length,
575 EFA_MMAP_IO_WC,
576 &resp->llq_desc_mmap_key);
577 if (!qp->llq_desc_mmap_entry)
578 goto err_remove_mmap;
579
580 resp->llq_desc_offset &= ~PAGE_MASK;
581
582 if (qp->rq_size) {
583 address = dev->db_bar_addr + resp->rq_db_offset;
584
585 qp->rq_db_mmap_entry =
586 efa_user_mmap_entry_insert(&ucontext->ibucontext,
587 address, PAGE_SIZE,
588 EFA_MMAP_IO_NC,
589 &resp->rq_db_mmap_key);
590 if (!qp->rq_db_mmap_entry)
591 goto err_remove_mmap;
592
593 resp->rq_db_offset &= ~PAGE_MASK;
594
595 address = virt_to_phys(qp->rq_cpu_addr);
596 qp->rq_mmap_entry =
597 efa_user_mmap_entry_insert(&ucontext->ibucontext,
598 address, qp->rq_size,
599 EFA_MMAP_DMA_PAGE,
600 &resp->rq_mmap_key);
601 if (!qp->rq_mmap_entry)
602 goto err_remove_mmap;
603
604 resp->rq_mmap_size = qp->rq_size;
605 }
606
607 return 0;
608
609 err_remove_mmap:
610 efa_qp_user_mmap_entries_remove(qp);
611
612 return -ENOMEM;
613 }
614
615 static int efa_qp_validate_cap(struct efa_dev *dev,
616 struct ib_qp_init_attr *init_attr)
617 {
618 if (init_attr->cap.max_send_wr > dev->dev_attr.max_sq_depth) {
619 ibdev_dbg(&dev->ibdev,
620 "qp: requested send wr[%u] exceeds the max[%u]\n",
621 init_attr->cap.max_send_wr,
622 dev->dev_attr.max_sq_depth);
623 return -EINVAL;
624 }
625 if (init_attr->cap.max_recv_wr > dev->dev_attr.max_rq_depth) {
626 ibdev_dbg(&dev->ibdev,
627 "qp: requested receive wr[%u] exceeds the max[%u]\n",
628 init_attr->cap.max_recv_wr,
629 dev->dev_attr.max_rq_depth);
630 return -EINVAL;
631 }
632 if (init_attr->cap.max_send_sge > dev->dev_attr.max_sq_sge) {
633 ibdev_dbg(&dev->ibdev,
634 "qp: requested sge send[%u] exceeds the max[%u]\n",
635 init_attr->cap.max_send_sge, dev->dev_attr.max_sq_sge);
636 return -EINVAL;
637 }
638 if (init_attr->cap.max_recv_sge > dev->dev_attr.max_rq_sge) {
639 ibdev_dbg(&dev->ibdev,
640 "qp: requested sge recv[%u] exceeds the max[%u]\n",
641 init_attr->cap.max_recv_sge, dev->dev_attr.max_rq_sge);
642 return -EINVAL;
643 }
644 if (init_attr->cap.max_inline_data > dev->dev_attr.inline_buf_size) {
645 ibdev_dbg(&dev->ibdev,
646 "qp: requested inline data[%u] exceeds the max[%u]\n",
647 init_attr->cap.max_inline_data,
648 dev->dev_attr.inline_buf_size);
649 return -EINVAL;
650 }
651
652 return 0;
653 }
654
655 static int efa_qp_validate_attr(struct efa_dev *dev,
656 struct ib_qp_init_attr *init_attr)
657 {
658 if (init_attr->qp_type != IB_QPT_DRIVER &&
659 init_attr->qp_type != IB_QPT_UD) {
660 ibdev_dbg(&dev->ibdev,
661 "Unsupported qp type %d\n", init_attr->qp_type);
662 return -EOPNOTSUPP;
663 }
664
665 if (init_attr->srq) {
666 ibdev_dbg(&dev->ibdev, "SRQ is not supported\n");
667 return -EOPNOTSUPP;
668 }
669
670 if (init_attr->create_flags) {
671 ibdev_dbg(&dev->ibdev, "Unsupported create flags\n");
672 return -EOPNOTSUPP;
673 }
674
675 return 0;
676 }
677
678 int efa_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *init_attr,
679 struct ib_udata *udata)
680 {
681 struct efa_com_create_qp_params create_qp_params = {};
682 struct efa_com_create_qp_result create_qp_resp;
683 struct efa_dev *dev = to_edev(ibqp->device);
684 struct efa_ibv_create_qp_resp resp = {};
685 struct efa_ibv_create_qp cmd = {};
686 struct efa_qp *qp = to_eqp(ibqp);
687 struct efa_ucontext *ucontext;
688 u16 supported_efa_flags = 0;
689 int err;
690
691 ucontext = rdma_udata_to_drv_context(udata, struct efa_ucontext,
692 ibucontext);
693
694 err = efa_qp_validate_cap(dev, init_attr);
695 if (err)
696 goto err_out;
697
698 err = efa_qp_validate_attr(dev, init_attr);
699 if (err)
700 goto err_out;
701
702 if (offsetofend(typeof(cmd), driver_qp_type) > udata->inlen) {
703 ibdev_dbg(&dev->ibdev,
704 "Incompatible ABI params, no input udata\n");
705 err = -EINVAL;
706 goto err_out;
707 }
708
709 if (udata->inlen > sizeof(cmd) &&
710 !ib_is_udata_cleared(udata, sizeof(cmd),
711 udata->inlen - sizeof(cmd))) {
712 ibdev_dbg(&dev->ibdev,
713 "Incompatible ABI params, unknown fields in udata\n");
714 err = -EINVAL;
715 goto err_out;
716 }
717
718 err = ib_copy_from_udata(&cmd, udata,
719 min(sizeof(cmd), udata->inlen));
720 if (err) {
721 ibdev_dbg(&dev->ibdev,
722 "Cannot copy udata for create_qp\n");
723 goto err_out;
724 }
725
726 if (cmd.comp_mask || !is_reserved_cleared(cmd.reserved_98)) {
727 ibdev_dbg(&dev->ibdev,
728 "Incompatible ABI params, unknown fields in udata\n");
729 err = -EINVAL;
730 goto err_out;
731 }
732
733 if (EFA_DEV_CAP(dev, UNSOLICITED_WRITE_RECV))
734 supported_efa_flags |= EFA_CREATE_QP_WITH_UNSOLICITED_WRITE_RECV;
735
736 if (cmd.flags & ~supported_efa_flags) {
737 ibdev_dbg(&dev->ibdev, "Unsupported EFA QP create flags[%#x], supported[%#x]\n",
738 cmd.flags, supported_efa_flags);
739 err = -EOPNOTSUPP;
740 goto err_out;
741 }
742
743 create_qp_params.uarn = ucontext->uarn;
744 create_qp_params.pd = to_epd(ibqp->pd)->pdn;
745
746 if (init_attr->qp_type == IB_QPT_UD) {
747 create_qp_params.qp_type = EFA_ADMIN_QP_TYPE_UD;
748 } else if (cmd.driver_qp_type == EFA_QP_DRIVER_TYPE_SRD) {
749 create_qp_params.qp_type = EFA_ADMIN_QP_TYPE_SRD;
750 } else {
751 ibdev_dbg(&dev->ibdev,
752 "Unsupported qp type %d driver qp type %d\n",
753 init_attr->qp_type, cmd.driver_qp_type);
754 err = -EOPNOTSUPP;
755 goto err_out;
756 }
757
758 ibdev_dbg(&dev->ibdev, "Create QP: qp type %d driver qp type %#x\n",
759 init_attr->qp_type, cmd.driver_qp_type);
760 create_qp_params.send_cq_idx = to_ecq(init_attr->send_cq)->cq_idx;
761 create_qp_params.recv_cq_idx = to_ecq(init_attr->recv_cq)->cq_idx;
762 create_qp_params.sq_depth = init_attr->cap.max_send_wr;
763 create_qp_params.sq_ring_size_in_bytes = cmd.sq_ring_size;
764
765 create_qp_params.rq_depth = init_attr->cap.max_recv_wr;
766 create_qp_params.rq_ring_size_in_bytes = cmd.rq_ring_size;
767 qp->rq_size = PAGE_ALIGN(create_qp_params.rq_ring_size_in_bytes);
768 if (qp->rq_size) {
769 qp->rq_cpu_addr = efa_zalloc_mapped(dev, &qp->rq_dma_addr,
770 qp->rq_size, DMA_TO_DEVICE);
771 if (!qp->rq_cpu_addr) {
772 err = -ENOMEM;
773 goto err_out;
774 }
775
776 ibdev_dbg(&dev->ibdev,
777 "qp->cpu_addr[0x%p] allocated: size[%lu], dma[%pad]\n",
778 qp->rq_cpu_addr, qp->rq_size, &qp->rq_dma_addr);
779 create_qp_params.rq_base_addr = qp->rq_dma_addr;
780 }
781
782 create_qp_params.sl = cmd.sl;
783
784 if (cmd.flags & EFA_CREATE_QP_WITH_UNSOLICITED_WRITE_RECV)
785 create_qp_params.unsolicited_write_recv = true;
786
787 err = efa_com_create_qp(&dev->edev, &create_qp_params,
788 &create_qp_resp);
789 if (err)
790 goto err_free_mapped;
791
792 resp.sq_db_offset = create_qp_resp.sq_db_offset;
793 resp.rq_db_offset = create_qp_resp.rq_db_offset;
794 resp.llq_desc_offset = create_qp_resp.llq_descriptors_offset;
795 resp.send_sub_cq_idx = create_qp_resp.send_sub_cq_idx;
796 resp.recv_sub_cq_idx = create_qp_resp.recv_sub_cq_idx;
797
798 err = qp_mmap_entries_setup(qp, dev, ucontext, &create_qp_params,
799 &resp);
800 if (err)
801 goto err_destroy_qp;
802
803 qp->qp_handle = create_qp_resp.qp_handle;
804 qp->ibqp.qp_num = create_qp_resp.qp_num;
805 qp->max_send_wr = init_attr->cap.max_send_wr;
806 qp->max_recv_wr = init_attr->cap.max_recv_wr;
807 qp->max_send_sge = init_attr->cap.max_send_sge;
808 qp->max_recv_sge = init_attr->cap.max_recv_sge;
809 qp->max_inline_data = init_attr->cap.max_inline_data;
810
811 if (udata->outlen) {
812 err = ib_copy_to_udata(udata, &resp,
813 min(sizeof(resp), udata->outlen));
814 if (err) {
815 ibdev_dbg(&dev->ibdev,
816 "Failed to copy udata for qp[%u]\n",
817 create_qp_resp.qp_num);
818 goto err_remove_mmap_entries;
819 }
820 }
821
822 ibdev_dbg(&dev->ibdev, "Created qp[%d]\n", qp->ibqp.qp_num);
823
824 return 0;
825
826 err_remove_mmap_entries:
827 efa_qp_user_mmap_entries_remove(qp);
828 err_destroy_qp:
829 efa_destroy_qp_handle(dev, create_qp_resp.qp_handle);
830 err_free_mapped:
831 if (qp->rq_size)
832 efa_free_mapped(dev, qp->rq_cpu_addr, qp->rq_dma_addr,
833 qp->rq_size, DMA_TO_DEVICE);
834 err_out:
835 atomic64_inc(&dev->stats.create_qp_err);
836 return err;
837 }
838
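/*
 * Allowed SRD QP state transitions with their required/optional attribute
 * masks, analogous to the core ib_modify_qp_is_ok() table used for UD QPs.
 */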
839 static const struct {
840 int valid;
841 enum ib_qp_attr_mask req_param;
842 enum ib_qp_attr_mask opt_param;
843 } srd_qp_state_table[IB_QPS_ERR + 1][IB_QPS_ERR + 1] = {
844 [IB_QPS_RESET] = {
845 [IB_QPS_RESET] = { .valid = 1 },
846 [IB_QPS_INIT] = {
847 .valid = 1,
848 .req_param = IB_QP_PKEY_INDEX |
849 IB_QP_PORT |
850 IB_QP_QKEY,
851 },
852 },
853 [IB_QPS_INIT] = {
854 [IB_QPS_RESET] = { .valid = 1 },
855 [IB_QPS_ERR] = { .valid = 1 },
856 [IB_QPS_INIT] = {
857 .valid = 1,
858 .opt_param = IB_QP_PKEY_INDEX |
859 IB_QP_PORT |
860 IB_QP_QKEY,
861 },
862 [IB_QPS_RTR] = {
863 .valid = 1,
864 .opt_param = IB_QP_PKEY_INDEX |
865 IB_QP_QKEY,
866 },
867 },
868 [IB_QPS_RTR] = {
869 [IB_QPS_RESET] = { .valid = 1 },
870 [IB_QPS_ERR] = { .valid = 1 },
871 [IB_QPS_RTS] = {
872 .valid = 1,
873 .req_param = IB_QP_SQ_PSN,
874 .opt_param = IB_QP_CUR_STATE |
875 IB_QP_QKEY |
876 IB_QP_RNR_RETRY,
877
878 }
879 },
880 [IB_QPS_RTS] = {
881 [IB_QPS_RESET] = { .valid = 1 },
882 [IB_QPS_ERR] = { .valid = 1 },
883 [IB_QPS_RTS] = {
884 .valid = 1,
885 .opt_param = IB_QP_CUR_STATE |
886 IB_QP_QKEY,
887 },
888 [IB_QPS_SQD] = {
889 .valid = 1,
890 .opt_param = IB_QP_EN_SQD_ASYNC_NOTIFY,
891 },
892 },
893 [IB_QPS_SQD] = {
894 [IB_QPS_RESET] = { .valid = 1 },
895 [IB_QPS_ERR] = { .valid = 1 },
896 [IB_QPS_RTS] = {
897 .valid = 1,
898 .opt_param = IB_QP_CUR_STATE |
899 IB_QP_QKEY,
900 },
901 [IB_QPS_SQD] = {
902 .valid = 1,
903 .opt_param = IB_QP_PKEY_INDEX |
904 IB_QP_QKEY,
905 }
906 },
907 [IB_QPS_SQE] = {
908 [IB_QPS_RESET] = { .valid = 1 },
909 [IB_QPS_ERR] = { .valid = 1 },
910 [IB_QPS_RTS] = {
911 .valid = 1,
912 .opt_param = IB_QP_CUR_STATE |
913 IB_QP_QKEY,
914 }
915 },
916 [IB_QPS_ERR] = {
917 [IB_QPS_RESET] = { .valid = 1 },
918 [IB_QPS_ERR] = { .valid = 1 },
919 }
920 };
921
922 static bool efa_modify_srd_qp_is_ok(enum ib_qp_state cur_state,
923 enum ib_qp_state next_state,
924 enum ib_qp_attr_mask mask)
925 {
926 enum ib_qp_attr_mask req_param, opt_param;
927
928 if (mask & IB_QP_CUR_STATE &&
929 cur_state != IB_QPS_RTR && cur_state != IB_QPS_RTS &&
930 cur_state != IB_QPS_SQD && cur_state != IB_QPS_SQE)
931 return false;
932
933 if (!srd_qp_state_table[cur_state][next_state].valid)
934 return false;
935
936 req_param = srd_qp_state_table[cur_state][next_state].req_param;
937 opt_param = srd_qp_state_table[cur_state][next_state].opt_param;
938
939 if ((mask & req_param) != req_param)
940 return false;
941
942 if (mask & ~(req_param | opt_param | IB_QP_STATE))
943 return false;
944
945 return true;
946 }
947
948 static int efa_modify_qp_validate(struct efa_dev *dev, struct efa_qp *qp,
949 struct ib_qp_attr *qp_attr, int qp_attr_mask,
950 enum ib_qp_state cur_state,
951 enum ib_qp_state new_state)
952 {
953 int err;
954
955 #define EFA_MODIFY_QP_SUPP_MASK \
956 (IB_QP_STATE | IB_QP_CUR_STATE | IB_QP_EN_SQD_ASYNC_NOTIFY | \
957 IB_QP_PKEY_INDEX | IB_QP_PORT | IB_QP_QKEY | IB_QP_SQ_PSN | \
958 IB_QP_RNR_RETRY)
959
960 if (qp_attr_mask & ~EFA_MODIFY_QP_SUPP_MASK) {
961 ibdev_dbg(&dev->ibdev,
962 "Unsupported qp_attr_mask[%#x] supported[%#x]\n",
963 qp_attr_mask, EFA_MODIFY_QP_SUPP_MASK);
964 return -EOPNOTSUPP;
965 }
966
967 if (qp->ibqp.qp_type == IB_QPT_DRIVER)
968 err = !efa_modify_srd_qp_is_ok(cur_state, new_state,
969 qp_attr_mask);
970 else
971 err = !ib_modify_qp_is_ok(cur_state, new_state, IB_QPT_UD,
972 qp_attr_mask);
973
974 if (err) {
975 ibdev_dbg(&dev->ibdev, "Invalid modify QP parameters\n");
976 return -EINVAL;
977 }
978
979 if ((qp_attr_mask & IB_QP_PORT) && qp_attr->port_num != 1) {
980 ibdev_dbg(&dev->ibdev, "Can't change port num\n");
981 return -EOPNOTSUPP;
982 }
983
984 if ((qp_attr_mask & IB_QP_PKEY_INDEX) && qp_attr->pkey_index) {
985 ibdev_dbg(&dev->ibdev, "Can't change pkey index\n");
986 return -EOPNOTSUPP;
987 }
988
989 return 0;
990 }
991
992 int efa_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
993 int qp_attr_mask, struct ib_udata *udata)
994 {
995 struct efa_dev *dev = to_edev(ibqp->device);
996 struct efa_com_modify_qp_params params = {};
997 struct efa_qp *qp = to_eqp(ibqp);
998 enum ib_qp_state cur_state;
999 enum ib_qp_state new_state;
1000 int err;
1001
1002 if (qp_attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
1003 return -EOPNOTSUPP;
1004
1005 if (udata->inlen &&
1006 !ib_is_udata_cleared(udata, 0, udata->inlen)) {
1007 ibdev_dbg(&dev->ibdev,
1008 "Incompatible ABI params, udata not cleared\n");
1009 return -EINVAL;
1010 }
1011
1012 cur_state = qp_attr_mask & IB_QP_CUR_STATE ? qp_attr->cur_qp_state :
1013 qp->state;
1014 new_state = qp_attr_mask & IB_QP_STATE ? qp_attr->qp_state : cur_state;
1015
1016 err = efa_modify_qp_validate(dev, qp, qp_attr, qp_attr_mask, cur_state,
1017 new_state);
1018 if (err)
1019 return err;
1020
1021 params.qp_handle = qp->qp_handle;
1022
1023 if (qp_attr_mask & IB_QP_STATE) {
1024 EFA_SET(&params.modify_mask, EFA_ADMIN_MODIFY_QP_CMD_QP_STATE,
1025 1);
1026 EFA_SET(&params.modify_mask,
1027 EFA_ADMIN_MODIFY_QP_CMD_CUR_QP_STATE, 1);
1028 params.cur_qp_state = cur_state;
1029 params.qp_state = new_state;
1030 }
1031
1032 if (qp_attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY) {
1033 EFA_SET(&params.modify_mask,
1034 EFA_ADMIN_MODIFY_QP_CMD_SQ_DRAINED_ASYNC_NOTIFY, 1);
1035 params.sq_drained_async_notify = qp_attr->en_sqd_async_notify;
1036 }
1037
1038 if (qp_attr_mask & IB_QP_QKEY) {
1039 EFA_SET(&params.modify_mask, EFA_ADMIN_MODIFY_QP_CMD_QKEY, 1);
1040 params.qkey = qp_attr->qkey;
1041 }
1042
1043 if (qp_attr_mask & IB_QP_SQ_PSN) {
1044 EFA_SET(&params.modify_mask, EFA_ADMIN_MODIFY_QP_CMD_SQ_PSN, 1);
1045 params.sq_psn = qp_attr->sq_psn;
1046 }
1047
1048 if (qp_attr_mask & IB_QP_RNR_RETRY) {
1049 EFA_SET(&params.modify_mask, EFA_ADMIN_MODIFY_QP_CMD_RNR_RETRY,
1050 1);
1051 params.rnr_retry = qp_attr->rnr_retry;
1052 }
1053
1054 err = efa_com_modify_qp(&dev->edev, &params);
1055 if (err)
1056 return err;
1057
1058 qp->state = new_state;
1059
1060 return 0;
1061 }
1062
1063 static int efa_destroy_cq_idx(struct efa_dev *dev, int cq_idx)
1064 {
1065 struct efa_com_destroy_cq_params params = { .cq_idx = cq_idx };
1066
1067 return efa_com_destroy_cq(&dev->edev, &params);
1068 }
1069
1070 static void efa_cq_user_mmap_entries_remove(struct efa_cq *cq)
1071 {
1072 rdma_user_mmap_entry_remove(cq->db_mmap_entry);
1073 rdma_user_mmap_entry_remove(cq->mmap_entry);
1074 }
1075
1076 int efa_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
1077 {
1078 struct efa_dev *dev = to_edev(ibcq->device);
1079 struct efa_cq *cq = to_ecq(ibcq);
1080
1081 ibdev_dbg(&dev->ibdev,
1082 "Destroy cq[%d] virt[0x%p] freed: size[%lu], dma[%pad]\n",
1083 cq->cq_idx, cq->cpu_addr, cq->size, &cq->dma_addr);
1084
1085 efa_destroy_cq_idx(dev, cq->cq_idx);
1086 efa_cq_user_mmap_entries_remove(cq);
1087 if (cq->eq) {
1088 xa_erase(&dev->cqs_xa, cq->cq_idx);
1089 synchronize_irq(cq->eq->irq.irqn);
1090 }
1091
1092 if (cq->umem)
1093 ib_umem_release(cq->umem);
1094 else
1095 efa_free_mapped(dev, cq->cpu_addr, cq->dma_addr, cq->size, DMA_FROM_DEVICE);
1096 return 0;
1097 }
1098
1099 static struct efa_eq *efa_vec2eq(struct efa_dev *dev, int vec)
1100 {
1101 return &dev->eqs[vec];
1102 }
1103
1104 static int cq_mmap_entries_setup(struct efa_dev *dev, struct efa_cq *cq,
1105 struct efa_ibv_create_cq_resp *resp,
1106 bool db_valid)
1107 {
1108 resp->q_mmap_size = cq->size;
1109 cq->mmap_entry = efa_user_mmap_entry_insert(&cq->ucontext->ibucontext,
1110 virt_to_phys(cq->cpu_addr),
1111 cq->size, EFA_MMAP_DMA_PAGE,
1112 &resp->q_mmap_key);
1113 if (!cq->mmap_entry)
1114 return -ENOMEM;
1115
1116 if (db_valid) {
1117 cq->db_mmap_entry =
1118 efa_user_mmap_entry_insert(&cq->ucontext->ibucontext,
1119 dev->db_bar_addr + resp->db_off,
1120 PAGE_SIZE, EFA_MMAP_IO_NC,
1121 &resp->db_mmap_key);
1122 if (!cq->db_mmap_entry) {
1123 rdma_user_mmap_entry_remove(cq->mmap_entry);
1124 return -ENOMEM;
1125 }
1126
1127 resp->db_off &= ~PAGE_MASK;
1128 resp->comp_mask |= EFA_CREATE_CQ_RESP_DB_OFF;
1129 }
1130
1131 return 0;
1132 }
1133
1134 int efa_create_cq_umem(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
1135 struct ib_umem *umem, struct uverbs_attr_bundle *attrs)
1136 {
1137 struct ib_udata *udata = &attrs->driver_udata;
1138 struct efa_ucontext *ucontext = rdma_udata_to_drv_context(
1139 udata, struct efa_ucontext, ibucontext);
1140 struct efa_com_create_cq_params params = {};
1141 struct efa_ibv_create_cq_resp resp = {};
1142 struct efa_com_create_cq_result result;
1143 struct ib_device *ibdev = ibcq->device;
1144 struct efa_dev *dev = to_edev(ibdev);
1145 struct efa_ibv_create_cq cmd = {};
1146 struct efa_cq *cq = to_ecq(ibcq);
1147 int entries = attr->cqe;
1148 bool set_src_addr;
1149 int err;
1150
1151 ibdev_dbg(ibdev, "create_cq entries %d\n", entries);
1152
1153 if (attr->flags)
1154 return -EOPNOTSUPP;
1155
1156 if (entries < 1 || entries > dev->dev_attr.max_cq_depth) {
1157 ibdev_dbg(ibdev,
1158 "cq: requested entries[%u] non-positive or greater than max[%u]\n",
1159 entries, dev->dev_attr.max_cq_depth);
1160 err = -EINVAL;
1161 goto err_out;
1162 }
1163
1164 if (offsetofend(typeof(cmd), num_sub_cqs) > udata->inlen) {
1165 ibdev_dbg(ibdev,
1166 "Incompatible ABI params, no input udata\n");
1167 err = -EINVAL;
1168 goto err_out;
1169 }
1170
1171 if (udata->inlen > sizeof(cmd) &&
1172 !ib_is_udata_cleared(udata, sizeof(cmd),
1173 udata->inlen - sizeof(cmd))) {
1174 ibdev_dbg(ibdev,
1175 "Incompatible ABI params, unknown fields in udata\n");
1176 err = -EINVAL;
1177 goto err_out;
1178 }
1179
1180 err = ib_copy_from_udata(&cmd, udata,
1181 min(sizeof(cmd), udata->inlen));
1182 if (err) {
1183 ibdev_dbg(ibdev, "Cannot copy udata for create_cq\n");
1184 goto err_out;
1185 }
1186
1187 if (cmd.comp_mask || !is_reserved_cleared(cmd.reserved_58)) {
1188 ibdev_dbg(ibdev,
1189 "Incompatible ABI params, unknown fields in udata\n");
1190 err = -EINVAL;
1191 goto err_out;
1192 }
1193
1194 set_src_addr = !!(cmd.flags & EFA_CREATE_CQ_WITH_SGID);
1195 if ((cmd.cq_entry_size != sizeof(struct efa_io_rx_cdesc_ex)) &&
1196 (set_src_addr ||
1197 cmd.cq_entry_size != sizeof(struct efa_io_rx_cdesc))) {
1198 ibdev_dbg(ibdev,
1199 "Invalid entry size [%u]\n", cmd.cq_entry_size);
1200 err = -EINVAL;
1201 goto err_out;
1202 }
1203
1204 if (cmd.num_sub_cqs != dev->dev_attr.sub_cqs_per_cq) {
1205 ibdev_dbg(ibdev,
1206 "Invalid number of sub cqs[%u] expected[%u]\n",
1207 cmd.num_sub_cqs, dev->dev_attr.sub_cqs_per_cq);
1208 err = -EINVAL;
1209 goto err_out;
1210 }
1211
1212 cq->ucontext = ucontext;
1213 cq->size = PAGE_ALIGN(cmd.cq_entry_size * entries * cmd.num_sub_cqs);
1214
1215 if (umem) {
1216 if (umem->length < cq->size) {
1217 ibdev_dbg(&dev->ibdev, "External memory too small\n");
1218 err = -EINVAL;
1219 goto err_free_mem;
1220 }
1221
1222 if (!ib_umem_is_contiguous(umem)) {
1223 ibdev_dbg(&dev->ibdev, "Non contiguous CQ unsupported\n");
1224 err = -EINVAL;
1225 goto err_free_mem;
1226 }
1227
1228 cq->cpu_addr = NULL;
1229 cq->dma_addr = ib_umem_start_dma_addr(umem);
1230 cq->umem = umem;
1231 } else {
1232 cq->cpu_addr = efa_zalloc_mapped(dev, &cq->dma_addr, cq->size,
1233 DMA_FROM_DEVICE);
1234 if (!cq->cpu_addr) {
1235 err = -ENOMEM;
1236 goto err_out;
1237 }
1238 }
1239
1240 params.uarn = cq->ucontext->uarn;
1241 params.sub_cq_depth = entries;
1242 params.dma_addr = cq->dma_addr;
1243 params.entry_size_in_bytes = cmd.cq_entry_size;
1244 params.num_sub_cqs = cmd.num_sub_cqs;
1245 params.set_src_addr = set_src_addr;
1246 if (cmd.flags & EFA_CREATE_CQ_WITH_COMPLETION_CHANNEL) {
1247 cq->eq = efa_vec2eq(dev, attr->comp_vector);
1248 params.eqn = cq->eq->eeq.eqn;
1249 params.interrupt_mode_enabled = true;
1250 }
1251
1252 err = efa_com_create_cq(&dev->edev, &params, &result);
1253 if (err)
1254 goto err_free_mem;
1255
1256 resp.db_off = result.db_off;
1257 resp.cq_idx = result.cq_idx;
1258 cq->cq_idx = result.cq_idx;
1259 cq->ibcq.cqe = result.actual_depth;
1260 WARN_ON_ONCE(entries != result.actual_depth);
1261
1262 if (!umem)
1263 err = cq_mmap_entries_setup(dev, cq, &resp, result.db_valid);
1264
1265 if (err) {
1266 ibdev_dbg(ibdev, "Could not setup cq[%u] mmap entries\n",
1267 cq->cq_idx);
1268 goto err_destroy_cq;
1269 }
1270
1271 if (cq->eq) {
1272 err = xa_err(xa_store(&dev->cqs_xa, cq->cq_idx, cq, GFP_KERNEL));
1273 if (err) {
1274 ibdev_dbg(ibdev, "Failed to store cq[%u] in xarray\n",
1275 cq->cq_idx);
1276 goto err_remove_mmap;
1277 }
1278 }
1279
1280 if (udata->outlen) {
1281 err = ib_copy_to_udata(udata, &resp,
1282 min(sizeof(resp), udata->outlen));
1283 if (err) {
1284 ibdev_dbg(ibdev,
1285 "Failed to copy udata for create_cq\n");
1286 goto err_xa_erase;
1287 }
1288 }
1289
1290 ibdev_dbg(ibdev, "Created cq[%d], cq depth[%u]. dma[%pad] virt[0x%p]\n",
1291 cq->cq_idx, result.actual_depth, &cq->dma_addr, cq->cpu_addr);
1292
1293 return 0;
1294
1295 err_xa_erase:
1296 if (cq->eq)
1297 xa_erase(&dev->cqs_xa, cq->cq_idx);
1298 err_remove_mmap:
1299 efa_cq_user_mmap_entries_remove(cq);
1300 err_destroy_cq:
1301 efa_destroy_cq_idx(dev, cq->cq_idx);
1302 err_free_mem:
1303 if (umem)
1304 ib_umem_release(umem);
1305 else
1306 efa_free_mapped(dev, cq->cpu_addr, cq->dma_addr, cq->size, DMA_FROM_DEVICE);
1307
1308 err_out:
1309 atomic64_inc(&dev->stats.create_cq_err);
1310 return err;
1311 }
1312
1313 int efa_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
1314 struct uverbs_attr_bundle *attrs)
1315 {
1316 return efa_create_cq_umem(ibcq, attr, NULL, attrs);
1317 }
1318
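/* Fill page_list with the DMA address of each hp_shift-sized block of the umem. */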
1319 static int umem_to_page_list(struct efa_dev *dev,
1320 struct ib_umem *umem,
1321 u64 *page_list,
1322 u32 hp_cnt,
1323 u8 hp_shift)
1324 {
1325 u32 pages_in_hp = BIT(hp_shift - PAGE_SHIFT);
1326 struct ib_block_iter biter;
1327 unsigned int hp_idx = 0;
1328
1329 ibdev_dbg(&dev->ibdev, "hp_cnt[%u], pages_in_hp[%u]\n",
1330 hp_cnt, pages_in_hp);
1331
1332 rdma_umem_for_each_dma_block(umem, &biter, BIT(hp_shift))
1333 page_list[hp_idx++] = rdma_block_iter_dma_address(&biter);
1334
1335 return 0;
1336 }
1337
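/* Build a scatterlist over the physical pages backing a vmalloc'ed buffer. */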
1338 static struct scatterlist *efa_vmalloc_buf_to_sg(u64 *buf, int page_cnt)
1339 {
1340 struct scatterlist *sglist;
1341 struct page *pg;
1342 int i;
1343
1344 sglist = kmalloc_array(page_cnt, sizeof(*sglist), GFP_KERNEL);
1345 if (!sglist)
1346 return NULL;
1347 sg_init_table(sglist, page_cnt);
1348 for (i = 0; i < page_cnt; i++) {
1349 pg = vmalloc_to_page(buf);
1350 if (!pg)
1351 goto err;
1352 sg_set_page(&sglist[i], pg, PAGE_SIZE, 0);
1353 buf += PAGE_SIZE / sizeof(*buf);
1354 }
1355 return sglist;
1356
1357 err:
1358 kfree(sglist);
1359 return NULL;
1360 }
1361
1362 /*
1363 * create a chunk list holding the DMA addresses of the physical pages in
1364 * the supplied scatter-gather list
1365 */
1366 static int pbl_chunk_list_create(struct efa_dev *dev, struct pbl_context *pbl)
1367 {
1368 struct pbl_chunk_list *chunk_list = &pbl->phys.indirect.chunk_list;
1369 int page_cnt = pbl->phys.indirect.pbl_buf_size_in_pages;
1370 struct scatterlist *pages_sgl = pbl->phys.indirect.sgl;
1371 unsigned int chunk_list_size, chunk_idx, payload_idx;
1372 int sg_dma_cnt = pbl->phys.indirect.sg_dma_cnt;
1373 struct efa_com_ctrl_buff_info *ctrl_buf;
1374 u64 *cur_chunk_buf, *prev_chunk_buf;
1375 struct ib_block_iter biter;
1376 dma_addr_t dma_addr;
1377 int i;
1378
1379 /* allocate a chunk list that consists of 4KB chunks */
1380 chunk_list_size = DIV_ROUND_UP(page_cnt, EFA_PTRS_PER_CHUNK);
1381
1382 chunk_list->size = chunk_list_size;
1383 chunk_list->chunks = kcalloc(chunk_list_size,
1384 sizeof(*chunk_list->chunks),
1385 GFP_KERNEL);
1386 if (!chunk_list->chunks)
1387 return -ENOMEM;
1388
1389 ibdev_dbg(&dev->ibdev,
1390 "chunk_list_size[%u] - pages[%u]\n", chunk_list_size,
1391 page_cnt);
1392
1393 /* allocate chunk buffers: */
1394 for (i = 0; i < chunk_list_size; i++) {
1395 chunk_list->chunks[i].buf = kzalloc(EFA_CHUNK_SIZE, GFP_KERNEL);
1396 if (!chunk_list->chunks[i].buf)
1397 goto chunk_list_dealloc;
1398
1399 chunk_list->chunks[i].length = EFA_CHUNK_USED_SIZE;
1400 }
1401 chunk_list->chunks[chunk_list_size - 1].length =
1402 ((page_cnt % EFA_PTRS_PER_CHUNK) * EFA_CHUNK_PAYLOAD_PTR_SIZE) +
1403 EFA_CHUNK_PTR_SIZE;
1404
1405 /* fill the dma addresses of sg list pages to chunks: */
1406 chunk_idx = 0;
1407 payload_idx = 0;
1408 cur_chunk_buf = chunk_list->chunks[0].buf;
1409 rdma_for_each_block(pages_sgl, &biter, sg_dma_cnt,
1410 EFA_CHUNK_PAYLOAD_SIZE) {
1411 cur_chunk_buf[payload_idx++] =
1412 rdma_block_iter_dma_address(&biter);
1413
1414 if (payload_idx == EFA_PTRS_PER_CHUNK) {
1415 chunk_idx++;
1416 cur_chunk_buf = chunk_list->chunks[chunk_idx].buf;
1417 payload_idx = 0;
1418 }
1419 }
1420
1421 /* map chunks to dma and fill chunks next ptrs */
1422 for (i = chunk_list_size - 1; i >= 0; i--) {
1423 dma_addr = dma_map_single(&dev->pdev->dev,
1424 chunk_list->chunks[i].buf,
1425 chunk_list->chunks[i].length,
1426 DMA_TO_DEVICE);
1427 if (dma_mapping_error(&dev->pdev->dev, dma_addr)) {
1428 ibdev_err(&dev->ibdev,
1429 "chunk[%u] dma_map_failed\n", i);
1430 goto chunk_list_unmap;
1431 }
1432
1433 chunk_list->chunks[i].dma_addr = dma_addr;
1434 ibdev_dbg(&dev->ibdev,
1435 "chunk[%u] mapped at [%pad]\n", i, &dma_addr);
1436
1437 if (!i)
1438 break;
1439
1440 prev_chunk_buf = chunk_list->chunks[i - 1].buf;
1441
1442 ctrl_buf = (struct efa_com_ctrl_buff_info *)
1443 &prev_chunk_buf[EFA_PTRS_PER_CHUNK];
1444 ctrl_buf->length = chunk_list->chunks[i].length;
1445
1446 efa_com_set_dma_addr(dma_addr,
1447 &ctrl_buf->address.mem_addr_high,
1448 &ctrl_buf->address.mem_addr_low);
1449 }
1450
1451 return 0;
1452
1453 chunk_list_unmap:
1454 for (; i < chunk_list_size; i++) {
1455 dma_unmap_single(&dev->pdev->dev, chunk_list->chunks[i].dma_addr,
1456 chunk_list->chunks[i].length, DMA_TO_DEVICE);
1457 }
1458 chunk_list_dealloc:
1459 for (i = 0; i < chunk_list_size; i++)
1460 kfree(chunk_list->chunks[i].buf);
1461
1462 kfree(chunk_list->chunks);
1463 return -ENOMEM;
1464 }
1465
1466 static void pbl_chunk_list_destroy(struct efa_dev *dev, struct pbl_context *pbl)
1467 {
1468 struct pbl_chunk_list *chunk_list = &pbl->phys.indirect.chunk_list;
1469 int i;
1470
1471 for (i = 0; i < chunk_list->size; i++) {
1472 dma_unmap_single(&dev->pdev->dev, chunk_list->chunks[i].dma_addr,
1473 chunk_list->chunks[i].length, DMA_TO_DEVICE);
1474 kfree(chunk_list->chunks[i].buf);
1475 }
1476
1477 kfree(chunk_list->chunks);
1478 }
1479
1480 /* initialize pbl continuous mode: map pbl buffer to a dma address. */
1481 static int pbl_continuous_initialize(struct efa_dev *dev,
1482 struct pbl_context *pbl)
1483 {
1484 dma_addr_t dma_addr;
1485
1486 dma_addr = dma_map_single(&dev->pdev->dev, pbl->pbl_buf,
1487 pbl->pbl_buf_size_in_bytes, DMA_TO_DEVICE);
1488 if (dma_mapping_error(&dev->pdev->dev, dma_addr)) {
1489 ibdev_err(&dev->ibdev, "Unable to map pbl to DMA address\n");
1490 return -ENOMEM;
1491 }
1492
1493 pbl->phys.continuous.dma_addr = dma_addr;
1494 ibdev_dbg(&dev->ibdev,
1495 "pbl continuous - dma_addr = %pad, size[%u]\n",
1496 &dma_addr, pbl->pbl_buf_size_in_bytes);
1497
1498 return 0;
1499 }
1500
1501 /*
1502 * initialize pbl indirect mode:
1503 * create a chunk list out of the dma addresses of the physical pages of
1504 * pbl buffer.
1505 */
1506 static int pbl_indirect_initialize(struct efa_dev *dev, struct pbl_context *pbl)
1507 {
1508 u32 size_in_pages = DIV_ROUND_UP(pbl->pbl_buf_size_in_bytes, EFA_CHUNK_PAYLOAD_SIZE);
1509 struct scatterlist *sgl;
1510 int sg_dma_cnt, err;
1511
1512 BUILD_BUG_ON(EFA_CHUNK_PAYLOAD_SIZE > PAGE_SIZE);
1513 sgl = efa_vmalloc_buf_to_sg(pbl->pbl_buf, size_in_pages);
1514 if (!sgl)
1515 return -ENOMEM;
1516
1517 sg_dma_cnt = dma_map_sg(&dev->pdev->dev, sgl, size_in_pages, DMA_TO_DEVICE);
1518 if (!sg_dma_cnt) {
1519 err = -EINVAL;
1520 goto err_map;
1521 }
1522
1523 pbl->phys.indirect.pbl_buf_size_in_pages = size_in_pages;
1524 pbl->phys.indirect.sgl = sgl;
1525 pbl->phys.indirect.sg_dma_cnt = sg_dma_cnt;
1526 err = pbl_chunk_list_create(dev, pbl);
1527 if (err) {
1528 ibdev_dbg(&dev->ibdev,
1529 "chunk_list creation failed[%d]\n", err);
1530 goto err_chunk;
1531 }
1532
1533 ibdev_dbg(&dev->ibdev,
1534 "pbl indirect - size[%u], chunks[%u]\n",
1535 pbl->pbl_buf_size_in_bytes,
1536 pbl->phys.indirect.chunk_list.size);
1537
1538 return 0;
1539
1540 err_chunk:
1541 dma_unmap_sg(&dev->pdev->dev, sgl, size_in_pages, DMA_TO_DEVICE);
1542 err_map:
1543 kfree(sgl);
1544 return err;
1545 }
1546
1547 static void pbl_indirect_terminate(struct efa_dev *dev, struct pbl_context *pbl)
1548 {
1549 pbl_chunk_list_destroy(dev, pbl);
1550 dma_unmap_sg(&dev->pdev->dev, pbl->phys.indirect.sgl,
1551 pbl->phys.indirect.pbl_buf_size_in_pages, DMA_TO_DEVICE);
1552 kfree(pbl->phys.indirect.sgl);
1553 }
1554
1555 /* create a page buffer list from a mapped user memory region */
1556 static int pbl_create(struct efa_dev *dev,
1557 struct pbl_context *pbl,
1558 struct ib_umem *umem,
1559 int hp_cnt,
1560 u8 hp_shift)
1561 {
1562 int err;
1563
1564 pbl->pbl_buf_size_in_bytes = hp_cnt * EFA_CHUNK_PAYLOAD_PTR_SIZE;
1565 pbl->pbl_buf = kvzalloc(pbl->pbl_buf_size_in_bytes, GFP_KERNEL);
1566 if (!pbl->pbl_buf)
1567 return -ENOMEM;
1568
1569 if (is_vmalloc_addr(pbl->pbl_buf)) {
1570 pbl->physically_continuous = 0;
1571 err = umem_to_page_list(dev, umem, pbl->pbl_buf, hp_cnt,
1572 hp_shift);
1573 if (err)
1574 goto err_free;
1575
1576 err = pbl_indirect_initialize(dev, pbl);
1577 if (err)
1578 goto err_free;
1579 } else {
1580 pbl->physically_continuous = 1;
1581 err = umem_to_page_list(dev, umem, pbl->pbl_buf, hp_cnt,
1582 hp_shift);
1583 if (err)
1584 goto err_free;
1585
1586 err = pbl_continuous_initialize(dev, pbl);
1587 if (err)
1588 goto err_free;
1589 }
1590
1591 ibdev_dbg(&dev->ibdev,
1592 "user_pbl_created: user_pages[%u], continuous[%u]\n",
1593 hp_cnt, pbl->physically_continuous);
1594
1595 return 0;
1596
1597 err_free:
1598 kvfree(pbl->pbl_buf);
1599 return err;
1600 }
1601
1602 static void pbl_destroy(struct efa_dev *dev, struct pbl_context *pbl)
1603 {
1604 if (pbl->physically_continuous)
1605 dma_unmap_single(&dev->pdev->dev, pbl->phys.continuous.dma_addr,
1606 pbl->pbl_buf_size_in_bytes, DMA_TO_DEVICE);
1607 else
1608 pbl_indirect_terminate(dev, pbl);
1609
1610 kvfree(pbl->pbl_buf);
1611 }
1612
1613 static int efa_create_inline_pbl(struct efa_dev *dev, struct efa_mr *mr,
1614 struct efa_com_reg_mr_params *params)
1615 {
1616 int err;
1617
1618 params->inline_pbl = 1;
1619 err = umem_to_page_list(dev, mr->umem, params->pbl.inline_pbl_array,
1620 params->page_num, params->page_shift);
1621 if (err)
1622 return err;
1623
1624 ibdev_dbg(&dev->ibdev,
1625 "inline_pbl_array - pages[%u]\n", params->page_num);
1626
1627 return 0;
1628 }
1629
1630 static int efa_create_pbl(struct efa_dev *dev,
1631 struct pbl_context *pbl,
1632 struct efa_mr *mr,
1633 struct efa_com_reg_mr_params *params)
1634 {
1635 int err;
1636
1637 err = pbl_create(dev, pbl, mr->umem, params->page_num,
1638 params->page_shift);
1639 if (err) {
1640 ibdev_dbg(&dev->ibdev, "Failed to create pbl[%d]\n", err);
1641 return err;
1642 }
1643
1644 params->inline_pbl = 0;
1645 params->indirect = !pbl->physically_continuous;
1646 if (pbl->physically_continuous) {
1647 params->pbl.pbl.length = pbl->pbl_buf_size_in_bytes;
1648
1649 efa_com_set_dma_addr(pbl->phys.continuous.dma_addr,
1650 &params->pbl.pbl.address.mem_addr_high,
1651 &params->pbl.pbl.address.mem_addr_low);
1652 } else {
1653 params->pbl.pbl.length =
1654 pbl->phys.indirect.chunk_list.chunks[0].length;
1655
1656 efa_com_set_dma_addr(pbl->phys.indirect.chunk_list.chunks[0].dma_addr,
1657 &params->pbl.pbl.address.mem_addr_high,
1658 &params->pbl.pbl.address.mem_addr_low);
1659 }
1660
1661 return 0;
1662 }
1663
1664 static struct efa_mr *efa_alloc_mr(struct ib_pd *ibpd, int access_flags,
1665 struct ib_udata *udata)
1666 {
1667 struct efa_dev *dev = to_edev(ibpd->device);
1668 int supp_access_flags;
1669 struct efa_mr *mr;
1670
1671 if (udata && udata->inlen &&
1672 !ib_is_udata_cleared(udata, 0, sizeof(udata->inlen))) {
1673 ibdev_dbg(&dev->ibdev,
1674 "Incompatible ABI params, udata not cleared\n");
1675 return ERR_PTR(-EINVAL);
1676 }
1677
1678 supp_access_flags =
1679 IB_ACCESS_LOCAL_WRITE |
1680 (EFA_DEV_CAP(dev, RDMA_READ) ? IB_ACCESS_REMOTE_READ : 0) |
1681 (EFA_DEV_CAP(dev, RDMA_WRITE) ? IB_ACCESS_REMOTE_WRITE : 0);
1682
1683 access_flags &= ~IB_ACCESS_OPTIONAL;
1684 if (access_flags & ~supp_access_flags) {
1685 ibdev_dbg(&dev->ibdev,
1686 "Unsupported access flags[%#x], supported[%#x]\n",
1687 access_flags, supp_access_flags);
1688 return ERR_PTR(-EOPNOTSUPP);
1689 }
1690
1691 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
1692 if (!mr)
1693 return ERR_PTR(-ENOMEM);
1694
1695 return mr;
1696 }
1697
1698 static int efa_register_mr(struct ib_pd *ibpd, struct efa_mr *mr, u64 start,
1699 u64 length, u64 virt_addr, int access_flags)
1700 {
1701 struct efa_dev *dev = to_edev(ibpd->device);
1702 struct efa_com_reg_mr_params params = {};
1703 struct efa_com_reg_mr_result result = {};
1704 struct pbl_context pbl;
1705 unsigned int pg_sz;
1706 int inline_size;
1707 int err;
1708
1709 params.pd = to_epd(ibpd)->pdn;
1710 params.iova = virt_addr;
1711 params.mr_length_in_bytes = length;
1712 params.permissions = access_flags;
1713
1714 pg_sz = ib_umem_find_best_pgsz(mr->umem,
1715 dev->dev_attr.page_size_cap,
1716 virt_addr);
1717 if (!pg_sz) {
1718 ibdev_dbg(&dev->ibdev, "Failed to find a suitable page size in page_size_cap %#llx\n",
1719 dev->dev_attr.page_size_cap);
1720 return -EOPNOTSUPP;
1721 }
1722
1723 params.page_shift = order_base_2(pg_sz);
1724 params.page_num = ib_umem_num_dma_blocks(mr->umem, pg_sz);
1725
1726 ibdev_dbg(&dev->ibdev,
1727 "start %#llx length %#llx params.page_shift %u params.page_num %u\n",
1728 start, length, params.page_shift, params.page_num);
1729
1730 inline_size = ARRAY_SIZE(params.pbl.inline_pbl_array);
1731 if (params.page_num <= inline_size) {
1732 err = efa_create_inline_pbl(dev, mr, &params);
1733 if (err)
1734 return err;
1735
1736 err = efa_com_register_mr(&dev->edev, &params, &result);
1737 if (err)
1738 return err;
1739 } else {
1740 err = efa_create_pbl(dev, &pbl, mr, &params);
1741 if (err)
1742 return err;
1743
1744 err = efa_com_register_mr(&dev->edev, &params, &result);
1745 pbl_destroy(dev, &pbl);
1746
1747 if (err)
1748 return err;
1749 }
1750
1751 mr->ibmr.lkey = result.l_key;
1752 mr->ibmr.rkey = result.r_key;
1753 mr->ibmr.length = length;
1754 mr->ic_info.recv_ic_id = result.ic_info.recv_ic_id;
1755 mr->ic_info.rdma_read_ic_id = result.ic_info.rdma_read_ic_id;
1756 mr->ic_info.rdma_recv_ic_id = result.ic_info.rdma_recv_ic_id;
1757 mr->ic_info.recv_ic_id_valid = result.ic_info.recv_ic_id_valid;
1758 mr->ic_info.rdma_read_ic_id_valid = result.ic_info.rdma_read_ic_id_valid;
1759 mr->ic_info.rdma_recv_ic_id_valid = result.ic_info.rdma_recv_ic_id_valid;
1760 ibdev_dbg(&dev->ibdev, "Registered mr[%d]\n", mr->ibmr.lkey);
1761
1762 return 0;
1763 }
1764
1765 struct ib_mr *efa_reg_user_mr_dmabuf(struct ib_pd *ibpd, u64 start,
1766 u64 length, u64 virt_addr,
1767 int fd, int access_flags,
1768 struct ib_dmah *dmah,
1769 struct uverbs_attr_bundle *attrs)
1770 {
1771 struct efa_dev *dev = to_edev(ibpd->device);
1772 struct ib_umem_dmabuf *umem_dmabuf;
1773 struct efa_mr *mr;
1774 int err;
1775
1776 if (dmah) {
1777 err = -EOPNOTSUPP;
1778 goto err_out;
1779 }
1780
1781 mr = efa_alloc_mr(ibpd, access_flags, &attrs->driver_udata);
1782 if (IS_ERR(mr)) {
1783 err = PTR_ERR(mr);
1784 goto err_out;
1785 }
1786
1787 umem_dmabuf = ib_umem_dmabuf_get_pinned(ibpd->device, start, length, fd,
1788 access_flags);
1789 if (IS_ERR(umem_dmabuf)) {
1790 err = PTR_ERR(umem_dmabuf);
1791 ibdev_dbg(&dev->ibdev, "Failed to get dmabuf umem[%d]\n", err);
1792 goto err_free;
1793 }
1794
1795 mr->umem = &umem_dmabuf->umem;
1796 err = efa_register_mr(ibpd, mr, start, length, virt_addr, access_flags);
1797 if (err)
1798 goto err_release;
1799
1800 return &mr->ibmr;
1801
1802 err_release:
1803 ib_umem_release(mr->umem);
1804 err_free:
1805 kfree(mr);
1806 err_out:
1807 atomic64_inc(&dev->stats.reg_mr_err);
1808 return ERR_PTR(err);
1809 }
1810
1811 struct ib_mr *efa_reg_mr(struct ib_pd *ibpd, u64 start, u64 length,
1812 u64 virt_addr, int access_flags,
1813 struct ib_dmah *dmah,
1814 struct ib_udata *udata)
1815 {
1816 struct efa_dev *dev = to_edev(ibpd->device);
1817 struct efa_mr *mr;
1818 int err;
1819
1820 if (dmah) {
1821 err = -EOPNOTSUPP;
1822 goto err_out;
1823 }
1824
1825 mr = efa_alloc_mr(ibpd, access_flags, udata);
1826 if (IS_ERR(mr)) {
1827 err = PTR_ERR(mr);
1828 goto err_out;
1829 }
1830
1831 mr->umem = ib_umem_get(ibpd->device, start, length, access_flags);
1832 if (IS_ERR(mr->umem)) {
1833 err = PTR_ERR(mr->umem);
1834 ibdev_dbg(&dev->ibdev,
1835 "Failed to pin and map user space memory[%d]\n", err);
1836 goto err_free;
1837 }
1838
1839 err = efa_register_mr(ibpd, mr, start, length, virt_addr, access_flags);
1840 if (err)
1841 goto err_release;
1842
1843 return &mr->ibmr;
1844
1845 err_release:
1846 ib_umem_release(mr->umem);
1847 err_free:
1848 kfree(mr);
1849 err_out:
1850 atomic64_inc(&dev->stats.reg_mr_err);
1851 return ERR_PTR(err);
1852 }
1853
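/*
 * Direct-verbs handler for EFA_IB_METHOD_MR_QUERY: reports the MR's
 * interconnect IDs (recv, RDMA read, RDMA recv) together with a mask that
 * indicates which of them are valid.
 */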
1854 static int UVERBS_HANDLER(EFA_IB_METHOD_MR_QUERY)(struct uverbs_attr_bundle *attrs)
1855 {
1856 struct ib_mr *ibmr = uverbs_attr_get_obj(attrs, EFA_IB_ATTR_QUERY_MR_HANDLE);
1857 struct efa_mr *mr = to_emr(ibmr);
1858 u16 ic_id_validity = 0;
1859 int ret;
1860
1861 ret = uverbs_copy_to(attrs, EFA_IB_ATTR_QUERY_MR_RESP_RECV_IC_ID,
1862 &mr->ic_info.recv_ic_id, sizeof(mr->ic_info.recv_ic_id));
1863 if (ret)
1864 return ret;
1865
1866 ret = uverbs_copy_to(attrs, EFA_IB_ATTR_QUERY_MR_RESP_RDMA_READ_IC_ID,
1867 &mr->ic_info.rdma_read_ic_id, sizeof(mr->ic_info.rdma_read_ic_id));
1868 if (ret)
1869 return ret;
1870
1871 ret = uverbs_copy_to(attrs, EFA_IB_ATTR_QUERY_MR_RESP_RDMA_RECV_IC_ID,
1872 &mr->ic_info.rdma_recv_ic_id, sizeof(mr->ic_info.rdma_recv_ic_id));
1873 if (ret)
1874 return ret;
1875
1876 if (mr->ic_info.recv_ic_id_valid)
1877 ic_id_validity |= EFA_QUERY_MR_VALIDITY_RECV_IC_ID;
1878 if (mr->ic_info.rdma_read_ic_id_valid)
1879 ic_id_validity |= EFA_QUERY_MR_VALIDITY_RDMA_READ_IC_ID;
1880 if (mr->ic_info.rdma_recv_ic_id_valid)
1881 ic_id_validity |= EFA_QUERY_MR_VALIDITY_RDMA_RECV_IC_ID;
1882
1883 return uverbs_copy_to(attrs, EFA_IB_ATTR_QUERY_MR_RESP_IC_ID_VALIDITY,
1884 &ic_id_validity, sizeof(ic_id_validity));
1885 }
1886
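/* Deregister the MR with the device, then release its umem and free it. */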
1887 int efa_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
1888 {
1889 struct efa_dev *dev = to_edev(ibmr->device);
1890 struct efa_com_dereg_mr_params params;
1891 struct efa_mr *mr = to_emr(ibmr);
1892 int err;
1893
1894 ibdev_dbg(&dev->ibdev, "Deregister mr[%d]\n", ibmr->lkey);
1895
1896 params.l_key = mr->ibmr.lkey;
1897 err = efa_com_dereg_mr(&dev->edev, &params);
1898 if (err)
1899 return err;
1900
1901 ib_umem_release(mr->umem);
1902 kfree(mr);
1903
1904 return 0;
1905 }
1906
1907 int efa_get_port_immutable(struct ib_device *ibdev, u32 port_num,
1908 struct ib_port_immutable *immutable)
1909 {
1910 struct ib_port_attr attr;
1911 int err;
1912
1913 err = ib_query_port(ibdev, port_num, &attr);
1914 if (err) {
1915 ibdev_dbg(ibdev, "Couldn't query port err[%d]\n", err);
1916 return err;
1917 }
1918
1919 immutable->pkey_tbl_len = attr.pkey_tbl_len;
1920 immutable->gid_tbl_len = attr.gid_tbl_len;
1921
1922 return 0;
1923 }
1924
1925 static int efa_dealloc_uar(struct efa_dev *dev, u16 uarn)
1926 {
1927 struct efa_com_dealloc_uar_params params = {
1928 .uarn = uarn,
1929 };
1930
1931 return efa_com_dealloc_uar(&dev->edev, &params);
1932 }
1933
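/*
 * Sets _attr_str to NULL when the device does not expose the attribute or
 * when userspace acknowledged it in comp_mask; otherwise sets it to the
 * attribute name, signalling a failed handshake.
 */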
1934 #define EFA_CHECK_USER_COMP(_dev, _comp_mask, _attr, _mask, _attr_str) \
1935 (_attr_str = (!(_dev)->dev_attr._attr || ((_comp_mask) & (_mask))) ? \
1936 NULL : #_attr)
1937
1938 static int efa_user_comp_handshake(const struct ib_ucontext *ibucontext,
1939 const struct efa_ibv_alloc_ucontext_cmd *cmd)
1940 {
1941 struct efa_dev *dev = to_edev(ibucontext->device);
1942 char *attr_str;
1943
1944 if (EFA_CHECK_USER_COMP(dev, cmd->comp_mask, max_tx_batch,
1945 EFA_ALLOC_UCONTEXT_CMD_COMP_TX_BATCH, attr_str))
1946 goto err;
1947
1948 if (EFA_CHECK_USER_COMP(dev, cmd->comp_mask, min_sq_depth,
1949 EFA_ALLOC_UCONTEXT_CMD_COMP_MIN_SQ_WR,
1950 attr_str))
1951 goto err;
1952
1953 return 0;
1954
1955 err:
1956 ibdev_dbg(&dev->ibdev, "Userspace handshake failed for %s attribute\n",
1957 attr_str);
1958 return -EOPNOTSUPP;
1959 }
1960
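/*
 * Allocate a user context: validate the userspace handshake, allocate a UAR
 * for the context and report the supported commands and device limits back
 * to userspace.
 */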
1961 int efa_alloc_ucontext(struct ib_ucontext *ibucontext, struct ib_udata *udata)
1962 {
1963 struct efa_ucontext *ucontext = to_eucontext(ibucontext);
1964 struct efa_dev *dev = to_edev(ibucontext->device);
1965 struct efa_ibv_alloc_ucontext_resp resp = {};
1966 struct efa_ibv_alloc_ucontext_cmd cmd = {};
1967 struct efa_com_alloc_uar_result result;
1968 int err;
1969
1970 /*
1971 * It's fine if the driver does not recognize all of the request fields;
1972 * the fields it does support are acked back in the response.
1973 */
1974
1975 err = ib_copy_from_udata(&cmd, udata,
1976 min(sizeof(cmd), udata->inlen));
1977 if (err) {
1978 ibdev_dbg(&dev->ibdev,
1979 "Cannot copy udata for alloc_ucontext\n");
1980 goto err_out;
1981 }
1982
1983 err = efa_user_comp_handshake(ibucontext, &cmd);
1984 if (err)
1985 goto err_out;
1986
1987 err = efa_com_alloc_uar(&dev->edev, &result);
1988 if (err)
1989 goto err_out;
1990
1991 ucontext->uarn = result.uarn;
1992
1993 resp.cmds_supp_udata_mask |= EFA_USER_CMDS_SUPP_UDATA_QUERY_DEVICE;
1994 resp.cmds_supp_udata_mask |= EFA_USER_CMDS_SUPP_UDATA_CREATE_AH;
1995 resp.sub_cqs_per_cq = dev->dev_attr.sub_cqs_per_cq;
1996 resp.inline_buf_size = dev->dev_attr.inline_buf_size;
1997 resp.max_llq_size = dev->dev_attr.max_llq_size;
1998 resp.max_tx_batch = dev->dev_attr.max_tx_batch;
1999 resp.min_sq_wr = dev->dev_attr.min_sq_depth;
2000
2001 err = ib_copy_to_udata(udata, &resp,
2002 min(sizeof(resp), udata->outlen));
2003 if (err)
2004 goto err_dealloc_uar;
2005
2006 return 0;
2007
2008 err_dealloc_uar:
2009 efa_dealloc_uar(dev, result.uarn);
2010 err_out:
2011 atomic64_inc(&dev->stats.alloc_ucontext_err);
2012 return err;
2013 }
2014
2015 void efa_dealloc_ucontext(struct ib_ucontext *ibucontext)
2016 {
2017 struct efa_ucontext *ucontext = to_eucontext(ibucontext);
2018 struct efa_dev *dev = to_edev(ibucontext->device);
2019
2020 efa_dealloc_uar(dev, ucontext->uarn);
2021 }
2022
2023 void efa_mmap_free(struct rdma_user_mmap_entry *rdma_entry)
2024 {
2025 struct efa_user_mmap_entry *entry = to_emmap(rdma_entry);
2026
2027 kfree(entry);
2028 }
2029
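/*
 * Map the mmap entry matching vma->vm_pgoff into userspace: I/O regions are
 * mapped non-cached or write-combined via rdma_user_mmap_io(), while DMA
 * pages are inserted one page at a time with vm_insert_page().
 */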
2030 static int __efa_mmap(struct efa_dev *dev, struct efa_ucontext *ucontext,
2031 struct vm_area_struct *vma)
2032 {
2033 struct rdma_user_mmap_entry *rdma_entry;
2034 struct efa_user_mmap_entry *entry;
2035 unsigned long va;
2036 int err = 0;
2037 u64 pfn;
2038
2039 rdma_entry = rdma_user_mmap_entry_get(&ucontext->ibucontext, vma);
2040 if (!rdma_entry) {
2041 ibdev_dbg(&dev->ibdev,
2042 "pgoff[%#lx] does not have valid entry\n",
2043 vma->vm_pgoff);
2044 atomic64_inc(&dev->stats.mmap_err);
2045 return -EINVAL;
2046 }
2047 entry = to_emmap(rdma_entry);
2048
2049 ibdev_dbg(&dev->ibdev,
2050 "Mapping address[%#llx], length[%#zx], mmap_flag[%d]\n",
2051 entry->address, rdma_entry->npages * PAGE_SIZE,
2052 entry->mmap_flag);
2053
2054 pfn = entry->address >> PAGE_SHIFT;
2055 switch (entry->mmap_flag) {
2056 case EFA_MMAP_IO_NC:
2057 err = rdma_user_mmap_io(&ucontext->ibucontext, vma, pfn,
2058 entry->rdma_entry.npages * PAGE_SIZE,
2059 pgprot_noncached(vma->vm_page_prot),
2060 rdma_entry);
2061 break;
2062 case EFA_MMAP_IO_WC:
2063 err = rdma_user_mmap_io(&ucontext->ibucontext, vma, pfn,
2064 entry->rdma_entry.npages * PAGE_SIZE,
2065 pgprot_writecombine(vma->vm_page_prot),
2066 rdma_entry);
2067 break;
2068 case EFA_MMAP_DMA_PAGE:
2069 for (va = vma->vm_start; va < vma->vm_end;
2070 va += PAGE_SIZE, pfn++) {
2071 err = vm_insert_page(vma, va, pfn_to_page(pfn));
2072 if (err)
2073 break;
2074 }
2075 break;
2076 default:
2077 err = -EINVAL;
2078 }
2079
2080 if (err) {
2081 ibdev_dbg(
2082 &dev->ibdev,
2083 "Couldn't mmap address[%#llx] length[%#zx] mmap_flag[%d] err[%d]\n",
2084 entry->address, rdma_entry->npages * PAGE_SIZE,
2085 entry->mmap_flag, err);
2086 atomic64_inc(&dev->stats.mmap_err);
2087 }
2088
2089 rdma_user_mmap_entry_put(rdma_entry);
2090 return err;
2091 }
2092
2093 int efa_mmap(struct ib_ucontext *ibucontext,
2094 struct vm_area_struct *vma)
2095 {
2096 struct efa_ucontext *ucontext = to_eucontext(ibucontext);
2097 struct efa_dev *dev = to_edev(ibucontext->device);
2098 size_t length = vma->vm_end - vma->vm_start;
2099
2100 ibdev_dbg(&dev->ibdev,
2101 "start %#lx, end %#lx, length = %#zx, pgoff = %#lx\n",
2102 vma->vm_start, vma->vm_end, length, vma->vm_pgoff);
2103
2104 return __efa_mmap(dev, ucontext, vma);
2105 }
2106
2107 static int efa_ah_destroy(struct efa_dev *dev, struct efa_ah *ah)
2108 {
2109 struct efa_com_destroy_ah_params params = {
2110 .ah = ah->ah,
2111 .pdn = to_epd(ah->ibah.pd)->pdn,
2112 };
2113
2114 return efa_com_destroy_ah(&dev->edev, &params);
2115 }
2116
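/*
 * Create an address handle for the destination GID. This issues an admin
 * command and is therefore rejected in atomic (non-sleepable) context.
 */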
2117 int efa_create_ah(struct ib_ah *ibah,
2118 struct rdma_ah_init_attr *init_attr,
2119 struct ib_udata *udata)
2120 {
2121 struct rdma_ah_attr *ah_attr = init_attr->ah_attr;
2122 struct efa_dev *dev = to_edev(ibah->device);
2123 struct efa_com_create_ah_params params = {};
2124 struct efa_ibv_create_ah_resp resp = {};
2125 struct efa_com_create_ah_result result;
2126 struct efa_ah *ah = to_eah(ibah);
2127 int err;
2128
2129 if (!(init_attr->flags & RDMA_CREATE_AH_SLEEPABLE)) {
2130 ibdev_dbg(&dev->ibdev,
2131 "Create address handle is not supported in atomic context\n");
2132 err = -EOPNOTSUPP;
2133 goto err_out;
2134 }
2135
2136 if (udata->inlen &&
2137 !ib_is_udata_cleared(udata, 0, udata->inlen)) {
2138 ibdev_dbg(&dev->ibdev, "Incompatible ABI params\n");
2139 err = -EINVAL;
2140 goto err_out;
2141 }
2142
2143 memcpy(params.dest_addr, ah_attr->grh.dgid.raw,
2144 sizeof(params.dest_addr));
2145 params.pdn = to_epd(ibah->pd)->pdn;
2146 err = efa_com_create_ah(&dev->edev, &params, &result);
2147 if (err)
2148 goto err_out;
2149
2150 memcpy(ah->id, ah_attr->grh.dgid.raw, sizeof(ah->id));
2151 ah->ah = result.ah;
2152
2153 resp.efa_address_handle = result.ah;
2154
2155 if (udata->outlen) {
2156 err = ib_copy_to_udata(udata, &resp,
2157 min(sizeof(resp), udata->outlen));
2158 if (err) {
2159 ibdev_dbg(&dev->ibdev,
2160 "Failed to copy udata for create_ah response\n");
2161 goto err_destroy_ah;
2162 }
2163 }
2164 ibdev_dbg(&dev->ibdev, "Created ah[%d]\n", ah->ah);
2165
2166 return 0;
2167
2168 err_destroy_ah:
2169 efa_ah_destroy(dev, ah);
2170 err_out:
2171 atomic64_inc(&dev->stats.create_ah_err);
2172 return err;
2173 }
2174
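/*
 * Destroying an address handle issues an admin command, so it is only
 * supported in a sleepable context.
 */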
2175 int efa_destroy_ah(struct ib_ah *ibah, u32 flags)
2176 {
2177 struct efa_dev *dev = to_edev(ibah->pd->device);
2178 struct efa_ah *ah = to_eah(ibah);
2179
2180 ibdev_dbg(&dev->ibdev, "Destroy ah[%d]\n", ah->ah);
2181
2182 if (!(flags & RDMA_DESTROY_AH_SLEEPABLE)) {
2183 ibdev_dbg(&dev->ibdev,
2184 "Destroy address handle is not supported in atomic context\n");
2185 return -EOPNOTSUPP;
2186 }
2187
2188 efa_ah_destroy(dev, ah);
2189 return 0;
2190 }
2191
2192 struct rdma_hw_stats *efa_alloc_hw_port_stats(struct ib_device *ibdev,
2193 u32 port_num)
2194 {
2195 return rdma_alloc_hw_stats_struct(efa_port_stats_descs,
2196 ARRAY_SIZE(efa_port_stats_descs),
2197 RDMA_HW_STATS_DEFAULT_LIFESPAN);
2198 }
2199
2200 struct rdma_hw_stats *efa_alloc_hw_device_stats(struct ib_device *ibdev)
2201 {
2202 return rdma_alloc_hw_stats_struct(efa_device_stats_descs,
2203 ARRAY_SIZE(efa_device_stats_descs),
2204 RDMA_HW_STATS_DEFAULT_LIFESPAN);
2205 }
2206
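/*
 * Fill the device-wide counters from the admin queue statistics and the
 * driver's software error counters.
 */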
2207 static int efa_fill_device_stats(struct efa_dev *dev,
2208 struct rdma_hw_stats *stats)
2209 {
2210 struct efa_com_stats_admin *as = &dev->edev.aq.stats;
2211 struct efa_stats *s = &dev->stats;
2212
2213 stats->value[EFA_SUBMITTED_CMDS] = atomic64_read(&as->submitted_cmd);
2214 stats->value[EFA_COMPLETED_CMDS] = atomic64_read(&as->completed_cmd);
2215 stats->value[EFA_CMDS_ERR] = atomic64_read(&as->cmd_err);
2216 stats->value[EFA_NO_COMPLETION_CMDS] = atomic64_read(&as->no_completion);
2217
2218 stats->value[EFA_KEEP_ALIVE_RCVD] = atomic64_read(&s->keep_alive_rcvd);
2219 stats->value[EFA_ALLOC_PD_ERR] = atomic64_read(&s->alloc_pd_err);
2220 stats->value[EFA_CREATE_QP_ERR] = atomic64_read(&s->create_qp_err);
2221 stats->value[EFA_CREATE_CQ_ERR] = atomic64_read(&s->create_cq_err);
2222 stats->value[EFA_REG_MR_ERR] = atomic64_read(&s->reg_mr_err);
2223 stats->value[EFA_ALLOC_UCONTEXT_ERR] =
2224 atomic64_read(&s->alloc_ucontext_err);
2225 stats->value[EFA_CREATE_AH_ERR] = atomic64_read(&s->create_ah_err);
2226 stats->value[EFA_MMAP_ERR] = atomic64_read(&s->mmap_err);
2227
2228 return ARRAY_SIZE(efa_device_stats_descs);
2229 }
2230
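/*
 * Fill the port counters by issuing one get_stats admin command per
 * statistics type; RDMA write statistics are only queried when the device
 * advertises the RDMA_WRITE capability.
 */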
2231 static int efa_fill_port_stats(struct efa_dev *dev, struct rdma_hw_stats *stats,
2232 u32 port_num)
2233 {
2234 struct efa_com_get_stats_params params = {};
2235 union efa_com_get_stats_result result;
2236 struct efa_com_rdma_write_stats *rws;
2237 struct efa_com_rdma_read_stats *rrs;
2238 struct efa_com_messages_stats *ms;
2239 struct efa_com_network_stats *ns;
2240 struct efa_com_basic_stats *bs;
2241 int err;
2242
2243 params.scope = EFA_ADMIN_GET_STATS_SCOPE_ALL;
2244 params.type = EFA_ADMIN_GET_STATS_TYPE_BASIC;
2245
2246 err = efa_com_get_stats(&dev->edev, &params, &result);
2247 if (err)
2248 return err;
2249
2250 bs = &result.basic_stats;
2251 stats->value[EFA_TX_BYTES] = bs->tx_bytes;
2252 stats->value[EFA_TX_PKTS] = bs->tx_pkts;
2253 stats->value[EFA_RX_BYTES] = bs->rx_bytes;
2254 stats->value[EFA_RX_PKTS] = bs->rx_pkts;
2255 stats->value[EFA_RX_DROPS] = bs->rx_drops;
2256
2257 params.type = EFA_ADMIN_GET_STATS_TYPE_MESSAGES;
2258 err = efa_com_get_stats(&dev->edev, &params, &result);
2259 if (err)
2260 return err;
2261
2262 ms = &result.messages_stats;
2263 stats->value[EFA_SEND_BYTES] = ms->send_bytes;
2264 stats->value[EFA_SEND_WRS] = ms->send_wrs;
2265 stats->value[EFA_RECV_BYTES] = ms->recv_bytes;
2266 stats->value[EFA_RECV_WRS] = ms->recv_wrs;
2267
2268 params.type = EFA_ADMIN_GET_STATS_TYPE_RDMA_READ;
2269 err = efa_com_get_stats(&dev->edev, &params, &result);
2270 if (err)
2271 return err;
2272
2273 rrs = &result.rdma_read_stats;
2274 stats->value[EFA_RDMA_READ_WRS] = rrs->read_wrs;
2275 stats->value[EFA_RDMA_READ_BYTES] = rrs->read_bytes;
2276 stats->value[EFA_RDMA_READ_WR_ERR] = rrs->read_wr_err;
2277 stats->value[EFA_RDMA_READ_RESP_BYTES] = rrs->read_resp_bytes;
2278
2279 if (EFA_DEV_CAP(dev, RDMA_WRITE)) {
2280 params.type = EFA_ADMIN_GET_STATS_TYPE_RDMA_WRITE;
2281 err = efa_com_get_stats(&dev->edev, &params, &result);
2282 if (err)
2283 return err;
2284
2285 rws = &result.rdma_write_stats;
2286 stats->value[EFA_RDMA_WRITE_WRS] = rws->write_wrs;
2287 stats->value[EFA_RDMA_WRITE_BYTES] = rws->write_bytes;
2288 stats->value[EFA_RDMA_WRITE_WR_ERR] = rws->write_wr_err;
2289 stats->value[EFA_RDMA_WRITE_RECV_BYTES] = rws->write_recv_bytes;
2290 }
2291
2292 params.type = EFA_ADMIN_GET_STATS_TYPE_NETWORK;
2293 err = efa_com_get_stats(&dev->edev, &params, &result);
2294 if (err)
2295 return err;
2296
2297 ns = &result.network_stats;
2298 stats->value[EFA_RETRANS_BYTES] = ns->retrans_bytes;
2299 stats->value[EFA_RETRANS_PKTS] = ns->retrans_pkts;
2300 stats->value[EFA_RETRANS_TIMEOUT_EVENS] = ns->retrans_timeout_events;
2301 stats->value[EFA_UNRESPONSIVE_REMOTE_EVENTS] = ns->unresponsive_remote_events;
2302 stats->value[EFA_IMPAIRED_REMOTE_CONN_EVENTS] = ns->impaired_remote_conn_events;
2303
2304 return ARRAY_SIZE(efa_port_stats_descs);
2305 }
2306
2307 int efa_get_hw_stats(struct ib_device *ibdev, struct rdma_hw_stats *stats,
2308 u32 port_num, int index)
2309 {
2310 if (port_num)
2311 return efa_fill_port_stats(to_edev(ibdev), stats, port_num);
2312 else
2313 return efa_fill_device_stats(to_edev(ibdev), stats);
2314 }
2315
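/* EFA does not expose a standard RDMA link layer, so report it as unspecified. */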
2316 enum rdma_link_layer efa_port_link_layer(struct ib_device *ibdev,
2317 u32 port_num)
2318 {
2319 return IB_LINK_LAYER_UNSPECIFIED;
2320 }
2321
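/* Direct-verbs UAPI definition of the MR query method and its response attributes. */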
2322 DECLARE_UVERBS_NAMED_METHOD(EFA_IB_METHOD_MR_QUERY,
2323 UVERBS_ATTR_IDR(EFA_IB_ATTR_QUERY_MR_HANDLE,
2324 UVERBS_OBJECT_MR,
2325 UVERBS_ACCESS_READ,
2326 UA_MANDATORY),
2327 UVERBS_ATTR_PTR_OUT(EFA_IB_ATTR_QUERY_MR_RESP_IC_ID_VALIDITY,
2328 UVERBS_ATTR_TYPE(u16),
2329 UA_MANDATORY),
2330 UVERBS_ATTR_PTR_OUT(EFA_IB_ATTR_QUERY_MR_RESP_RECV_IC_ID,
2331 UVERBS_ATTR_TYPE(u16),
2332 UA_MANDATORY),
2333 UVERBS_ATTR_PTR_OUT(EFA_IB_ATTR_QUERY_MR_RESP_RDMA_READ_IC_ID,
2334 UVERBS_ATTR_TYPE(u16),
2335 UA_MANDATORY),
2336 UVERBS_ATTR_PTR_OUT(EFA_IB_ATTR_QUERY_MR_RESP_RDMA_RECV_IC_ID,
2337 UVERBS_ATTR_TYPE(u16),
2338 UA_MANDATORY));
2339
2340 ADD_UVERBS_METHODS(efa_mr,
2341 UVERBS_OBJECT_MR,
2342 &UVERBS_METHOD(EFA_IB_METHOD_MR_QUERY));
2343
2344 const struct uapi_definition efa_uapi_defs[] = {
2345 UAPI_DEF_CHAIN_OBJ_TREE(UVERBS_OBJECT_MR,
2346 &efa_mr),
2347 {},
2348 };
2349