// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright 2018-2020 Amazon.com, Inc. or its affiliates. All rights reserved.
 */
5
6 #include <linux/vmalloc.h>
7 #include <linux/log2.h>
8
9 #include <rdma/ib_addr.h>
10 #include <rdma/ib_umem.h>
11 #include <rdma/ib_user_verbs.h>
12 #include <rdma/ib_verbs.h>
13 #include <rdma/uverbs_ioctl.h>
14
15 #include "efa.h"
16
17 enum {
18 EFA_MMAP_DMA_PAGE = 0,
19 EFA_MMAP_IO_WC,
20 EFA_MMAP_IO_NC,
21 };
22
23 #define EFA_AENQ_ENABLED_GROUPS \
24 (BIT(EFA_ADMIN_FATAL_ERROR) | BIT(EFA_ADMIN_WARNING) | \
25 BIT(EFA_ADMIN_NOTIFICATION) | BIT(EFA_ADMIN_KEEP_ALIVE))
26
27 struct efa_user_mmap_entry {
28 struct rdma_user_mmap_entry rdma_entry;
29 u64 address;
30 u8 mmap_flag;
31 };
32
33 #define EFA_DEFINE_STATS(op) \
34 op(EFA_TX_BYTES, "tx_bytes") \
35 op(EFA_TX_PKTS, "tx_pkts") \
36 op(EFA_RX_BYTES, "rx_bytes") \
37 op(EFA_RX_PKTS, "rx_pkts") \
38 op(EFA_RX_DROPS, "rx_drops") \
39 op(EFA_SEND_BYTES, "send_bytes") \
40 op(EFA_SEND_WRS, "send_wrs") \
41 op(EFA_RECV_BYTES, "recv_bytes") \
42 op(EFA_RECV_WRS, "recv_wrs") \
43 op(EFA_RDMA_READ_WRS, "rdma_read_wrs") \
44 op(EFA_RDMA_READ_BYTES, "rdma_read_bytes") \
45 op(EFA_RDMA_READ_WR_ERR, "rdma_read_wr_err") \
46 op(EFA_RDMA_READ_RESP_BYTES, "rdma_read_resp_bytes") \
47 op(EFA_SUBMITTED_CMDS, "submitted_cmds") \
48 op(EFA_COMPLETED_CMDS, "completed_cmds") \
49 op(EFA_CMDS_ERR, "cmds_err") \
50 op(EFA_NO_COMPLETION_CMDS, "no_completion_cmds") \
51 op(EFA_KEEP_ALIVE_RCVD, "keep_alive_rcvd") \
52 op(EFA_ALLOC_PD_ERR, "alloc_pd_err") \
53 op(EFA_CREATE_QP_ERR, "create_qp_err") \
54 op(EFA_CREATE_CQ_ERR, "create_cq_err") \
55 op(EFA_REG_MR_ERR, "reg_mr_err") \
56 op(EFA_ALLOC_UCONTEXT_ERR, "alloc_ucontext_err") \
57 op(EFA_CREATE_AH_ERR, "create_ah_err") \
58 op(EFA_MMAP_ERR, "mmap_err")
59
60 #define EFA_STATS_ENUM(ename, name) ename,
61 #define EFA_STATS_STR(ename, name) [ename] = name,
62
63 enum efa_hw_stats {
64 EFA_DEFINE_STATS(EFA_STATS_ENUM)
65 };
66
67 static const char *const efa_stats_names[] = {
68 EFA_DEFINE_STATS(EFA_STATS_STR)
69 };
70
71 #define EFA_CHUNK_PAYLOAD_SHIFT 12
72 #define EFA_CHUNK_PAYLOAD_SIZE BIT(EFA_CHUNK_PAYLOAD_SHIFT)
73 #define EFA_CHUNK_PAYLOAD_PTR_SIZE 8
74
75 #define EFA_CHUNK_SHIFT 12
76 #define EFA_CHUNK_SIZE BIT(EFA_CHUNK_SHIFT)
77 #define EFA_CHUNK_PTR_SIZE sizeof(struct efa_com_ctrl_buff_info)
78
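/*
 * Indirect PBL chunk layout: each EFA_CHUNK_SIZE (4KB) chunk holds
 * EFA_PTRS_PER_CHUNK page DMA addresses followed by a single
 * struct efa_com_ctrl_buff_info that links to the next chunk.
 */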
79 #define EFA_PTRS_PER_CHUNK \
80 ((EFA_CHUNK_SIZE - EFA_CHUNK_PTR_SIZE) / EFA_CHUNK_PAYLOAD_PTR_SIZE)
81
82 #define EFA_CHUNK_USED_SIZE \
83 ((EFA_PTRS_PER_CHUNK * EFA_CHUNK_PAYLOAD_PTR_SIZE) + EFA_CHUNK_PTR_SIZE)
84
85 struct pbl_chunk {
86 dma_addr_t dma_addr;
87 u64 *buf;
88 u32 length;
89 };
90
91 struct pbl_chunk_list {
92 struct pbl_chunk *chunks;
93 unsigned int size;
94 };
95
96 struct pbl_context {
97 union {
98 struct {
99 dma_addr_t dma_addr;
100 } continuous;
101 struct {
102 u32 pbl_buf_size_in_pages;
103 struct scatterlist *sgl;
104 int sg_dma_cnt;
105 struct pbl_chunk_list chunk_list;
106 } indirect;
107 } phys;
108 u64 *pbl_buf;
109 u32 pbl_buf_size_in_bytes;
110 u8 physically_continuous;
111 };
112
static inline struct efa_dev *to_edev(struct ib_device *ibdev)
114 {
115 return container_of(ibdev, struct efa_dev, ibdev);
116 }
117
static inline struct efa_ucontext *to_eucontext(struct ib_ucontext *ibucontext)
119 {
120 return container_of(ibucontext, struct efa_ucontext, ibucontext);
121 }
122
static inline struct efa_pd *to_epd(struct ib_pd *ibpd)
124 {
125 return container_of(ibpd, struct efa_pd, ibpd);
126 }
127
static inline struct efa_mr *to_emr(struct ib_mr *ibmr)
129 {
130 return container_of(ibmr, struct efa_mr, ibmr);
131 }
132
static inline struct efa_qp *to_eqp(struct ib_qp *ibqp)
134 {
135 return container_of(ibqp, struct efa_qp, ibqp);
136 }
137
static inline struct efa_cq *to_ecq(struct ib_cq *ibcq)
139 {
140 return container_of(ibcq, struct efa_cq, ibcq);
141 }
142
static inline struct efa_ah *to_eah(struct ib_ah *ibah)
144 {
145 return container_of(ibah, struct efa_ah, ibah);
146 }
147
static inline struct efa_user_mmap_entry *
to_emmap(struct rdma_user_mmap_entry *rdma_entry)
150 {
151 return container_of(rdma_entry, struct efa_user_mmap_entry, rdma_entry);
152 }
153
154 #define EFA_DEV_CAP(dev, cap) \
155 ((dev)->dev_attr.device_caps & \
156 EFA_ADMIN_FEATURE_DEVICE_ATTR_DESC_##cap##_MASK)
157
158 #define is_reserved_cleared(reserved) \
159 !memchr_inv(reserved, 0, sizeof(reserved))
160
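/*
 * Allocate a zeroed, physically contiguous buffer and map it for DMA in the
 * requested direction; released with efa_free_mapped().
 */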
static void *efa_zalloc_mapped(struct efa_dev *dev, dma_addr_t *dma_addr,
			       size_t size, enum dma_data_direction dir)
163 {
164 void *addr;
165
166 addr = alloc_pages_exact(size, GFP_KERNEL | __GFP_ZERO);
167 if (!addr)
168 return NULL;
169
170 *dma_addr = dma_map_single(&dev->pdev->dev, addr, size, dir);
171 if (dma_mapping_error(&dev->pdev->dev, *dma_addr)) {
172 ibdev_err(&dev->ibdev, "Failed to map DMA address\n");
173 free_pages_exact(addr, size);
174 return NULL;
175 }
176
177 return addr;
178 }
179
static void efa_free_mapped(struct efa_dev *dev, void *cpu_addr,
			    dma_addr_t dma_addr,
			    size_t size, enum dma_data_direction dir)
183 {
184 dma_unmap_single(&dev->pdev->dev, dma_addr, size, dir);
185 free_pages_exact(cpu_addr, size);
186 }
187
int efa_query_device(struct ib_device *ibdev,
		     struct ib_device_attr *props,
		     struct ib_udata *udata)
191 {
192 struct efa_com_get_device_attr_result *dev_attr;
193 struct efa_ibv_ex_query_device_resp resp = {};
194 struct efa_dev *dev = to_edev(ibdev);
195 int err;
196
197 if (udata && udata->inlen &&
198 !ib_is_udata_cleared(udata, 0, udata->inlen)) {
199 ibdev_dbg(ibdev,
200 "Incompatible ABI params, udata not cleared\n");
201 return -EINVAL;
202 }
203
204 dev_attr = &dev->dev_attr;
205
206 memset(props, 0, sizeof(*props));
207 props->max_mr_size = dev_attr->max_mr_pages * PAGE_SIZE;
208 props->page_size_cap = dev_attr->page_size_cap;
209 props->vendor_id = dev->pdev->vendor;
210 props->vendor_part_id = dev->pdev->device;
211 props->hw_ver = dev->pdev->subsystem_device;
212 props->max_qp = dev_attr->max_qp;
213 props->max_cq = dev_attr->max_cq;
214 props->max_pd = dev_attr->max_pd;
215 props->max_mr = dev_attr->max_mr;
216 props->max_ah = dev_attr->max_ah;
217 props->max_cqe = dev_attr->max_cq_depth;
218 props->max_qp_wr = min_t(u32, dev_attr->max_sq_depth,
219 dev_attr->max_rq_depth);
220 props->max_send_sge = dev_attr->max_sq_sge;
221 props->max_recv_sge = dev_attr->max_rq_sge;
222 props->max_sge_rd = dev_attr->max_wr_rdma_sge;
223 props->max_pkeys = 1;
224
225 if (udata && udata->outlen) {
226 resp.max_sq_sge = dev_attr->max_sq_sge;
227 resp.max_rq_sge = dev_attr->max_rq_sge;
228 resp.max_sq_wr = dev_attr->max_sq_depth;
229 resp.max_rq_wr = dev_attr->max_rq_depth;
230 resp.max_rdma_size = dev_attr->max_rdma_size;
231
232 if (EFA_DEV_CAP(dev, RDMA_READ))
233 resp.device_caps |= EFA_QUERY_DEVICE_CAPS_RDMA_READ;
234
235 if (EFA_DEV_CAP(dev, RNR_RETRY))
236 resp.device_caps |= EFA_QUERY_DEVICE_CAPS_RNR_RETRY;
237
238 err = ib_copy_to_udata(udata, &resp,
239 min(sizeof(resp), udata->outlen));
240 if (err) {
241 ibdev_dbg(ibdev,
242 "Failed to copy udata for query_device\n");
243 return err;
244 }
245 }
246
247 return 0;
248 }
249
int efa_query_port(struct ib_device *ibdev, u8 port,
		   struct ib_port_attr *props)
252 {
253 struct efa_dev *dev = to_edev(ibdev);
254
255 props->lmc = 1;
256
257 props->state = IB_PORT_ACTIVE;
258 props->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
259 props->gid_tbl_len = 1;
260 props->pkey_tbl_len = 1;
261 props->active_speed = IB_SPEED_EDR;
262 props->active_width = IB_WIDTH_4X;
263 props->max_mtu = ib_mtu_int_to_enum(dev->dev_attr.mtu);
264 props->active_mtu = ib_mtu_int_to_enum(dev->dev_attr.mtu);
265 props->max_msg_sz = dev->dev_attr.mtu;
266 props->max_vl_num = 1;
267
268 return 0;
269 }
270
int efa_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
		 int qp_attr_mask,
		 struct ib_qp_init_attr *qp_init_attr)
274 {
275 struct efa_dev *dev = to_edev(ibqp->device);
276 struct efa_com_query_qp_params params = {};
277 struct efa_com_query_qp_result result;
278 struct efa_qp *qp = to_eqp(ibqp);
279 int err;
280
281 #define EFA_QUERY_QP_SUPP_MASK \
282 (IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT | \
283 IB_QP_QKEY | IB_QP_SQ_PSN | IB_QP_CAP | IB_QP_RNR_RETRY)
284
285 if (qp_attr_mask & ~EFA_QUERY_QP_SUPP_MASK) {
286 ibdev_dbg(&dev->ibdev,
287 "Unsupported qp_attr_mask[%#x] supported[%#x]\n",
288 qp_attr_mask, EFA_QUERY_QP_SUPP_MASK);
289 return -EOPNOTSUPP;
290 }
291
292 memset(qp_attr, 0, sizeof(*qp_attr));
293 memset(qp_init_attr, 0, sizeof(*qp_init_attr));
294
295 params.qp_handle = qp->qp_handle;
	err = efa_com_query_qp(&dev->edev, &params, &result);
297 if (err)
298 return err;
299
300 qp_attr->qp_state = result.qp_state;
301 qp_attr->qkey = result.qkey;
302 qp_attr->sq_psn = result.sq_psn;
303 qp_attr->sq_draining = result.sq_draining;
304 qp_attr->port_num = 1;
305 qp_attr->rnr_retry = result.rnr_retry;
306
307 qp_attr->cap.max_send_wr = qp->max_send_wr;
308 qp_attr->cap.max_recv_wr = qp->max_recv_wr;
309 qp_attr->cap.max_send_sge = qp->max_send_sge;
310 qp_attr->cap.max_recv_sge = qp->max_recv_sge;
311 qp_attr->cap.max_inline_data = qp->max_inline_data;
312
313 qp_init_attr->qp_type = ibqp->qp_type;
314 qp_init_attr->recv_cq = ibqp->recv_cq;
315 qp_init_attr->send_cq = ibqp->send_cq;
316 qp_init_attr->qp_context = ibqp->qp_context;
317 qp_init_attr->cap = qp_attr->cap;
318
319 return 0;
320 }
321
int efa_query_gid(struct ib_device *ibdev, u8 port, int index,
		  union ib_gid *gid)
324 {
325 struct efa_dev *dev = to_edev(ibdev);
326
327 memcpy(gid->raw, dev->dev_attr.addr, sizeof(dev->dev_attr.addr));
328
329 return 0;
330 }
331
int efa_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
		   u16 *pkey)
334 {
335 if (index > 0)
336 return -EINVAL;
337
338 *pkey = 0xffff;
339 return 0;
340 }
341
static int efa_pd_dealloc(struct efa_dev *dev, u16 pdn)
343 {
344 struct efa_com_dealloc_pd_params params = {
345 .pdn = pdn,
346 };
347
	return efa_com_dealloc_pd(&dev->edev, &params);
349 }
350
int efa_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
352 {
353 struct efa_dev *dev = to_edev(ibpd->device);
354 struct efa_ibv_alloc_pd_resp resp = {};
355 struct efa_com_alloc_pd_result result;
356 struct efa_pd *pd = to_epd(ibpd);
357 int err;
358
359 if (udata->inlen &&
360 !ib_is_udata_cleared(udata, 0, udata->inlen)) {
361 ibdev_dbg(&dev->ibdev,
362 "Incompatible ABI params, udata not cleared\n");
363 err = -EINVAL;
364 goto err_out;
365 }
366
367 err = efa_com_alloc_pd(&dev->edev, &result);
368 if (err)
369 goto err_out;
370
371 pd->pdn = result.pdn;
372 resp.pdn = result.pdn;
373
374 if (udata->outlen) {
375 err = ib_copy_to_udata(udata, &resp,
376 min(sizeof(resp), udata->outlen));
377 if (err) {
378 ibdev_dbg(&dev->ibdev,
379 "Failed to copy udata for alloc_pd\n");
380 goto err_dealloc_pd;
381 }
382 }
383
384 ibdev_dbg(&dev->ibdev, "Allocated pd[%d]\n", pd->pdn);
385
386 return 0;
387
388 err_dealloc_pd:
389 efa_pd_dealloc(dev, result.pdn);
390 err_out:
391 atomic64_inc(&dev->stats.alloc_pd_err);
392 return err;
393 }
394
int efa_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
396 {
397 struct efa_dev *dev = to_edev(ibpd->device);
398 struct efa_pd *pd = to_epd(ibpd);
399
400 ibdev_dbg(&dev->ibdev, "Dealloc pd[%d]\n", pd->pdn);
401 efa_pd_dealloc(dev, pd->pdn);
402 return 0;
403 }
404
static int efa_destroy_qp_handle(struct efa_dev *dev, u32 qp_handle)
406 {
407 struct efa_com_destroy_qp_params params = { .qp_handle = qp_handle };
408
	return efa_com_destroy_qp(&dev->edev, &params);
410 }
411
static void efa_qp_user_mmap_entries_remove(struct efa_qp *qp)
413 {
414 rdma_user_mmap_entry_remove(qp->rq_mmap_entry);
415 rdma_user_mmap_entry_remove(qp->rq_db_mmap_entry);
416 rdma_user_mmap_entry_remove(qp->llq_desc_mmap_entry);
417 rdma_user_mmap_entry_remove(qp->sq_db_mmap_entry);
418 }
419
int efa_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
421 {
422 struct efa_dev *dev = to_edev(ibqp->pd->device);
423 struct efa_qp *qp = to_eqp(ibqp);
424 int err;
425
426 ibdev_dbg(&dev->ibdev, "Destroy qp[%u]\n", ibqp->qp_num);
427
428 efa_qp_user_mmap_entries_remove(qp);
429
430 err = efa_destroy_qp_handle(dev, qp->qp_handle);
431 if (err)
432 return err;
433
434 if (qp->rq_cpu_addr) {
435 ibdev_dbg(&dev->ibdev,
436 "qp->cpu_addr[0x%p] freed: size[%lu], dma[%pad]\n",
437 qp->rq_cpu_addr, qp->rq_size,
438 &qp->rq_dma_addr);
439 efa_free_mapped(dev, qp->rq_cpu_addr, qp->rq_dma_addr,
440 qp->rq_size, DMA_TO_DEVICE);
441 }
442
443 kfree(qp);
444 return 0;
445 }
446
447 static struct rdma_user_mmap_entry*
efa_user_mmap_entry_insert(struct ib_ucontext *ucontext,
			   u64 address, size_t length,
			   u8 mmap_flag, u64 *offset)
451 {
452 struct efa_user_mmap_entry *entry = kzalloc(sizeof(*entry), GFP_KERNEL);
453 int err;
454
455 if (!entry)
456 return NULL;
457
458 entry->address = address;
459 entry->mmap_flag = mmap_flag;
460
461 err = rdma_user_mmap_entry_insert(ucontext, &entry->rdma_entry,
462 length);
463 if (err) {
464 kfree(entry);
465 return NULL;
466 }
467 *offset = rdma_user_mmap_get_offset(&entry->rdma_entry);
468
469 return &entry->rdma_entry;
470 }
471
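/*
 * Expose the QP resources to userspace: the SQ/RQ doorbell pages and the LLQ
 * descriptor space are mapped as I/O, the RQ ring as regular DMA pages.
 */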
static int qp_mmap_entries_setup(struct efa_qp *qp,
				 struct efa_dev *dev,
				 struct efa_ucontext *ucontext,
				 struct efa_com_create_qp_params *params,
				 struct efa_ibv_create_qp_resp *resp)
477 {
478 size_t length;
479 u64 address;
480
481 address = dev->db_bar_addr + resp->sq_db_offset;
482 qp->sq_db_mmap_entry =
483 efa_user_mmap_entry_insert(&ucontext->ibucontext,
484 address,
485 PAGE_SIZE, EFA_MMAP_IO_NC,
486 &resp->sq_db_mmap_key);
487 if (!qp->sq_db_mmap_entry)
488 return -ENOMEM;
489
490 resp->sq_db_offset &= ~PAGE_MASK;
491
492 address = dev->mem_bar_addr + resp->llq_desc_offset;
493 length = PAGE_ALIGN(params->sq_ring_size_in_bytes +
494 (resp->llq_desc_offset & ~PAGE_MASK));
495
496 qp->llq_desc_mmap_entry =
497 efa_user_mmap_entry_insert(&ucontext->ibucontext,
498 address, length,
499 EFA_MMAP_IO_WC,
500 &resp->llq_desc_mmap_key);
501 if (!qp->llq_desc_mmap_entry)
502 goto err_remove_mmap;
503
504 resp->llq_desc_offset &= ~PAGE_MASK;
505
506 if (qp->rq_size) {
507 address = dev->db_bar_addr + resp->rq_db_offset;
508
509 qp->rq_db_mmap_entry =
510 efa_user_mmap_entry_insert(&ucontext->ibucontext,
511 address, PAGE_SIZE,
512 EFA_MMAP_IO_NC,
513 &resp->rq_db_mmap_key);
514 if (!qp->rq_db_mmap_entry)
515 goto err_remove_mmap;
516
517 resp->rq_db_offset &= ~PAGE_MASK;
518
519 address = virt_to_phys(qp->rq_cpu_addr);
520 qp->rq_mmap_entry =
521 efa_user_mmap_entry_insert(&ucontext->ibucontext,
522 address, qp->rq_size,
523 EFA_MMAP_DMA_PAGE,
524 &resp->rq_mmap_key);
525 if (!qp->rq_mmap_entry)
526 goto err_remove_mmap;
527
528 resp->rq_mmap_size = qp->rq_size;
529 }
530
531 return 0;
532
533 err_remove_mmap:
534 efa_qp_user_mmap_entries_remove(qp);
535
536 return -ENOMEM;
537 }
538
static int efa_qp_validate_cap(struct efa_dev *dev,
			       struct ib_qp_init_attr *init_attr)
541 {
542 if (init_attr->cap.max_send_wr > dev->dev_attr.max_sq_depth) {
543 ibdev_dbg(&dev->ibdev,
544 "qp: requested send wr[%u] exceeds the max[%u]\n",
545 init_attr->cap.max_send_wr,
546 dev->dev_attr.max_sq_depth);
547 return -EINVAL;
548 }
549 if (init_attr->cap.max_recv_wr > dev->dev_attr.max_rq_depth) {
550 ibdev_dbg(&dev->ibdev,
551 "qp: requested receive wr[%u] exceeds the max[%u]\n",
552 init_attr->cap.max_recv_wr,
553 dev->dev_attr.max_rq_depth);
554 return -EINVAL;
555 }
556 if (init_attr->cap.max_send_sge > dev->dev_attr.max_sq_sge) {
557 ibdev_dbg(&dev->ibdev,
558 "qp: requested sge send[%u] exceeds the max[%u]\n",
559 init_attr->cap.max_send_sge, dev->dev_attr.max_sq_sge);
560 return -EINVAL;
561 }
562 if (init_attr->cap.max_recv_sge > dev->dev_attr.max_rq_sge) {
563 ibdev_dbg(&dev->ibdev,
564 "qp: requested sge recv[%u] exceeds the max[%u]\n",
565 init_attr->cap.max_recv_sge, dev->dev_attr.max_rq_sge);
566 return -EINVAL;
567 }
568 if (init_attr->cap.max_inline_data > dev->dev_attr.inline_buf_size) {
569 ibdev_dbg(&dev->ibdev,
570 "qp: requested inline data[%u] exceeds the max[%u]\n",
571 init_attr->cap.max_inline_data,
572 dev->dev_attr.inline_buf_size);
573 return -EINVAL;
574 }
575
576 return 0;
577 }
578
static int efa_qp_validate_attr(struct efa_dev *dev,
				struct ib_qp_init_attr *init_attr)
581 {
582 if (init_attr->qp_type != IB_QPT_DRIVER &&
583 init_attr->qp_type != IB_QPT_UD) {
584 ibdev_dbg(&dev->ibdev,
585 "Unsupported qp type %d\n", init_attr->qp_type);
586 return -EOPNOTSUPP;
587 }
588
589 if (init_attr->srq) {
590 ibdev_dbg(&dev->ibdev, "SRQ is not supported\n");
591 return -EOPNOTSUPP;
592 }
593
594 if (init_attr->create_flags) {
595 ibdev_dbg(&dev->ibdev, "Unsupported create flags\n");
596 return -EOPNOTSUPP;
597 }
598
599 return 0;
600 }
601
struct ib_qp *efa_create_qp(struct ib_pd *ibpd,
			    struct ib_qp_init_attr *init_attr,
			    struct ib_udata *udata)
605 {
606 struct efa_com_create_qp_params create_qp_params = {};
607 struct efa_com_create_qp_result create_qp_resp;
608 struct efa_dev *dev = to_edev(ibpd->device);
609 struct efa_ibv_create_qp_resp resp = {};
610 struct efa_ibv_create_qp cmd = {};
611 struct efa_ucontext *ucontext;
612 struct efa_qp *qp;
613 int err;
614
615 ucontext = rdma_udata_to_drv_context(udata, struct efa_ucontext,
616 ibucontext);
617
618 err = efa_qp_validate_cap(dev, init_attr);
619 if (err)
620 goto err_out;
621
622 err = efa_qp_validate_attr(dev, init_attr);
623 if (err)
624 goto err_out;
625
626 if (offsetofend(typeof(cmd), driver_qp_type) > udata->inlen) {
627 ibdev_dbg(&dev->ibdev,
628 "Incompatible ABI params, no input udata\n");
629 err = -EINVAL;
630 goto err_out;
631 }
632
633 if (udata->inlen > sizeof(cmd) &&
634 !ib_is_udata_cleared(udata, sizeof(cmd),
635 udata->inlen - sizeof(cmd))) {
636 ibdev_dbg(&dev->ibdev,
637 "Incompatible ABI params, unknown fields in udata\n");
638 err = -EINVAL;
639 goto err_out;
640 }
641
642 err = ib_copy_from_udata(&cmd, udata,
643 min(sizeof(cmd), udata->inlen));
644 if (err) {
645 ibdev_dbg(&dev->ibdev,
646 "Cannot copy udata for create_qp\n");
647 goto err_out;
648 }
649
650 if (cmd.comp_mask) {
651 ibdev_dbg(&dev->ibdev,
652 "Incompatible ABI params, unknown fields in udata\n");
653 err = -EINVAL;
654 goto err_out;
655 }
656
657 qp = kzalloc(sizeof(*qp), GFP_KERNEL);
658 if (!qp) {
659 err = -ENOMEM;
660 goto err_out;
661 }
662
663 create_qp_params.uarn = ucontext->uarn;
664 create_qp_params.pd = to_epd(ibpd)->pdn;
665
666 if (init_attr->qp_type == IB_QPT_UD) {
667 create_qp_params.qp_type = EFA_ADMIN_QP_TYPE_UD;
668 } else if (cmd.driver_qp_type == EFA_QP_DRIVER_TYPE_SRD) {
669 create_qp_params.qp_type = EFA_ADMIN_QP_TYPE_SRD;
670 } else {
671 ibdev_dbg(&dev->ibdev,
672 "Unsupported qp type %d driver qp type %d\n",
673 init_attr->qp_type, cmd.driver_qp_type);
674 err = -EOPNOTSUPP;
675 goto err_free_qp;
676 }
677
678 ibdev_dbg(&dev->ibdev, "Create QP: qp type %d driver qp type %#x\n",
679 init_attr->qp_type, cmd.driver_qp_type);
680 create_qp_params.send_cq_idx = to_ecq(init_attr->send_cq)->cq_idx;
681 create_qp_params.recv_cq_idx = to_ecq(init_attr->recv_cq)->cq_idx;
682 create_qp_params.sq_depth = init_attr->cap.max_send_wr;
683 create_qp_params.sq_ring_size_in_bytes = cmd.sq_ring_size;
684
685 create_qp_params.rq_depth = init_attr->cap.max_recv_wr;
686 create_qp_params.rq_ring_size_in_bytes = cmd.rq_ring_size;
687 qp->rq_size = PAGE_ALIGN(create_qp_params.rq_ring_size_in_bytes);
688 if (qp->rq_size) {
689 qp->rq_cpu_addr = efa_zalloc_mapped(dev, &qp->rq_dma_addr,
690 qp->rq_size, DMA_TO_DEVICE);
691 if (!qp->rq_cpu_addr) {
692 err = -ENOMEM;
693 goto err_free_qp;
694 }
695
696 ibdev_dbg(&dev->ibdev,
697 "qp->cpu_addr[0x%p] allocated: size[%lu], dma[%pad]\n",
698 qp->rq_cpu_addr, qp->rq_size, &qp->rq_dma_addr);
699 create_qp_params.rq_base_addr = qp->rq_dma_addr;
700 }
701
702 err = efa_com_create_qp(&dev->edev, &create_qp_params,
703 &create_qp_resp);
704 if (err)
705 goto err_free_mapped;
706
707 resp.sq_db_offset = create_qp_resp.sq_db_offset;
708 resp.rq_db_offset = create_qp_resp.rq_db_offset;
709 resp.llq_desc_offset = create_qp_resp.llq_descriptors_offset;
710 resp.send_sub_cq_idx = create_qp_resp.send_sub_cq_idx;
711 resp.recv_sub_cq_idx = create_qp_resp.recv_sub_cq_idx;
712
713 err = qp_mmap_entries_setup(qp, dev, ucontext, &create_qp_params,
714 &resp);
715 if (err)
716 goto err_destroy_qp;
717
718 qp->qp_handle = create_qp_resp.qp_handle;
719 qp->ibqp.qp_num = create_qp_resp.qp_num;
720 qp->ibqp.qp_type = init_attr->qp_type;
721 qp->max_send_wr = init_attr->cap.max_send_wr;
722 qp->max_recv_wr = init_attr->cap.max_recv_wr;
723 qp->max_send_sge = init_attr->cap.max_send_sge;
724 qp->max_recv_sge = init_attr->cap.max_recv_sge;
725 qp->max_inline_data = init_attr->cap.max_inline_data;
726
727 if (udata->outlen) {
728 err = ib_copy_to_udata(udata, &resp,
729 min(sizeof(resp), udata->outlen));
730 if (err) {
731 ibdev_dbg(&dev->ibdev,
732 "Failed to copy udata for qp[%u]\n",
733 create_qp_resp.qp_num);
734 goto err_remove_mmap_entries;
735 }
736 }
737
738 ibdev_dbg(&dev->ibdev, "Created qp[%d]\n", qp->ibqp.qp_num);
739
740 return &qp->ibqp;
741
742 err_remove_mmap_entries:
743 efa_qp_user_mmap_entries_remove(qp);
744 err_destroy_qp:
745 efa_destroy_qp_handle(dev, create_qp_resp.qp_handle);
746 err_free_mapped:
747 if (qp->rq_size)
748 efa_free_mapped(dev, qp->rq_cpu_addr, qp->rq_dma_addr,
749 qp->rq_size, DMA_TO_DEVICE);
750 err_free_qp:
751 kfree(qp);
752 err_out:
753 atomic64_inc(&dev->stats.create_qp_err);
754 return ERR_PTR(err);
755 }
756
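/*
 * Valid state transitions and required/optional attribute masks for SRD
 * (IB_QPT_DRIVER) QPs; UD QPs are validated with ib_modify_qp_is_ok().
 */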
757 static const struct {
758 int valid;
759 enum ib_qp_attr_mask req_param;
760 enum ib_qp_attr_mask opt_param;
761 } srd_qp_state_table[IB_QPS_ERR + 1][IB_QPS_ERR + 1] = {
762 [IB_QPS_RESET] = {
763 [IB_QPS_RESET] = { .valid = 1 },
764 [IB_QPS_INIT] = {
765 .valid = 1,
766 .req_param = IB_QP_PKEY_INDEX |
767 IB_QP_PORT |
768 IB_QP_QKEY,
769 },
770 },
771 [IB_QPS_INIT] = {
772 [IB_QPS_RESET] = { .valid = 1 },
773 [IB_QPS_ERR] = { .valid = 1 },
774 [IB_QPS_INIT] = {
775 .valid = 1,
776 .opt_param = IB_QP_PKEY_INDEX |
777 IB_QP_PORT |
778 IB_QP_QKEY,
779 },
780 [IB_QPS_RTR] = {
781 .valid = 1,
782 .opt_param = IB_QP_PKEY_INDEX |
783 IB_QP_QKEY,
784 },
785 },
786 [IB_QPS_RTR] = {
787 [IB_QPS_RESET] = { .valid = 1 },
788 [IB_QPS_ERR] = { .valid = 1 },
789 [IB_QPS_RTS] = {
790 .valid = 1,
791 .req_param = IB_QP_SQ_PSN,
792 .opt_param = IB_QP_CUR_STATE |
793 IB_QP_QKEY |
794 IB_QP_RNR_RETRY,
795
796 }
797 },
798 [IB_QPS_RTS] = {
799 [IB_QPS_RESET] = { .valid = 1 },
800 [IB_QPS_ERR] = { .valid = 1 },
801 [IB_QPS_RTS] = {
802 .valid = 1,
803 .opt_param = IB_QP_CUR_STATE |
804 IB_QP_QKEY,
805 },
806 [IB_QPS_SQD] = {
807 .valid = 1,
808 .opt_param = IB_QP_EN_SQD_ASYNC_NOTIFY,
809 },
810 },
811 [IB_QPS_SQD] = {
812 [IB_QPS_RESET] = { .valid = 1 },
813 [IB_QPS_ERR] = { .valid = 1 },
814 [IB_QPS_RTS] = {
815 .valid = 1,
816 .opt_param = IB_QP_CUR_STATE |
817 IB_QP_QKEY,
818 },
819 [IB_QPS_SQD] = {
820 .valid = 1,
821 .opt_param = IB_QP_PKEY_INDEX |
822 IB_QP_QKEY,
823 }
824 },
825 [IB_QPS_SQE] = {
826 [IB_QPS_RESET] = { .valid = 1 },
827 [IB_QPS_ERR] = { .valid = 1 },
828 [IB_QPS_RTS] = {
829 .valid = 1,
830 .opt_param = IB_QP_CUR_STATE |
831 IB_QP_QKEY,
832 }
833 },
834 [IB_QPS_ERR] = {
835 [IB_QPS_RESET] = { .valid = 1 },
836 [IB_QPS_ERR] = { .valid = 1 },
837 }
838 };
839
static bool efa_modify_srd_qp_is_ok(enum ib_qp_state cur_state,
				    enum ib_qp_state next_state,
				    enum ib_qp_attr_mask mask)
843 {
844 enum ib_qp_attr_mask req_param, opt_param;
845
846 if (mask & IB_QP_CUR_STATE &&
847 cur_state != IB_QPS_RTR && cur_state != IB_QPS_RTS &&
848 cur_state != IB_QPS_SQD && cur_state != IB_QPS_SQE)
849 return false;
850
851 if (!srd_qp_state_table[cur_state][next_state].valid)
852 return false;
853
854 req_param = srd_qp_state_table[cur_state][next_state].req_param;
855 opt_param = srd_qp_state_table[cur_state][next_state].opt_param;
856
857 if ((mask & req_param) != req_param)
858 return false;
859
860 if (mask & ~(req_param | opt_param | IB_QP_STATE))
861 return false;
862
863 return true;
864 }
865
static int efa_modify_qp_validate(struct efa_dev *dev, struct efa_qp *qp,
				  struct ib_qp_attr *qp_attr, int qp_attr_mask,
				  enum ib_qp_state cur_state,
				  enum ib_qp_state new_state)
870 {
871 int err;
872
873 #define EFA_MODIFY_QP_SUPP_MASK \
874 (IB_QP_STATE | IB_QP_CUR_STATE | IB_QP_EN_SQD_ASYNC_NOTIFY | \
875 IB_QP_PKEY_INDEX | IB_QP_PORT | IB_QP_QKEY | IB_QP_SQ_PSN | \
876 IB_QP_RNR_RETRY)
877
878 if (qp_attr_mask & ~EFA_MODIFY_QP_SUPP_MASK) {
879 ibdev_dbg(&dev->ibdev,
880 "Unsupported qp_attr_mask[%#x] supported[%#x]\n",
881 qp_attr_mask, EFA_MODIFY_QP_SUPP_MASK);
882 return -EOPNOTSUPP;
883 }
884
885 if (qp->ibqp.qp_type == IB_QPT_DRIVER)
886 err = !efa_modify_srd_qp_is_ok(cur_state, new_state,
887 qp_attr_mask);
888 else
889 err = !ib_modify_qp_is_ok(cur_state, new_state, IB_QPT_UD,
890 qp_attr_mask);
891
892 if (err) {
893 ibdev_dbg(&dev->ibdev, "Invalid modify QP parameters\n");
894 return -EINVAL;
895 }
896
897 if ((qp_attr_mask & IB_QP_PORT) && qp_attr->port_num != 1) {
898 ibdev_dbg(&dev->ibdev, "Can't change port num\n");
899 return -EOPNOTSUPP;
900 }
901
902 if ((qp_attr_mask & IB_QP_PKEY_INDEX) && qp_attr->pkey_index) {
903 ibdev_dbg(&dev->ibdev, "Can't change pkey index\n");
904 return -EOPNOTSUPP;
905 }
906
907 return 0;
908 }
909
int efa_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
		  int qp_attr_mask, struct ib_udata *udata)
912 {
913 struct efa_dev *dev = to_edev(ibqp->device);
914 struct efa_com_modify_qp_params params = {};
915 struct efa_qp *qp = to_eqp(ibqp);
916 enum ib_qp_state cur_state;
917 enum ib_qp_state new_state;
918 int err;
919
920 if (udata->inlen &&
921 !ib_is_udata_cleared(udata, 0, udata->inlen)) {
922 ibdev_dbg(&dev->ibdev,
923 "Incompatible ABI params, udata not cleared\n");
924 return -EINVAL;
925 }
926
927 cur_state = qp_attr_mask & IB_QP_CUR_STATE ? qp_attr->cur_qp_state :
928 qp->state;
929 new_state = qp_attr_mask & IB_QP_STATE ? qp_attr->qp_state : cur_state;
930
931 err = efa_modify_qp_validate(dev, qp, qp_attr, qp_attr_mask, cur_state,
932 new_state);
933 if (err)
934 return err;
935
936 params.qp_handle = qp->qp_handle;
937
938 if (qp_attr_mask & IB_QP_STATE) {
		EFA_SET(&params.modify_mask, EFA_ADMIN_MODIFY_QP_CMD_QP_STATE,
940 1);
		EFA_SET(&params.modify_mask,
942 EFA_ADMIN_MODIFY_QP_CMD_CUR_QP_STATE, 1);
943 params.cur_qp_state = cur_state;
944 params.qp_state = new_state;
945 }
946
947 if (qp_attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY) {
		EFA_SET(&params.modify_mask,
949 EFA_ADMIN_MODIFY_QP_CMD_SQ_DRAINED_ASYNC_NOTIFY, 1);
950 params.sq_drained_async_notify = qp_attr->en_sqd_async_notify;
951 }
952
953 if (qp_attr_mask & IB_QP_QKEY) {
		EFA_SET(&params.modify_mask, EFA_ADMIN_MODIFY_QP_CMD_QKEY, 1);
955 params.qkey = qp_attr->qkey;
956 }
957
958 if (qp_attr_mask & IB_QP_SQ_PSN) {
		EFA_SET(&params.modify_mask, EFA_ADMIN_MODIFY_QP_CMD_SQ_PSN, 1);
960 params.sq_psn = qp_attr->sq_psn;
961 }
962
963 if (qp_attr_mask & IB_QP_RNR_RETRY) {
		EFA_SET(&params.modify_mask, EFA_ADMIN_MODIFY_QP_CMD_RNR_RETRY,
965 1);
966 params.rnr_retry = qp_attr->rnr_retry;
967 }
968
	err = efa_com_modify_qp(&dev->edev, &params);
970 if (err)
971 return err;
972
973 qp->state = new_state;
974
975 return 0;
976 }
977
static int efa_destroy_cq_idx(struct efa_dev *dev, int cq_idx)
979 {
980 struct efa_com_destroy_cq_params params = { .cq_idx = cq_idx };
981
	return efa_com_destroy_cq(&dev->edev, &params);
983 }
984
int efa_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
986 {
987 struct efa_dev *dev = to_edev(ibcq->device);
988 struct efa_cq *cq = to_ecq(ibcq);
989
990 ibdev_dbg(&dev->ibdev,
991 "Destroy cq[%d] virt[0x%p] freed: size[%lu], dma[%pad]\n",
992 cq->cq_idx, cq->cpu_addr, cq->size, &cq->dma_addr);
993
994 rdma_user_mmap_entry_remove(cq->mmap_entry);
995 efa_destroy_cq_idx(dev, cq->cq_idx);
996 efa_free_mapped(dev, cq->cpu_addr, cq->dma_addr, cq->size,
997 DMA_FROM_DEVICE);
998 return 0;
999 }
1000
static int cq_mmap_entries_setup(struct efa_dev *dev, struct efa_cq *cq,
				 struct efa_ibv_create_cq_resp *resp)
1003 {
1004 resp->q_mmap_size = cq->size;
1005 cq->mmap_entry = efa_user_mmap_entry_insert(&cq->ucontext->ibucontext,
1006 virt_to_phys(cq->cpu_addr),
1007 cq->size, EFA_MMAP_DMA_PAGE,
1008 &resp->q_mmap_key);
1009 if (!cq->mmap_entry)
1010 return -ENOMEM;
1011
1012 return 0;
1013 }
1014
int efa_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
		  struct ib_udata *udata)
1017 {
1018 struct efa_ucontext *ucontext = rdma_udata_to_drv_context(
1019 udata, struct efa_ucontext, ibucontext);
1020 struct efa_ibv_create_cq_resp resp = {};
1021 struct efa_com_create_cq_params params;
1022 struct efa_com_create_cq_result result;
1023 struct ib_device *ibdev = ibcq->device;
1024 struct efa_dev *dev = to_edev(ibdev);
1025 struct efa_ibv_create_cq cmd = {};
1026 struct efa_cq *cq = to_ecq(ibcq);
1027 int entries = attr->cqe;
1028 int err;
1029
1030 ibdev_dbg(ibdev, "create_cq entries %d\n", entries);
1031
1032 if (entries < 1 || entries > dev->dev_attr.max_cq_depth) {
1033 ibdev_dbg(ibdev,
1034 "cq: requested entries[%u] non-positive or greater than max[%u]\n",
1035 entries, dev->dev_attr.max_cq_depth);
1036 err = -EINVAL;
1037 goto err_out;
1038 }
1039
1040 if (offsetofend(typeof(cmd), num_sub_cqs) > udata->inlen) {
1041 ibdev_dbg(ibdev,
1042 "Incompatible ABI params, no input udata\n");
1043 err = -EINVAL;
1044 goto err_out;
1045 }
1046
1047 if (udata->inlen > sizeof(cmd) &&
1048 !ib_is_udata_cleared(udata, sizeof(cmd),
1049 udata->inlen - sizeof(cmd))) {
1050 ibdev_dbg(ibdev,
1051 "Incompatible ABI params, unknown fields in udata\n");
1052 err = -EINVAL;
1053 goto err_out;
1054 }
1055
1056 err = ib_copy_from_udata(&cmd, udata,
1057 min(sizeof(cmd), udata->inlen));
1058 if (err) {
1059 ibdev_dbg(ibdev, "Cannot copy udata for create_cq\n");
1060 goto err_out;
1061 }
1062
1063 if (cmd.comp_mask || !is_reserved_cleared(cmd.reserved_50)) {
1064 ibdev_dbg(ibdev,
1065 "Incompatible ABI params, unknown fields in udata\n");
1066 err = -EINVAL;
1067 goto err_out;
1068 }
1069
1070 if (!cmd.cq_entry_size) {
1071 ibdev_dbg(ibdev,
1072 "Invalid entry size [%u]\n", cmd.cq_entry_size);
1073 err = -EINVAL;
1074 goto err_out;
1075 }
1076
1077 if (cmd.num_sub_cqs != dev->dev_attr.sub_cqs_per_cq) {
1078 ibdev_dbg(ibdev,
1079 "Invalid number of sub cqs[%u] expected[%u]\n",
1080 cmd.num_sub_cqs, dev->dev_attr.sub_cqs_per_cq);
1081 err = -EINVAL;
1082 goto err_out;
1083 }
1084
1085 cq->ucontext = ucontext;
1086 cq->size = PAGE_ALIGN(cmd.cq_entry_size * entries * cmd.num_sub_cqs);
1087 cq->cpu_addr = efa_zalloc_mapped(dev, &cq->dma_addr, cq->size,
1088 DMA_FROM_DEVICE);
1089 if (!cq->cpu_addr) {
1090 err = -ENOMEM;
1091 goto err_out;
1092 }
1093
1094 params.uarn = cq->ucontext->uarn;
1095 params.cq_depth = entries;
1096 params.dma_addr = cq->dma_addr;
1097 params.entry_size_in_bytes = cmd.cq_entry_size;
1098 params.num_sub_cqs = cmd.num_sub_cqs;
	err = efa_com_create_cq(&dev->edev, &params, &result);
1100 if (err)
1101 goto err_free_mapped;
1102
1103 resp.cq_idx = result.cq_idx;
1104 cq->cq_idx = result.cq_idx;
1105 cq->ibcq.cqe = result.actual_depth;
1106 WARN_ON_ONCE(entries != result.actual_depth);
1107
1108 err = cq_mmap_entries_setup(dev, cq, &resp);
1109 if (err) {
1110 ibdev_dbg(ibdev, "Could not setup cq[%u] mmap entries\n",
1111 cq->cq_idx);
1112 goto err_destroy_cq;
1113 }
1114
1115 if (udata->outlen) {
1116 err = ib_copy_to_udata(udata, &resp,
1117 min(sizeof(resp), udata->outlen));
1118 if (err) {
1119 ibdev_dbg(ibdev,
1120 "Failed to copy udata for create_cq\n");
1121 goto err_remove_mmap;
1122 }
1123 }
1124
1125 ibdev_dbg(ibdev, "Created cq[%d], cq depth[%u]. dma[%pad] virt[0x%p]\n",
1126 cq->cq_idx, result.actual_depth, &cq->dma_addr, cq->cpu_addr);
1127
1128 return 0;
1129
1130 err_remove_mmap:
1131 rdma_user_mmap_entry_remove(cq->mmap_entry);
1132 err_destroy_cq:
1133 efa_destroy_cq_idx(dev, cq->cq_idx);
1134 err_free_mapped:
1135 efa_free_mapped(dev, cq->cpu_addr, cq->dma_addr, cq->size,
1136 DMA_FROM_DEVICE);
1137
1138 err_out:
1139 atomic64_inc(&dev->stats.create_cq_err);
1140 return err;
1141 }
1142
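/* Fill @page_list with the DMA address of each hp_shift-sized block of @umem. */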
static int umem_to_page_list(struct efa_dev *dev,
			     struct ib_umem *umem,
			     u64 *page_list,
			     u32 hp_cnt,
			     u8 hp_shift)
1148 {
1149 u32 pages_in_hp = BIT(hp_shift - PAGE_SHIFT);
1150 struct ib_block_iter biter;
1151 unsigned int hp_idx = 0;
1152
1153 ibdev_dbg(&dev->ibdev, "hp_cnt[%u], pages_in_hp[%u]\n",
1154 hp_cnt, pages_in_hp);
1155
1156 rdma_umem_for_each_dma_block(umem, &biter, BIT(hp_shift))
1157 page_list[hp_idx++] = rdma_block_iter_dma_address(&biter);
1158
1159 return 0;
1160 }
1161
static struct scatterlist *efa_vmalloc_buf_to_sg(u64 *buf, int page_cnt)
1163 {
1164 struct scatterlist *sglist;
1165 struct page *pg;
1166 int i;
1167
1168 sglist = kmalloc_array(page_cnt, sizeof(*sglist), GFP_KERNEL);
1169 if (!sglist)
1170 return NULL;
1171 sg_init_table(sglist, page_cnt);
1172 for (i = 0; i < page_cnt; i++) {
1173 pg = vmalloc_to_page(buf);
1174 if (!pg)
1175 goto err;
1176 sg_set_page(&sglist[i], pg, PAGE_SIZE, 0);
1177 buf += PAGE_SIZE / sizeof(*buf);
1178 }
1179 return sglist;
1180
1181 err:
1182 kfree(sglist);
1183 return NULL;
1184 }
1185
1186 /*
1187 * create a chunk list of physical pages dma addresses from the supplied
1188 * scatter gather list
1189 */
static int pbl_chunk_list_create(struct efa_dev *dev, struct pbl_context *pbl)
1191 {
1192 struct pbl_chunk_list *chunk_list = &pbl->phys.indirect.chunk_list;
1193 int page_cnt = pbl->phys.indirect.pbl_buf_size_in_pages;
1194 struct scatterlist *pages_sgl = pbl->phys.indirect.sgl;
1195 unsigned int chunk_list_size, chunk_idx, payload_idx;
1196 int sg_dma_cnt = pbl->phys.indirect.sg_dma_cnt;
1197 struct efa_com_ctrl_buff_info *ctrl_buf;
1198 u64 *cur_chunk_buf, *prev_chunk_buf;
1199 struct ib_block_iter biter;
1200 dma_addr_t dma_addr;
1201 int i;
1202
1203 /* allocate a chunk list that consists of 4KB chunks */
1204 chunk_list_size = DIV_ROUND_UP(page_cnt, EFA_PTRS_PER_CHUNK);
1205
1206 chunk_list->size = chunk_list_size;
1207 chunk_list->chunks = kcalloc(chunk_list_size,
1208 sizeof(*chunk_list->chunks),
1209 GFP_KERNEL);
1210 if (!chunk_list->chunks)
1211 return -ENOMEM;
1212
1213 ibdev_dbg(&dev->ibdev,
1214 "chunk_list_size[%u] - pages[%u]\n", chunk_list_size,
1215 page_cnt);
1216
1217 /* allocate chunk buffers: */
1218 for (i = 0; i < chunk_list_size; i++) {
1219 chunk_list->chunks[i].buf = kzalloc(EFA_CHUNK_SIZE, GFP_KERNEL);
1220 if (!chunk_list->chunks[i].buf)
1221 goto chunk_list_dealloc;
1222
1223 chunk_list->chunks[i].length = EFA_CHUNK_USED_SIZE;
1224 }
1225 chunk_list->chunks[chunk_list_size - 1].length =
1226 ((page_cnt % EFA_PTRS_PER_CHUNK) * EFA_CHUNK_PAYLOAD_PTR_SIZE) +
1227 EFA_CHUNK_PTR_SIZE;
1228
1229 /* fill the dma addresses of sg list pages to chunks: */
1230 chunk_idx = 0;
1231 payload_idx = 0;
1232 cur_chunk_buf = chunk_list->chunks[0].buf;
1233 rdma_for_each_block(pages_sgl, &biter, sg_dma_cnt,
1234 EFA_CHUNK_PAYLOAD_SIZE) {
1235 cur_chunk_buf[payload_idx++] =
1236 rdma_block_iter_dma_address(&biter);
1237
1238 if (payload_idx == EFA_PTRS_PER_CHUNK) {
1239 chunk_idx++;
1240 cur_chunk_buf = chunk_list->chunks[chunk_idx].buf;
1241 payload_idx = 0;
1242 }
1243 }
1244
1245 /* map chunks to dma and fill chunks next ptrs */
1246 for (i = chunk_list_size - 1; i >= 0; i--) {
1247 dma_addr = dma_map_single(&dev->pdev->dev,
1248 chunk_list->chunks[i].buf,
1249 chunk_list->chunks[i].length,
1250 DMA_TO_DEVICE);
1251 if (dma_mapping_error(&dev->pdev->dev, dma_addr)) {
1252 ibdev_err(&dev->ibdev,
1253 "chunk[%u] dma_map_failed\n", i);
1254 goto chunk_list_unmap;
1255 }
1256
1257 chunk_list->chunks[i].dma_addr = dma_addr;
1258 ibdev_dbg(&dev->ibdev,
1259 "chunk[%u] mapped at [%pad]\n", i, &dma_addr);
1260
1261 if (!i)
1262 break;
1263
1264 prev_chunk_buf = chunk_list->chunks[i - 1].buf;
1265
1266 ctrl_buf = (struct efa_com_ctrl_buff_info *)
1267 &prev_chunk_buf[EFA_PTRS_PER_CHUNK];
1268 ctrl_buf->length = chunk_list->chunks[i].length;
1269
1270 efa_com_set_dma_addr(dma_addr,
1271 &ctrl_buf->address.mem_addr_high,
1272 &ctrl_buf->address.mem_addr_low);
1273 }
1274
1275 return 0;
1276
1277 chunk_list_unmap:
1278 for (; i < chunk_list_size; i++) {
1279 dma_unmap_single(&dev->pdev->dev, chunk_list->chunks[i].dma_addr,
1280 chunk_list->chunks[i].length, DMA_TO_DEVICE);
1281 }
1282 chunk_list_dealloc:
1283 for (i = 0; i < chunk_list_size; i++)
1284 kfree(chunk_list->chunks[i].buf);
1285
1286 kfree(chunk_list->chunks);
1287 return -ENOMEM;
1288 }
1289
static void pbl_chunk_list_destroy(struct efa_dev *dev, struct pbl_context *pbl)
1291 {
1292 struct pbl_chunk_list *chunk_list = &pbl->phys.indirect.chunk_list;
1293 int i;
1294
1295 for (i = 0; i < chunk_list->size; i++) {
1296 dma_unmap_single(&dev->pdev->dev, chunk_list->chunks[i].dma_addr,
1297 chunk_list->chunks[i].length, DMA_TO_DEVICE);
1298 kfree(chunk_list->chunks[i].buf);
1299 }
1300
1301 kfree(chunk_list->chunks);
1302 }
1303
1304 /* initialize pbl continuous mode: map pbl buffer to a dma address. */
static int pbl_continuous_initialize(struct efa_dev *dev,
				     struct pbl_context *pbl)
1307 {
1308 dma_addr_t dma_addr;
1309
1310 dma_addr = dma_map_single(&dev->pdev->dev, pbl->pbl_buf,
1311 pbl->pbl_buf_size_in_bytes, DMA_TO_DEVICE);
1312 if (dma_mapping_error(&dev->pdev->dev, dma_addr)) {
1313 ibdev_err(&dev->ibdev, "Unable to map pbl to DMA address\n");
1314 return -ENOMEM;
1315 }
1316
1317 pbl->phys.continuous.dma_addr = dma_addr;
1318 ibdev_dbg(&dev->ibdev,
1319 "pbl continuous - dma_addr = %pad, size[%u]\n",
1320 &dma_addr, pbl->pbl_buf_size_in_bytes);
1321
1322 return 0;
1323 }
1324
1325 /*
1326 * initialize pbl indirect mode:
1327 * create a chunk list out of the dma addresses of the physical pages of
1328 * pbl buffer.
1329 */
static int pbl_indirect_initialize(struct efa_dev *dev, struct pbl_context *pbl)
1331 {
1332 u32 size_in_pages = DIV_ROUND_UP(pbl->pbl_buf_size_in_bytes, PAGE_SIZE);
1333 struct scatterlist *sgl;
1334 int sg_dma_cnt, err;
1335
1336 BUILD_BUG_ON(EFA_CHUNK_PAYLOAD_SIZE > PAGE_SIZE);
1337 sgl = efa_vmalloc_buf_to_sg(pbl->pbl_buf, size_in_pages);
1338 if (!sgl)
1339 return -ENOMEM;
1340
1341 sg_dma_cnt = dma_map_sg(&dev->pdev->dev, sgl, size_in_pages, DMA_TO_DEVICE);
1342 if (!sg_dma_cnt) {
1343 err = -EINVAL;
1344 goto err_map;
1345 }
1346
1347 pbl->phys.indirect.pbl_buf_size_in_pages = size_in_pages;
1348 pbl->phys.indirect.sgl = sgl;
1349 pbl->phys.indirect.sg_dma_cnt = sg_dma_cnt;
1350 err = pbl_chunk_list_create(dev, pbl);
1351 if (err) {
1352 ibdev_dbg(&dev->ibdev,
1353 "chunk_list creation failed[%d]\n", err);
1354 goto err_chunk;
1355 }
1356
1357 ibdev_dbg(&dev->ibdev,
1358 "pbl indirect - size[%u], chunks[%u]\n",
1359 pbl->pbl_buf_size_in_bytes,
1360 pbl->phys.indirect.chunk_list.size);
1361
1362 return 0;
1363
1364 err_chunk:
1365 dma_unmap_sg(&dev->pdev->dev, sgl, size_in_pages, DMA_TO_DEVICE);
1366 err_map:
1367 kfree(sgl);
1368 return err;
1369 }
1370
static void pbl_indirect_terminate(struct efa_dev *dev, struct pbl_context *pbl)
1372 {
1373 pbl_chunk_list_destroy(dev, pbl);
1374 dma_unmap_sg(&dev->pdev->dev, pbl->phys.indirect.sgl,
1375 pbl->phys.indirect.pbl_buf_size_in_pages, DMA_TO_DEVICE);
1376 kfree(pbl->phys.indirect.sgl);
1377 }
1378
1379 /* create a page buffer list from a mapped user memory region */
static int pbl_create(struct efa_dev *dev,
		      struct pbl_context *pbl,
		      struct ib_umem *umem,
		      int hp_cnt,
		      u8 hp_shift)
1385 {
1386 int err;
1387
1388 pbl->pbl_buf_size_in_bytes = hp_cnt * EFA_CHUNK_PAYLOAD_PTR_SIZE;
1389 pbl->pbl_buf = kvzalloc(pbl->pbl_buf_size_in_bytes, GFP_KERNEL);
1390 if (!pbl->pbl_buf)
1391 return -ENOMEM;
1392
1393 if (is_vmalloc_addr(pbl->pbl_buf)) {
1394 pbl->physically_continuous = 0;
1395 err = umem_to_page_list(dev, umem, pbl->pbl_buf, hp_cnt,
1396 hp_shift);
1397 if (err)
1398 goto err_free;
1399
1400 err = pbl_indirect_initialize(dev, pbl);
1401 if (err)
1402 goto err_free;
1403 } else {
1404 pbl->physically_continuous = 1;
1405 err = umem_to_page_list(dev, umem, pbl->pbl_buf, hp_cnt,
1406 hp_shift);
1407 if (err)
1408 goto err_free;
1409
1410 err = pbl_continuous_initialize(dev, pbl);
1411 if (err)
1412 goto err_free;
1413 }
1414
1415 ibdev_dbg(&dev->ibdev,
1416 "user_pbl_created: user_pages[%u], continuous[%u]\n",
1417 hp_cnt, pbl->physically_continuous);
1418
1419 return 0;
1420
1421 err_free:
1422 kvfree(pbl->pbl_buf);
1423 return err;
1424 }
1425
static void pbl_destroy(struct efa_dev *dev, struct pbl_context *pbl)
1427 {
1428 if (pbl->physically_continuous)
1429 dma_unmap_single(&dev->pdev->dev, pbl->phys.continuous.dma_addr,
1430 pbl->pbl_buf_size_in_bytes, DMA_TO_DEVICE);
1431 else
1432 pbl_indirect_terminate(dev, pbl);
1433
1434 kvfree(pbl->pbl_buf);
1435 }
1436
static int efa_create_inline_pbl(struct efa_dev *dev, struct efa_mr *mr,
				 struct efa_com_reg_mr_params *params)
1439 {
1440 int err;
1441
1442 params->inline_pbl = 1;
1443 err = umem_to_page_list(dev, mr->umem, params->pbl.inline_pbl_array,
1444 params->page_num, params->page_shift);
1445 if (err)
1446 return err;
1447
1448 ibdev_dbg(&dev->ibdev,
1449 "inline_pbl_array - pages[%u]\n", params->page_num);
1450
1451 return 0;
1452 }
1453
static int efa_create_pbl(struct efa_dev *dev,
			  struct pbl_context *pbl,
			  struct efa_mr *mr,
			  struct efa_com_reg_mr_params *params)
1458 {
1459 int err;
1460
1461 err = pbl_create(dev, pbl, mr->umem, params->page_num,
1462 params->page_shift);
1463 if (err) {
1464 ibdev_dbg(&dev->ibdev, "Failed to create pbl[%d]\n", err);
1465 return err;
1466 }
1467
1468 params->inline_pbl = 0;
1469 params->indirect = !pbl->physically_continuous;
1470 if (pbl->physically_continuous) {
1471 params->pbl.pbl.length = pbl->pbl_buf_size_in_bytes;
1472
1473 efa_com_set_dma_addr(pbl->phys.continuous.dma_addr,
				     &params->pbl.pbl.address.mem_addr_high,
				     &params->pbl.pbl.address.mem_addr_low);
1476 } else {
1477 params->pbl.pbl.length =
1478 pbl->phys.indirect.chunk_list.chunks[0].length;
1479
1480 efa_com_set_dma_addr(pbl->phys.indirect.chunk_list.chunks[0].dma_addr,
				     &params->pbl.pbl.address.mem_addr_high,
				     &params->pbl.pbl.address.mem_addr_low);
1483 }
1484
1485 return 0;
1486 }
1487
struct ib_mr *efa_reg_mr(struct ib_pd *ibpd, u64 start, u64 length,
			 u64 virt_addr, int access_flags,
			 struct ib_udata *udata)
1491 {
1492 struct efa_dev *dev = to_edev(ibpd->device);
1493 struct efa_com_reg_mr_params params = {};
1494 struct efa_com_reg_mr_result result = {};
1495 struct pbl_context pbl;
1496 int supp_access_flags;
1497 unsigned int pg_sz;
1498 struct efa_mr *mr;
1499 int inline_size;
1500 int err;
1501
1502 if (udata && udata->inlen &&
1503 !ib_is_udata_cleared(udata, 0, sizeof(udata->inlen))) {
1504 ibdev_dbg(&dev->ibdev,
1505 "Incompatible ABI params, udata not cleared\n");
1506 err = -EINVAL;
1507 goto err_out;
1508 }
1509
1510 supp_access_flags =
1511 IB_ACCESS_LOCAL_WRITE |
1512 (EFA_DEV_CAP(dev, RDMA_READ) ? IB_ACCESS_REMOTE_READ : 0);
1513
1514 access_flags &= ~IB_ACCESS_OPTIONAL;
1515 if (access_flags & ~supp_access_flags) {
1516 ibdev_dbg(&dev->ibdev,
1517 "Unsupported access flags[%#x], supported[%#x]\n",
1518 access_flags, supp_access_flags);
1519 err = -EOPNOTSUPP;
1520 goto err_out;
1521 }
1522
1523 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
1524 if (!mr) {
1525 err = -ENOMEM;
1526 goto err_out;
1527 }
1528
1529 mr->umem = ib_umem_get(ibpd->device, start, length, access_flags);
1530 if (IS_ERR(mr->umem)) {
1531 err = PTR_ERR(mr->umem);
1532 ibdev_dbg(&dev->ibdev,
1533 "Failed to pin and map user space memory[%d]\n", err);
1534 goto err_free;
1535 }
1536
1537 params.pd = to_epd(ibpd)->pdn;
1538 params.iova = virt_addr;
1539 params.mr_length_in_bytes = length;
1540 params.permissions = access_flags;
1541
1542 pg_sz = ib_umem_find_best_pgsz(mr->umem,
1543 dev->dev_attr.page_size_cap,
1544 virt_addr);
1545 if (!pg_sz) {
1546 err = -EOPNOTSUPP;
1547 ibdev_dbg(&dev->ibdev, "Failed to find a suitable page size in page_size_cap %#llx\n",
1548 dev->dev_attr.page_size_cap);
1549 goto err_unmap;
1550 }
1551
1552 params.page_shift = order_base_2(pg_sz);
1553 params.page_num = ib_umem_num_dma_blocks(mr->umem, pg_sz);
1554
1555 ibdev_dbg(&dev->ibdev,
1556 "start %#llx length %#llx params.page_shift %u params.page_num %u\n",
1557 start, length, params.page_shift, params.page_num);
1558
1559 inline_size = ARRAY_SIZE(params.pbl.inline_pbl_array);
1560 if (params.page_num <= inline_size) {
		err = efa_create_inline_pbl(dev, mr, &params);
1562 if (err)
1563 goto err_unmap;
1564
		err = efa_com_register_mr(&dev->edev, &params, &result);
1566 if (err)
1567 goto err_unmap;
1568 } else {
		err = efa_create_pbl(dev, &pbl, mr, &params);
1570 if (err)
1571 goto err_unmap;
1572
		err = efa_com_register_mr(&dev->edev, &params, &result);
1574 pbl_destroy(dev, &pbl);
1575
1576 if (err)
1577 goto err_unmap;
1578 }
1579
1580 mr->ibmr.lkey = result.l_key;
1581 mr->ibmr.rkey = result.r_key;
1582 mr->ibmr.length = length;
1583 ibdev_dbg(&dev->ibdev, "Registered mr[%d]\n", mr->ibmr.lkey);
1584
1585 return &mr->ibmr;
1586
1587 err_unmap:
1588 ib_umem_release(mr->umem);
1589 err_free:
1590 kfree(mr);
1591 err_out:
1592 atomic64_inc(&dev->stats.reg_mr_err);
1593 return ERR_PTR(err);
1594 }
1595
int efa_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
1597 {
1598 struct efa_dev *dev = to_edev(ibmr->device);
1599 struct efa_com_dereg_mr_params params;
1600 struct efa_mr *mr = to_emr(ibmr);
1601 int err;
1602
1603 ibdev_dbg(&dev->ibdev, "Deregister mr[%d]\n", ibmr->lkey);
1604
1605 params.l_key = mr->ibmr.lkey;
	err = efa_com_dereg_mr(&dev->edev, &params);
1607 if (err)
1608 return err;
1609
1610 ib_umem_release(mr->umem);
1611 kfree(mr);
1612
1613 return 0;
1614 }
1615
int efa_get_port_immutable(struct ib_device *ibdev, u8 port_num,
			   struct ib_port_immutable *immutable)
1618 {
1619 struct ib_port_attr attr;
1620 int err;
1621
1622 err = ib_query_port(ibdev, port_num, &attr);
1623 if (err) {
1624 ibdev_dbg(ibdev, "Couldn't query port err[%d]\n", err);
1625 return err;
1626 }
1627
1628 immutable->pkey_tbl_len = attr.pkey_tbl_len;
1629 immutable->gid_tbl_len = attr.gid_tbl_len;
1630
1631 return 0;
1632 }
1633
static int efa_dealloc_uar(struct efa_dev *dev, u16 uarn)
1635 {
1636 struct efa_com_dealloc_uar_params params = {
1637 .uarn = uarn,
1638 };
1639
	return efa_com_dealloc_uar(&dev->edev, &params);
1641 }
1642
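/*
 * Sets _attr_str to the attribute name when the device reports the attribute
 * but userspace did not acknowledge it in its comp_mask, NULL otherwise.
 */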
1643 #define EFA_CHECK_USER_COMP(_dev, _comp_mask, _attr, _mask, _attr_str) \
1644 (_attr_str = (!(_dev)->dev_attr._attr || ((_comp_mask) & (_mask))) ? \
1645 NULL : #_attr)
1646
static int efa_user_comp_handshake(const struct ib_ucontext *ibucontext,
				   const struct efa_ibv_alloc_ucontext_cmd *cmd)
1649 {
1650 struct efa_dev *dev = to_edev(ibucontext->device);
1651 char *attr_str;
1652
1653 if (EFA_CHECK_USER_COMP(dev, cmd->comp_mask, max_tx_batch,
1654 EFA_ALLOC_UCONTEXT_CMD_COMP_TX_BATCH, attr_str))
1655 goto err;
1656
1657 if (EFA_CHECK_USER_COMP(dev, cmd->comp_mask, min_sq_depth,
1658 EFA_ALLOC_UCONTEXT_CMD_COMP_MIN_SQ_WR,
1659 attr_str))
1660 goto err;
1661
1662 return 0;
1663
1664 err:
1665 ibdev_dbg(&dev->ibdev, "Userspace handshake failed for %s attribute\n",
1666 attr_str);
1667 return -EOPNOTSUPP;
1668 }
1669
int efa_alloc_ucontext(struct ib_ucontext *ibucontext, struct ib_udata *udata)
1671 {
1672 struct efa_ucontext *ucontext = to_eucontext(ibucontext);
1673 struct efa_dev *dev = to_edev(ibucontext->device);
1674 struct efa_ibv_alloc_ucontext_resp resp = {};
1675 struct efa_ibv_alloc_ucontext_cmd cmd = {};
1676 struct efa_com_alloc_uar_result result;
1677 int err;
1678
1679 /*
1680 * it's fine if the driver does not know all request fields,
1681 * we will ack input fields in our response.
1682 */
1683
1684 err = ib_copy_from_udata(&cmd, udata,
1685 min(sizeof(cmd), udata->inlen));
1686 if (err) {
1687 ibdev_dbg(&dev->ibdev,
1688 "Cannot copy udata for alloc_ucontext\n");
1689 goto err_out;
1690 }
1691
1692 err = efa_user_comp_handshake(ibucontext, &cmd);
1693 if (err)
1694 goto err_out;
1695
1696 err = efa_com_alloc_uar(&dev->edev, &result);
1697 if (err)
1698 goto err_out;
1699
1700 ucontext->uarn = result.uarn;
1701
1702 resp.cmds_supp_udata_mask |= EFA_USER_CMDS_SUPP_UDATA_QUERY_DEVICE;
1703 resp.cmds_supp_udata_mask |= EFA_USER_CMDS_SUPP_UDATA_CREATE_AH;
1704 resp.sub_cqs_per_cq = dev->dev_attr.sub_cqs_per_cq;
1705 resp.inline_buf_size = dev->dev_attr.inline_buf_size;
1706 resp.max_llq_size = dev->dev_attr.max_llq_size;
1707 resp.max_tx_batch = dev->dev_attr.max_tx_batch;
1708 resp.min_sq_wr = dev->dev_attr.min_sq_depth;
1709
1710 err = ib_copy_to_udata(udata, &resp,
1711 min(sizeof(resp), udata->outlen));
1712 if (err)
1713 goto err_dealloc_uar;
1714
1715 return 0;
1716
1717 err_dealloc_uar:
1718 efa_dealloc_uar(dev, result.uarn);
1719 err_out:
1720 atomic64_inc(&dev->stats.alloc_ucontext_err);
1721 return err;
1722 }
1723
void efa_dealloc_ucontext(struct ib_ucontext *ibucontext)
1725 {
1726 struct efa_ucontext *ucontext = to_eucontext(ibucontext);
1727 struct efa_dev *dev = to_edev(ibucontext->device);
1728
1729 efa_dealloc_uar(dev, ucontext->uarn);
1730 }
1731
void efa_mmap_free(struct rdma_user_mmap_entry *rdma_entry)
1733 {
1734 struct efa_user_mmap_entry *entry = to_emmap(rdma_entry);
1735
1736 kfree(entry);
1737 }
1738
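/*
 * Map a single user mmap entry: doorbell and LLQ BAR ranges are remapped as
 * non-cached or write-combined I/O, DMA buffers are inserted page by page.
 */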
static int __efa_mmap(struct efa_dev *dev, struct efa_ucontext *ucontext,
		      struct vm_area_struct *vma)
1741 {
1742 struct rdma_user_mmap_entry *rdma_entry;
1743 struct efa_user_mmap_entry *entry;
1744 unsigned long va;
1745 int err = 0;
1746 u64 pfn;
1747
1748 rdma_entry = rdma_user_mmap_entry_get(&ucontext->ibucontext, vma);
1749 if (!rdma_entry) {
1750 ibdev_dbg(&dev->ibdev,
1751 "pgoff[%#lx] does not have valid entry\n",
1752 vma->vm_pgoff);
1753 atomic64_inc(&dev->stats.mmap_err);
1754 return -EINVAL;
1755 }
1756 entry = to_emmap(rdma_entry);
1757
1758 ibdev_dbg(&dev->ibdev,
1759 "Mapping address[%#llx], length[%#zx], mmap_flag[%d]\n",
1760 entry->address, rdma_entry->npages * PAGE_SIZE,
1761 entry->mmap_flag);
1762
1763 pfn = entry->address >> PAGE_SHIFT;
1764 switch (entry->mmap_flag) {
1765 case EFA_MMAP_IO_NC:
1766 err = rdma_user_mmap_io(&ucontext->ibucontext, vma, pfn,
1767 entry->rdma_entry.npages * PAGE_SIZE,
1768 pgprot_noncached(vma->vm_page_prot),
1769 rdma_entry);
1770 break;
1771 case EFA_MMAP_IO_WC:
1772 err = rdma_user_mmap_io(&ucontext->ibucontext, vma, pfn,
1773 entry->rdma_entry.npages * PAGE_SIZE,
1774 pgprot_writecombine(vma->vm_page_prot),
1775 rdma_entry);
1776 break;
1777 case EFA_MMAP_DMA_PAGE:
1778 for (va = vma->vm_start; va < vma->vm_end;
1779 va += PAGE_SIZE, pfn++) {
1780 err = vm_insert_page(vma, va, pfn_to_page(pfn));
1781 if (err)
1782 break;
1783 }
1784 break;
1785 default:
1786 err = -EINVAL;
1787 }
1788
1789 if (err) {
1790 ibdev_dbg(
1791 &dev->ibdev,
1792 "Couldn't mmap address[%#llx] length[%#zx] mmap_flag[%d] err[%d]\n",
1793 entry->address, rdma_entry->npages * PAGE_SIZE,
1794 entry->mmap_flag, err);
1795 atomic64_inc(&dev->stats.mmap_err);
1796 }
1797
1798 rdma_user_mmap_entry_put(rdma_entry);
1799 return err;
1800 }
1801
int efa_mmap(struct ib_ucontext *ibucontext,
	     struct vm_area_struct *vma)
1804 {
1805 struct efa_ucontext *ucontext = to_eucontext(ibucontext);
1806 struct efa_dev *dev = to_edev(ibucontext->device);
1807 size_t length = vma->vm_end - vma->vm_start;
1808
1809 ibdev_dbg(&dev->ibdev,
1810 "start %#lx, end %#lx, length = %#zx, pgoff = %#lx\n",
1811 vma->vm_start, vma->vm_end, length, vma->vm_pgoff);
1812
1813 return __efa_mmap(dev, ucontext, vma);
1814 }
1815
static int efa_ah_destroy(struct efa_dev *dev, struct efa_ah *ah)
1817 {
1818 struct efa_com_destroy_ah_params params = {
1819 .ah = ah->ah,
1820 .pdn = to_epd(ah->ibah.pd)->pdn,
1821 };
1822
	return efa_com_destroy_ah(&dev->edev, &params);
1824 }
1825
int efa_create_ah(struct ib_ah *ibah,
		  struct rdma_ah_init_attr *init_attr,
		  struct ib_udata *udata)
1829 {
1830 struct rdma_ah_attr *ah_attr = init_attr->ah_attr;
1831 struct efa_dev *dev = to_edev(ibah->device);
1832 struct efa_com_create_ah_params params = {};
1833 struct efa_ibv_create_ah_resp resp = {};
1834 struct efa_com_create_ah_result result;
1835 struct efa_ah *ah = to_eah(ibah);
1836 int err;
1837
1838 if (!(init_attr->flags & RDMA_CREATE_AH_SLEEPABLE)) {
1839 ibdev_dbg(&dev->ibdev,
1840 "Create address handle is not supported in atomic context\n");
1841 err = -EOPNOTSUPP;
1842 goto err_out;
1843 }
1844
1845 if (udata->inlen &&
1846 !ib_is_udata_cleared(udata, 0, udata->inlen)) {
1847 ibdev_dbg(&dev->ibdev, "Incompatible ABI params\n");
1848 err = -EINVAL;
1849 goto err_out;
1850 }
1851
1852 memcpy(params.dest_addr, ah_attr->grh.dgid.raw,
1853 sizeof(params.dest_addr));
1854 params.pdn = to_epd(ibah->pd)->pdn;
	err = efa_com_create_ah(&dev->edev, &params, &result);
1856 if (err)
1857 goto err_out;
1858
1859 memcpy(ah->id, ah_attr->grh.dgid.raw, sizeof(ah->id));
1860 ah->ah = result.ah;
1861
1862 resp.efa_address_handle = result.ah;
1863
1864 if (udata->outlen) {
1865 err = ib_copy_to_udata(udata, &resp,
1866 min(sizeof(resp), udata->outlen));
1867 if (err) {
1868 ibdev_dbg(&dev->ibdev,
1869 "Failed to copy udata for create_ah response\n");
1870 goto err_destroy_ah;
1871 }
1872 }
1873 ibdev_dbg(&dev->ibdev, "Created ah[%d]\n", ah->ah);
1874
1875 return 0;
1876
1877 err_destroy_ah:
1878 efa_ah_destroy(dev, ah);
1879 err_out:
1880 atomic64_inc(&dev->stats.create_ah_err);
1881 return err;
1882 }
1883
int efa_destroy_ah(struct ib_ah *ibah, u32 flags)
1885 {
1886 struct efa_dev *dev = to_edev(ibah->pd->device);
1887 struct efa_ah *ah = to_eah(ibah);
1888
1889 ibdev_dbg(&dev->ibdev, "Destroy ah[%d]\n", ah->ah);
1890
1891 if (!(flags & RDMA_DESTROY_AH_SLEEPABLE)) {
1892 ibdev_dbg(&dev->ibdev,
1893 "Destroy address handle is not supported in atomic context\n");
1894 return -EOPNOTSUPP;
1895 }
1896
1897 efa_ah_destroy(dev, ah);
1898 return 0;
1899 }
1900
struct rdma_hw_stats *efa_alloc_hw_stats(struct ib_device *ibdev, u8 port_num)
1902 {
1903 return rdma_alloc_hw_stats_struct(efa_stats_names,
1904 ARRAY_SIZE(efa_stats_names),
1905 RDMA_HW_STATS_DEFAULT_LIFESPAN);
1906 }
1907
int efa_get_hw_stats(struct ib_device *ibdev, struct rdma_hw_stats *stats,
		     u8 port_num, int index)
1910 {
1911 struct efa_com_get_stats_params params = {};
1912 union efa_com_get_stats_result result;
1913 struct efa_dev *dev = to_edev(ibdev);
1914 struct efa_com_rdma_read_stats *rrs;
1915 struct efa_com_messages_stats *ms;
1916 struct efa_com_basic_stats *bs;
1917 struct efa_com_stats_admin *as;
1918 struct efa_stats *s;
1919 int err;
1920
1921 params.scope = EFA_ADMIN_GET_STATS_SCOPE_ALL;
1922 params.type = EFA_ADMIN_GET_STATS_TYPE_BASIC;
1923
	err = efa_com_get_stats(&dev->edev, &params, &result);
1925 if (err)
1926 return err;
1927
1928 bs = &result.basic_stats;
1929 stats->value[EFA_TX_BYTES] = bs->tx_bytes;
1930 stats->value[EFA_TX_PKTS] = bs->tx_pkts;
1931 stats->value[EFA_RX_BYTES] = bs->rx_bytes;
1932 stats->value[EFA_RX_PKTS] = bs->rx_pkts;
1933 stats->value[EFA_RX_DROPS] = bs->rx_drops;
1934
1935 params.type = EFA_ADMIN_GET_STATS_TYPE_MESSAGES;
	err = efa_com_get_stats(&dev->edev, &params, &result);
1937 if (err)
1938 return err;
1939
1940 ms = &result.messages_stats;
1941 stats->value[EFA_SEND_BYTES] = ms->send_bytes;
1942 stats->value[EFA_SEND_WRS] = ms->send_wrs;
1943 stats->value[EFA_RECV_BYTES] = ms->recv_bytes;
1944 stats->value[EFA_RECV_WRS] = ms->recv_wrs;
1945
1946 params.type = EFA_ADMIN_GET_STATS_TYPE_RDMA_READ;
	err = efa_com_get_stats(&dev->edev, &params, &result);
1948 if (err)
1949 return err;
1950
1951 rrs = &result.rdma_read_stats;
1952 stats->value[EFA_RDMA_READ_WRS] = rrs->read_wrs;
1953 stats->value[EFA_RDMA_READ_BYTES] = rrs->read_bytes;
1954 stats->value[EFA_RDMA_READ_WR_ERR] = rrs->read_wr_err;
1955 stats->value[EFA_RDMA_READ_RESP_BYTES] = rrs->read_resp_bytes;
1956
1957 as = &dev->edev.aq.stats;
1958 stats->value[EFA_SUBMITTED_CMDS] = atomic64_read(&as->submitted_cmd);
1959 stats->value[EFA_COMPLETED_CMDS] = atomic64_read(&as->completed_cmd);
1960 stats->value[EFA_CMDS_ERR] = atomic64_read(&as->cmd_err);
1961 stats->value[EFA_NO_COMPLETION_CMDS] = atomic64_read(&as->no_completion);
1962
1963 s = &dev->stats;
1964 stats->value[EFA_KEEP_ALIVE_RCVD] = atomic64_read(&s->keep_alive_rcvd);
1965 stats->value[EFA_ALLOC_PD_ERR] = atomic64_read(&s->alloc_pd_err);
1966 stats->value[EFA_CREATE_QP_ERR] = atomic64_read(&s->create_qp_err);
1967 stats->value[EFA_CREATE_CQ_ERR] = atomic64_read(&s->create_cq_err);
1968 stats->value[EFA_REG_MR_ERR] = atomic64_read(&s->reg_mr_err);
1969 stats->value[EFA_ALLOC_UCONTEXT_ERR] =
1970 atomic64_read(&s->alloc_ucontext_err);
1971 stats->value[EFA_CREATE_AH_ERR] = atomic64_read(&s->create_ah_err);
1972 stats->value[EFA_MMAP_ERR] = atomic64_read(&s->mmap_err);
1973
1974 return ARRAY_SIZE(efa_stats_names);
1975 }
1976
enum rdma_link_layer efa_port_link_layer(struct ib_device *ibdev,
					 u8 port_num)
1979 {
1980 return IB_LINK_LAYER_UNSPECIFIED;
1981 }
1982
1983