/* Excerpts from the Linux kernel's FRWR memory registration code,
 * net/sunrpc/xprtrdma/frwr_ops.c. Elided lines are marked "...".
 */

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
 */

/* Lightweight memory registration using Fast Registration Work
 * Requests (FRWR).
 *
 * FRWR features ordered asynchronous registration and invalidation
 * of arbitrarily-sized memory regions. This is the fastest and safest
 * but most complex memory registration mode.
 */

/* Normal operation
 *
 * A Memory Region is prepared for RDMA Read or Write using a FAST_REG
 * Work Request (frwr_map). When the RDMA operation is finished, this
 * Memory Region is invalidated using a LOCAL_INV Work Request
 * (frwr_unmap_async and frwr_unmap_sync).
 */

/* Transport recovery
 *
 * frwr_map and frwr_unmap_* cannot run at the same time the transport
 * connect worker is running. The connect worker holds the transport
 * send lock, just as ->send_request does. This prevents frwr_map and
 * the connect worker from running concurrently.
 */

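/* A minimal sketch of the lifecycle described above. This helper is
 * illustrative only (it is not part of frwr_ops.c) and assumes the
 * caller supplies a connected QP and pre-built registration and
 * invalidation WRs.
 */
#include <rdma/ib_verbs.h>

static int frwr_sketch_lifecycle(struct ib_qp *qp, struct ib_reg_wr *regwr,
				 struct ib_send_wr *invwr)
{
	int rc;

	/* 1. Register: a FAST_REG (REG_MR) WR exposes the region */
	regwr->wr.opcode = IB_WR_REG_MR;
	rc = ib_post_send(qp, &regwr->wr, NULL);
	if (rc)
		return rc;

	/* ... the rkey is advertised to the peer, which performs
	 * the RDMA Read or Write here ...
	 */

	/* 2. Invalidate: a LOCAL_INV WR retires the same rkey */
	invwr->opcode = IB_WR_LOCAL_INV;
	invwr->ex.invalidate_rkey = regwr->key;
	return ib_post_send(qp, invwr, NULL);
}
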
static void frwr_cid_init(struct rpcrdma_ep *ep,
			  struct rpcrdma_mr *mr)
{
	struct rpc_rdma_cid *cid = &mr->mr_cid;

	cid->ci_queue_id = ep->re_attr.send_cq->res.id;
	cid->ci_completion_id = mr->mr_ibmr->res.id;
}

static void frwr_mr_unmap(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr *mr)
{
	if (mr->mr_device) {
		trace_xprtrdma_mr_unmap(mr);
		ib_dma_unmap_sg(mr->mr_device, mr->mr_sg, mr->mr_nents,
				mr->mr_dir);
		mr->mr_device = NULL;
	}
}

/**
 * frwr_mr_release - Destroy one MR
 */
void frwr_mr_release(struct rpcrdma_mr *mr)
{
	int rc;

	frwr_mr_unmap(mr->mr_xprt, mr);

	rc = ib_dereg_mr(mr->mr_ibmr);
	/* ... */
	kfree(mr->mr_sg);
	kfree(mr);
}

static void frwr_mr_put(struct rpcrdma_mr *mr)
{
	frwr_mr_unmap(mr->mr_xprt, mr);

	/* The MR is returned to the req's MR free list instead
	 * of to the xprt's MR free list. No spinlock is needed.
	 */
	rpcrdma_mr_push(mr, &mr->mr_req->rl_free_mrs);
}

/* frwr_reset - Place MRs back on the free list
 * @req: request to reset
 */
void frwr_reset(struct rpcrdma_req *req)
{
	struct rpcrdma_mr *mr;

	while ((mr = rpcrdma_mr_pop(&req->rl_registered)))
		frwr_mr_put(mr);
}

/**
 * frwr_mr_init - Initialize one MR
 * @r_xprt: controlling transport instance
 * @mr: generic MR to prepare for FRWR
 */
int frwr_mr_init(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr *mr)
{
	struct rpcrdma_ep *ep = r_xprt->rx_ep;
	unsigned int depth = ep->re_max_fr_depth;
	struct scatterlist *sg;
	struct ib_mr *frmr;

	sg = kcalloc_node(depth, sizeof(*sg), XPRTRDMA_GFP_FLAGS,
			  ibdev_to_node(ep->re_id->device));
	if (!sg)
		return -ENOMEM;

	frmr = ib_alloc_mr(ep->re_pd, ep->re_mrtype, depth);
	if (IS_ERR(frmr)) {
		kfree(sg);
		return PTR_ERR(frmr);
	}

	mr->mr_xprt = r_xprt;
	mr->mr_ibmr = frmr;
	mr->mr_device = NULL;
	INIT_LIST_HEAD(&mr->mr_list);
	init_completion(&mr->mr_linv_done);
	frwr_cid_init(ep, mr);

	sg_init_table(sg, depth);
	mr->mr_sg = sg;
	return 0;
}

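/* A standalone sketch of the allocation pattern in frwr_mr_init() above:
 * the scatterlist is placed on the device's NUMA node, and both
 * resources are released together on failure. The helper name is
 * hypothetical; kcalloc_node(), ibdev_to_node(), ib_alloc_mr(), and
 * sg_init_table() are real kernel APIs.
 */
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <rdma/ib_verbs.h>

static int frwr_sketch_alloc(struct ib_pd *pd, struct ib_device *device,
			     unsigned int depth, struct scatterlist **sgp,
			     struct ib_mr **mrp)
{
	struct scatterlist *sg;
	struct ib_mr *mr;

	/* Allocate the sg table near the HCA to avoid cross-node traffic */
	sg = kcalloc_node(depth, sizeof(*sg), GFP_KERNEL,
			  ibdev_to_node(device));
	if (!sg)
		return -ENOMEM;

	mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, depth);
	if (IS_ERR(mr)) {
		kfree(sg);	/* both resources live and die together */
		return PTR_ERR(mr);
	}

	sg_init_table(sg, depth);
	*sgp = sg;
	*mrp = mr;
	return 0;
}
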
/**
 * frwr_query_device - Prepare a transport for use with FRWR
 * @ep: endpoint to fill in
 * @device: RDMA device to query
 *
 * On success, sets:
 *	ep->re_attr
 *	ep->re_max_requests
 *	ep->re_max_rdma_segs
 *	ep->re_max_fr_depth
 *	ep->re_mrtype
 *
 * Return values:
 *   On success, returns zero.
 *   %-EINVAL - the device does not support FRWR memory registration
 *   %-ENOMEM - the device is not sufficiently capable for NFS/RDMA
 */
int frwr_query_device(struct rpcrdma_ep *ep, const struct ib_device *device)
{
	const struct ib_device_attr *attrs = &device->attrs;
	int max_qp_wr, depth, delta;
	unsigned int max_sge;

	if (!(attrs->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) ||
	    attrs->max_fast_reg_page_list_len == 0) {
		pr_err("rpcrdma: 'frwr' mode is not supported by device %s\n",
		       device->name);
		return -EINVAL;
	}

	max_sge = min_t(unsigned int, attrs->max_send_sge,
			RPCRDMA_MAX_SEND_SGES);
	if (max_sge < RPCRDMA_MIN_SEND_SGES)
		return -ENOMEM;
	ep->re_attr.cap.max_send_sge = max_sge;
	ep->re_attr.cap.max_recv_sge = 1;

	ep->re_mrtype = IB_MR_TYPE_MEM_REG;
	if (attrs->kernel_cap_flags & IBK_SG_GAPS_REG)
		ep->re_mrtype = IB_MR_TYPE_SG_GAPS;

	/* ... */
	if (attrs->max_sge_rd > RPCRDMA_MAX_HDR_SEGS)
		ep->re_max_fr_depth = attrs->max_sge_rd;
	else
		ep->re_max_fr_depth = attrs->max_fast_reg_page_list_len;
	if (ep->re_max_fr_depth > RPCRDMA_MAX_DATA_SEGS)
		ep->re_max_fr_depth = RPCRDMA_MAX_DATA_SEGS;

	/* Add room for FRWR register and invalidate WRs: a reg+invalidate
	 * pair each for head, pagelist (N pairs), and tail, plus the
	 * RDMA_SEND WR itself.
	 */
	depth = 7;
	if (ep->re_max_fr_depth < RPCRDMA_MAX_DATA_SEGS) {
		delta = RPCRDMA_MAX_DATA_SEGS - ep->re_max_fr_depth;
		do {
			depth += 2;	/* FRWR reg + invalidate */
			delta -= ep->re_max_fr_depth;
		} while (delta > 0);
	}

	max_qp_wr = attrs->max_qp_wr;
	max_qp_wr -= RPCRDMA_BACKWARD_WRS;
	max_qp_wr -= 1;
	if (max_qp_wr < RPCRDMA_MIN_SLOT_TABLE)
		return -ENOMEM;
	if (ep->re_max_requests > max_qp_wr)
		ep->re_max_requests = max_qp_wr;
	ep->re_attr.cap.max_send_wr = ep->re_max_requests * depth;
	if (ep->re_attr.cap.max_send_wr > max_qp_wr) {
		ep->re_max_requests = max_qp_wr / depth;
		if (!ep->re_max_requests)
			return -ENOMEM;
		ep->re_attr.cap.max_send_wr = ep->re_max_requests * depth;
	}
	ep->re_attr.cap.max_send_wr += RPCRDMA_BACKWARD_WRS;
	ep->re_attr.cap.max_send_wr += 1; /* for ib_drain_sq */
	ep->re_attr.cap.max_recv_wr = ep->re_max_requests;
	ep->re_attr.cap.max_recv_wr += RPCRDMA_BACKWARD_WRS;
	ep->re_attr.cap.max_recv_wr += RPCRDMA_MAX_RECV_BATCH;
	ep->re_attr.cap.max_recv_wr += 1; /* for ib_drain_rq */

	ep->re_max_rdma_segs =
		DIV_ROUND_UP(RPCRDMA_MAX_DATA_SEGS, ep->re_max_fr_depth);
	/* Reply chunks require segments for head and tail buffers */
	ep->re_max_rdma_segs += 2;
	if (ep->re_max_rdma_segs > RPCRDMA_MAX_HDR_SEGS)
		ep->re_max_rdma_segs = RPCRDMA_MAX_HDR_SEGS;

	/* Ensure the underlying device can convey the largest
	 * payload NFS will ask for.
	 */
	if ((ep->re_max_rdma_segs * ep->re_max_fr_depth) < RPCRDMA_MAX_SEGS)
		return -ENOMEM;

	return 0;
}

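/* A self-contained sketch of the WR-budget arithmetic above. The helper
 * and its sample values are hypothetical stand-ins for the RPCRDMA_*
 * constants, chosen only to show the loop converging.
 */
static unsigned int frwr_sketch_wr_depth(unsigned int max_data_segs,
					 unsigned int max_fr_depth)
{
	unsigned int depth = 7;	/* Send WR plus head/tail reg+inv pairs */
	int delta;

	if (max_fr_depth < max_data_segs) {
		delta = max_data_segs - max_fr_depth;
		do {
			depth += 2;	/* one more reg + invalidate pair */
			delta -= max_fr_depth;
		} while (delta > 0);
	}
	/* e.g. max_data_segs = 64, max_fr_depth = 16:
	 * delta = 48 -> 32 -> 16 -> 0, so depth = 7 + 3 * 2 = 13,
	 * and max_send_wr becomes re_max_requests * 13 before clamping.
	 */
	return depth;
}
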
/**
 * frwr_map - Register a memory region
 * @r_xprt: controlling transport
 * @seg: memory region co-ordinates
 * @nsegs: number of segments remaining
 * @writing: true when this is an RDMA Write
 * @xid: XID of RPC using the registered memory
 * @mr: MR to fill in
 *
 * Prepare a REG_MR Work Request to register a memory region
 * for remote access via RDMA READ or RDMA WRITE.
 */
struct rpcrdma_mr_seg *frwr_map(struct rpcrdma_xprt *r_xprt,
				struct rpcrdma_mr_seg *seg,
				int nsegs, bool writing, __be32 xid,
				struct rpcrdma_mr *mr)
{
	struct rpcrdma_ep *ep = r_xprt->rx_ep;
	struct ib_reg_wr *reg_wr;
	int i, n, dma_nents;
	struct ib_mr *ibmr;
	u8 key;

	if (nsegs > ep->re_max_fr_depth)
		nsegs = ep->re_max_fr_depth;
	for (i = 0; i < nsegs;) {
		sg_set_page(&mr->mr_sg[i], seg->mr_page,
			    seg->mr_len, seg->mr_offset);

		++seg;
		++i;
		if (ep->re_mrtype == IB_MR_TYPE_SG_GAPS)
			continue;
		if ((i < nsegs && seg->mr_offset) ||
		    offset_in_page((seg-1)->mr_offset + (seg-1)->mr_len))
			break;
	}
	mr->mr_dir = rpcrdma_data_dir(writing);
	mr->mr_nents = i;

	dma_nents = ib_dma_map_sg(ep->re_id->device, mr->mr_sg, mr->mr_nents,
				  mr->mr_dir);
	if (!dma_nents)
		goto out_dmamap_err;
	mr->mr_device = ep->re_id->device;

	ibmr = mr->mr_ibmr;
	n = ib_map_mr_sg(ibmr, mr->mr_sg, dma_nents, NULL, PAGE_SIZE);
	if (n != dma_nents)
		goto out_mapmr_err;

	ibmr->iova &= 0x00000000ffffffff;
	ibmr->iova |= ((u64)be32_to_cpu(xid)) << 32;
	key = (u8)(ibmr->rkey & 0x000000FF);
	ib_update_fast_reg_key(ibmr, ++key);

	reg_wr = &mr->mr_regwr;
	reg_wr->mr = ibmr;
	reg_wr->key = ibmr->rkey;
	reg_wr->access = writing ?
			 IB_ACCESS_REMOTE_WRITE | IB_ACCESS_LOCAL_WRITE :
			 IB_ACCESS_REMOTE_READ;

	mr->mr_handle = ibmr->rkey;
	mr->mr_length = ibmr->length;
	mr->mr_offset = ibmr->iova;
	/* ... */
	return seg;

out_dmamap_err:
	return ERR_PTR(-EIO);

out_mapmr_err:
	return ERR_PTR(-EIO);
}

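/* A sketch of the rkey/IOVA tagging performed in frwr_map() above,
 * pulled out as a hypothetical standalone helper. The upper 32 bits of
 * the IOVA carry the RPC XID (useful when reading wire captures), and
 * the rkey's low "key" byte is bumped so a stale rkey from a previous
 * use of this MR is not honored by the HCA.
 */
#include <rdma/ib_verbs.h>

static void frwr_sketch_tag_mr(struct ib_mr *ibmr, __be32 xid)
{
	u8 key;

	ibmr->iova &= 0x00000000ffffffff;	/* keep the low offset bits */
	ibmr->iova |= ((u64)be32_to_cpu(xid)) << 32;

	key = (u8)(ibmr->rkey & 0x000000FF);
	ib_update_fast_reg_key(ibmr, ++key);	/* new rkey generation */
}
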
/**
 * frwr_wc_fastreg - Invoked by RDMA provider for a flushed FastReg WC
 * @cq: completion queue
 * @wc: WCE for a completed FastReg WR
 */
static void frwr_wc_fastreg(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_cqe *cqe = wc->wr_cqe;
	struct rpcrdma_mr *mr = container_of(cqe, struct rpcrdma_mr, mr_cqe);

	/* WARNING: Only wr_cqe and status are reliable at this point */
	trace_xprtrdma_wc_fastreg(wc, &mr->mr_cid);

	rpcrdma_flush_disconnect(cq->cq_context, wc);
}

/**
 * frwr_send - post Send WRs containing the RPC Call message
 * @r_xprt: controlling transport instance
 * @req: prepared RPC Call
 *
 * For FRWR, chain any FastReg WRs to the Send WR. Only a
 * single ib_post_send call is needed to register memory
 * and then post the Send WR.
 *
 * Returns the return code from ib_post_send.
 */
int frwr_send(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
{
	struct ib_send_wr *post_wr, *send_wr = &req->rl_wr;
	struct rpcrdma_ep *ep = r_xprt->rx_ep;
	struct rpcrdma_mr *mr;
	unsigned int num_wrs;
	int ret;

	num_wrs = 1;
	post_wr = send_wr;
	list_for_each_entry(mr, &req->rl_registered, mr_list) {
		mr->mr_cqe.done = frwr_wc_fastreg;
		mr->mr_regwr.wr.next = post_wr;
		mr->mr_regwr.wr.wr_cqe = &mr->mr_cqe;
		mr->mr_regwr.wr.num_sge = 0;
		mr->mr_regwr.wr.opcode = IB_WR_REG_MR;
		mr->mr_regwr.wr.send_flags = 0;
		post_wr = &mr->mr_regwr.wr;
		++num_wrs;
	}

	if ((kref_read(&req->rl_kref) > 1) || num_wrs > ep->re_send_count) {
		send_wr->send_flags |= IB_SEND_SIGNALED;
		ep->re_send_count = min_t(unsigned int, ep->re_send_batch,
					  num_wrs - ep->re_send_count);
	} else {
		send_wr->send_flags &= ~IB_SEND_SIGNALED;
		ep->re_send_count -= num_wrs;
	}

	ret = ib_post_send(ep->re_id->qp, post_wr, NULL);
	/* ... */
	return ret;
}

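/* The chain built above, shown with two MRs registered (illustrative):
 *
 *   post_wr -> REG_MR(mr2) -> REG_MR(mr1) -> SEND(req->rl_wr)
 *
 * A single ib_post_send() therefore registers all memory before the
 * Call is transmitted. Signaling is batched via re_send_count: the Send
 * WR carries IB_SEND_SIGNALED only when the signal budget is exhausted
 * or another kref still holds the request, which keeps completion
 * interrupts rare.
 */
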
/**
 * frwr_reminv - handle a remotely invalidated mr on the @mrs list
 * @rep: Received reply
 * @mrs: list of MRs to check
 */
void frwr_reminv(struct rpcrdma_rep *rep, struct list_head *mrs)
{
	struct rpcrdma_mr *mr;

	list_for_each_entry(mr, mrs, mr_list)
		if (mr->mr_handle == rep->rr_inv_rkey) {
			list_del_init(&mr->mr_list);
			frwr_mr_put(mr);
			break;	/* only one invalidated MR per RPC */
		}
}

static void frwr_mr_done(struct ib_wc *wc, struct rpcrdma_mr *mr)
{
	if (likely(wc->status == IB_WC_SUCCESS))
		frwr_mr_put(mr);
}

/**
 * frwr_wc_localinv - Invoked by RDMA provider for a LOCAL_INV WC
 * @cq: completion queue
 * @wc: WCE for a completed LocalInv WR
 */
static void frwr_wc_localinv(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_cqe *cqe = wc->wr_cqe;
	struct rpcrdma_mr *mr = container_of(cqe, struct rpcrdma_mr, mr_cqe);

	trace_xprtrdma_wc_li(wc, &mr->mr_cid);
	frwr_mr_done(wc, mr);

	rpcrdma_flush_disconnect(cq->cq_context, wc);
}

/**
 * frwr_wc_localinv_wake - Invoked by RDMA provider for a LOCAL_INV WC
 * @cq: completion queue
 * @wc: WCE for a completed LocalInv WR
 *
 * Awaken anyone waiting for an MR to finish being fenced.
 */
static void frwr_wc_localinv_wake(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_cqe *cqe = wc->wr_cqe;
	struct rpcrdma_mr *mr = container_of(cqe, struct rpcrdma_mr, mr_cqe);

	trace_xprtrdma_wc_li_wake(wc, &mr->mr_cid);
	frwr_mr_done(wc, mr);
	complete(&mr->mr_linv_done);

	rpcrdma_flush_disconnect(cq->cq_context, wc);
}

/**
 * frwr_unmap_sync - invalidate memory regions that were registered for @req
 * @r_xprt: controlling transport instance
 * @req: rpcrdma_req with a non-empty list of MRs to process
 *
 * Sleeps until it is safe for the host CPU to access the previously mapped
 * memory regions. This guarantees that registered MRs are properly fenced
 * from the server before the RPC consumer accesses the data in them.
 */
void frwr_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
{
	struct ib_send_wr *first, **prev, *last;
	struct rpcrdma_ep *ep = r_xprt->rx_ep;
	const struct ib_send_wr *bad_wr;
	struct rpcrdma_mr *mr;
	int rc;

	/* Chain the LOCAL_INV Work Requests and post them with
	 * a single ib_post_send() call.
	 */
	prev = &first;
	mr = rpcrdma_mr_pop(&req->rl_registered);
	do {
		r_xprt->rx_stats.local_inv_needed++;

		last = &mr->mr_invwr;
		last->next = NULL;
		last->wr_cqe = &mr->mr_cqe;
		last->sg_list = NULL;
		last->num_sge = 0;
		last->opcode = IB_WR_LOCAL_INV;
		last->send_flags = IB_SEND_SIGNALED;
		last->ex.invalidate_rkey = mr->mr_handle;

		last->wr_cqe->done = frwr_wc_localinv;

		*prev = last;
		prev = &last->next;
	} while ((mr = rpcrdma_mr_pop(&req->rl_registered)));

	mr = container_of(last, struct rpcrdma_mr, mr_invwr);

	/* Strong send queue ordering guarantees that when the
	 * last WR in the chain completes, all WRs in the chain
	 * are complete.
	 */
	last->wr_cqe->done = frwr_wc_localinv_wake;
	reinit_completion(&mr->mr_linv_done);

	/* Transport disconnect drains the receive CQ before it
	 * replaces the QP. The RPC reply handler won't call us
	 * unless re_id->qp is a valid pointer.
	 */
	bad_wr = NULL;
	rc = ib_post_send(ep->re_id->qp, first, &bad_wr);

	/* The final LOCAL_INV WR in the chain does the wake. If it
	 * was never posted, the wake will not happen, so don't wait
	 * in that case.
	 */
	if (bad_wr != first)
		wait_for_completion(&mr->mr_linv_done);
	if (!rc)
		return;

	/* ... on error, the remaining MRs are recycled once the QP drains ... */
}

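/* A minimal sketch (hypothetical helper) of the chain pattern used
 * above: WRs are linked through ->next so a single ib_post_send()
 * submits them all, and only the final WR's completion handler wakes
 * the waiter. Strong send queue ordering means the last completion
 * implies all earlier WRs in the chain completed too. Assumes each
 * WR's wr_cqe is already set.
 */
#include <rdma/ib_verbs.h>

static void frwr_sketch_chain(struct ib_send_wr **wrs, unsigned int n,
			      void (*mid_done)(struct ib_cq *, struct ib_wc *),
			      void (*last_done)(struct ib_cq *, struct ib_wc *))
{
	unsigned int i;

	for (i = 0; i < n; i++) {
		wrs[i]->next = (i + 1 < n) ? wrs[i + 1] : NULL;
		wrs[i]->wr_cqe->done = mid_done;
	}
	/* only the tail handler wakes, e.g. frwr_wc_localinv_wake */
	wrs[n - 1]->wr_cqe->done = last_done;
}
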
/**
 * frwr_wc_localinv_done - Invoked by RDMA provider for a signaled LOCAL_INV WC
 * @cq: completion queue
 * @wc: WCE for a completed LocalInv WR
 */
static void frwr_wc_localinv_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_cqe *cqe = wc->wr_cqe;
	struct rpcrdma_mr *mr = container_of(cqe, struct rpcrdma_mr, mr_cqe);
	struct rpcrdma_rep *rep;

	trace_xprtrdma_wc_li_done(wc, &mr->mr_cid);

	/* Ensure that @rep is generated before the MR is released */
	rep = mr->mr_req->rl_reply;
	smp_rmb();

	if (wc->status != IB_WC_SUCCESS) {
		if (rep)
			rpcrdma_unpin_rqst(rep);
		rpcrdma_flush_disconnect(cq->cq_context, wc);
		return;
	}
	frwr_mr_put(mr);
	rpcrdma_complete_rqst(rep);
}

/**
 * frwr_unmap_async - invalidate memory regions that were registered for @req
 * @r_xprt: controlling transport instance
 * @req: rpcrdma_req with a non-empty list of MRs to process
 *
 * This guarantees that registered MRs are properly fenced from the
 * server before the RPC consumer accesses the data in them.
 */
void frwr_unmap_async(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
{
	struct ib_send_wr *first, *last, **prev;
	struct rpcrdma_ep *ep = r_xprt->rx_ep;
	struct rpcrdma_mr *mr;
	int rc;

	/* Chain the LOCAL_INV Work Requests and post them with
	 * a single ib_post_send() call.
	 */
	prev = &first;
	mr = rpcrdma_mr_pop(&req->rl_registered);
	do {
		r_xprt->rx_stats.local_inv_needed++;

		last = &mr->mr_invwr;
		last->next = NULL;
		last->wr_cqe = &mr->mr_cqe;
		last->sg_list = NULL;
		last->num_sge = 0;
		last->opcode = IB_WR_LOCAL_INV;
		last->send_flags = IB_SEND_SIGNALED;
		last->ex.invalidate_rkey = mr->mr_handle;

		last->wr_cqe->done = frwr_wc_localinv;

		*prev = last;
		prev = &last->next;
	} while ((mr = rpcrdma_mr_pop(&req->rl_registered)));

	/* Strong send queue ordering guarantees that when the
	 * last WR in the chain completes, all WRs in the chain
	 * are complete. The last WR in the chain is the one that
	 * wakes the reply handler.
	 */
	last->wr_cqe->done = frwr_wc_localinv_done;

	/* Transport disconnect drains the receive CQ before it
	 * replaces the QP. The RPC reply handler won't call us
	 * unless re_id->qp is a valid pointer.
	 */
	rc = ib_post_send(ep->re_id->qp, first, NULL);
	if (!rc)
		return;

	/* The chain was never posted, so the wake will not happen.
	 * Unpin the rqst in preparation for its retransmission.
	 */
	rpcrdma_unpin_rqst(req->rl_reply);
	/* ... force disconnect to ensure complete recovery ... */
}

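/* Design note on the two invalidation paths above: both build the same
 * LOCAL_INV chain, but frwr_unmap_sync() retargets the last WR at
 * frwr_wc_localinv_wake() and sleeps on mr_linv_done, while
 * frwr_unmap_async() retargets it at frwr_wc_localinv_done(), which
 * completes the RPC directly from the completion handler and so avoids
 * a context switch on the reply path.
 */
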
/**
 * frwr_wp_create - Create an MR for padding Write chunks
 * @r_xprt: transport resources to use
 *
 * Return 0 on success, negative errno on failure.
 */
int frwr_wp_create(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_ep *ep = r_xprt->rx_ep;
	struct rpcrdma_mr_seg seg;
	struct rpcrdma_mr *mr;

	mr = rpcrdma_mr_get(r_xprt);
	if (!mr)
		return -EAGAIN;
	mr->mr_req = NULL;
	ep->re_write_pad_mr = mr;

	seg.mr_len = XDR_UNIT;
	seg.mr_page = virt_to_page(ep->re_write_pad);
	seg.mr_offset = offset_in_page(ep->re_write_pad);
	if (IS_ERR(frwr_map(r_xprt, &seg, 1, true, xdr_zero, mr)))
		return -EIO;

	mr->mr_cqe.done = frwr_wc_fastreg;
	mr->mr_regwr.wr.next = NULL;
	mr->mr_regwr.wr.wr_cqe = &mr->mr_cqe;
	mr->mr_regwr.wr.num_sge = 0;
	mr->mr_regwr.wr.opcode = IB_WR_REG_MR;
	mr->mr_regwr.wr.send_flags = 0;

	return ib_post_send(ep->re_id->qp, &mr->mr_regwr.wr, NULL);
}