Lines matching: +full:no +full:- +full:memory +full:- +full:wc (the tokenized form of "no-memory-wc")

The matches below are fragments of the Linux kernel's RPC-over-RDMA client transport (net/sunrpc/xprtrdma/verbs.c). Only the matching source lines are shown; each group is labeled with its enclosing function, and everything between matches is omitted.
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
 * Copyright (c) 2014-2017 Oracle. All rights reserved.
 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
 * COPYING in the main directory of this source tree, or the BSD-type
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * o buffer memory
#include <asm-generic/barrier.h>
/* in rpcrdma_xprt_drain() */
    struct rpcrdma_ep *ep = r_xprt->rx_ep;
    struct rdma_cm_id *id = ep->re_id;
    ib_drain_rq(id->qp);
    ib_drain_sq(id->qp);

 * rpcrdma_qp_event_handler - Handle one QP event (error notification)

/* in rpcrdma_force_disconnect() */
    if (atomic_add_unless(&ep->re_force_disconnect, 1, 1))
        xprt_force_disconnect(ep->re_xprt);

 * rpcrdma_flush_disconnect - Disconnect on flushed completion
 * @wc: work completion entry

/* in rpcrdma_flush_disconnect() */
void rpcrdma_flush_disconnect(struct rpcrdma_xprt *r_xprt, struct ib_wc *wc)
    if (wc->status != IB_WC_SUCCESS)
        rpcrdma_force_disconnect(r_xprt->rx_ep);

 * rpcrdma_wc_send - Invoked by RDMA provider for each polled Send WC
 * @wc: WCE for a completed Send WR

/* in rpcrdma_wc_send() */
static void rpcrdma_wc_send(struct ib_cq *cq, struct ib_wc *wc)
    struct ib_cqe *cqe = wc->wr_cqe;
    struct rpcrdma_xprt *r_xprt = cq->cq_context;
    trace_xprtrdma_wc_send(sc, wc);
    rpcrdma_flush_disconnect(r_xprt, wc);

 * rpcrdma_wc_receive - Invoked by RDMA provider for each polled Receive WC
 * @wc: WCE for a completed Receive WR

/* in rpcrdma_wc_receive() */
static void rpcrdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc)
    struct ib_cqe *cqe = wc->wr_cqe;
    struct rpcrdma_xprt *r_xprt = cq->cq_context;
    trace_xprtrdma_wc_receive(wc);
    --r_xprt->rx_ep->re_receive_count;
    if (wc->status != IB_WC_SUCCESS)
    /* status == SUCCESS means all fields in wc are trustworthy */
    rpcrdma_set_xdrlen(&rep->rr_hdrbuf, wc->byte_len);
    rep->rr_wc_flags = wc->wc_flags;
    rep->rr_inv_rkey = wc->ex.invalidate_rkey;
    ib_dma_sync_single_for_cpu(rdmab_device(rep->rr_rdmabuf),
                               rdmab_addr(rep->rr_rdmabuf),
                               wc->byte_len, DMA_FROM_DEVICE);
    rpcrdma_flush_disconnect(r_xprt, wc);

/* in rpcrdma_update_cm_private() */
    const struct rpcrdma_connect_private *pmsg = param->private_data;
    /* Default settings for RPC-over-RDMA Version One */
    ep->re_implicit_roundup = xprt_rdma_pad_optimize;
        pmsg->cp_magic == rpcrdma_cmp_magic &&
        pmsg->cp_version == RPCRDMA_CMP_VERSION) {
        ep->re_implicit_roundup = true;
        rsize = rpcrdma_decode_buffer_size(pmsg->cp_send_size);
        wsize = rpcrdma_decode_buffer_size(pmsg->cp_recv_size);
    if (rsize < ep->re_inline_recv)
        ep->re_inline_recv = rsize;
    if (wsize < ep->re_inline_send)
        ep->re_inline_send = wsize;
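rpcrdma_decode_buffer_size() turns the one-octet cp_send_size/cp_recv_size fields of the CM private message back into byte counts, which are then used to clamp the negotiated inline thresholds. As a rough illustration only (the real helpers live elsewhere in the kernel tree and are not shown in this listing), assume the private message packs each inline buffer size in 1 KB units, offset by one, so a single octet can describe a usefully large range:

/* Illustrative sketch only -- NOT the kernel's actual encoding helpers.
 * Assumption: sizes travel on the wire as (size / 1024) - 1 in one octet.
 */
#include <stdint.h>
#include <stdio.h>

static uint8_t example_encode_buffer_size(unsigned int size)
{
    return (uint8_t)((size >> 10) - 1);     /* 1 KB units, minus one */
}

static unsigned int example_decode_buffer_size(uint8_t val)
{
    return ((unsigned int)val + 1) << 10;   /* back to bytes */
}

int main(void)
{
    unsigned int inline_send = 4096;
    uint8_t wire = example_encode_buffer_size(inline_send);

    printf("%u bytes -> 0x%02x -> %u bytes\n",
           inline_send, (unsigned int)wire,
           example_decode_buffer_size(wire));
    return 0;
}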
 * rpcrdma_cm_event_handler - Handle RDMA CM events

/* in rpcrdma_cm_event_handler() */
    struct sockaddr *sap = (struct sockaddr *)&id->route.addr.dst_addr;
    struct rpcrdma_ep *ep = id->context;
    switch (event->event) {
        ep->re_async_rc = 0;
        complete(&ep->re_done);
        ep->re_async_rc = -EPROTO;
        complete(&ep->re_done);
        ep->re_async_rc = -ENETUNREACH;
        complete(&ep->re_done);
            ep->re_id->device->name, sap);
        ep->re_connect_status = -ENODEV;
        ep->re_connect_status = 1;
        rpcrdma_update_cm_private(ep, &event->param.conn);
        wake_up_all(&ep->re_connect_wait);
        ep->re_connect_status = -ENOTCONN;
        ep->re_connect_status = -ENETUNREACH;
            sap, rdma_reject_msg(id, event->status));
        ep->re_connect_status = -ECONNREFUSED;
        if (event->status == IB_CM_REJ_STALE_CONN)
            ep->re_connect_status = -ENOTCONN;
        wake_up_all(&ep->re_connect_wait);
        ep->re_connect_status = -ECONNABORTED;
        ep->re_id->device->name, rdma_event_msg(event->event));

/* in rpcrdma_create_id() */
    struct rpc_xprt *xprt = &r_xprt->rx_xprt;
    init_completion(&ep->re_done);
    id = rdma_create_id(xprt->xprt_net, rpcrdma_cm_event_handler, ep,
    ep->re_async_rc = -ETIMEDOUT;
    rc = rdma_resolve_addr(id, NULL, (struct sockaddr *)&xprt->addr,
    rc = wait_for_completion_interruptible_timeout(&ep->re_done, wtimeout);
    rc = ep->re_async_rc;
    ep->re_async_rc = -ETIMEDOUT;
    rc = wait_for_completion_interruptible_timeout(&ep->re_done, wtimeout);
    rc = ep->re_async_rc;

/* in rpcrdma_ep_destroy() */
    if (ep->re_id->qp) {
        rdma_destroy_qp(ep->re_id);
        ep->re_id->qp = NULL;
    if (ep->re_attr.recv_cq)
        ib_free_cq(ep->re_attr.recv_cq);
    ep->re_attr.recv_cq = NULL;
    if (ep->re_attr.send_cq)
        ib_free_cq(ep->re_attr.send_cq);
    ep->re_attr.send_cq = NULL;
    if (ep->re_pd)
        ib_dealloc_pd(ep->re_pd);
    ep->re_pd = NULL;

/* in rpcrdma_ep_get() */
    kref_get(&ep->re_kref);

/* in rpcrdma_ep_put() */
    return kref_put(&ep->re_kref, rpcrdma_ep_destroy);
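rpcrdma_ep_get()/rpcrdma_ep_put() tie the endpoint's lifetime to a kref: the final kref_put() runs rpcrdma_ep_destroy(), which tears down the QP, CQs, and PD seen above. A minimal userspace analogue of that refcount-with-release pattern, using C11 atomics (the struct and function names below are mine, purely illustrative):

/* Illustrative refcount-with-release pattern, not kernel code. */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct endpoint {
    atomic_int refcount;
    /* ... connection state would live here ... */
};

static void endpoint_destroy(struct endpoint *ep)
{
    printf("releasing endpoint\n");
    free(ep);
}

static void endpoint_get(struct endpoint *ep)
{
    atomic_fetch_add_explicit(&ep->refcount, 1, memory_order_relaxed);
}

/* Returns 1 if this call released the final reference. */
static int endpoint_put(struct endpoint *ep)
{
    if (atomic_fetch_sub_explicit(&ep->refcount, 1,
                                  memory_order_acq_rel) == 1) {
        endpoint_destroy(ep);
        return 1;
    }
    return 0;
}

int main(void)
{
    struct endpoint *ep = calloc(1, sizeof(*ep));

    atomic_init(&ep->refcount, 1);  /* like kref_init() */
    endpoint_get(ep);               /* a second user takes a reference */
    endpoint_put(ep);               /* drops to 1, no release */
    endpoint_put(ep);               /* drops to 0, destroy runs */
    return 0;
}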
/* in rpcrdma_ep_create() */
        return -ENOTCONN;
    ep->re_xprt = &r_xprt->rx_xprt;
    kref_init(&ep->re_kref);
    device = id->device;
    ep->re_id = id;
    ep->re_max_requests = r_xprt->rx_xprt.max_reqs;
    ep->re_inline_send = xprt_rdma_max_inline_write;
    ep->re_inline_recv = xprt_rdma_max_inline_read;
    r_xprt->rx_buf.rb_max_requests = cpu_to_be32(ep->re_max_requests);
    ep->re_attr.event_handler = rpcrdma_qp_event_handler;
    ep->re_attr.qp_context = ep;
    ep->re_attr.srq = NULL;
    ep->re_attr.cap.max_inline_data = 0;
    ep->re_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
    ep->re_attr.qp_type = IB_QPT_RC;
    ep->re_attr.port_num = ~0;
        ep->re_attr.cap.max_send_wr,
        ep->re_attr.cap.max_recv_wr,
        ep->re_attr.cap.max_send_sge,
        ep->re_attr.cap.max_recv_sge);
    ep->re_send_batch = ep->re_max_requests >> 3;
    ep->re_send_count = ep->re_send_batch;
    init_waitqueue_head(&ep->re_connect_wait);
    ep->re_attr.send_cq = ib_alloc_cq_any(device, r_xprt,
                                          ep->re_attr.cap.max_send_wr,
    if (IS_ERR(ep->re_attr.send_cq)) {
        rc = PTR_ERR(ep->re_attr.send_cq);
    ep->re_attr.recv_cq = ib_alloc_cq_any(device, r_xprt,
                                          ep->re_attr.cap.max_recv_wr,
    if (IS_ERR(ep->re_attr.recv_cq)) {
        rc = PTR_ERR(ep->re_attr.recv_cq);
    ep->re_receive_count = 0;
    memset(&ep->re_remote_cma, 0, sizeof(ep->re_remote_cma));
    /* Prepare RDMA-CM private message */
    pmsg = &ep->re_cm_private;
    pmsg->cp_magic = rpcrdma_cmp_magic;
    pmsg->cp_version = RPCRDMA_CMP_VERSION;
    pmsg->cp_flags |= RPCRDMA_CMP_F_SND_W_INV_OK;
    pmsg->cp_send_size = rpcrdma_encode_buffer_size(ep->re_inline_send);
    pmsg->cp_recv_size = rpcrdma_encode_buffer_size(ep->re_inline_recv);
    ep->re_remote_cma.private_data = pmsg;
    ep->re_remote_cma.private_data_len = sizeof(*pmsg);
    ep->re_remote_cma.initiator_depth = 0;
    ep->re_remote_cma.responder_resources =
        min_t(int, U8_MAX, device->attrs.max_qp_rd_atom);
     * GID changes quickly. RPC layer handles re-establishing
    ep->re_remote_cma.retry_count = 6;
    /* RPC-over-RDMA handles its own flow control. In addition,
     * make all RNR NAKs visible so we know that RPC-over-RDMA
     * flow control is working correctly (no NAKs should be seen).
    ep->re_remote_cma.flow_control = 0;
    ep->re_remote_cma.rnr_retry_count = 0;
    ep->re_pd = ib_alloc_pd(device, 0);
    if (IS_ERR(ep->re_pd)) {
        rc = PTR_ERR(ep->re_pd);
    rc = rdma_create_qp(id, ep->re_pd, &ep->re_attr);
    r_xprt->rx_ep = ep;
 * rpcrdma_xprt_connect - Connect an unconnected transport

/* in rpcrdma_xprt_connect() */
    struct rpc_xprt *xprt = &r_xprt->rx_xprt;
    ep = r_xprt->rx_ep;
    rc = rdma_connect(ep->re_id, &ep->re_remote_cma);
    if (xprt->reestablish_timeout < RPCRDMA_INIT_REEST_TO)
        xprt->reestablish_timeout = RPCRDMA_INIT_REEST_TO;
    wait_event_interruptible(ep->re_connect_wait,
                             ep->re_connect_status != 0);
    if (ep->re_connect_status <= 0) {
        rc = ep->re_connect_status;
    rc = -ENOTCONN;
    rc = -ENOTCONN;

 * rpcrdma_xprt_disconnect - Disconnect underlying transport
 * resources and prepared for the next ->connect operation.

/* in rpcrdma_xprt_disconnect() */
    struct rpcrdma_ep *ep = r_xprt->rx_ep;
    id = ep->re_id;
    r_xprt->rx_ep = NULL;

/* Fixed-size circular FIFO queue. This implementation is wait-free and
 * lock-free.
 * ->send_request call at a time.
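The sendctx queue described by that comment is a classic single-producer/single-consumer ring: one serialized ->send_request path advances the head, one serialized Send-completion path advances the tail, and neither side needs a shared lock. A compact userspace sketch of that shape, using C11 acquire/release atomics in place of the kernel's READ_ONCE()/smp_store_release() (all names here are mine, not taken from verbs.c):

/* Illustrative SPSC ring, not the kernel's rb_sc_ctxs implementation. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

#define RING_SLOTS 8                    /* capacity is RING_SLOTS - 1 */

struct spsc_ring {
    void *slots[RING_SLOTS];
    atomic_size_t head;                 /* written only by the producer */
    atomic_size_t tail;                 /* written only by the consumer */
};

static size_t ring_next(size_t i)
{
    return (i + 1) % RING_SLOTS;
}

/* Producer side: fails (returns false) when the ring is full. */
static bool ring_push(struct spsc_ring *r, void *item)
{
    size_t head = atomic_load_explicit(&r->head, memory_order_relaxed);
    size_t next = ring_next(head);

    if (next == atomic_load_explicit(&r->tail, memory_order_acquire))
        return false;                   /* full: consumer hasn't caught up */
    r->slots[head] = item;
    atomic_store_explicit(&r->head, next, memory_order_release);
    return true;
}

/* Consumer side: fails (returns false) when the ring is empty. */
static bool ring_pop(struct spsc_ring *r, void **item)
{
    size_t tail = atomic_load_explicit(&r->tail, memory_order_relaxed);

    if (tail == atomic_load_explicit(&r->head, memory_order_acquire))
        return false;                   /* empty */
    *item = r->slots[tail];
    atomic_store_explicit(&r->tail, ring_next(tail), memory_order_release);
    return true;
}

int main(void)
{
    static struct spsc_ring ring;       /* zero-initialized head/tail */
    int value = 7;
    void *out;

    if (!ring_push(&ring, &value))
        return 1;
    if (!ring_pop(&ring, &out))
        return 1;
    return *(int *)out == 7 ? 0 : 1;
}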
/* in rpcrdma_sendctxs_destroy() */
    struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
    if (!buf->rb_sc_ctxs)
    for (i = 0; i <= buf->rb_sc_last; i++)
        kfree(buf->rb_sc_ctxs[i]);
    kfree(buf->rb_sc_ctxs);
    buf->rb_sc_ctxs = NULL;

/* in rpcrdma_sendctx_create() */
    sc = kzalloc(struct_size(sc, sc_sges, ep->re_attr.cap.max_send_sge),
    sc->sc_cqe.done = rpcrdma_wc_send;

/* in rpcrdma_sendctxs_create() */
    struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
     * the ->send_request call to fail temporarily before too many
    i = r_xprt->rx_ep->re_max_requests + RPCRDMA_MAX_BC_REQUESTS;
    buf->rb_sc_ctxs = kcalloc(i, sizeof(sc), GFP_KERNEL);
    if (!buf->rb_sc_ctxs)
        return -ENOMEM;
    buf->rb_sc_last = i - 1;
    for (i = 0; i <= buf->rb_sc_last; i++) {
        sc = rpcrdma_sendctx_create(r_xprt->rx_ep);
            return -ENOMEM;
        buf->rb_sc_ctxs[i] = sc;
    buf->rb_sc_head = 0;
    buf->rb_sc_tail = 0;

/* in rpcrdma_sendctx_next() */
    return likely(item < buf->rb_sc_last) ? item + 1 : 0;

 * rpcrdma_sendctx_get_locked - Acquire a send context
 * provides an effective memory barrier that flushes the new value

/* in rpcrdma_sendctx_get_locked() */
    struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
    next_head = rpcrdma_sendctx_next(buf, buf->rb_sc_head);
    if (next_head == READ_ONCE(buf->rb_sc_tail))
    sc = buf->rb_sc_ctxs[next_head];
    /* Releasing the lock in the caller acts as a memory
    buf->rb_sc_head = next_head;
    xprt_wait_for_buffer_space(&r_xprt->rx_xprt);
    r_xprt->rx_stats.empty_sendctx_q++;

 * rpcrdma_sendctx_put_locked - Release a send context

/* in rpcrdma_sendctx_put_locked() */
    struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
    next_tail = buf->rb_sc_tail;
        rpcrdma_sendctx_unmap(buf->rb_sc_ctxs[next_tail]);
    } while (buf->rb_sc_ctxs[next_tail] != sc);
    smp_store_release(&buf->rb_sc_tail, next_tail);
    xprt_write_space(&r_xprt->rx_xprt);
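The put side above publishes the new rb_sc_tail with smp_store_release(), so the recycling done by rpcrdma_sendctx_unmap() on each completed context is ordered before the advanced tail becomes visible to the get side. The general release/acquire publish idiom looks like this in miniature, with C11 atomics standing in for the kernel primitives (names are illustrative only):

/* Release/acquire message passing, the idiom behind the tail update. */
#include <stdatomic.h>

static int payload;                     /* plain data being handed off */
static atomic_int ready;                /* plays the role of the published index */

void producer(void)
{
    payload = 42;                       /* all recycling work happens first */
    atomic_store_explicit(&ready, 1, memory_order_release);
}

int consumer(void)
{
    if (atomic_load_explicit(&ready, memory_order_acquire))
        return payload;                 /* guaranteed to observe 42 */
    return -1;                          /* not published yet */
}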
/* in rpcrdma_mrs_create() */
    struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
    struct rpcrdma_ep *ep = r_xprt->rx_ep;
    for (count = 0; count < ep->re_max_rdma_segs; count++) {
        spin_lock(&buf->rb_lock);
        rpcrdma_mr_push(mr, &buf->rb_mrs);
        list_add(&mr->mr_all, &buf->rb_all_mrs);
        spin_unlock(&buf->rb_lock);
    r_xprt->rx_stats.mrs_allocated += count;

/* in rpcrdma_mr_refresh_worker() */
    xprt_write_space(&r_xprt->rx_xprt);

 * rpcrdma_mrs_refresh - Wake the MR refresh worker

/* in rpcrdma_mrs_refresh() */
    struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
    struct rpcrdma_ep *ep = r_xprt->rx_ep;
    /* If there is no underlying connection, it's no use
    if (ep->re_connect_status == 1) {
        queue_work(xprtiod_workqueue, &buf->rb_refresh_worker);
 * rpcrdma_req_create - Allocate an rpcrdma_req object
 * @flags: GFP flags passed to memory allocators

/* in rpcrdma_req_create() */
    struct rpcrdma_buffer *buffer = &r_xprt->rx_buf;
    req->rl_sendbuf = rpcrdma_regbuf_alloc(size, DMA_TO_DEVICE, flags);
    if (!req->rl_sendbuf)
    req->rl_recvbuf = rpcrdma_regbuf_alloc(size, DMA_NONE, flags);
    if (!req->rl_recvbuf)
    INIT_LIST_HEAD(&req->rl_free_mrs);
    INIT_LIST_HEAD(&req->rl_registered);
    spin_lock(&buffer->rb_lock);
    list_add(&req->rl_all, &buffer->rb_allreqs);
    spin_unlock(&buffer->rb_lock);
    kfree(req->rl_sendbuf);

 * rpcrdma_req_setup - Per-connection instance setup of an rpcrdma_req object

/* in rpcrdma_req_setup() */
        r_xprt->rx_ep->re_max_rdma_segs * rpcrdma_readchunk_maxsz;
    req->rl_rdmabuf = rb;
    xdr_buf_init(&req->rl_hdrbuf, rdmab_data(rb), rdmab_length(rb));
    return -ENOMEM;

/* in rpcrdma_reqs_setup() */
    struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
    list_for_each_entry(req, &buf->rb_allreqs, rl_all) {

/* in rpcrdma_req_reset() */
    req->rl_slot.rq_cong = 0;
    rpcrdma_regbuf_free(req->rl_rdmabuf);
    req->rl_rdmabuf = NULL;
    rpcrdma_regbuf_dma_unmap(req->rl_sendbuf);
    rpcrdma_regbuf_dma_unmap(req->rl_recvbuf);

/* in rpcrdma_reqs_reset() */
    struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
    list_for_each_entry(req, &buf->rb_allreqs, rl_all)
/* No locking needed here. This function is called only by the

/* in rpcrdma_rep_create() */
    rep->rr_rdmabuf = rpcrdma_regbuf_alloc(r_xprt->rx_ep->re_inline_recv,
    if (!rep->rr_rdmabuf)
    if (!rpcrdma_regbuf_dma_map(r_xprt, rep->rr_rdmabuf))
    xdr_buf_init(&rep->rr_hdrbuf, rdmab_data(rep->rr_rdmabuf),
                 rdmab_length(rep->rr_rdmabuf));
    rep->rr_cqe.done = rpcrdma_wc_receive;
    rep->rr_rxprt = r_xprt;
    rep->rr_recv_wr.next = NULL;
    rep->rr_recv_wr.wr_cqe = &rep->rr_cqe;
    rep->rr_recv_wr.sg_list = &rep->rr_rdmabuf->rg_iov;
    rep->rr_recv_wr.num_sge = 1;
    rep->rr_temp = temp;
    list_add(&rep->rr_all, &r_xprt->rx_buf.rb_all_reps);
    rpcrdma_regbuf_free(rep->rr_rdmabuf);

/* No locking needed here. This function is invoked only by the

/* in rpcrdma_rep_destroy() */
    list_del(&rep->rr_all);
    rpcrdma_regbuf_free(rep->rr_rdmabuf);

/* in rpcrdma_rep_get_locked() */
    node = llist_del_first(&buf->rb_free_reps);

/* in rpcrdma_rep_put() */
    llist_add(&rep->rr_node, &buf->rb_free_reps);

/* in rpcrdma_reps_unmap() */
    struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
    list_for_each_entry(rep, &buf->rb_all_reps, rr_all) {
        rpcrdma_regbuf_dma_unmap(rep->rr_rdmabuf);
        rep->rr_temp = true;
 * rpcrdma_buffer_create - Create initial set of req/rep objects

/* in rpcrdma_buffer_create() */
    struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
    buf->rb_bc_srv_max_requests = 0;
    spin_lock_init(&buf->rb_lock);
    INIT_LIST_HEAD(&buf->rb_mrs);
    INIT_LIST_HEAD(&buf->rb_all_mrs);
    INIT_WORK(&buf->rb_refresh_worker, rpcrdma_mr_refresh_worker);
    INIT_LIST_HEAD(&buf->rb_send_bufs);
    INIT_LIST_HEAD(&buf->rb_allreqs);
    INIT_LIST_HEAD(&buf->rb_all_reps);
    rc = -ENOMEM;
    for (i = 0; i < r_xprt->rx_xprt.max_reqs; i++) {
        list_add(&req->rl_list, &buf->rb_send_bufs);
    init_llist_head(&buf->rb_free_reps);

 * rpcrdma_req_destroy - Destroy an rpcrdma_req object
 * removing req->rl_all from buf->rb_all_reqs safely.

/* in rpcrdma_req_destroy() */
    list_del(&req->rl_all);
    while ((mr = rpcrdma_mr_pop(&req->rl_free_mrs))) {
        struct rpcrdma_buffer *buf = &mr->mr_xprt->rx_buf;
        spin_lock(&buf->rb_lock);
        list_del(&mr->mr_all);
        spin_unlock(&buf->rb_lock);
    rpcrdma_regbuf_free(req->rl_recvbuf);
    rpcrdma_regbuf_free(req->rl_sendbuf);
    rpcrdma_regbuf_free(req->rl_rdmabuf);
 * rpcrdma_mrs_destroy - Release all of a transport's MRs
 * removing mr->mr_list from req->rl_free_mrs safely.

/* in rpcrdma_mrs_destroy() */
    struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
    cancel_work_sync(&buf->rb_refresh_worker);
    spin_lock(&buf->rb_lock);
    while ((mr = list_first_entry_or_null(&buf->rb_all_mrs,
        list_del(&mr->mr_list);
        list_del(&mr->mr_all);
        spin_unlock(&buf->rb_lock);
        spin_lock(&buf->rb_lock);
    spin_unlock(&buf->rb_lock);

 * rpcrdma_buffer_destroy - Release all hw resources
 * - No more Send or Receive completions can occur
 * - All MRs, reps, and reqs are returned to their free lists

/* in rpcrdma_buffer_destroy() */
    while (!list_empty(&buf->rb_send_bufs)) {
        req = list_first_entry(&buf->rb_send_bufs,
        list_del(&req->rl_list);

 * rpcrdma_mr_get - Allocate an rpcrdma_mr object
 * Returns an initialized rpcrdma_mr or NULL if no free

/* in rpcrdma_mr_get() */
    struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
    spin_lock(&buf->rb_lock);
    mr = rpcrdma_mr_pop(&buf->rb_mrs);
    spin_unlock(&buf->rb_lock);
 * rpcrdma_mr_put - DMA unmap an MR and release it

/* in rpcrdma_mr_put() */
    struct rpcrdma_xprt *r_xprt = mr->mr_xprt;
    if (mr->mr_dir != DMA_NONE) {
        ib_dma_unmap_sg(r_xprt->rx_ep->re_id->device,
                        mr->mr_sg, mr->mr_nents, mr->mr_dir);
        mr->mr_dir = DMA_NONE;
    rpcrdma_mr_push(mr, &mr->mr_req->rl_free_mrs);

 * rpcrdma_buffer_get - Get a request buffer

/* in rpcrdma_buffer_get() */
    spin_lock(&buffers->rb_lock);
    req = list_first_entry_or_null(&buffers->rb_send_bufs,
    list_del_init(&req->rl_list);
    spin_unlock(&buffers->rb_lock);

 * rpcrdma_buffer_put - Put request/reply buffers back into pool

/* in rpcrdma_buffer_put() */
    if (req->rl_reply)
        rpcrdma_rep_put(buffers, req->rl_reply);
    req->rl_reply = NULL;
    spin_lock(&buffers->rb_lock);
    list_add(&req->rl_list, &buffers->rb_send_bufs);
    spin_unlock(&buffers->rb_lock);

 * rpcrdma_recv_buffer_put - Release rpcrdma_rep back to free list

/* in rpcrdma_recv_buffer_put() */
    rpcrdma_rep_put(&rep->rr_rxprt->rx_buf, rep);
/* in rpcrdma_regbuf_alloc() */
    rb->rg_data = kmalloc(size, flags);
    if (!rb->rg_data) {
    rb->rg_device = NULL;
    rb->rg_direction = direction;
    rb->rg_iov.length = size;

 * rpcrdma_regbuf_realloc - re-allocate a SEND/RECV buffer

/* in rpcrdma_regbuf_realloc() */
    kfree(rb->rg_data);
    rb->rg_data = buf;
    rb->rg_iov.length = size;

 * __rpcrdma_regbuf_dma_map - DMA-map a regbuf

/* in __rpcrdma_regbuf_dma_map() */
    struct ib_device *device = r_xprt->rx_ep->re_id->device;
    if (rb->rg_direction == DMA_NONE)
    rb->rg_iov.addr = ib_dma_map_single(device, rdmab_data(rb),
                                        rdmab_length(rb), rb->rg_direction);
    rb->rg_device = device;
    rb->rg_iov.lkey = r_xprt->rx_ep->re_pd->local_dma_lkey;

/* in rpcrdma_regbuf_dma_unmap() */
    ib_dma_unmap_single(rb->rg_device, rdmab_addr(rb), rdmab_length(rb),
                        rb->rg_direction);
    rb->rg_device = NULL;

/* in rpcrdma_regbuf_free() */
    kfree(rb->rg_data);
 * rpcrdma_post_sends - Post WRs to a transport's Send Queue
 * Returns 0 if the post was successful, otherwise -ENOTCONN

/* in rpcrdma_post_sends() */
    struct ib_send_wr *send_wr = &req->rl_wr;
    struct rpcrdma_ep *ep = r_xprt->rx_ep;
    if (!ep->re_send_count || kref_read(&req->rl_kref) > 1) {
        send_wr->send_flags |= IB_SEND_SIGNALED;
        ep->re_send_count = ep->re_send_batch;
        send_wr->send_flags &= ~IB_SEND_SIGNALED;
        --ep->re_send_count;
        return -ENOTCONN;
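The fragment above batches completion signaling: most Sends are posted unsignaled, and only when the countdown re_send_count is exhausted (or the request still holds an extra reference and therefore needs its Send completion) does the WR carry IB_SEND_SIGNALED. This bounds how far the Send Queue can run ahead between completions. A stripped-down sketch of that countdown, independent of the verbs API (names are illustrative, not from verbs.c):

/* Illustrative "signal every Nth work request" countdown. */
#include <stdbool.h>

struct send_state {
    unsigned int batch;     /* e.g. max_requests >> 3 */
    unsigned int count;     /* remaining unsignaled posts */
};

/* Returns true when this post should request a completion. */
static bool need_signal(struct send_state *s, bool force)
{
    if (force || !s->count) {
        s->count = s->batch;    /* restart the countdown */
        return true;
    }
    --s->count;
    return false;
}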
 * rpcrdma_post_recvs - Refill the Receive Queue

/* in rpcrdma_post_recvs() */
    struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
    struct rpcrdma_ep *ep = r_xprt->rx_ep;
    needed = buf->rb_credits + (buf->rb_bc_srv_max_requests << 1);
    if (likely(ep->re_receive_count > needed))
    needed -= ep->re_receive_count;
    if (rep && rep->rr_temp) {
        rep->rr_recv_wr.next = wr;
        wr = &rep->rr_recv_wr;
        --needed;
    rc = ib_post_recv(ep->re_id->qp, wr,
        wr = wr->next;
        --count;
    ep->re_receive_count += count;