Lines Matching +full:pmsg +full:- +full:size

1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
3 * Copyright (c) 2014-2017 Oracle. All rights reserved.
4 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
9 * COPYING in the main directory of this source tree, or the BSD-type
58 #include <asm-generic/barrier.h>
79 rpcrdma_regbuf_alloc(size_t size, enum dma_data_direction direction);
89 struct rpcrdma_ep *ep = r_xprt->rx_ep; in rpcrdma_xprt_drain()
90 struct rdma_cm_id *id = ep->re_id; in rpcrdma_xprt_drain()
95 if (atomic_inc_return(&ep->re_receiving) > 1) in rpcrdma_xprt_drain()
96 wait_for_completion(&ep->re_done); in rpcrdma_xprt_drain()
101 ib_drain_rq(id->qp); in rpcrdma_xprt_drain()
106 ib_drain_sq(id->qp); in rpcrdma_xprt_drain()
117 if (atomic_add_unless(&ep->re_force_disconnect, 1, 1)) in rpcrdma_force_disconnect()
118 xprt_force_disconnect(ep->re_xprt); in rpcrdma_force_disconnect()
122 * rpcrdma_flush_disconnect - Disconnect on flushed completion
130 if (wc->status != IB_WC_SUCCESS) in rpcrdma_flush_disconnect()
131 rpcrdma_force_disconnect(r_xprt->rx_ep); in rpcrdma_flush_disconnect()
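
rpcrdma_force_disconnect() (lines 117-118 above) uses atomic_add_unless() as a one-shot latch: however many flushed completions race through rpcrdma_flush_disconnect(), xprt_force_disconnect() is called at most once per connection. A minimal userspace sketch of that latch idiom, using C11 atomics rather than the kernel's atomic_t API (names and the printf are illustrative only):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* One-shot latch: the moral equivalent of atomic_add_unless(&v, 1, 1),
     * i.e. "add 1 unless the value is already 1"; only the caller that
     * performs the 0 -> 1 transition gets a true return.
     */
    static bool latch_try_fire(atomic_int *latch)
    {
        int expected = 0;

        return atomic_compare_exchange_strong(latch, &expected, 1);
    }

    int main(void)
    {
        atomic_int disconnect_latch = 0;

        /* Three racing "flushed completions"; only the first one fires. */
        for (int i = 0; i < 3; i++)
            if (latch_try_fire(&disconnect_latch))
                printf("disconnect forced by completion %d\n", i);
        return 0;
    }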
135 * rpcrdma_wc_send - Invoked by RDMA provider for each polled Send WC
142 struct ib_cqe *cqe = wc->wr_cqe; in rpcrdma_wc_send()
145 struct rpcrdma_xprt *r_xprt = cq->cq_context; in rpcrdma_wc_send()
148 trace_xprtrdma_wc_send(wc, &sc->sc_cid); in rpcrdma_wc_send()
154 * rpcrdma_wc_receive - Invoked by RDMA provider for each polled Receive WC
161 struct ib_cqe *cqe = wc->wr_cqe; in rpcrdma_wc_receive()
164 struct rpcrdma_xprt *r_xprt = cq->cq_context; in rpcrdma_wc_receive()
167 trace_xprtrdma_wc_receive(wc, &rep->rr_cid); in rpcrdma_wc_receive()
168 --r_xprt->rx_ep->re_receive_count; in rpcrdma_wc_receive()
169 if (wc->status != IB_WC_SUCCESS) in rpcrdma_wc_receive()
173 rpcrdma_set_xdrlen(&rep->rr_hdrbuf, wc->byte_len); in rpcrdma_wc_receive()
174 rep->rr_wc_flags = wc->wc_flags; in rpcrdma_wc_receive()
175 rep->rr_inv_rkey = wc->ex.invalidate_rkey; in rpcrdma_wc_receive()
177 ib_dma_sync_single_for_cpu(rdmab_device(rep->rr_rdmabuf), in rpcrdma_wc_receive()
178 rdmab_addr(rep->rr_rdmabuf), in rpcrdma_wc_receive()
179 wc->byte_len, DMA_FROM_DEVICE); in rpcrdma_wc_receive()
186 rpcrdma_rep_put(&r_xprt->rx_buf, rep); in rpcrdma_wc_receive()
192 const struct rpcrdma_connect_private *pmsg = param->private_data; in rpcrdma_update_cm_private() local
195 /* Default settings for RPC-over-RDMA Version One */ in rpcrdma_update_cm_private()
199 if (pmsg && in rpcrdma_update_cm_private()
200 pmsg->cp_magic == rpcrdma_cmp_magic && in rpcrdma_update_cm_private()
201 pmsg->cp_version == RPCRDMA_CMP_VERSION) { in rpcrdma_update_cm_private()
202 rsize = rpcrdma_decode_buffer_size(pmsg->cp_send_size); in rpcrdma_update_cm_private()
203 wsize = rpcrdma_decode_buffer_size(pmsg->cp_recv_size); in rpcrdma_update_cm_private()
206 if (rsize < ep->re_inline_recv) in rpcrdma_update_cm_private()
207 ep->re_inline_recv = rsize; in rpcrdma_update_cm_private()
208 if (wsize < ep->re_inline_send) in rpcrdma_update_cm_private()
209 ep->re_inline_send = wsize; in rpcrdma_update_cm_private()
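
The private data exchanged at connect time (struct rpcrdma_connect_private, decoded here and filled in below in rpcrdma_ep_create()) carries each peer's inline send and receive limits as single octets. Per RFC 8797, a size is advertised as its value in 1024-byte units, minus one; the sketch below shows that arithmetic, which is what rpcrdma_encode_buffer_size() and rpcrdma_decode_buffer_size() perform (the helper bodies here illustrate the encoding and are not copied from the kernel):

    #include <assert.h>
    #include <stdint.h>

    /* RFC 8797 buffer-size encoding: one octet expresses sizes from
     * 1 KiB to 256 KiB in 1 KiB steps, as (size / 1024) - 1.
     */
    static uint8_t encode_buffer_size(unsigned int size)
    {
        return (size >> 10) - 1;
    }

    static unsigned int decode_buffer_size(uint8_t val)
    {
        return ((unsigned int)val + 1) << 10;
    }

    int main(void)
    {
        /* 4096 bytes is the RPC-over-RDMA v1 default inline threshold. */
        assert(encode_buffer_size(4096) == 3);
        assert(decode_buffer_size(3) == 4096);
        return 0;
    }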
215 * rpcrdma_cm_event_handler - Handle RDMA CM events
225 struct sockaddr *sap = (struct sockaddr *)&id->route.addr.dst_addr; in rpcrdma_cm_event_handler()
226 struct rpcrdma_ep *ep = id->context; in rpcrdma_cm_event_handler()
230 switch (event->event) { in rpcrdma_cm_event_handler()
233 ep->re_async_rc = 0; in rpcrdma_cm_event_handler()
234 complete(&ep->re_done); in rpcrdma_cm_event_handler()
237 ep->re_async_rc = -EPROTO; in rpcrdma_cm_event_handler()
238 complete(&ep->re_done); in rpcrdma_cm_event_handler()
241 ep->re_async_rc = -ENETUNREACH; in rpcrdma_cm_event_handler()
242 complete(&ep->re_done); in rpcrdma_cm_event_handler()
246 ep->re_id->device->name, sap); in rpcrdma_cm_event_handler()
249 ep->re_connect_status = -ENODEV; in rpcrdma_cm_event_handler()
253 ep->re_connect_status = 1; in rpcrdma_cm_event_handler()
254 rpcrdma_update_cm_private(ep, &event->param.conn); in rpcrdma_cm_event_handler()
256 wake_up_all(&ep->re_connect_wait); in rpcrdma_cm_event_handler()
259 ep->re_connect_status = -ENOTCONN; in rpcrdma_cm_event_handler()
262 ep->re_connect_status = -ENETUNREACH; in rpcrdma_cm_event_handler()
265 ep->re_connect_status = -ECONNREFUSED; in rpcrdma_cm_event_handler()
266 if (event->status == IB_CM_REJ_STALE_CONN) in rpcrdma_cm_event_handler()
267 ep->re_connect_status = -ENOTCONN; in rpcrdma_cm_event_handler()
269 wake_up_all(&ep->re_connect_wait); in rpcrdma_cm_event_handler()
272 ep->re_connect_status = -ECONNABORTED; in rpcrdma_cm_event_handler()
287 struct rpc_xprt *xprt = &r_xprt->rx_xprt; in rpcrdma_create_id()
291 init_completion(&ep->re_done); in rpcrdma_create_id()
293 id = rdma_create_id(xprt->xprt_net, rpcrdma_cm_event_handler, ep, in rpcrdma_create_id()
298 ep->re_async_rc = -ETIMEDOUT; in rpcrdma_create_id()
299 rc = rdma_resolve_addr(id, NULL, (struct sockaddr *)&xprt->addr, in rpcrdma_create_id()
303 rc = wait_for_completion_interruptible_timeout(&ep->re_done, wtimeout); in rpcrdma_create_id()
307 rc = ep->re_async_rc; in rpcrdma_create_id()
311 ep->re_async_rc = -ETIMEDOUT; in rpcrdma_create_id()
315 rc = wait_for_completion_interruptible_timeout(&ep->re_done, wtimeout); in rpcrdma_create_id()
318 rc = ep->re_async_rc; in rpcrdma_create_id()
333 if (ep->re_id->qp) { in rpcrdma_ep_destroy()
334 rdma_destroy_qp(ep->re_id); in rpcrdma_ep_destroy()
335 ep->re_id->qp = NULL; in rpcrdma_ep_destroy()
338 if (ep->re_attr.recv_cq) in rpcrdma_ep_destroy()
339 ib_free_cq(ep->re_attr.recv_cq); in rpcrdma_ep_destroy()
340 ep->re_attr.recv_cq = NULL; in rpcrdma_ep_destroy()
341 if (ep->re_attr.send_cq) in rpcrdma_ep_destroy()
342 ib_free_cq(ep->re_attr.send_cq); in rpcrdma_ep_destroy()
343 ep->re_attr.send_cq = NULL; in rpcrdma_ep_destroy()
345 if (ep->re_pd) in rpcrdma_ep_destroy()
346 ib_dealloc_pd(ep->re_pd); in rpcrdma_ep_destroy()
347 ep->re_pd = NULL; in rpcrdma_ep_destroy()
355 kref_get(&ep->re_kref); in rpcrdma_ep_get()
364 return kref_put(&ep->re_kref, rpcrdma_ep_destroy); in rpcrdma_ep_put()
369 struct rpcrdma_connect_private *pmsg; in rpcrdma_ep_create() local
377 return -ENOTCONN; in rpcrdma_ep_create()
378 ep->re_xprt = &r_xprt->rx_xprt; in rpcrdma_ep_create()
379 kref_init(&ep->re_kref); in rpcrdma_ep_create()
387 device = id->device; in rpcrdma_ep_create()
388 ep->re_id = id; in rpcrdma_ep_create()
389 reinit_completion(&ep->re_done); in rpcrdma_ep_create()
391 ep->re_max_requests = r_xprt->rx_xprt.max_reqs; in rpcrdma_ep_create()
392 ep->re_inline_send = xprt_rdma_max_inline_write; in rpcrdma_ep_create()
393 ep->re_inline_recv = xprt_rdma_max_inline_read; in rpcrdma_ep_create()
398 r_xprt->rx_buf.rb_max_requests = cpu_to_be32(ep->re_max_requests); in rpcrdma_ep_create()
400 ep->re_attr.srq = NULL; in rpcrdma_ep_create()
401 ep->re_attr.cap.max_inline_data = 0; in rpcrdma_ep_create()
402 ep->re_attr.sq_sig_type = IB_SIGNAL_REQ_WR; in rpcrdma_ep_create()
403 ep->re_attr.qp_type = IB_QPT_RC; in rpcrdma_ep_create()
404 ep->re_attr.port_num = ~0; in rpcrdma_ep_create()
406 ep->re_send_batch = ep->re_max_requests >> 3; in rpcrdma_ep_create()
407 ep->re_send_count = ep->re_send_batch; in rpcrdma_ep_create()
408 init_waitqueue_head(&ep->re_connect_wait); in rpcrdma_ep_create()
410 ep->re_attr.send_cq = ib_alloc_cq_any(device, r_xprt, in rpcrdma_ep_create()
411 ep->re_attr.cap.max_send_wr, in rpcrdma_ep_create()
413 if (IS_ERR(ep->re_attr.send_cq)) { in rpcrdma_ep_create()
414 rc = PTR_ERR(ep->re_attr.send_cq); in rpcrdma_ep_create()
415 ep->re_attr.send_cq = NULL; in rpcrdma_ep_create()
419 ep->re_attr.recv_cq = ib_alloc_cq_any(device, r_xprt, in rpcrdma_ep_create()
420 ep->re_attr.cap.max_recv_wr, in rpcrdma_ep_create()
422 if (IS_ERR(ep->re_attr.recv_cq)) { in rpcrdma_ep_create()
423 rc = PTR_ERR(ep->re_attr.recv_cq); in rpcrdma_ep_create()
424 ep->re_attr.recv_cq = NULL; in rpcrdma_ep_create()
427 ep->re_receive_count = 0; in rpcrdma_ep_create()
430 memset(&ep->re_remote_cma, 0, sizeof(ep->re_remote_cma)); in rpcrdma_ep_create()
432 /* Prepare RDMA-CM private message */ in rpcrdma_ep_create()
433 pmsg = &ep->re_cm_private; in rpcrdma_ep_create()
434 pmsg->cp_magic = rpcrdma_cmp_magic; in rpcrdma_ep_create()
435 pmsg->cp_version = RPCRDMA_CMP_VERSION; in rpcrdma_ep_create()
436 pmsg->cp_flags |= RPCRDMA_CMP_F_SND_W_INV_OK; in rpcrdma_ep_create()
437 pmsg->cp_send_size = rpcrdma_encode_buffer_size(ep->re_inline_send); in rpcrdma_ep_create()
438 pmsg->cp_recv_size = rpcrdma_encode_buffer_size(ep->re_inline_recv); in rpcrdma_ep_create()
439 ep->re_remote_cma.private_data = pmsg; in rpcrdma_ep_create()
440 ep->re_remote_cma.private_data_len = sizeof(*pmsg); in rpcrdma_ep_create()
443 ep->re_remote_cma.initiator_depth = 0; in rpcrdma_ep_create()
444 ep->re_remote_cma.responder_resources = in rpcrdma_ep_create()
445 min_t(int, U8_MAX, device->attrs.max_qp_rd_atom); in rpcrdma_ep_create()
448 * GID changes quickly. RPC layer handles re-establishing in rpcrdma_ep_create()
451 ep->re_remote_cma.retry_count = 6; in rpcrdma_ep_create()
453 /* RPC-over-RDMA handles its own flow control. In addition, in rpcrdma_ep_create()
454 * make all RNR NAKs visible so we know that RPC-over-RDMA in rpcrdma_ep_create()
457 ep->re_remote_cma.flow_control = 0; in rpcrdma_ep_create()
458 ep->re_remote_cma.rnr_retry_count = 0; in rpcrdma_ep_create()
460 ep->re_pd = ib_alloc_pd(device, 0); in rpcrdma_ep_create()
461 if (IS_ERR(ep->re_pd)) { in rpcrdma_ep_create()
462 rc = PTR_ERR(ep->re_pd); in rpcrdma_ep_create()
463 ep->re_pd = NULL; in rpcrdma_ep_create()
467 rc = rdma_create_qp(id, ep->re_pd, &ep->re_attr); in rpcrdma_ep_create()
471 r_xprt->rx_ep = ep; in rpcrdma_ep_create()
481 * rpcrdma_xprt_connect - Connect an unconnected transport
488 struct rpc_xprt *xprt = &r_xprt->rx_xprt; in rpcrdma_xprt_connect()
495 ep = r_xprt->rx_ep; in rpcrdma_xprt_connect()
506 rc = rdma_connect(ep->re_id, &ep->re_remote_cma); in rpcrdma_xprt_connect()
510 if (xprt->reestablish_timeout < RPCRDMA_INIT_REEST_TO) in rpcrdma_xprt_connect()
511 xprt->reestablish_timeout = RPCRDMA_INIT_REEST_TO; in rpcrdma_xprt_connect()
512 wait_event_interruptible(ep->re_connect_wait, in rpcrdma_xprt_connect()
513 ep->re_connect_status != 0); in rpcrdma_xprt_connect()
514 if (ep->re_connect_status <= 0) { in rpcrdma_xprt_connect()
515 rc = ep->re_connect_status; in rpcrdma_xprt_connect()
521 rc = -ENOTCONN; in rpcrdma_xprt_connect()
527 rc = -ENOTCONN; in rpcrdma_xprt_connect()
539 * rpcrdma_xprt_disconnect - Disconnect underlying transport
546 * resources and prepared for the next ->connect operation.
550 struct rpcrdma_ep *ep = r_xprt->rx_ep; in rpcrdma_xprt_disconnect()
557 id = ep->re_id; in rpcrdma_xprt_disconnect()
570 r_xprt->rx_ep = NULL; in rpcrdma_xprt_disconnect()
573 /* Fixed-size circular FIFO queue. This implementation is wait-free and
574 * lock-free.
579 * ->send_request call at a time.
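
The comment above describes the sendctx ring: a fixed array of send contexts indexed by a head that only the sending path advances and a tail that only the Send completion handler advances, so no lock is needed with one producer and one consumer. When the head catches up with the tail the transport applies backpressure (xprt_wait_for_buffer_space()), and each completed Send retires one or more contexts and re-enables writes (xprt_write_space()). A stripped-down userspace sketch of the same head/tail scheme, with C11 acquire/release atomics standing in for READ_ONCE()/smp_store_release() (types and names are illustrative, not the kernel's):

    #include <stdatomic.h>
    #include <stddef.h>

    /* Single-producer/single-consumer ring of pointers. The queue size
     * need not be a power of two, so wraparound is a compare rather than
     * a mask (cf. rpcrdma_sendctx_next()).
     */
    struct spsc_ring {
        void **slots;
        unsigned long last;             /* highest valid index */
        unsigned long head;             /* advanced only by the producer */
        _Atomic unsigned long tail;     /* advanced only by the consumer */
    };

    static unsigned long ring_next(const struct spsc_ring *r, unsigned long i)
    {
        return i < r->last ? i + 1 : 0;
    }

    /* Producer side (cf. rpcrdma_sendctx_get_locked()): return the next
     * free slot's item, or NULL when the ring is exhausted and the caller
     * must back off.
     */
    static void *ring_get(struct spsc_ring *r)
    {
        unsigned long next_head = ring_next(r, r->head);

        if (next_head == atomic_load_explicit(&r->tail, memory_order_acquire))
            return NULL;
        r->head = next_head;
        return r->slots[next_head];
    }

    /* Consumer side (cf. rpcrdma_sendctx_put_locked()): a completion may
     * retire several contexts at once, so walk the tail forward until the
     * released item is reached, then publish the new tail with release
     * semantics.
     */
    static void ring_put(struct spsc_ring *r, void *released)
    {
        unsigned long next_tail = atomic_load_explicit(&r->tail,
                                                       memory_order_relaxed);
        do {
            next_tail = ring_next(r, next_tail);
            /* unmap / clean r->slots[next_tail] here */
        } while (r->slots[next_tail] != released);

        atomic_store_explicit(&r->tail, next_tail, memory_order_release);
    }

    int main(void)
    {
        int items[4];
        void *slots[4] = { &items[0], &items[1], &items[2], &items[3] };
        struct spsc_ring r = { .slots = slots, .last = 3, .head = 0 };

        atomic_init(&r.tail, 0);

        void *a = ring_get(&r);         /* -> slots[1] */
        void *b = ring_get(&r);         /* -> slots[2] */
        ring_put(&r, a);
        ring_put(&r, b);
        return (a && b) ? 0 : 1;
    }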
592 struct rpcrdma_buffer *buf = &r_xprt->rx_buf; in rpcrdma_sendctxs_destroy()
595 if (!buf->rb_sc_ctxs) in rpcrdma_sendctxs_destroy()
597 for (i = 0; i <= buf->rb_sc_last; i++) in rpcrdma_sendctxs_destroy()
598 kfree(buf->rb_sc_ctxs[i]); in rpcrdma_sendctxs_destroy()
599 kfree(buf->rb_sc_ctxs); in rpcrdma_sendctxs_destroy()
600 buf->rb_sc_ctxs = NULL; in rpcrdma_sendctxs_destroy()
607 sc = kzalloc(struct_size(sc, sc_sges, ep->re_attr.cap.max_send_sge), in rpcrdma_sendctx_create()
612 sc->sc_cqe.done = rpcrdma_wc_send; in rpcrdma_sendctx_create()
613 sc->sc_cid.ci_queue_id = ep->re_attr.send_cq->res.id; in rpcrdma_sendctx_create()
614 sc->sc_cid.ci_completion_id = in rpcrdma_sendctx_create()
615 atomic_inc_return(&ep->re_completion_ids); in rpcrdma_sendctx_create()
621 struct rpcrdma_buffer *buf = &r_xprt->rx_buf; in rpcrdma_sendctxs_create()
626 * the circular queue size stops Send Queue overflow by causing in rpcrdma_sendctxs_create()
627 * the ->send_request call to fail temporarily before too many in rpcrdma_sendctxs_create()
630 i = r_xprt->rx_ep->re_max_requests + RPCRDMA_MAX_BC_REQUESTS; in rpcrdma_sendctxs_create()
631 buf->rb_sc_ctxs = kcalloc(i, sizeof(sc), XPRTRDMA_GFP_FLAGS); in rpcrdma_sendctxs_create()
632 if (!buf->rb_sc_ctxs) in rpcrdma_sendctxs_create()
633 return -ENOMEM; in rpcrdma_sendctxs_create()
635 buf->rb_sc_last = i - 1; in rpcrdma_sendctxs_create()
636 for (i = 0; i <= buf->rb_sc_last; i++) { in rpcrdma_sendctxs_create()
637 sc = rpcrdma_sendctx_create(r_xprt->rx_ep); in rpcrdma_sendctxs_create()
639 return -ENOMEM; in rpcrdma_sendctxs_create()
641 buf->rb_sc_ctxs[i] = sc; in rpcrdma_sendctxs_create()
644 buf->rb_sc_head = 0; in rpcrdma_sendctxs_create()
645 buf->rb_sc_tail = 0; in rpcrdma_sendctxs_create()
649 /* The sendctx queue is not guaranteed to have a size that is a
656 return likely(item < buf->rb_sc_last) ? item + 1 : 0; in rpcrdma_sendctx_next()
660 * rpcrdma_sendctx_get_locked - Acquire a send context
674 struct rpcrdma_buffer *buf = &r_xprt->rx_buf; in rpcrdma_sendctx_get_locked()
678 next_head = rpcrdma_sendctx_next(buf, buf->rb_sc_head); in rpcrdma_sendctx_get_locked()
680 if (next_head == READ_ONCE(buf->rb_sc_tail)) in rpcrdma_sendctx_get_locked()
684 sc = buf->rb_sc_ctxs[next_head]; in rpcrdma_sendctx_get_locked()
689 buf->rb_sc_head = next_head; in rpcrdma_sendctx_get_locked()
698 xprt_wait_for_buffer_space(&r_xprt->rx_xprt); in rpcrdma_sendctx_get_locked()
699 r_xprt->rx_stats.empty_sendctx_q++; in rpcrdma_sendctx_get_locked()
704 * rpcrdma_sendctx_put_locked - Release a send context
716 struct rpcrdma_buffer *buf = &r_xprt->rx_buf; in rpcrdma_sendctx_put_locked()
722 next_tail = buf->rb_sc_tail; in rpcrdma_sendctx_put_locked()
727 rpcrdma_sendctx_unmap(buf->rb_sc_ctxs[next_tail]); in rpcrdma_sendctx_put_locked()
729 } while (buf->rb_sc_ctxs[next_tail] != sc); in rpcrdma_sendctx_put_locked()
732 smp_store_release(&buf->rb_sc_tail, next_tail); in rpcrdma_sendctx_put_locked()
734 xprt_write_space(&r_xprt->rx_xprt); in rpcrdma_sendctx_put_locked()
740 struct rpcrdma_buffer *buf = &r_xprt->rx_buf; in rpcrdma_mrs_create()
741 struct rpcrdma_ep *ep = r_xprt->rx_ep; in rpcrdma_mrs_create()
742 struct ib_device *device = ep->re_id->device; in rpcrdma_mrs_create()
745 /* Try to allocate enough to perform one full-sized I/O */ in rpcrdma_mrs_create()
746 for (count = 0; count < ep->re_max_rdma_segs; count++) { in rpcrdma_mrs_create()
761 spin_lock(&buf->rb_lock); in rpcrdma_mrs_create()
762 rpcrdma_mr_push(mr, &buf->rb_mrs); in rpcrdma_mrs_create()
763 list_add(&mr->mr_all, &buf->rb_all_mrs); in rpcrdma_mrs_create()
764 spin_unlock(&buf->rb_lock); in rpcrdma_mrs_create()
767 r_xprt->rx_stats.mrs_allocated += count; in rpcrdma_mrs_create()
780 xprt_write_space(&r_xprt->rx_xprt); in rpcrdma_mr_refresh_worker()
784 * rpcrdma_mrs_refresh - Wake the MR refresh worker
790 struct rpcrdma_buffer *buf = &r_xprt->rx_buf; in rpcrdma_mrs_refresh()
791 struct rpcrdma_ep *ep = r_xprt->rx_ep; in rpcrdma_mrs_refresh()
796 if (ep->re_connect_status != 1) in rpcrdma_mrs_refresh()
798 queue_work(system_highpri_wq, &buf->rb_refresh_worker); in rpcrdma_mrs_refresh()
802 * rpcrdma_req_create - Allocate an rpcrdma_req object
804 * @size: initial size, in bytes, of send and receive buffers
809 size_t size) in rpcrdma_req_create() argument
811 struct rpcrdma_buffer *buffer = &r_xprt->rx_buf; in rpcrdma_req_create()
818 req->rl_sendbuf = rpcrdma_regbuf_alloc(size, DMA_TO_DEVICE); in rpcrdma_req_create()
819 if (!req->rl_sendbuf) in rpcrdma_req_create()
822 req->rl_recvbuf = rpcrdma_regbuf_alloc(size, DMA_NONE); in rpcrdma_req_create()
823 if (!req->rl_recvbuf) in rpcrdma_req_create()
826 INIT_LIST_HEAD(&req->rl_free_mrs); in rpcrdma_req_create()
827 INIT_LIST_HEAD(&req->rl_registered); in rpcrdma_req_create()
828 spin_lock(&buffer->rb_lock); in rpcrdma_req_create()
829 list_add(&req->rl_all, &buffer->rb_allreqs); in rpcrdma_req_create()
830 spin_unlock(&buffer->rb_lock); in rpcrdma_req_create()
834 rpcrdma_regbuf_free(req->rl_sendbuf); in rpcrdma_req_create()
842 * rpcrdma_req_setup - Per-connection instance setup of an rpcrdma_req object
853 /* Compute maximum header buffer size in bytes */ in rpcrdma_req_setup()
855 r_xprt->rx_ep->re_max_rdma_segs * rpcrdma_readchunk_maxsz; in rpcrdma_req_setup()
865 req->rl_rdmabuf = rb; in rpcrdma_req_setup()
866 xdr_buf_init(&req->rl_hdrbuf, rdmab_data(rb), rdmab_length(rb)); in rpcrdma_req_setup()
872 return -ENOMEM; in rpcrdma_req_setup()
882 struct rpcrdma_buffer *buf = &r_xprt->rx_buf; in rpcrdma_reqs_setup()
886 list_for_each_entry(req, &buf->rb_allreqs, rl_all) { in rpcrdma_reqs_setup()
897 req->rl_slot.rq_cong = 0; in rpcrdma_req_reset()
899 rpcrdma_regbuf_free(req->rl_rdmabuf); in rpcrdma_req_reset()
900 req->rl_rdmabuf = NULL; in rpcrdma_req_reset()
902 rpcrdma_regbuf_dma_unmap(req->rl_sendbuf); in rpcrdma_req_reset()
903 rpcrdma_regbuf_dma_unmap(req->rl_recvbuf); in rpcrdma_req_reset()
915 struct rpcrdma_buffer *buf = &r_xprt->rx_buf; in rpcrdma_reqs_reset()
918 list_for_each_entry(req, &buf->rb_allreqs, rl_all) in rpcrdma_reqs_reset()
926 struct rpcrdma_buffer *buf = &r_xprt->rx_buf; in rpcrdma_rep_create()
933 rep->rr_rdmabuf = rpcrdma_regbuf_alloc(r_xprt->rx_ep->re_inline_recv, in rpcrdma_rep_create()
935 if (!rep->rr_rdmabuf) in rpcrdma_rep_create()
938 rep->rr_cid.ci_completion_id = in rpcrdma_rep_create()
939 atomic_inc_return(&r_xprt->rx_ep->re_completion_ids); in rpcrdma_rep_create()
941 xdr_buf_init(&rep->rr_hdrbuf, rdmab_data(rep->rr_rdmabuf), in rpcrdma_rep_create()
942 rdmab_length(rep->rr_rdmabuf)); in rpcrdma_rep_create()
943 rep->rr_cqe.done = rpcrdma_wc_receive; in rpcrdma_rep_create()
944 rep->rr_rxprt = r_xprt; in rpcrdma_rep_create()
945 rep->rr_recv_wr.next = NULL; in rpcrdma_rep_create()
946 rep->rr_recv_wr.wr_cqe = &rep->rr_cqe; in rpcrdma_rep_create()
947 rep->rr_recv_wr.sg_list = &rep->rr_rdmabuf->rg_iov; in rpcrdma_rep_create()
948 rep->rr_recv_wr.num_sge = 1; in rpcrdma_rep_create()
949 rep->rr_temp = temp; in rpcrdma_rep_create()
951 spin_lock(&buf->rb_lock); in rpcrdma_rep_create()
952 list_add(&rep->rr_all, &buf->rb_all_reps); in rpcrdma_rep_create()
953 spin_unlock(&buf->rb_lock); in rpcrdma_rep_create()
964 rpcrdma_regbuf_free(rep->rr_rdmabuf); in rpcrdma_rep_free()
970 struct rpcrdma_buffer *buf = &rep->rr_rxprt->rx_buf; in rpcrdma_rep_destroy()
972 spin_lock(&buf->rb_lock); in rpcrdma_rep_destroy()
973 list_del(&rep->rr_all); in rpcrdma_rep_destroy()
974 spin_unlock(&buf->rb_lock); in rpcrdma_rep_destroy()
984 node = llist_del_first(&buf->rb_free_reps); in rpcrdma_rep_get_locked()
991 * rpcrdma_rep_put - Release rpcrdma_rep back to free list
998 llist_add(&rep->rr_node, &buf->rb_free_reps); in rpcrdma_rep_put()
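
rpcrdma_rep_get_locked() and rpcrdma_rep_put() keep spare rpcrdma_rep structures on a lock-less LIFO: llist_add() pushes with a compare-and-swap, and llist_del_first() pops the most recently freed node (safe only with one remover at a time, the constraint the kernel's llist API documents). A userspace sketch of that push/pop idiom in C11 atomics; this is the generic pattern, not the kernel's llist implementation:

    #include <stdatomic.h>
    #include <stddef.h>

    /* Lock-less LIFO free list. Pushers may race with each other and with
     * the consumer; the pop side assumes a single consumer at a time.
     */
    struct lnode {
        struct lnode *next;
    };

    struct lhead {
        _Atomic(struct lnode *) first;
    };

    static void lpush(struct lhead *h, struct lnode *n)   /* llist_add() analogue */
    {
        struct lnode *old = atomic_load_explicit(&h->first, memory_order_relaxed);

        do {
            n->next = old;
        } while (!atomic_compare_exchange_weak_explicit(&h->first, &old, n,
                                                        memory_order_release,
                                                        memory_order_relaxed));
    }

    static struct lnode *lpop(struct lhead *h)            /* llist_del_first() analogue */
    {
        struct lnode *old = atomic_load_explicit(&h->first, memory_order_acquire);

        while (old && !atomic_compare_exchange_weak_explicit(&h->first, &old,
                                                             old->next,
                                                             memory_order_acquire,
                                                             memory_order_acquire))
            ;
        return old;
    }

    int main(void)
    {
        struct lnode reps[3];
        struct lhead free_reps;

        atomic_init(&free_reps.first, NULL);
        for (int i = 0; i < 3; i++)
            lpush(&free_reps, &reps[i]);    /* rpcrdma_rep_put() */
        while (lpop(&free_reps))            /* rpcrdma_rep_get_locked() */
            ;
        return 0;
    }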
1007 struct rpcrdma_buffer *buf = &r_xprt->rx_buf; in rpcrdma_reps_unmap()
1010 list_for_each_entry(rep, &buf->rb_all_reps, rr_all) { in rpcrdma_reps_unmap()
1011 rpcrdma_regbuf_dma_unmap(rep->rr_rdmabuf); in rpcrdma_reps_unmap()
1012 rep->rr_temp = true; /* Mark this rep for destruction */ in rpcrdma_reps_unmap()
1020 spin_lock(&buf->rb_lock); in rpcrdma_reps_destroy()
1021 while ((rep = list_first_entry_or_null(&buf->rb_all_reps, in rpcrdma_reps_destroy()
1024 list_del(&rep->rr_all); in rpcrdma_reps_destroy()
1025 spin_unlock(&buf->rb_lock); in rpcrdma_reps_destroy()
1029 spin_lock(&buf->rb_lock); in rpcrdma_reps_destroy()
1031 spin_unlock(&buf->rb_lock); in rpcrdma_reps_destroy()
1035 * rpcrdma_buffer_create - Create initial set of req/rep objects
1042 struct rpcrdma_buffer *buf = &r_xprt->rx_buf; in rpcrdma_buffer_create()
1045 buf->rb_bc_srv_max_requests = 0; in rpcrdma_buffer_create()
1046 spin_lock_init(&buf->rb_lock); in rpcrdma_buffer_create()
1047 INIT_LIST_HEAD(&buf->rb_mrs); in rpcrdma_buffer_create()
1048 INIT_LIST_HEAD(&buf->rb_all_mrs); in rpcrdma_buffer_create()
1049 INIT_WORK(&buf->rb_refresh_worker, rpcrdma_mr_refresh_worker); in rpcrdma_buffer_create()
1051 INIT_LIST_HEAD(&buf->rb_send_bufs); in rpcrdma_buffer_create()
1052 INIT_LIST_HEAD(&buf->rb_allreqs); in rpcrdma_buffer_create()
1053 INIT_LIST_HEAD(&buf->rb_all_reps); in rpcrdma_buffer_create()
1055 rc = -ENOMEM; in rpcrdma_buffer_create()
1056 for (i = 0; i < r_xprt->rx_xprt.max_reqs; i++) { in rpcrdma_buffer_create()
1063 list_add(&req->rl_list, &buf->rb_send_bufs); in rpcrdma_buffer_create()
1066 init_llist_head(&buf->rb_free_reps); in rpcrdma_buffer_create()
1075 * rpcrdma_req_destroy - Destroy an rpcrdma_req object
1079 * removing req->rl_all from buf->rb_all_reqs safely.
1085 list_del(&req->rl_all); in rpcrdma_req_destroy()
1087 while ((mr = rpcrdma_mr_pop(&req->rl_free_mrs))) { in rpcrdma_req_destroy()
1088 struct rpcrdma_buffer *buf = &mr->mr_xprt->rx_buf; in rpcrdma_req_destroy()
1090 spin_lock(&buf->rb_lock); in rpcrdma_req_destroy()
1091 list_del(&mr->mr_all); in rpcrdma_req_destroy()
1092 spin_unlock(&buf->rb_lock); in rpcrdma_req_destroy()
1097 rpcrdma_regbuf_free(req->rl_recvbuf); in rpcrdma_req_destroy()
1098 rpcrdma_regbuf_free(req->rl_sendbuf); in rpcrdma_req_destroy()
1099 rpcrdma_regbuf_free(req->rl_rdmabuf); in rpcrdma_req_destroy()
1104 * rpcrdma_mrs_destroy - Release all of a transport's MRs
1108 * removing mr->mr_list from req->rl_free_mrs safely.
1112 struct rpcrdma_buffer *buf = &r_xprt->rx_buf; in rpcrdma_mrs_destroy()
1115 cancel_work_sync(&buf->rb_refresh_worker); in rpcrdma_mrs_destroy()
1117 spin_lock(&buf->rb_lock); in rpcrdma_mrs_destroy()
1118 while ((mr = list_first_entry_or_null(&buf->rb_all_mrs, in rpcrdma_mrs_destroy()
1121 list_del(&mr->mr_list); in rpcrdma_mrs_destroy()
1122 list_del(&mr->mr_all); in rpcrdma_mrs_destroy()
1123 spin_unlock(&buf->rb_lock); in rpcrdma_mrs_destroy()
1127 spin_lock(&buf->rb_lock); in rpcrdma_mrs_destroy()
1129 spin_unlock(&buf->rb_lock); in rpcrdma_mrs_destroy()
1133 * rpcrdma_buffer_destroy - Release all hw resources
1137 * - No more Send or Receive completions can occur
1138 * - All MRs, reps, and reqs are returned to their free lists
1145 while (!list_empty(&buf->rb_send_bufs)) { in rpcrdma_buffer_destroy()
1148 req = list_first_entry(&buf->rb_send_bufs, in rpcrdma_buffer_destroy()
1150 list_del(&req->rl_list); in rpcrdma_buffer_destroy()
1156 * rpcrdma_mr_get - Allocate an rpcrdma_mr object
1165 struct rpcrdma_buffer *buf = &r_xprt->rx_buf; in rpcrdma_mr_get()
1168 spin_lock(&buf->rb_lock); in rpcrdma_mr_get()
1169 mr = rpcrdma_mr_pop(&buf->rb_mrs); in rpcrdma_mr_get()
1170 spin_unlock(&buf->rb_lock); in rpcrdma_mr_get()
1175 * rpcrdma_reply_put - Put reply buffers back into pool
1182 if (req->rl_reply) { in rpcrdma_reply_put()
1183 rpcrdma_rep_put(buffers, req->rl_reply); in rpcrdma_reply_put()
1184 req->rl_reply = NULL; in rpcrdma_reply_put()
1189 * rpcrdma_buffer_get - Get a request buffer
1199 spin_lock(&buffers->rb_lock); in rpcrdma_buffer_get()
1200 req = list_first_entry_or_null(&buffers->rb_send_bufs, in rpcrdma_buffer_get()
1203 list_del_init(&req->rl_list); in rpcrdma_buffer_get()
1204 spin_unlock(&buffers->rb_lock); in rpcrdma_buffer_get()
1209 * rpcrdma_buffer_put - Put request/reply buffers back into pool
1218 spin_lock(&buffers->rb_lock); in rpcrdma_buffer_put()
1219 list_add(&req->rl_list, &buffers->rb_send_bufs); in rpcrdma_buffer_put()
1220 spin_unlock(&buffers->rb_lock); in rpcrdma_buffer_put()
1230 rpcrdma_regbuf_alloc(size_t size, enum dma_data_direction direction) in rpcrdma_regbuf_alloc() argument
1237 rb->rg_data = kmalloc(size, XPRTRDMA_GFP_FLAGS); in rpcrdma_regbuf_alloc()
1238 if (!rb->rg_data) { in rpcrdma_regbuf_alloc()
1243 rb->rg_device = NULL; in rpcrdma_regbuf_alloc()
1244 rb->rg_direction = direction; in rpcrdma_regbuf_alloc()
1245 rb->rg_iov.length = size; in rpcrdma_regbuf_alloc()
1250 * rpcrdma_regbuf_realloc - re-allocate a SEND/RECV buffer
1252 * @size: size of buffer to be allocated, in bytes
1258 bool rpcrdma_regbuf_realloc(struct rpcrdma_regbuf *rb, size_t size, gfp_t flags) in rpcrdma_regbuf_realloc() argument
1262 buf = kmalloc(size, flags); in rpcrdma_regbuf_realloc()
1267 kfree(rb->rg_data); in rpcrdma_regbuf_realloc()
1269 rb->rg_data = buf; in rpcrdma_regbuf_realloc()
1270 rb->rg_iov.length = size; in rpcrdma_regbuf_realloc()
1275 * __rpcrdma_regbuf_dma_map - DMA-map a regbuf
1284 struct ib_device *device = r_xprt->rx_ep->re_id->device; in __rpcrdma_regbuf_dma_map()
1286 if (rb->rg_direction == DMA_NONE) in __rpcrdma_regbuf_dma_map()
1289 rb->rg_iov.addr = ib_dma_map_single(device, rdmab_data(rb), in __rpcrdma_regbuf_dma_map()
1290 rdmab_length(rb), rb->rg_direction); in __rpcrdma_regbuf_dma_map()
1296 rb->rg_device = device; in __rpcrdma_regbuf_dma_map()
1297 rb->rg_iov.lkey = r_xprt->rx_ep->re_pd->local_dma_lkey; in __rpcrdma_regbuf_dma_map()
1309 ib_dma_unmap_single(rb->rg_device, rdmab_addr(rb), rdmab_length(rb), in rpcrdma_regbuf_dma_unmap()
1310 rb->rg_direction); in rpcrdma_regbuf_dma_unmap()
1311 rb->rg_device = NULL; in rpcrdma_regbuf_dma_unmap()
1318 kfree(rb->rg_data); in rpcrdma_regbuf_free()
1323 * rpcrdma_post_recvs - Refill the Receive Queue
1331 struct rpcrdma_buffer *buf = &r_xprt->rx_buf; in rpcrdma_post_recvs()
1332 struct rpcrdma_ep *ep = r_xprt->rx_ep; in rpcrdma_post_recvs()
1340 if (likely(ep->re_receive_count > needed)) in rpcrdma_post_recvs()
1342 needed -= ep->re_receive_count; in rpcrdma_post_recvs()
1346 if (atomic_inc_return(&ep->re_receiving) > 1) in rpcrdma_post_recvs()
1353 if (rep && rep->rr_temp) { in rpcrdma_post_recvs()
1361 if (!rpcrdma_regbuf_dma_map(r_xprt, rep->rr_rdmabuf)) { in rpcrdma_post_recvs()
1366 rep->rr_cid.ci_queue_id = ep->re_attr.recv_cq->res.id; in rpcrdma_post_recvs()
1367 trace_xprtrdma_post_recv(&rep->rr_cid); in rpcrdma_post_recvs()
1368 rep->rr_recv_wr.next = wr; in rpcrdma_post_recvs()
1369 wr = &rep->rr_recv_wr; in rpcrdma_post_recvs()
1370 --needed; in rpcrdma_post_recvs()
1376 rc = ib_post_recv(ep->re_id->qp, wr, in rpcrdma_post_recvs()
1384 wr = wr->next; in rpcrdma_post_recvs()
1386 --count; in rpcrdma_post_recvs()
1389 if (atomic_dec_return(&ep->re_receiving) > 0) in rpcrdma_post_recvs()
1390 complete(&ep->re_done); in rpcrdma_post_recvs()
1394 ep->re_receive_count += count; in rpcrdma_post_recvs()