Lines Matching +full:rpc +full:- +full:if

1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
3 * Copyright (c) 2016-2018 Oracle. All rights reserved.
5 * Copyright (c) 2005-2006 Network Appliance, Inc. All rights reserved.
10 * COPYING in the main directory of this source tree, or the BSD-type
40 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
53 * free pages (rq_pages) that will contain the incoming RPC message.
56 * the RPC Call is ready to be processed by the Upper Layer.
57 * svc_rdma_recvfrom returns the length of the RPC Call message,
58 * completing the reception of the RPC Call.
61 * svc_rdma_recvfrom must post RDMA Reads to pull the RPC Call's
65 * I/O. svc_rdma_recvfrom then returns zero, since the RPC message
76 * of the RPC Call message, using the RDMA Read sink pages kept in
79 * the length of the completed RPC Call message.
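The flow described above is two-pass: the first call to svc_rdma_recvfrom returns zero after posting RDMA Reads, and a later call returns the Call length once the Read completion re-queues the context. A condensed standalone model of that dispatch is sketched below; the types and helpers (model_ctxt, post_rdma_reads, build_rq_arg) are invented for illustration and are not the kernel's real API.

/* Simplified model of the two-pass Receive flow described above.
 * All names below are placeholders, not kernel symbols. */
#include <stdbool.h>

struct model_ctxt {
	bool has_read_chunks;	/* Read list present in the transport header */
	bool reads_done;	/* RDMA Read completion has already fired */
	int  call_length;	/* length of the reassembled RPC Call */
};

static void post_rdma_reads(struct model_ctxt *c) { c->reads_done = false; }
static void build_rq_arg(struct model_ctxt *c) { (void)c; }

static int model_recvfrom(struct model_ctxt *c)
{
	if (c->has_read_chunks && !c->reads_done) {
		post_rdma_reads(c);	/* pull chunk payloads asynchronously */
		return 0;		/* Call not ready; dispatch happens later */
	}
	build_rq_arg(c);		/* splice Read sink pages into rq_arg */
	return c->call_length;		/* reception of the RPC Call is complete */
}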
121 int node = ibdev_to_node(rdma->sc_cm_id->device); in svc_rdma_recv_ctxt_alloc()
127 if (!ctxt) in svc_rdma_recv_ctxt_alloc()
129 buffer = kmalloc_node(rdma->sc_max_req_size, GFP_KERNEL, node); in svc_rdma_recv_ctxt_alloc()
130 if (!buffer) in svc_rdma_recv_ctxt_alloc()
132 addr = ib_dma_map_single(rdma->sc_pd->device, buffer, in svc_rdma_recv_ctxt_alloc()
133 rdma->sc_max_req_size, DMA_FROM_DEVICE); in svc_rdma_recv_ctxt_alloc()
134 if (ib_dma_mapping_error(rdma->sc_pd->device, addr)) in svc_rdma_recv_ctxt_alloc()
137 svc_rdma_recv_cid_init(rdma, &ctxt->rc_cid); in svc_rdma_recv_ctxt_alloc()
138 pcl_init(&ctxt->rc_call_pcl); in svc_rdma_recv_ctxt_alloc()
139 pcl_init(&ctxt->rc_read_pcl); in svc_rdma_recv_ctxt_alloc()
140 pcl_init(&ctxt->rc_write_pcl); in svc_rdma_recv_ctxt_alloc()
141 pcl_init(&ctxt->rc_reply_pcl); in svc_rdma_recv_ctxt_alloc()
143 ctxt->rc_recv_wr.next = NULL; in svc_rdma_recv_ctxt_alloc()
144 ctxt->rc_recv_wr.wr_cqe = &ctxt->rc_cqe; in svc_rdma_recv_ctxt_alloc()
145 ctxt->rc_recv_wr.sg_list = &ctxt->rc_recv_sge; in svc_rdma_recv_ctxt_alloc()
146 ctxt->rc_recv_wr.num_sge = 1; in svc_rdma_recv_ctxt_alloc()
147 ctxt->rc_cqe.done = svc_rdma_wc_receive; in svc_rdma_recv_ctxt_alloc()
148 ctxt->rc_recv_sge.addr = addr; in svc_rdma_recv_ctxt_alloc()
149 ctxt->rc_recv_sge.length = rdma->sc_max_req_size; in svc_rdma_recv_ctxt_alloc()
150 ctxt->rc_recv_sge.lkey = rdma->sc_pd->local_dma_lkey; in svc_rdma_recv_ctxt_alloc()
151 ctxt->rc_recv_buf = buffer; in svc_rdma_recv_ctxt_alloc()
152 svc_rdma_cc_init(rdma, &ctxt->rc_cc); in svc_rdma_recv_ctxt_alloc()
166 ib_dma_unmap_single(rdma->sc_pd->device, ctxt->rc_recv_sge.addr, in svc_rdma_recv_ctxt_destroy()
167 ctxt->rc_recv_sge.length, DMA_FROM_DEVICE); in svc_rdma_recv_ctxt_destroy()
168 kfree(ctxt->rc_recv_buf); in svc_rdma_recv_ctxt_destroy()
173 * svc_rdma_recv_ctxts_destroy - Release all recv_ctxt's for an xprt
182 while ((node = llist_del_first(&rdma->sc_recv_ctxts))) { in svc_rdma_recv_ctxts_destroy()
189 * svc_rdma_recv_ctxt_get - Allocate a recv_ctxt
192 * Returns a recv_ctxt or (rarely) NULL if none are available.
199 node = llist_del_first(&rdma->sc_recv_ctxts); in svc_rdma_recv_ctxt_get()
200 if (!node) in svc_rdma_recv_ctxt_get()
204 ctxt->rc_page_count = 0; in svc_rdma_recv_ctxt_get()
209 * svc_rdma_recv_ctxt_put - Return recv_ctxt to free list
217 svc_rdma_cc_release(rdma, &ctxt->rc_cc, DMA_FROM_DEVICE); in svc_rdma_recv_ctxt_put()
222 release_pages(ctxt->rc_pages, ctxt->rc_page_count); in svc_rdma_recv_ctxt_put()
224 pcl_free(&ctxt->rc_call_pcl); in svc_rdma_recv_ctxt_put()
225 pcl_free(&ctxt->rc_read_pcl); in svc_rdma_recv_ctxt_put()
226 pcl_free(&ctxt->rc_write_pcl); in svc_rdma_recv_ctxt_put()
227 pcl_free(&ctxt->rc_reply_pcl); in svc_rdma_recv_ctxt_put()
229 llist_add(&ctxt->rc_node, &rdma->sc_recv_ctxts); in svc_rdma_recv_ctxt_put()
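The get/put pair above implements a free-list cache for receive contexts: a context is popped from sc_recv_ctxts on the hot path and pushed back after use, with a fresh allocation only when the list is empty. Below is a minimal userspace model of that pattern; the kernel uses its lock-free llist primitives, whereas this sketch uses a plain pointer chain, is not thread-safe, and all names are invented.

#include <stdlib.h>

struct model_recv_ctxt {
	struct model_recv_ctxt *next;
	unsigned int page_count;
};

static struct model_recv_ctxt *free_list;

static struct model_recv_ctxt *model_ctxt_get(void)
{
	struct model_recv_ctxt *ctxt = free_list;

	if (ctxt)
		free_list = ctxt->next;			/* pop a cached context */
	else
		ctxt = calloc(1, sizeof(*ctxt));	/* rare slow path: allocate */
	if (ctxt)
		ctxt->page_count = 0;			/* reset per-use state */
	return ctxt;
}

static void model_ctxt_put(struct model_recv_ctxt *ctxt)
{
	ctxt->next = free_list;				/* push back for reuse */
	free_list = ctxt;
}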
233 * svc_rdma_release_ctxt - Release transport-specific per-rqst resources
235 * @vctxt: the context from rqstp->rq_xprt_ctxt or dr->xprt_ctxt
239 * or svc_process could drop an RPC, before the Reply is sent.
247 if (ctxt) in svc_rdma_release_ctxt()
259 if (test_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags)) in svc_rdma_refresh_recvs()
263 while (wanted--) { in svc_rdma_refresh_recvs()
265 if (!ctxt) in svc_rdma_refresh_recvs()
268 trace_svcrdma_post_recv(&ctxt->rc_cid); in svc_rdma_refresh_recvs()
269 ctxt->rc_recv_wr.next = recv_chain; in svc_rdma_refresh_recvs()
270 recv_chain = &ctxt->rc_recv_wr; in svc_rdma_refresh_recvs()
271 rdma->sc_pending_recvs++; in svc_rdma_refresh_recvs()
273 if (!recv_chain) in svc_rdma_refresh_recvs()
276 ret = ib_post_recv(rdma->sc_qp, recv_chain, &bad_wr); in svc_rdma_refresh_recvs()
277 if (ret) in svc_rdma_refresh_recvs()
286 bad_wr = bad_wr->next; in svc_rdma_refresh_recvs()
295 * svc_rdma_post_recvs - Post initial set of Recv WRs
307 * posted Receive and one RPC in process. in svc_rdma_post_recvs()
309 total = (rdma->sc_max_requests * 2) + rdma->sc_recv_batch; in svc_rdma_post_recvs()
310 while (total--) { in svc_rdma_post_recvs()
314 if (!ctxt) in svc_rdma_post_recvs()
316 llist_add(&ctxt->rc_node, &rdma->sc_recv_ctxts); in svc_rdma_post_recvs()
319 return svc_rdma_refresh_recvs(rdma, rdma->sc_max_requests); in svc_rdma_post_recvs()
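The provisioning arithmetic above sizes the context pool at twice the credit limit plus one re-post batch, on the reasoning that each credit may need one posted Receive while another Receive is still tied up by an RPC in process. A worked example with invented values for sc_max_requests and sc_recv_batch:

#include <stdio.h>

int main(void)
{
	unsigned int max_requests = 64;	/* illustrative credit limit (sc_max_requests) */
	unsigned int recv_batch = 8;	/* illustrative re-post batch (sc_recv_batch) */
	unsigned int total = (max_requests * 2) + recv_batch;

	/* 136 contexts are set aside, but only 64 Receives are posted up front */
	printf("allocate %u recv_ctxts, post %u initially\n", total, max_requests);
	return 0;
}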
323 * svc_rdma_wc_receive - Invoked by RDMA provider for each polled Receive WC
330 struct svcxprt_rdma *rdma = cq->cq_context; in svc_rdma_wc_receive()
331 struct ib_cqe *cqe = wc->wr_cqe; in svc_rdma_wc_receive()
334 rdma->sc_pending_recvs--; in svc_rdma_wc_receive()
336 /* WARNING: Only wc->wr_cqe and wc->status are reliable */ in svc_rdma_wc_receive()
339 if (wc->status != IB_WC_SUCCESS) in svc_rdma_wc_receive()
341 trace_svcrdma_wc_recv(wc, &ctxt->rc_cid); in svc_rdma_wc_receive()
343 /* If receive posting fails, the connection is about to be in svc_rdma_wc_receive()
345 * for this RPC, and the client will retransmit this RPC in svc_rdma_wc_receive()
348 * Therefore we drop the Receive, even if status was SUCCESS in svc_rdma_wc_receive()
352 if (rdma->sc_pending_recvs < rdma->sc_max_requests) in svc_rdma_wc_receive()
353 if (!svc_rdma_refresh_recvs(rdma, rdma->sc_recv_batch)) in svc_rdma_wc_receive()
357 ctxt->rc_byte_len = wc->byte_len; in svc_rdma_wc_receive()
359 spin_lock(&rdma->sc_rq_dto_lock); in svc_rdma_wc_receive()
360 list_add_tail(&ctxt->rc_list, &rdma->sc_rq_dto_q); in svc_rdma_wc_receive()
362 set_bit(XPT_DATA, &rdma->sc_xprt.xpt_flags); in svc_rdma_wc_receive()
363 spin_unlock(&rdma->sc_rq_dto_lock); in svc_rdma_wc_receive()
364 if (!test_bit(RDMAXPRT_CONN_PENDING, &rdma->sc_flags)) in svc_rdma_wc_receive()
365 svc_xprt_enqueue(&rdma->sc_xprt); in svc_rdma_wc_receive()
369 if (wc->status == IB_WC_WR_FLUSH_ERR) in svc_rdma_wc_receive()
370 trace_svcrdma_wc_recv_flush(wc, &ctxt->rc_cid); in svc_rdma_wc_receive()
372 trace_svcrdma_wc_recv_err(wc, &ctxt->rc_cid); in svc_rdma_wc_receive()
375 svc_xprt_deferred_close(&rdma->sc_xprt); in svc_rdma_wc_receive()
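The comment above describes the policy applied in the completion handler: a successfully received message is still dropped when the Receive queue cannot be replenished, because the connection is about to be closed and the client will retransmit. A condensed model of that decision, with placeholder names:

#include <stdbool.h>

enum wc_action { WC_DISPATCH, WC_DROP_AND_CLOSE };

static enum wc_action model_wc_receive(bool wc_success, bool need_refresh,
				       bool refresh_ok)
{
	if (!wc_success)
		return WC_DROP_AND_CLOSE;	/* flushed or failed Receive */
	if (need_refresh && !refresh_ok)
		return WC_DROP_AND_CLOSE;	/* cannot re-post: connection is doomed */
	return WC_DISPATCH;			/* queue the ctxt and wake the transport */
}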
379 * svc_rdma_flush_recv_queues - Drain pending Receive work
387 while ((ctxt = svc_rdma_next_recv_ctxt(&rdma->sc_read_complete_q))) { in svc_rdma_flush_recv_queues()
388 list_del(&ctxt->rc_list); in svc_rdma_flush_recv_queues()
391 while ((ctxt = svc_rdma_next_recv_ctxt(&rdma->sc_rq_dto_q))) { in svc_rdma_flush_recv_queues()
392 list_del(&ctxt->rc_list); in svc_rdma_flush_recv_queues()
400 struct xdr_buf *arg = &rqstp->rq_arg; in svc_rdma_build_arg_xdr()
402 arg->head[0].iov_base = ctxt->rc_recv_buf; in svc_rdma_build_arg_xdr()
403 arg->head[0].iov_len = ctxt->rc_byte_len; in svc_rdma_build_arg_xdr()
404 arg->tail[0].iov_base = NULL; in svc_rdma_build_arg_xdr()
405 arg->tail[0].iov_len = 0; in svc_rdma_build_arg_xdr()
406 arg->page_len = 0; in svc_rdma_build_arg_xdr()
407 arg->page_base = 0; in svc_rdma_build_arg_xdr()
408 arg->buflen = ctxt->rc_byte_len; in svc_rdma_build_arg_xdr()
409 arg->len = ctxt->rc_byte_len; in svc_rdma_build_arg_xdr()
413 * xdr_count_read_segments - Count number of Read segments in Read list
415 * @p: Start of an un-decoded Read list
434 rctxt->rc_call_pcl.cl_count = 0; in xdr_count_read_segments()
435 rctxt->rc_read_pcl.cl_count = 0; in xdr_count_read_segments()
440 p = xdr_inline_decode(&rctxt->rc_stream, in xdr_count_read_segments()
442 if (!p) in xdr_count_read_segments()
447 if (position) { in xdr_count_read_segments()
448 if (position & 3) in xdr_count_read_segments()
450 ++rctxt->rc_read_pcl.cl_count; in xdr_count_read_segments()
452 ++rctxt->rc_call_pcl.cl_count; in xdr_count_read_segments()
455 p = xdr_inline_decode(&rctxt->rc_stream, sizeof(*p)); in xdr_count_read_segments()
456 if (!p) in xdr_count_read_segments()
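For reference, each entry the counting loop above walks is an RPC-over-RDMA version 1 read segment: an XDR "item present" word, then the segment's position, handle, length, and 64-bit offset. The sketch below models the classification the loop performs; the struct and helper are illustrative only, since the kernel reads these fields through an xdr_stream rather than a struct.

#include <stdint.h>

struct model_read_segment {
	uint32_t position;	/* XDR offset where the chunk payload is inserted */
	uint32_t handle;	/* R_key the server uses for the RDMA Read */
	uint32_t length;	/* number of payload bytes to pull */
	uint64_t offset;	/* remote address of the payload */
};

/* Position zero marks a Position Zero Read Chunk segment (whole Call body,
 * rc_call_pcl); any other position must be XDR-aligned and counts toward
 * rc_read_pcl. */
static int model_classify_segment(const struct model_read_segment *seg)
{
	if (seg->position == 0)
		return 1;		/* Position Zero Read Chunk segment */
	if (seg->position & 3)
		return -1;		/* reject unaligned positions */
	return 0;			/* ordinary Read chunk segment */
}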
465 * - Read list does not overflow Receive buffer.
466 * - Chunk size limited by largest NFS data payload.
478 p = xdr_inline_decode(&rctxt->rc_stream, sizeof(*p)); in xdr_check_read_list()
479 if (!p) in xdr_check_read_list()
481 if (!xdr_count_read_segments(rctxt, p)) in xdr_check_read_list()
483 if (!pcl_alloc_call(rctxt, p)) in xdr_check_read_list()
493 if (xdr_stream_decode_u32(&rctxt->rc_stream, &segcount)) in xdr_check_write_chunk()
497 p = xdr_inline_decode(&rctxt->rc_stream, in xdr_check_write_chunk()
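The two decode steps above amount to a bounds check: a Write chunk is a segment count followed by that many (handle, length, offset) triples, and xdr_inline_decode() fails if that many words are not left in the Receive buffer. A standalone sketch of the same overflow test, under the assumption that one segment occupies four XDR words:

#include <stdbool.h>
#include <stddef.h>

#define MODEL_SEGMENT_WORDS 4	/* handle + length + 64-bit offset, in XDR words */

static bool model_write_chunk_fits(size_t segcount, size_t words_remaining)
{
	if (segcount > words_remaining / MODEL_SEGMENT_WORDS)
		return false;	/* chunk would overrun the Receive buffer */
	return true;
}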
503 * xdr_count_write_chunks - Count number of Write chunks in Write list
505 * @p: start of an un-decoded Write list
519 rctxt->rc_write_pcl.cl_count = 0; in xdr_count_write_chunks()
521 if (!xdr_check_write_chunk(rctxt)) in xdr_count_write_chunks()
523 ++rctxt->rc_write_pcl.cl_count; in xdr_count_write_chunks()
524 p = xdr_inline_decode(&rctxt->rc_stream, sizeof(*p)); in xdr_count_write_chunks()
525 if (!p) in xdr_count_write_chunks()
534 * - This implementation currently supports only one Write chunk.
537 * - Write list does not overflow Receive buffer.
538 * - Chunk size limited by largest NFS data payload.
550 p = xdr_inline_decode(&rctxt->rc_stream, sizeof(*p)); in xdr_check_write_list()
551 if (!p) in xdr_check_write_list()
553 if (!xdr_count_write_chunks(rctxt, p)) in xdr_check_write_list()
555 if (!pcl_alloc_write(rctxt, &rctxt->rc_write_pcl, p)) in xdr_check_write_list()
558 rctxt->rc_cur_result_payload = pcl_first_chunk(&rctxt->rc_write_pcl); in xdr_check_write_list()
565 * - Reply chunk does not overflow Receive buffer.
566 * - Chunk size limited by largest NFS data payload.
578 p = xdr_inline_decode(&rctxt->rc_stream, sizeof(*p)); in xdr_check_reply_chunk()
579 if (!p) in xdr_check_reply_chunk()
582 if (!xdr_item_is_present(p)) in xdr_check_reply_chunk()
584 if (!xdr_check_write_chunk(rctxt)) in xdr_check_reply_chunk()
587 rctxt->rc_reply_pcl.cl_count = 1; in xdr_check_reply_chunk()
588 return pcl_alloc_write(rctxt, &rctxt->rc_reply_pcl, p); in xdr_check_reply_chunk()
591 /* RPC-over-RDMA Version One private extension: Remote Invalidation.
595 * If there is exactly one distinct R_key in the received transport
605 ctxt->rc_inv_rkey = 0; in svc_rdma_get_inv_rkey()
607 if (!rdma->sc_snd_w_inv) in svc_rdma_get_inv_rkey()
611 pcl_for_each_chunk(chunk, &ctxt->rc_call_pcl) { in svc_rdma_get_inv_rkey()
613 if (inv_rkey == 0) in svc_rdma_get_inv_rkey()
614 inv_rkey = segment->rs_handle; in svc_rdma_get_inv_rkey()
615 else if (inv_rkey != segment->rs_handle) in svc_rdma_get_inv_rkey()
619 pcl_for_each_chunk(chunk, &ctxt->rc_read_pcl) { in svc_rdma_get_inv_rkey()
621 if (inv_rkey == 0) in svc_rdma_get_inv_rkey()
622 inv_rkey = segment->rs_handle; in svc_rdma_get_inv_rkey()
623 else if (inv_rkey != segment->rs_handle) in svc_rdma_get_inv_rkey()
627 pcl_for_each_chunk(chunk, &ctxt->rc_write_pcl) { in svc_rdma_get_inv_rkey()
629 if (inv_rkey == 0) in svc_rdma_get_inv_rkey()
630 inv_rkey = segment->rs_handle; in svc_rdma_get_inv_rkey()
631 else if (inv_rkey != segment->rs_handle) in svc_rdma_get_inv_rkey()
635 pcl_for_each_chunk(chunk, &ctxt->rc_reply_pcl) { in svc_rdma_get_inv_rkey()
637 if (inv_rkey == 0) in svc_rdma_get_inv_rkey()
638 inv_rkey = segment->rs_handle; in svc_rdma_get_inv_rkey()
639 else if (inv_rkey != segment->rs_handle) in svc_rdma_get_inv_rkey()
643 ctxt->rc_inv_rkey = inv_rkey; in svc_rdma_get_inv_rkey()
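The four loops above all apply the same rule: remember the first R_key seen, and give up if any later segment carries a different one, so Remote Invalidation is attempted only when exactly one distinct R_key appears in the transport header. A compact model of that rule over a flat array of R_keys (names are illustrative only):

#include <stddef.h>
#include <stdint.h>

static uint32_t model_pick_inv_rkey(const uint32_t *rkeys, size_t nr)
{
	uint32_t inv_rkey = 0;
	size_t i;

	for (i = 0; i < nr; i++) {
		if (inv_rkey == 0)
			inv_rkey = rkeys[i];	/* remember the first R_key seen */
		else if (inv_rkey != rkeys[i])
			return 0;		/* more than one distinct R_key: skip invalidation */
	}
	return inv_rkey;			/* zero when no segments were present */
}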
647 * svc_rdma_xdr_decode_req - Decode the transport header
648 * @rq_arg: xdr_buf containing ingress RPC/RDMA message
651 * On entry, xdr->head[0].iov_base points to first byte of the
652 * RPC-over-RDMA transport header.
655 * RPC-over-RDMA header. For RDMA_MSG, this is the RPC message.
657 * The length of the RPC-over-RDMA header is returned.
660 * - The transport header is entirely contained in the head iovec.
668 rdma_argp = rq_arg->head[0].iov_base; in svc_rdma_xdr_decode_req()
669 xdr_init_decode(&rctxt->rc_stream, rq_arg, rdma_argp, NULL); in svc_rdma_xdr_decode_req()
671 p = xdr_inline_decode(&rctxt->rc_stream, in svc_rdma_xdr_decode_req()
673 if (unlikely(!p)) in svc_rdma_xdr_decode_req()
676 if (*p != rpcrdma_version) in svc_rdma_xdr_decode_req()
679 rctxt->rc_msgtype = *p; in svc_rdma_xdr_decode_req()
680 switch (rctxt->rc_msgtype) { in svc_rdma_xdr_decode_req()
693 if (!xdr_check_read_list(rctxt)) in svc_rdma_xdr_decode_req()
695 if (!xdr_check_write_list(rctxt)) in svc_rdma_xdr_decode_req()
697 if (!xdr_check_reply_chunk(rctxt)) in svc_rdma_xdr_decode_req()
700 rq_arg->head[0].iov_base = rctxt->rc_stream.p; in svc_rdma_xdr_decode_req()
701 hdr_len = xdr_stream_pos(&rctxt->rc_stream); in svc_rdma_xdr_decode_req()
702 rq_arg->head[0].iov_len -= hdr_len; in svc_rdma_xdr_decode_req()
703 rq_arg->len -= hdr_len; in svc_rdma_xdr_decode_req()
708 trace_svcrdma_decode_short_err(rctxt, rq_arg->len); in svc_rdma_xdr_decode_req()
709 return -EINVAL; in svc_rdma_xdr_decode_req()
713 return -EPROTONOSUPPORT; in svc_rdma_xdr_decode_req()
721 return -EINVAL; in svc_rdma_xdr_decode_req()
725 return -EINVAL; in svc_rdma_xdr_decode_req()
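For orientation, the fixed portion of the transport header that svc_rdma_xdr_decode_req() consumes before the chunk lists is four 32-bit fields in network byte order. The struct below is a descriptive model only; the kernel decodes these words through an xdr_stream rather than a struct overlay.

#include <stdint.h>

struct model_rpcrdma_header {
	uint32_t rdma_xid;	/* mirrors the XID of the enclosed RPC Call */
	uint32_t rdma_vers;	/* must equal rpcrdma_version (1) */
	uint32_t rdma_credit;	/* requested credit limit */
	uint32_t rdma_proc;	/* rdma_msg, rdma_nomsg, or an error/unsupported type */
};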
735 if (!sctxt) in svc_rdma_send_error()
742 * the RPC/RDMA header small and fixed in size, so it is
743 * straightforward to check the RPC header's direction field.
748 __be32 *p = rctxt->rc_recv_buf; in svc_rdma_is_reverse_direction_reply()
750 if (!xprt->xpt_bc_xprt) in svc_rdma_is_reverse_direction_reply()
753 if (rctxt->rc_msgtype != rdma_msg) in svc_rdma_is_reverse_direction_reply()
756 if (!pcl_is_empty(&rctxt->rc_call_pcl)) in svc_rdma_is_reverse_direction_reply()
758 if (!pcl_is_empty(&rctxt->rc_read_pcl)) in svc_rdma_is_reverse_direction_reply()
760 if (!pcl_is_empty(&rctxt->rc_write_pcl)) in svc_rdma_is_reverse_direction_reply()
762 if (!pcl_is_empty(&rctxt->rc_reply_pcl)) in svc_rdma_is_reverse_direction_reply()
765 /* RPC call direction */ in svc_rdma_is_reverse_direction_reply()
766 if (*(p + 8) == cpu_to_be32(RPC_CALL)) in svc_rdma_is_reverse_direction_reply()
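The *(p + 8) test above works because, once the Read list, Write list, and Reply chunk are all known to be empty (the pcl_is_empty() checks above), every field before the RPC direction word sits at a fixed offset from the start of the Receive buffer. The word offsets assumed by that test are laid out below as a descriptive model, not kernel code:

enum model_word_offset {
	MODEL_RDMA_XID    = 0,	/* fixed RPC-over-RDMA header */
	MODEL_RDMA_VERS   = 1,
	MODEL_RDMA_CREDIT = 2,
	MODEL_RDMA_PROC   = 3,
	MODEL_READ_LIST   = 4,	/* each empty list is one "not present" word */
	MODEL_WRITE_LIST  = 5,
	MODEL_REPLY_CHUNK = 6,
	MODEL_RPC_XID     = 7,	/* start of the RPC header proper */
	MODEL_RPC_DIR     = 8,	/* CALL (0) for forward, REPLY (1) for backchannel */
};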
772 /* Finish constructing the RPC Call message in rqstp::rq_arg.
774 * The incoming RPC/RDMA message is an RDMA_MSG type message
781 struct svc_rdma_chunk *chunk = pcl_first_chunk(&ctxt->rc_read_pcl); in svc_rdma_read_complete_one()
782 struct xdr_buf *buf = &rqstp->rq_arg; in svc_rdma_read_complete_one()
790 buf->tail[0].iov_base = buf->head[0].iov_base + chunk->ch_position; in svc_rdma_read_complete_one()
791 buf->tail[0].iov_len = buf->head[0].iov_len - chunk->ch_position; in svc_rdma_read_complete_one()
792 buf->head[0].iov_len = chunk->ch_position; in svc_rdma_read_complete_one()
796 * If the client already rounded up the chunk length, the in svc_rdma_read_complete_one()
798 * list is increased to include XDR round-up. in svc_rdma_read_complete_one()
801 * thus the rounded-up length never crosses a page boundary. in svc_rdma_read_complete_one()
803 buf->pages = &rqstp->rq_pages[0]; in svc_rdma_read_complete_one()
804 length = xdr_align_size(chunk->ch_length); in svc_rdma_read_complete_one()
805 buf->page_len = length; in svc_rdma_read_complete_one()
806 buf->len += length; in svc_rdma_read_complete_one()
807 buf->buflen += length; in svc_rdma_read_complete_one()
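A worked example of the accounting above, with invented numbers: the chunk's XDR position splits the inline head into a new head and tail, and the page list grows by the XDR-rounded chunk length.

#include <stdio.h>

int main(void)
{
	unsigned int head_len = 120;	/* inline bytes left after the transport header */
	unsigned int ch_position = 100;	/* XDR offset of the Read chunk (ch_position) */
	unsigned int ch_length = 8193;	/* chunk payload length (ch_length) */
	unsigned int aligned = (ch_length + 3) & ~3u;	/* like xdr_align_size() */

	/* prints: head: 100 bytes, tail: 20 bytes, page_len: 8196 bytes */
	printf("head: %u bytes, tail: %u bytes, page_len: %u bytes\n",
	       ch_position, head_len - ch_position, aligned);
	return 0;
}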
810 /* Finish constructing the RPC Call message in rqstp::rq_arg.
812 * The incoming RPC/RDMA message is an RDMA_MSG type message
818 struct xdr_buf *buf = &rqstp->rq_arg; in svc_rdma_read_complete_multiple()
820 buf->len += ctxt->rc_readbytes; in svc_rdma_read_complete_multiple()
821 buf->buflen += ctxt->rc_readbytes; in svc_rdma_read_complete_multiple()
823 buf->head[0].iov_base = page_address(rqstp->rq_pages[0]); in svc_rdma_read_complete_multiple()
824 buf->head[0].iov_len = min_t(size_t, PAGE_SIZE, ctxt->rc_readbytes); in svc_rdma_read_complete_multiple()
825 buf->pages = &rqstp->rq_pages[1]; in svc_rdma_read_complete_multiple()
826 buf->page_len = ctxt->rc_readbytes - buf->head[0].iov_len; in svc_rdma_read_complete_multiple()
829 /* Finish constructing the RPC Call message in rqstp::rq_arg.
831 * The incoming RPC/RDMA message is an RDMA_NOMSG type message
832 * (the RPC message body was conveyed via RDMA Read).
837 struct xdr_buf *buf = &rqstp->rq_arg; in svc_rdma_read_complete_pzrc()
839 buf->len += ctxt->rc_readbytes; in svc_rdma_read_complete_pzrc()
840 buf->buflen += ctxt->rc_readbytes; in svc_rdma_read_complete_pzrc()
842 buf->head[0].iov_base = page_address(rqstp->rq_pages[0]); in svc_rdma_read_complete_pzrc()
843 buf->head[0].iov_len = min_t(size_t, PAGE_SIZE, ctxt->rc_readbytes); in svc_rdma_read_complete_pzrc()
844 buf->pages = &rqstp->rq_pages[1]; in svc_rdma_read_complete_pzrc()
845 buf->page_len = ctxt->rc_readbytes - buf->head[0].iov_len; in svc_rdma_read_complete_pzrc()
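Both Read-completion helpers above lay the pulled bytes out the same way: up to one page's worth in head[0] and the remainder in the page list. A worked example with invented numbers:

#include <stdio.h>

#define MODEL_PAGE_SIZE 4096u

int main(void)
{
	unsigned int readbytes = 10000;	/* total bytes pulled via RDMA Read (rc_readbytes) */
	unsigned int head = readbytes < MODEL_PAGE_SIZE ? readbytes : MODEL_PAGE_SIZE;

	/* prints: head[0]: 4096 bytes, page_len: 5904 bytes */
	printf("head[0]: %u bytes, page_len: %u bytes\n", head, readbytes - head);
	return 0;
}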
856 release_pages(rqstp->rq_respages, ctxt->rc_page_count); in svc_rdma_read_complete()
857 for (i = 0; i < ctxt->rc_page_count; i++) in svc_rdma_read_complete()
858 rqstp->rq_pages[i] = ctxt->rc_pages[i]; in svc_rdma_read_complete()
863 rqstp->rq_respages = &rqstp->rq_pages[ctxt->rc_page_count]; in svc_rdma_read_complete()
864 rqstp->rq_next_page = rqstp->rq_respages + 1; in svc_rdma_read_complete()
869 ctxt->rc_page_count = 0; in svc_rdma_read_complete()
871 /* Finish constructing the RPC Call message. The exact in svc_rdma_read_complete()
872 * procedure for that depends on what kind of RPC/RDMA in svc_rdma_read_complete()
875 rqstp->rq_arg = ctxt->rc_saved_arg; in svc_rdma_read_complete()
876 if (pcl_is_empty(&ctxt->rc_call_pcl)) { in svc_rdma_read_complete()
877 if (ctxt->rc_read_pcl.cl_count == 1) in svc_rdma_read_complete()
885 trace_svcrdma_read_finished(&ctxt->rc_cid); in svc_rdma_read_complete()
889 * svc_rdma_recvfrom - Receive an RPC call
890 * @rqstp: request structure into which to receive an RPC Call
893 * The positive number of bytes in the RPC Call message,
894 * %0 if there were no Calls ready to return,
895 * %-EINVAL if the Read chunk data is too large,
896 * %-ENOMEM if rdma_rw context pool was exhausted,
897 * %-ENOTCONN if posting failed (connection is lost),
898 * %-EIO if rdma_rw initialization failed (DMA mapping, etc).
905 * - If the ctxt completes a Receive, then construct the Call
908 * - If there are no Read chunks in this message, then finish
912 * - If there are Read chunks in this message, post Read WRs to
918 struct svc_xprt *xprt = rqstp->rq_xprt; in svc_rdma_recvfrom()
927 rqstp->rq_respages = rqstp->rq_pages; in svc_rdma_recvfrom()
928 rqstp->rq_next_page = rqstp->rq_respages; in svc_rdma_recvfrom()
930 rqstp->rq_xprt_ctxt = NULL; in svc_rdma_recvfrom()
932 spin_lock(&rdma_xprt->sc_rq_dto_lock); in svc_rdma_recvfrom()
933 ctxt = svc_rdma_next_recv_ctxt(&rdma_xprt->sc_read_complete_q); in svc_rdma_recvfrom()
934 if (ctxt) { in svc_rdma_recvfrom()
935 list_del(&ctxt->rc_list); in svc_rdma_recvfrom()
936 spin_unlock(&rdma_xprt->sc_rq_dto_lock); in svc_rdma_recvfrom()
941 ctxt = svc_rdma_next_recv_ctxt(&rdma_xprt->sc_rq_dto_q); in svc_rdma_recvfrom()
942 if (ctxt) in svc_rdma_recvfrom()
943 list_del(&ctxt->rc_list); in svc_rdma_recvfrom()
946 clear_bit(XPT_DATA, &xprt->xpt_flags); in svc_rdma_recvfrom()
947 spin_unlock(&rdma_xprt->sc_rq_dto_lock); in svc_rdma_recvfrom()
951 if (!ctxt) in svc_rdma_recvfrom()
955 ib_dma_sync_single_for_cpu(rdma_xprt->sc_pd->device, in svc_rdma_recvfrom()
956 ctxt->rc_recv_sge.addr, ctxt->rc_byte_len, in svc_rdma_recvfrom()
960 ret = svc_rdma_xdr_decode_req(&rqstp->rq_arg, ctxt); in svc_rdma_recvfrom()
961 if (ret < 0) in svc_rdma_recvfrom()
963 if (ret == 0) in svc_rdma_recvfrom()
966 if (svc_rdma_is_reverse_direction_reply(xprt, ctxt)) in svc_rdma_recvfrom()
971 if (!pcl_is_empty(&ctxt->rc_read_pcl) || in svc_rdma_recvfrom()
972 !pcl_is_empty(&ctxt->rc_call_pcl)) in svc_rdma_recvfrom()
976 rqstp->rq_xprt_ctxt = ctxt; in svc_rdma_recvfrom()
977 rqstp->rq_prot = IPPROTO_MAX; in svc_rdma_recvfrom()
979 set_bit(RQ_SECURE, &rqstp->rq_flags); in svc_rdma_recvfrom()
980 return rqstp->rq_arg.len; in svc_rdma_recvfrom()
993 ctxt->rc_saved_arg = rqstp->rq_arg; in svc_rdma_recvfrom()
996 if (ret < 0) { in svc_rdma_recvfrom()
997 if (ret == -EINVAL) in svc_rdma_recvfrom()