Lines Matching +full:rpc +full:- +full:if

1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
3 * Copyright (c) 2016-2018 Oracle. All rights reserved.
5 * Copyright (c) 2005-2006 Network Appliance, Inc. All rights reserved.
10 * COPYING in the main directory of this source tree, or the BSD-type
40 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
48 * RPC server when an RPC Reply is ready to be transmitted to a client.
50 * The passed-in svc_rqst contains a struct xdr_buf which holds an
51 * XDR-encoded RPC Reply message. sendto must construct the RPC-over-RDMA
53 * a Send WR conveying the transport header and the RPC message itself to
75 * DMA-unmap the pages under I/O for that Write segment. The Write
88 * are still DMA-mapped.
92 * - If the Send WR is posted successfully, it will either complete
95 * - If the Send WR cannot be posted, the forward path releases
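
The ownership rule above decides which path frees the reply pages: the Send completion handler if the WR reached the HCA, the forward path if posting failed. A minimal userspace sketch of those two release paths, using hypothetical names and plain malloc/free in place of kernel pages:

    #include <stdbool.h>
    #include <stdlib.h>

    struct send_ctxt_model {
            void *pages[4];
            int page_count;
    };

    static void release_pages_model(struct send_ctxt_model *c)
    {
            for (int i = 0; i < c->page_count; i++)
                    free(c->pages[i]);
            c->page_count = 0;
    }

    /* Send completion handler: runs only for WRs the HCA accepted. */
    static void wc_send_model(struct send_ctxt_model *c)
    {
            release_pages_model(c);
    }

    static int post_send_model(struct send_ctxt_model *c, bool qp_ok)
    {
            if (!qp_ok)
                    return -1;      /* not posted: caller keeps ownership */
            wc_send_model(c);       /* stand-in for a later completion */
            return 0;
    }

    int main(void)
    {
            struct send_ctxt_model c = { .pages = { malloc(64) },
                                         .page_count = 1 };

            if (post_send_model(&c, false) < 0)
                    release_pages_model(&c);  /* forward path cleans up */
            return 0;
    }
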
119 int node = ibdev_to_node(rdma->sc_cm_id->device); in svc_rdma_send_ctxt_alloc()
125 ctxt = kzalloc_node(struct_size(ctxt, sc_sges, rdma->sc_max_send_sges), in svc_rdma_send_ctxt_alloc()
127 if (!ctxt) in svc_rdma_send_ctxt_alloc()
129 buffer = kmalloc_node(rdma->sc_max_req_size, GFP_KERNEL, node); in svc_rdma_send_ctxt_alloc()
130 if (!buffer) in svc_rdma_send_ctxt_alloc()
132 addr = ib_dma_map_single(rdma->sc_pd->device, buffer, in svc_rdma_send_ctxt_alloc()
133 rdma->sc_max_req_size, DMA_TO_DEVICE); in svc_rdma_send_ctxt_alloc()
134 if (ib_dma_mapping_error(rdma->sc_pd->device, addr)) in svc_rdma_send_ctxt_alloc()
137 svc_rdma_send_cid_init(rdma, &ctxt->sc_cid); in svc_rdma_send_ctxt_alloc()
139 ctxt->sc_rdma = rdma; in svc_rdma_send_ctxt_alloc()
140 ctxt->sc_send_wr.next = NULL; in svc_rdma_send_ctxt_alloc()
141 ctxt->sc_send_wr.wr_cqe = &ctxt->sc_cqe; in svc_rdma_send_ctxt_alloc()
142 ctxt->sc_send_wr.sg_list = ctxt->sc_sges; in svc_rdma_send_ctxt_alloc()
143 ctxt->sc_send_wr.send_flags = IB_SEND_SIGNALED; in svc_rdma_send_ctxt_alloc()
144 ctxt->sc_cqe.done = svc_rdma_wc_send; in svc_rdma_send_ctxt_alloc()
145 ctxt->sc_xprt_buf = buffer; in svc_rdma_send_ctxt_alloc()
146 xdr_buf_init(&ctxt->sc_hdrbuf, ctxt->sc_xprt_buf, in svc_rdma_send_ctxt_alloc()
147 rdma->sc_max_req_size); in svc_rdma_send_ctxt_alloc()
148 ctxt->sc_sges[0].addr = addr; in svc_rdma_send_ctxt_alloc()
150 for (i = 0; i < rdma->sc_max_send_sges; i++) in svc_rdma_send_ctxt_alloc()
151 ctxt->sc_sges[i].lkey = rdma->sc_pd->local_dma_lkey; in svc_rdma_send_ctxt_alloc()
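
The allocation above sizes the context so the SGE array lives inline at the end of the struct. A standalone sketch of that struct_size()-style flexible-array pattern in userspace C (overflow checking omitted for brevity):

    #include <stddef.h>
    #include <stdlib.h>

    struct sge { unsigned long addr; unsigned int length; unsigned int lkey; };

    struct send_ctxt_model {
            int num_sges;
            struct sge sc_sges[];   /* flexible array member */
    };

    static struct send_ctxt_model *ctxt_alloc_model(int max_sges)
    {
            struct send_ctxt_model *c;

            /* struct_size(ctxt, sc_sges, n): header plus n inline elements */
            c = calloc(1, offsetof(struct send_ctxt_model, sc_sges) +
                          (size_t)max_sges * sizeof(struct sge));
            if (c)
                    c->num_sges = max_sges;
            return c;
    }
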
163 * svc_rdma_send_ctxts_destroy - Release all send_ctxt's for an xprt
172 while ((node = llist_del_first(&rdma->sc_send_ctxts)) != NULL) { in svc_rdma_send_ctxts_destroy()
174 ib_dma_unmap_single(rdma->sc_pd->device, in svc_rdma_send_ctxts_destroy()
175 ctxt->sc_sges[0].addr, in svc_rdma_send_ctxts_destroy()
176 rdma->sc_max_req_size, in svc_rdma_send_ctxts_destroy()
178 kfree(ctxt->sc_xprt_buf); in svc_rdma_send_ctxts_destroy()
184 * svc_rdma_send_ctxt_get - Get a free send_ctxt
187 * Returns a ready-to-use send_ctxt, or NULL if none are
195 spin_lock(&rdma->sc_send_lock); in svc_rdma_send_ctxt_get()
196 node = llist_del_first(&rdma->sc_send_ctxts); in svc_rdma_send_ctxt_get()
197 spin_unlock(&rdma->sc_send_lock); in svc_rdma_send_ctxt_get()
198 if (!node) in svc_rdma_send_ctxt_get()
204 rpcrdma_set_xdrlen(&ctxt->sc_hdrbuf, 0); in svc_rdma_send_ctxt_get()
205 xdr_init_encode(&ctxt->sc_stream, &ctxt->sc_hdrbuf, in svc_rdma_send_ctxt_get()
206 ctxt->sc_xprt_buf, NULL); in svc_rdma_send_ctxt_get()
208 ctxt->sc_send_wr.num_sge = 0; in svc_rdma_send_ctxt_get()
209 ctxt->sc_cur_sge_no = 0; in svc_rdma_send_ctxt_get()
210 ctxt->sc_page_count = 0; in svc_rdma_send_ctxt_get()
215 if (!ctxt) in svc_rdma_send_ctxt_get()
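
svc_rdma_send_ctxt_get() pops a context off a lock-protected free list (an llist in the kernel) and falls back to allocating a fresh one; svc_rdma_send_ctxts_destroy() drains the same list at teardown. A pthread-based model of the get/put pattern, assuming a simplified node type:

    #include <pthread.h>
    #include <stdlib.h>

    struct ctxt_node { struct ctxt_node *next; };

    static struct ctxt_node *free_list;
    static pthread_mutex_t free_lock = PTHREAD_MUTEX_INITIALIZER;

    static struct ctxt_node *ctxt_get_model(void)
    {
            struct ctxt_node *n;

            pthread_mutex_lock(&free_lock);
            n = free_list;
            if (n)
                    free_list = n->next;
            pthread_mutex_unlock(&free_lock);
            if (!n)
                    n = calloc(1, sizeof(*n)); /* slow path: fresh allocation */
            return n;
    }

    static void ctxt_put_model(struct ctxt_node *n)
    {
            pthread_mutex_lock(&free_lock);
            n->next = free_list;
            free_list = n;
            pthread_mutex_unlock(&free_lock);
    }
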
223 struct ib_device *device = rdma->sc_cm_id->device; in svc_rdma_send_ctxt_release()
226 if (ctxt->sc_page_count) in svc_rdma_send_ctxt_release()
227 release_pages(ctxt->sc_pages, ctxt->sc_page_count); in svc_rdma_send_ctxt_release()
232 for (i = 1; i < ctxt->sc_send_wr.num_sge; i++) { in svc_rdma_send_ctxt_release()
233 trace_svcrdma_dma_unmap_page(&ctxt->sc_cid, in svc_rdma_send_ctxt_release()
234 ctxt->sc_sges[i].addr, in svc_rdma_send_ctxt_release()
235 ctxt->sc_sges[i].length); in svc_rdma_send_ctxt_release()
237 ctxt->sc_sges[i].addr, in svc_rdma_send_ctxt_release()
238 ctxt->sc_sges[i].length, in svc_rdma_send_ctxt_release()
242 llist_add(&ctxt->sc_node, &rdma->sc_send_ctxts); in svc_rdma_send_ctxt_release()
250 svc_rdma_send_ctxt_release(ctxt->sc_rdma, ctxt); in svc_rdma_send_ctxt_put_async()
254 * svc_rdma_send_ctxt_put - Return send_ctxt to free list
263 INIT_WORK(&ctxt->sc_work, svc_rdma_send_ctxt_put_async); in svc_rdma_send_ctxt_put()
264 queue_work(svcrdma_wq, &ctxt->sc_work); in svc_rdma_send_ctxt_put()
268 * svc_rdma_wake_send_waiters - manage Send Queue accounting
275 atomic_add(avail, &rdma->sc_sq_avail); in svc_rdma_wake_send_waiters()
277 if (unlikely(waitqueue_active(&rdma->sc_send_wait))) in svc_rdma_wake_send_waiters()
278 wake_up(&rdma->sc_send_wait); in svc_rdma_wake_send_waiters()
282 * svc_rdma_wc_send - Invoked by RDMA provider for each polled Send WC
291 struct svcxprt_rdma *rdma = cq->cq_context; in svc_rdma_wc_send()
292 struct ib_cqe *cqe = wc->wr_cqe; in svc_rdma_wc_send()
298 if (unlikely(wc->status != IB_WC_SUCCESS)) in svc_rdma_wc_send()
301 trace_svcrdma_wc_send(&ctxt->sc_cid); in svc_rdma_wc_send()
306 if (wc->status != IB_WC_WR_FLUSH_ERR) in svc_rdma_wc_send()
307 trace_svcrdma_wc_send_err(wc, &ctxt->sc_cid); in svc_rdma_wc_send()
309 trace_svcrdma_wc_send_flush(wc, &ctxt->sc_cid); in svc_rdma_wc_send()
311 svc_xprt_deferred_close(&rdma->sc_xprt); in svc_rdma_wc_send()
315 * svc_rdma_send - Post a single Send WR
319 * Returns zero if the Send WR was posted successfully. Otherwise, a
324 struct ib_send_wr *wr = &ctxt->sc_send_wr; in svc_rdma_send()
330 ib_dma_sync_single_for_device(rdma->sc_pd->device, in svc_rdma_send()
331 wr->sg_list[0].addr, in svc_rdma_send()
332 wr->sg_list[0].length, in svc_rdma_send()
335 /* If the SQ is full, wait until an SQ entry is available */ in svc_rdma_send()
337 if ((atomic_dec_return(&rdma->sc_sq_avail) < 0)) { in svc_rdma_send()
339 trace_svcrdma_sq_full(rdma, &ctxt->sc_cid); in svc_rdma_send()
340 atomic_inc(&rdma->sc_sq_avail); in svc_rdma_send()
341 wait_event(rdma->sc_send_wait, in svc_rdma_send()
342 atomic_read(&rdma->sc_sq_avail) > 1); in svc_rdma_send()
343 if (test_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags)) in svc_rdma_send()
344 return -ENOTCONN; in svc_rdma_send()
345 trace_svcrdma_sq_retry(rdma, &ctxt->sc_cid); in svc_rdma_send()
350 ret = ib_post_send(rdma->sc_qp, wr, NULL); in svc_rdma_send()
351 if (ret) in svc_rdma_send()
356 trace_svcrdma_sq_post_err(rdma, &ctxt->sc_cid, ret); in svc_rdma_send()
357 svc_xprt_deferred_close(&rdma->sc_xprt); in svc_rdma_send()
358 wake_up(&rdma->sc_send_wait); in svc_rdma_send()
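
Taken together, svc_rdma_send() and svc_rdma_wake_send_waiters() implement flow control on the Send Queue: the posting side decrements a credit counter and sleeps when it underflows, and the completion side returns credits and wakes sleepers. A POSIX model of that accounting (the kernel uses atomics plus wait_event() rather than a mutex and condvar):

    #include <pthread.h>

    static int sq_avail;
    static pthread_mutex_t sq_lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t sq_wait = PTHREAD_COND_INITIALIZER;

    /* Forward path: take one SQ entry, sleeping while none are free. */
    static void sq_reserve(void)
    {
            pthread_mutex_lock(&sq_lock);
            while (sq_avail <= 0)
                    pthread_cond_wait(&sq_wait, &sq_lock);
            sq_avail--;
            pthread_mutex_unlock(&sq_lock);
    }

    /* Completion path: return credits and wake any waiters. */
    static void sq_release(int n)
    {
            pthread_mutex_lock(&sq_lock);
            sq_avail += n;
            pthread_cond_broadcast(&sq_wait);
            pthread_mutex_unlock(&sq_lock);
    }
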
363 * svc_rdma_encode_read_list - Encode RPC Reply's Read chunk list
364 * @sctxt: Send context for the RPC Reply
369 * %-EMSGSIZE on XDR buffer overflow
373 /* RPC-over-RDMA version 1 replies never have a Read list. */ in svc_rdma_encode_read_list()
374 return xdr_stream_encode_item_absent(&sctxt->sc_stream); in svc_rdma_encode_read_list()
378 * svc_rdma_encode_write_segment - Encode one Write segment
379 * @sctxt: Send context for the RPC Reply
387 * %-EMSGSIZE on XDR buffer overflow
393 const struct svc_rdma_segment *segment = &chunk->ch_segments[segno]; in svc_rdma_encode_write_segment()
398 p = xdr_reserve_space(&sctxt->sc_stream, len); in svc_rdma_encode_write_segment()
399 if (!p) in svc_rdma_encode_write_segment()
400 return -EMSGSIZE; in svc_rdma_encode_write_segment()
402 length = min_t(u32, *remaining, segment->rs_length); in svc_rdma_encode_write_segment()
403 *remaining -= length; in svc_rdma_encode_write_segment()
404 xdr_encode_rdma_segment(p, segment->rs_handle, length, in svc_rdma_encode_write_segment()
405 segment->rs_offset); in svc_rdma_encode_write_segment()
406 trace_svcrdma_encode_wseg(sctxt, segno, segment->rs_handle, length, in svc_rdma_encode_write_segment()
407 segment->rs_offset); in svc_rdma_encode_write_segment()
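
Each Write segment goes on the wire as an XDR triple: a 32-bit handle, a 32-bit length, and a 64-bit offset, all big-endian. Sketched by hand below (the kernel delegates this to xdr_encode_rdma_segment()):

    #include <arpa/inet.h>
    #include <stdint.h>

    static uint32_t *encode_rdma_segment_model(uint32_t *p, uint32_t handle,
                                               uint32_t length, uint64_t offset)
    {
            *p++ = htonl(handle);
            *p++ = htonl(length);
            *p++ = htonl((uint32_t)(offset >> 32));  /* offset, high word */
            *p++ = htonl((uint32_t)offset);          /* offset, low word */
            return p;
    }
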
412 * svc_rdma_encode_write_chunk - Encode one Write chunk
413 * @sctxt: Send context for the RPC Reply
423 * %-EMSGSIZE on XDR buffer overflow
428 u32 remaining = chunk->ch_payload_length; in svc_rdma_encode_write_chunk()
433 ret = xdr_stream_encode_item_present(&sctxt->sc_stream); in svc_rdma_encode_write_chunk()
434 if (ret < 0) in svc_rdma_encode_write_chunk()
438 ret = xdr_stream_encode_u32(&sctxt->sc_stream, chunk->ch_segcount); in svc_rdma_encode_write_chunk()
439 if (ret < 0) in svc_rdma_encode_write_chunk()
443 for (segno = 0; segno < chunk->ch_segcount; segno++) { in svc_rdma_encode_write_chunk()
445 if (ret < 0) in svc_rdma_encode_write_chunk()
454 * svc_rdma_encode_write_list - Encode RPC Reply's Write chunk list
455 * @rctxt: Reply context with information about the RPC Call
456 * @sctxt: Send context for the RPC Reply
461 * %-EMSGSIZE on XDR buffer overflow
470 pcl_for_each_chunk(chunk, &rctxt->rc_write_pcl) { in svc_rdma_encode_write_list()
472 if (ret < 0) in svc_rdma_encode_write_list()
478 ret = xdr_stream_encode_item_absent(&sctxt->sc_stream); in svc_rdma_encode_write_list()
479 if (ret < 0) in svc_rdma_encode_write_list()
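
On the wire, the Write list is a sequence of chunks, each introduced by an XDR "present" discriminator and terminated by a single "absent" word, which is also all an empty list contains. A hedged sketch of that framing, with the per-segment bodies elided:

    #include <arpa/inet.h>
    #include <stdint.h>

    struct chunk_model { uint32_t segcount; /* segments would follow */ };

    static uint32_t *encode_write_list_model(uint32_t *p,
                                             const struct chunk_model *chunks,
                                             int nchunks)
    {
            for (int i = 0; i < nchunks; i++) {
                    *p++ = htonl(1);                    /* item present */
                    *p++ = htonl(chunks[i].segcount);
                    /* ... segcount encoded segments go here ... */
            }
            *p++ = htonl(0);                            /* end of list */
            return p;
    }
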
486 * svc_rdma_encode_reply_chunk - Encode RPC Reply's Reply chunk
487 * @rctxt: Reply context with information about the RPC Call
488 * @sctxt: Send context for the RPC Reply
494 * %-EMSGSIZE on XDR buffer overflow
495 * %-E2BIG if the RPC message is larger than the Reply chunk
504 if (pcl_is_empty(&rctxt->rc_reply_pcl)) in svc_rdma_encode_reply_chunk()
505 return xdr_stream_encode_item_absent(&sctxt->sc_stream); in svc_rdma_encode_reply_chunk()
507 chunk = pcl_first_chunk(&rctxt->rc_reply_pcl); in svc_rdma_encode_reply_chunk()
508 if (length > chunk->ch_length) in svc_rdma_encode_reply_chunk()
509 return -E2BIG; in svc_rdma_encode_reply_chunk()
511 chunk->ch_payload_length = length; in svc_rdma_encode_reply_chunk()
521 * svc_rdma_page_dma_map - DMA map one page
528 * %0 if DMA mapping was successful
529 * %-EIO if the page cannot be DMA mapped
535 struct svcxprt_rdma *rdma = args->md_rdma; in svc_rdma_page_dma_map()
536 struct svc_rdma_send_ctxt *ctxt = args->md_ctxt; in svc_rdma_page_dma_map()
537 struct ib_device *dev = rdma->sc_cm_id->device; in svc_rdma_page_dma_map()
540 ++ctxt->sc_cur_sge_no; in svc_rdma_page_dma_map()
543 if (ib_dma_mapping_error(dev, dma_addr)) in svc_rdma_page_dma_map()
546 trace_svcrdma_dma_map_page(&ctxt->sc_cid, dma_addr, len); in svc_rdma_page_dma_map()
547 ctxt->sc_sges[ctxt->sc_cur_sge_no].addr = dma_addr; in svc_rdma_page_dma_map()
548 ctxt->sc_sges[ctxt->sc_cur_sge_no].length = len; in svc_rdma_page_dma_map()
549 ctxt->sc_send_wr.num_sge++; in svc_rdma_page_dma_map()
553 trace_svcrdma_dma_map_err(&ctxt->sc_cid, dma_addr, len); in svc_rdma_page_dma_map()
554 return -EIO; in svc_rdma_page_dma_map()
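
The map/check/record pattern above, modeled standalone. The dma_map_page_model() helper here is a stand-in for ib_dma_map_page(), not a real API; a real implementation would program the IOMMU and can fail:

    #include <errno.h>
    #include <stdint.h>

    #define DMA_MAPPING_ERROR_MODEL ((uint64_t)-1)

    struct sge_model { uint64_t addr; uint32_t length; };

    /* Stand-in mapping helper; always "succeeds" in this model. */
    static uint64_t dma_map_page_model(void *page, unsigned int offset,
                                       unsigned int len)
    {
            (void)len;
            return (uint64_t)(uintptr_t)page + offset;
    }

    static int page_dma_map_model(struct sge_model *sges,
                                  unsigned int *cur_sge, void *page,
                                  unsigned int offset, unsigned int len)
    {
            uint64_t addr = dma_map_page_model(page, offset, len);

            if (addr == DMA_MAPPING_ERROR_MODEL)
                    return -EIO;    /* caller must not post this WR */
            ++*cur_sge;
            sges[*cur_sge].addr = addr;
            sges[*cur_sge].length = len;
            return 0;
    }
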
558 * svc_rdma_iov_dma_map - DMA map an iovec
563 * handles DMA-unmap and it uses ib_dma_unmap_page() exclusively.
566 * %0 if DMA mapping was successful
567 * %-EIO if the iovec cannot be DMA mapped
571 if (!iov->iov_len) in svc_rdma_iov_dma_map()
573 return svc_rdma_page_dma_map(data, virt_to_page(iov->iov_base), in svc_rdma_iov_dma_map()
574 offset_in_page(iov->iov_base), in svc_rdma_iov_dma_map()
575 iov->iov_len); in svc_rdma_iov_dma_map()
579 * svc_rdma_xb_dma_map - DMA map all segments of an xdr_buf
580 * @xdr: xdr_buf containing portion of an RPC message to transmit
584 * %0 if DMA mapping was successful
585 * %-EIO if DMA mapping failed
597 ret = svc_rdma_iov_dma_map(data, &xdr->head[0]); in svc_rdma_xb_dma_map()
598 if (ret < 0) in svc_rdma_xb_dma_map()
601 ppages = xdr->pages + (xdr->page_base >> PAGE_SHIFT); in svc_rdma_xb_dma_map()
602 pageoff = offset_in_page(xdr->page_base); in svc_rdma_xb_dma_map()
603 remaining = xdr->page_len; in svc_rdma_xb_dma_map()
605 len = min_t(u32, PAGE_SIZE - pageoff, remaining); in svc_rdma_xb_dma_map()
608 if (ret < 0) in svc_rdma_xb_dma_map()
611 remaining -= len; in svc_rdma_xb_dma_map()
615 ret = svc_rdma_iov_dma_map(data, &xdr->tail[0]); in svc_rdma_xb_dma_map()
616 if (ret < 0) in svc_rdma_xb_dma_map()
619 return xdr->len; in svc_rdma_xb_dma_map()
629 * svc_rdma_xb_count_sges - Count how many SGEs will be needed
630 * @xdr: xdr_buf containing portion of an RPC message to transmit
643 if (xdr->head[0].iov_len) in svc_rdma_xb_count_sges()
644 ++args->pd_num_sges; in svc_rdma_xb_count_sges()
646 offset = offset_in_page(xdr->page_base); in svc_rdma_xb_count_sges()
647 remaining = xdr->page_len; in svc_rdma_xb_count_sges()
649 ++args->pd_num_sges; in svc_rdma_xb_count_sges()
650 remaining -= min_t(u32, PAGE_SIZE - offset, remaining); in svc_rdma_xb_count_sges()
654 if (xdr->tail[0].iov_len) in svc_rdma_xb_count_sges()
655 ++args->pd_num_sges; in svc_rdma_xb_count_sges()
657 args->pd_length += xdr->len; in svc_rdma_xb_count_sges()
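
Both svc_rdma_xb_dma_map() and svc_rdma_xb_count_sges() walk the same three-part xdr_buf shape: one head iovec, a run of pages starting at page_base, and one tail iovec. A userspace model of that walk, counting how many SGEs a simplified xdr_buf would need:

    #include <stddef.h>

    #define PAGE_SIZE_MODEL 4096u

    struct xdr_buf_model {
            size_t head_len;
            unsigned int page_base; /* offset into the first page */
            unsigned int page_len;  /* bytes carried in the page list */
            size_t tail_len;
    };

    static unsigned int count_sges_model(const struct xdr_buf_model *xdr)
    {
            unsigned int n = 0, offset, remaining;

            if (xdr->head_len)
                    n++;
            offset = xdr->page_base % PAGE_SIZE_MODEL;
            remaining = xdr->page_len;
            while (remaining) {             /* one SGE per touched page */
                    unsigned int len = PAGE_SIZE_MODEL - offset;

                    if (len > remaining)
                            len = remaining;
                    remaining -= len;
                    offset = 0;
                    n++;
            }
            if (xdr->tail_len)
                    n++;
            return n;
    }
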
662 * svc_rdma_pull_up_needed - Determine whether to use pull-up
666 * @xdr: xdr_buf containing RPC message to transmit
669 * %true if pull-up must be used
679 .pd_length = sctxt->sc_hdrbuf.len, in svc_rdma_pull_up_needed()
686 if (ret < 0) in svc_rdma_pull_up_needed()
689 if (args.pd_length < RPCRDMA_PULLUP_THRESH) in svc_rdma_pull_up_needed()
691 return args.pd_num_sges >= rdma->sc_max_send_sges; in svc_rdma_pull_up_needed()
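
The pull-up decision, sketched standalone: small replies, or replies that would need more SGEs than the device supports, are copied into the already-mapped header buffer instead of being DMA-mapped SGE by SGE. The threshold value below is illustrative; the kernel uses RPCRDMA_PULLUP_THRESH:

    #include <stdbool.h>

    #define PULLUP_THRESH_MODEL 256u  /* illustrative stand-in */

    static bool pull_up_needed_model(unsigned int total_len,
                                     unsigned int num_sges,
                                     unsigned int max_send_sges)
    {
            if (total_len < PULLUP_THRESH_MODEL)
                    return true;    /* small reply: one copy beats many maps */
            return num_sges >= max_send_sges;
    }
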
695 * svc_rdma_xb_linearize - Copy region of xdr_buf to flat buffer
696 * @xdr: xdr_buf containing portion of an RPC message to copy
710 if (xdr->head[0].iov_len) { in svc_rdma_xb_linearize()
711 memcpy(args->pd_dest, xdr->head[0].iov_base, xdr->head[0].iov_len); in svc_rdma_xb_linearize()
712 args->pd_dest += xdr->head[0].iov_len; in svc_rdma_xb_linearize()
715 ppages = xdr->pages + (xdr->page_base >> PAGE_SHIFT); in svc_rdma_xb_linearize()
716 pageoff = offset_in_page(xdr->page_base); in svc_rdma_xb_linearize()
717 remaining = xdr->page_len; in svc_rdma_xb_linearize()
719 len = min_t(u32, PAGE_SIZE - pageoff, remaining); in svc_rdma_xb_linearize()
720 memcpy(args->pd_dest, page_address(*ppages) + pageoff, len); in svc_rdma_xb_linearize()
721 remaining -= len; in svc_rdma_xb_linearize()
722 args->pd_dest += len; in svc_rdma_xb_linearize()
727 if (xdr->tail[0].iov_len) { in svc_rdma_xb_linearize()
728 memcpy(args->pd_dest, xdr->tail[0].iov_base, xdr->tail[0].iov_len); in svc_rdma_xb_linearize()
729 args->pd_dest += xdr->tail[0].iov_len; in svc_rdma_xb_linearize()
732 args->pd_length += xdr->len; in svc_rdma_xb_linearize()
737 * svc_rdma_pull_up_reply_msg - Copy Reply into a single buffer
741 * @xdr: prepared xdr_buf containing RPC message
750 * %0 if pull-up was successful
751 * %-EMSGSIZE if a buffer manipulation problem occurred
759 .pd_dest = sctxt->sc_xprt_buf + sctxt->sc_hdrbuf.len, in svc_rdma_pull_up_reply_msg()
765 if (ret < 0) in svc_rdma_pull_up_reply_msg()
768 sctxt->sc_sges[0].length = sctxt->sc_hdrbuf.len + args.pd_length; in svc_rdma_pull_up_reply_msg()
773 /* svc_rdma_map_reply_msg - DMA map the buffer holding RPC message
778 * @xdr: prepared xdr_buf containing RPC message
781 * %0 if DMA mapping was successful.
782 * %-EMSGSIZE if a buffer manipulation problem occurred
783 * %-EIO if DMA mapping failed
798 /* Set up the (persistently-mapped) transport header SGE. */ in svc_rdma_map_reply_msg()
799 sctxt->sc_send_wr.num_sge = 1; in svc_rdma_map_reply_msg()
800 sctxt->sc_sges[0].length = sctxt->sc_hdrbuf.len; in svc_rdma_map_reply_msg()
802 /* If there is a Reply chunk, nothing follows the transport in svc_rdma_map_reply_msg()
805 if (!pcl_is_empty(reply_pcl)) in svc_rdma_map_reply_msg()
808 /* For pull-up, svc_rdma_send() will sync the transport header. in svc_rdma_map_reply_msg()
811 if (svc_rdma_pull_up_needed(rdma, sctxt, write_pcl, xdr)) in svc_rdma_map_reply_msg()
825 int i, pages = rqstp->rq_next_page - rqstp->rq_respages; in svc_rdma_save_io_pages()
827 ctxt->sc_page_count += pages; in svc_rdma_save_io_pages()
829 ctxt->sc_pages[i] = rqstp->rq_respages[i]; in svc_rdma_save_io_pages()
830 rqstp->rq_respages[i] = NULL; in svc_rdma_save_io_pages()
834 rqstp->rq_next_page = rqstp->rq_respages; in svc_rdma_save_io_pages()
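
svc_rdma_save_io_pages() transfers page ownership from the svc_rqst to the send context so the pages survive until Send completion. The move itself is just a pointer handoff, as in this model:

    static void save_io_pages_model(void **respages, int npages,
                                    void **ctxt_pages, int *ctxt_count)
    {
            for (int i = 0; i < npages; i++) {
                    ctxt_pages[(*ctxt_count)++] = respages[i];
                    respages[i] = NULL;     /* the rqstp no longer owns it */
            }
    }
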
837 /* Prepare the portion of the RPC Reply that will be transmitted
838 * via RDMA Send. The RPC-over-RDMA transport header is prepared
839 * in sc_sges[0], and the RPC xdr_buf is prepared in following sges.
846 * RDMA Send is the last step of transmitting an RPC reply. Pages
853 * - The Reply's transport header will never be larger than a page.
862 ret = svc_rdma_map_reply_msg(rdma, sctxt, &rctxt->rc_write_pcl, in svc_rdma_send_reply_msg()
863 &rctxt->rc_reply_pcl, &rqstp->rq_res); in svc_rdma_send_reply_msg()
864 if (ret < 0) in svc_rdma_send_reply_msg()
869 if (rctxt->rc_inv_rkey) { in svc_rdma_send_reply_msg()
870 sctxt->sc_send_wr.opcode = IB_WR_SEND_WITH_INV; in svc_rdma_send_reply_msg()
871 sctxt->sc_send_wr.ex.invalidate_rkey = rctxt->rc_inv_rkey; in svc_rdma_send_reply_msg()
873 sctxt->sc_send_wr.opcode = IB_WR_SEND; in svc_rdma_send_reply_msg()
880 * svc_rdma_send_error_msg - Send an RPC/RDMA v1 error response
886 * Given the client-provided Read, Write, and Reply chunks, the
888 * Return an RDMA_ERROR message so the client can retire the RPC
899 __be32 *rdma_argp = rctxt->rc_recv_buf; in svc_rdma_send_error_msg()
902 rpcrdma_set_xdrlen(&sctxt->sc_hdrbuf, 0); in svc_rdma_send_error_msg()
903 xdr_init_encode(&sctxt->sc_stream, &sctxt->sc_hdrbuf, in svc_rdma_send_error_msg()
904 sctxt->sc_xprt_buf, NULL); in svc_rdma_send_error_msg()
906 p = xdr_reserve_space(&sctxt->sc_stream, in svc_rdma_send_error_msg()
908 if (!p) in svc_rdma_send_error_msg()
913 *p++ = rdma->sc_fc_credits; in svc_rdma_send_error_msg()
917 case -EPROTONOSUPPORT: in svc_rdma_send_error_msg()
918 p = xdr_reserve_space(&sctxt->sc_stream, 3 * sizeof(*p)); in svc_rdma_send_error_msg()
919 if (!p) in svc_rdma_send_error_msg()
928 p = xdr_reserve_space(&sctxt->sc_stream, sizeof(*p)); in svc_rdma_send_error_msg()
929 if (!p) in svc_rdma_send_error_msg()
937 sctxt->sc_send_wr.num_sge = 1; in svc_rdma_send_error_msg()
938 sctxt->sc_send_wr.opcode = IB_WR_SEND; in svc_rdma_send_error_msg()
939 sctxt->sc_sges[0].length = sctxt->sc_hdrbuf.len; in svc_rdma_send_error_msg()
940 if (svc_rdma_send(rdma, sctxt)) in svc_rdma_send_error_msg()
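
A hedged sketch of the RDMA_ERROR message built above: the common four-word transport header, then either ERR_VERS with the supported version range (the -EPROTONOSUPPORT case) or ERR_CHUNK. Constants follow RFC 8166; the XID is copied from the client's Call and is already big-endian:

    #include <arpa/inet.h>
    #include <stdint.h>

    #define RDMA_ERROR_PROC 4u      /* rdma_error */
    #define ERR_VERS        1u
    #define ERR_CHUNK       2u

    static uint32_t *encode_error_model(uint32_t *p, uint32_t be_xid,
                                        uint32_t credits, int err)
    {
            *p++ = be_xid;                  /* copied verbatim from the Call */
            *p++ = htonl(1);                /* rdma_vers */
            *p++ = htonl(credits);
            *p++ = htonl(RDMA_ERROR_PROC);
            if (err == ERR_VERS) {
                    *p++ = htonl(ERR_VERS);
                    *p++ = htonl(1);        /* lowest supported version */
                    *p++ = htonl(1);        /* highest supported version */
            } else {
                    *p++ = htonl(ERR_CHUNK);
            }
            return p;
    }
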
949 * svc_rdma_sendto - Transmit an RPC reply
950 * @rqstp: processed RPC request, reply XDR already in ::rq_res
953 * If no reply message was possible, the connection is closed.
956 * %0 if an RPC reply has been successfully posted,
957 * %-ENOMEM if a resource shortage occurred (connection is lost),
958 * %-ENOTCONN if posting failed (connection is lost).
962 struct svc_xprt *xprt = rqstp->rq_xprt; in svc_rdma_sendto()
965 struct svc_rdma_recv_ctxt *rctxt = rqstp->rq_xprt_ctxt; in svc_rdma_sendto()
966 __be32 *rdma_argp = rctxt->rc_recv_buf; in svc_rdma_sendto()
972 ret = -ENOTCONN; in svc_rdma_sendto()
973 if (svc_xprt_is_dead(xprt)) in svc_rdma_sendto()
976 ret = -ENOMEM; in svc_rdma_sendto()
978 if (!sctxt) in svc_rdma_sendto()
981 ret = -EMSGSIZE; in svc_rdma_sendto()
982 p = xdr_reserve_space(&sctxt->sc_stream, in svc_rdma_sendto()
984 if (!p) in svc_rdma_sendto()
987 ret = svc_rdma_send_reply_chunk(rdma, rctxt, &rqstp->rq_res); in svc_rdma_sendto()
988 if (ret < 0) in svc_rdma_sendto()
994 *p++ = rdma->sc_fc_credits; in svc_rdma_sendto()
995 *p = pcl_is_empty(&rctxt->rc_reply_pcl) ? rdma_msg : rdma_nomsg; in svc_rdma_sendto()
998 if (ret < 0) in svc_rdma_sendto()
1001 if (ret < 0) in svc_rdma_sendto()
1004 if (ret < 0) in svc_rdma_sendto()
1008 if (ret < 0) in svc_rdma_sendto()
1013 if (ret != -E2BIG && ret != -EINVAL) in svc_rdma_sendto()
1027 svc_xprt_deferred_close(&rdma->sc_xprt); in svc_rdma_sendto()
1028 return -ENOTCONN; in svc_rdma_sendto()
1032 * svc_rdma_result_payload - special processing for a result payload
1038 * %0 if successful or nothing needed to be done
1039 * %-EMSGSIZE on XDR buffer overflow
1040 * %-E2BIG if the payload was larger than the Write chunk
1041 * %-EINVAL if client provided too many segments
1042 * %-ENOMEM if rdma_rw context pool was exhausted
1043 * %-ENOTCONN if posting failed (connection is lost)
1044 * %-EIO if rdma_rw initialization failed (DMA mapping, etc)
1049 struct svc_rdma_recv_ctxt *rctxt = rqstp->rq_xprt_ctxt; in svc_rdma_result_payload()
1055 chunk = rctxt->rc_cur_result_payload; in svc_rdma_result_payload()
1056 if (!length || !chunk) in svc_rdma_result_payload()
1058 rctxt->rc_cur_result_payload = in svc_rdma_result_payload()
1059 pcl_next_chunk(&rctxt->rc_write_pcl, chunk); in svc_rdma_result_payload()
1060 if (length > chunk->ch_length) in svc_rdma_result_payload()
1061 return -E2BIG; in svc_rdma_result_payload()
1063 chunk->ch_position = offset; in svc_rdma_result_payload()
1064 chunk->ch_payload_length = length; in svc_rdma_result_payload()
1066 if (xdr_buf_subsegment(&rqstp->rq_res, &subbuf, offset, length)) in svc_rdma_result_payload()
1067 return -EMSGSIZE; in svc_rdma_result_payload()
1069 rdma = container_of(rqstp->rq_xprt, struct svcxprt_rdma, sc_xprt); in svc_rdma_result_payload()
1071 if (ret < 0) in svc_rdma_result_payload()
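
svc_rdma_result_payload() records where a result payload sits in the reply so the send path can push exactly that byte range to the client via RDMA Write instead of inline Send. A model of that bookkeeping, with the xdr_buf_subsegment() and RDMA Write steps elided:

    #include <errno.h>
    #include <stddef.h>

    struct chunk_state_model {
            unsigned int position;          /* payload offset in the reply */
            unsigned int payload_length;
            unsigned int ch_length;         /* client-provided chunk capacity */
    };

    static int result_payload_model(struct chunk_state_model *chunk,
                                    unsigned int offset, unsigned int length)
    {
            if (!length || !chunk)
                    return 0;               /* nothing to do */
            if (length > chunk->ch_length)
                    return -E2BIG;          /* payload exceeds the chunk */
            chunk->position = offset;
            chunk->payload_length = length;
            return 0;
    }
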