Lines matching "supports", "-", and "cqe"
1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
3 * Copyright (c) 2016-2018 Oracle. All rights reserved.
5 * Copyright (c) 2005-2006 Network Appliance, Inc. All rights reserved.
10 * COPYING in the main directory of this source tree, or the BSD-type
123 cid->ci_queue_id = rdma->sc_rq_cq->res.id; in svc_rdma_recv_cid_init()
124 cid->ci_completion_id = atomic_inc_return(&rdma->sc_completion_ids); in svc_rdma_recv_cid_init()
137 buffer = kmalloc(rdma->sc_max_req_size, GFP_KERNEL); in svc_rdma_recv_ctxt_alloc()
140 addr = ib_dma_map_single(rdma->sc_pd->device, buffer, in svc_rdma_recv_ctxt_alloc()
141 rdma->sc_max_req_size, DMA_FROM_DEVICE); in svc_rdma_recv_ctxt_alloc()
142 if (ib_dma_mapping_error(rdma->sc_pd->device, addr)) in svc_rdma_recv_ctxt_alloc()
145 svc_rdma_recv_cid_init(rdma, &ctxt->rc_cid); in svc_rdma_recv_ctxt_alloc()
147 ctxt->rc_recv_wr.next = NULL; in svc_rdma_recv_ctxt_alloc()
148 ctxt->rc_recv_wr.wr_cqe = &ctxt->rc_cqe; in svc_rdma_recv_ctxt_alloc()
149 ctxt->rc_recv_wr.sg_list = &ctxt->rc_recv_sge; in svc_rdma_recv_ctxt_alloc()
150 ctxt->rc_recv_wr.num_sge = 1; in svc_rdma_recv_ctxt_alloc()
151 ctxt->rc_cqe.done = svc_rdma_wc_receive; in svc_rdma_recv_ctxt_alloc()
152 ctxt->rc_recv_sge.addr = addr; in svc_rdma_recv_ctxt_alloc()
153 ctxt->rc_recv_sge.length = rdma->sc_max_req_size; in svc_rdma_recv_ctxt_alloc()
154 ctxt->rc_recv_sge.lkey = rdma->sc_pd->local_dma_lkey; in svc_rdma_recv_ctxt_alloc()
155 ctxt->rc_recv_buf = buffer; in svc_rdma_recv_ctxt_alloc()
156 ctxt->rc_temp = false; in svc_rdma_recv_ctxt_alloc()
170 ib_dma_unmap_single(rdma->sc_pd->device, ctxt->rc_recv_sge.addr, in svc_rdma_recv_ctxt_destroy()
171 ctxt->rc_recv_sge.length, DMA_FROM_DEVICE); in svc_rdma_recv_ctxt_destroy()
172 kfree(ctxt->rc_recv_buf); in svc_rdma_recv_ctxt_destroy()
177 * svc_rdma_recv_ctxts_destroy - Release all recv_ctxt's for an xprt
186 while ((node = llist_del_first(&rdma->sc_recv_ctxts))) { in svc_rdma_recv_ctxts_destroy()
198 node = llist_del_first(&rdma->sc_recv_ctxts); in svc_rdma_recv_ctxt_get()
204 ctxt->rc_page_count = 0; in svc_rdma_recv_ctxt_get()
205 ctxt->rc_read_payload_length = 0; in svc_rdma_recv_ctxt_get()
216 * svc_rdma_recv_ctxt_put - Return recv_ctxt to free list
226 for (i = 0; i < ctxt->rc_page_count; i++) in svc_rdma_recv_ctxt_put()
227 put_page(ctxt->rc_pages[i]); in svc_rdma_recv_ctxt_put()
229 if (!ctxt->rc_temp) in svc_rdma_recv_ctxt_put()
230 llist_add(&ctxt->rc_node, &rdma->sc_recv_ctxts); in svc_rdma_recv_ctxt_put()
236 * svc_rdma_release_rqst - Release transport-specific per-rqst resources
245 struct svc_rdma_recv_ctxt *ctxt = rqstp->rq_xprt_ctxt; in svc_rdma_release_rqst()
246 struct svc_xprt *xprt = rqstp->rq_xprt; in svc_rdma_release_rqst()
250 rqstp->rq_xprt_ctxt = NULL; in svc_rdma_release_rqst()
261 ret = ib_post_recv(rdma->sc_qp, &ctxt->rc_recv_wr, NULL); in __svc_rdma_post_recv()
276 if (test_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags)) in svc_rdma_post_recv()
280 return -ENOMEM; in svc_rdma_post_recv()
285 * svc_rdma_post_recvs - Post initial set of Recv WRs
296 for (i = 0; i < rdma->sc_max_requests; i++) { in svc_rdma_post_recvs()
300 ctxt->rc_temp = true; in svc_rdma_post_recvs()
309 * svc_rdma_wc_receive - Invoked by RDMA provider for each polled Receive WC
318 struct svcxprt_rdma *rdma = cq->cq_context; in svc_rdma_wc_receive()
319 struct ib_cqe *cqe = wc->wr_cqe; in svc_rdma_wc_receive()
322 /* WARNING: Only wc->wr_cqe and wc->status are reliable */ in svc_rdma_wc_receive()
323 ctxt = container_of(cqe, struct svc_rdma_recv_ctxt, rc_cqe); in svc_rdma_wc_receive()
325 trace_svcrdma_wc_receive(wc, &ctxt->rc_cid); in svc_rdma_wc_receive()
326 if (wc->status != IB_WC_SUCCESS) in svc_rdma_wc_receive()
333 ctxt->rc_byte_len = wc->byte_len; in svc_rdma_wc_receive()
334 ib_dma_sync_single_for_cpu(rdma->sc_pd->device, in svc_rdma_wc_receive()
335 ctxt->rc_recv_sge.addr, in svc_rdma_wc_receive()
336 wc->byte_len, DMA_FROM_DEVICE); in svc_rdma_wc_receive()
338 spin_lock(&rdma->sc_rq_dto_lock); in svc_rdma_wc_receive()
339 list_add_tail(&ctxt->rc_list, &rdma->sc_rq_dto_q); in svc_rdma_wc_receive()
341 set_bit(XPT_DATA, &rdma->sc_xprt.xpt_flags); in svc_rdma_wc_receive()
342 spin_unlock(&rdma->sc_rq_dto_lock); in svc_rdma_wc_receive()
343 if (!test_bit(RDMAXPRT_CONN_PENDING, &rdma->sc_flags)) in svc_rdma_wc_receive()
344 svc_xprt_enqueue(&rdma->sc_xprt); in svc_rdma_wc_receive()
350 set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags); in svc_rdma_wc_receive()
351 svc_xprt_enqueue(&rdma->sc_xprt); in svc_rdma_wc_receive()
355 * svc_rdma_flush_recv_queues - Drain pending Receive work
363 while ((ctxt = svc_rdma_next_recv_ctxt(&rdma->sc_read_complete_q))) { in svc_rdma_flush_recv_queues()
364 list_del(&ctxt->rc_list); in svc_rdma_flush_recv_queues()
367 while ((ctxt = svc_rdma_next_recv_ctxt(&rdma->sc_rq_dto_q))) { in svc_rdma_flush_recv_queues()
368 list_del(&ctxt->rc_list); in svc_rdma_flush_recv_queues()
376 struct xdr_buf *arg = &rqstp->rq_arg; in svc_rdma_build_arg_xdr()
378 arg->head[0].iov_base = ctxt->rc_recv_buf; in svc_rdma_build_arg_xdr()
379 arg->head[0].iov_len = ctxt->rc_byte_len; in svc_rdma_build_arg_xdr()
380 arg->tail[0].iov_base = NULL; in svc_rdma_build_arg_xdr()
381 arg->tail[0].iov_len = 0; in svc_rdma_build_arg_xdr()
382 arg->page_len = 0; in svc_rdma_build_arg_xdr()
383 arg->page_base = 0; in svc_rdma_build_arg_xdr()
384 arg->buflen = ctxt->rc_byte_len; in svc_rdma_build_arg_xdr()
385 arg->len = ctxt->rc_byte_len; in svc_rdma_build_arg_xdr()
392 /* This accommodates the largest possible Position-Zero
400 * - This implementation supports only one Read chunk.
403 * - Read list does not overflow Receive buffer.
404 * - Segment size limited by largest NFS data payload.
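For reference, each Read list entry on the wire (RFC 8166) is an "item present" word followed by the Position field and one RDMA segment: handle, length, and a 64-bit offset. The standalone decoder below illustrates that layout under those assumptions; the struct and function names are invented for this sketch and are not part of the kernel source.

#include <stdint.h>
#include <stddef.h>
#include <string.h>
#include <arpa/inet.h>

/* Illustrative layout of one decoded Read list entry (RFC 8166).
 * All names here are invented for this sketch. */
struct example_read_seg {
	uint32_t position;	/* byte offset of the chunk in the RPC message */
	uint32_t handle;	/* R_key the responder uses for RDMA Read */
	uint32_t length;	/* segment length in bytes */
	uint64_t offset;	/* remote virtual address of the segment */
};

/* Returns the bytes consumed by one entry, or 0 when the leading word is
 * zero (the terminator that ends the Read list). */
static size_t example_decode_read_entry(const uint8_t *p,
					struct example_read_seg *seg)
{
	uint32_t present, w[3], hi, lo;

	memcpy(&present, p, 4);
	if (!ntohl(present))
		return 0;

	memcpy(w, p + 4, sizeof(w));	/* position, handle, length */
	seg->position = ntohl(w[0]);
	seg->handle   = ntohl(w[1]);
	seg->length   = ntohl(w[2]);

	memcpy(&hi, p + 16, 4);
	memcpy(&lo, p + 20, 4);
	seg->offset = ((uint64_t)ntohl(hi) << 32) | ntohl(lo);

	return 24;	/* present word plus five words of segment data */
}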
423 p = xdr_inline_decode(&rctxt->rc_stream, sizeof(*p)); in xdr_check_read_list()
430 p = xdr_inline_decode(&rctxt->rc_stream, in xdr_check_read_list()
444 p = xdr_inline_decode(&rctxt->rc_stream, sizeof(*p)); in xdr_check_read_list()
461 p = xdr_inline_decode(&rctxt->rc_stream, sizeof(*p)); in xdr_check_write_chunk()
471 p = xdr_inline_decode(&rctxt->rc_stream, in xdr_check_write_chunk()
487 * - This implementation currently supports only one Write chunk.
490 * - Write list does not overflow Receive buffer.
491 * - Chunk size limited by largest NFS data payload.
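A Write chunk differs from a Read segment: it is a counted array of segments, so each Write list entry carries an "item present" word, a segment count, and then that many (handle, length, 64-bit offset) triples, with a zero word ending the list. The helper below just computes the wire size of one such entry; it is an illustration under those assumptions, not kernel code.

#include <stddef.h>
#include <stdint.h>

/* XDR sizes in bytes for RPC-over-RDMA v1 (RFC 8166); names invented. */
#define EXAMPLE_XDR_WORD	4u
#define EXAMPLE_SEGMENT_BYTES	(2 * EXAMPLE_XDR_WORD + 8)	/* handle, length, offset */

/* Wire size of one Write list entry carrying nsegs segments. */
static inline size_t example_write_entry_bytes(uint32_t nsegs)
{
	return EXAMPLE_XDR_WORD			/* item present */
	     + EXAMPLE_XDR_WORD			/* segment count */
	     + (size_t)nsegs * EXAMPLE_SEGMENT_BYTES;
}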
504 p = xdr_inline_decode(&rctxt->rc_stream, sizeof(*p)); in xdr_check_write_list()
507 rctxt->rc_write_list = p; in xdr_check_write_list()
512 p = xdr_inline_decode(&rctxt->rc_stream, sizeof(*p)); in xdr_check_write_list()
517 rctxt->rc_write_list = NULL; in xdr_check_write_list()
524 * - Reply chunk does not overflow Receive buffer.
525 * - Chunk size limited by largest NFS data payload.
537 p = xdr_inline_decode(&rctxt->rc_stream, sizeof(*p)); in xdr_check_reply_chunk()
540 rctxt->rc_reply_chunk = NULL; in xdr_check_reply_chunk()
544 rctxt->rc_reply_chunk = p; in xdr_check_reply_chunk()
549 /* RPC-over-RDMA Version One private extension: Remote Invalidation.
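Remote Invalidation lets the server post its reply with Send With Invalidate so the client does not have to invalidate the advertised R_key locally. The visible code only sets rc_inv_rkey when the client negotiated the extension (sc_snd_w_inv) and, judging by the surrounding fragments, when a single R_key covers every chunk in the Call; the elided lines presumably walk the chunk lists to confirm that. The standalone check below sketches that "one key fits all" condition; it is an assumption-labeled illustration, not the kernel's implementation.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Illustrative only: given the R_keys advertised in a Call's chunk lists,
 * report whether a single key covers them all and so can be invalidated
 * remotely by the reply.  Names are invented for this sketch. */
static bool example_single_inv_rkey(const uint32_t *rkeys, size_t nr,
				    uint32_t *rkey)
{
	size_t i;

	if (nr == 0)
		return false;		/* nothing to invalidate remotely */
	for (i = 1; i < nr; i++)
		if (rkeys[i] != rkeys[0])
			return false;	/* mixed keys: client must invalidate */
	*rkey = rkeys[0];
	return true;
}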
565 ctxt->rc_inv_rkey = 0; in svc_rdma_get_inv_rkey()
567 if (!rdma->sc_snd_w_inv) in svc_rdma_get_inv_rkey()
571 p = ctxt->rc_recv_buf; in svc_rdma_get_inv_rkey()
608 ctxt->rc_inv_rkey = be32_to_cpu(inv_rkey); in svc_rdma_get_inv_rkey()
612 * svc_rdma_xdr_decode_req - Decode the transport header
616 * On entry, xdr->head[0].iov_base points to first byte of the
617 * RPC-over-RDMA transport header.
620 * RPC-over-RDMA header. For RDMA_MSG, this is the RPC message.
622 * The length of the RPC-over-RDMA header is returned.
625 * - The transport header is entirely contained in the head iovec.
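Every RPC-over-RDMA version 1 transport header begins with four fixed XDR words: the XID, the protocol version (1), a credit value, and the procedure (RDMA_MSG, RDMA_NOMSG, RDMA_ERROR, ...), followed by the Read list, Write list, and Reply chunk checked by the helpers above. The snippet below is a self-contained sketch of pulling those fixed fields out of a receive buffer; the field meanings come from RFC 8166, but the struct and function are invented for illustration.

#include <stdint.h>
#include <stddef.h>
#include <string.h>
#include <arpa/inet.h>

/* Fixed portion of the RPC-over-RDMA v1 transport header (RFC 8166). */
struct example_rdma_header {
	uint32_t xid;		/* matches the XID of the RPC Call */
	uint32_t vers;		/* always 1 for this protocol version */
	uint32_t credits;	/* requested or granted credit value */
	uint32_t proc;		/* RDMA_MSG, RDMA_NOMSG, RDMA_ERROR, ... */
};

/* Decode the four fixed words; the chunk lists start at buf + 16. */
static int example_decode_fixed_header(const uint8_t *buf, size_t len,
					struct example_rdma_header *hdr)
{
	uint32_t w[4];

	if (len < sizeof(w))
		return -1;	/* too short to be a transport header */
	memcpy(w, buf, sizeof(w));
	hdr->xid     = ntohl(w[0]);
	hdr->vers    = ntohl(w[1]);
	hdr->credits = ntohl(w[2]);
	hdr->proc    = ntohl(w[3]);
	return hdr->vers == 1 ? 0 : -1;
}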
633 rdma_argp = rq_arg->head[0].iov_base; in svc_rdma_xdr_decode_req()
634 xdr_init_decode(&rctxt->rc_stream, rq_arg, rdma_argp, NULL); in svc_rdma_xdr_decode_req()
636 p = xdr_inline_decode(&rctxt->rc_stream, in svc_rdma_xdr_decode_req()
664 rq_arg->head[0].iov_base = rctxt->rc_stream.p; in svc_rdma_xdr_decode_req()
665 hdr_len = xdr_stream_pos(&rctxt->rc_stream); in svc_rdma_xdr_decode_req()
666 rq_arg->head[0].iov_len -= hdr_len; in svc_rdma_xdr_decode_req()
667 rq_arg->len -= hdr_len; in svc_rdma_xdr_decode_req()
672 trace_svcrdma_decode_short_err(rctxt, rq_arg->len); in svc_rdma_xdr_decode_req()
673 return -EINVAL; in svc_rdma_xdr_decode_req()
677 return -EPROTONOSUPPORT; in svc_rdma_xdr_decode_req()
685 return -EINVAL; in svc_rdma_xdr_decode_req()
689 return -EINVAL; in svc_rdma_xdr_decode_req()
700 for (page_no = 0; page_no < head->rc_page_count; page_no++) { in rdma_read_complete()
701 put_page(rqstp->rq_pages[page_no]); in rdma_read_complete()
702 rqstp->rq_pages[page_no] = head->rc_pages[page_no]; in rdma_read_complete()
704 head->rc_page_count = 0; in rdma_read_complete()
707 rqstp->rq_arg.pages = &rqstp->rq_pages[head->rc_hdr_count]; in rdma_read_complete()
708 rqstp->rq_arg.page_len = head->rc_arg.page_len; in rdma_read_complete()
711 rqstp->rq_respages = &rqstp->rq_pages[page_no]; in rdma_read_complete()
712 rqstp->rq_next_page = rqstp->rq_respages + 1; in rdma_read_complete()
715 rqstp->rq_arg.head[0] = head->rc_arg.head[0]; in rdma_read_complete()
716 rqstp->rq_arg.tail[0] = head->rc_arg.tail[0]; in rdma_read_complete()
717 rqstp->rq_arg.len = head->rc_arg.len; in rdma_read_complete()
718 rqstp->rq_arg.buflen = head->rc_arg.buflen; in rdma_read_complete()
743 if (!xprt->xpt_bc_xprt) in svc_rdma_is_backchannel_reply()
768 * svc_rdma_recvfrom - Receive an RPC call
774 * %-EINVAL if the Read chunk data is too large,
775 * %-ENOMEM if rdma_rw context pool was exhausted,
776 * %-ENOTCONN if posting failed (connection is lost),
777 * %-EIO if rdma_rw initialization failed (DMA mapping, etc).
784 * - If the ctxt completes a Read, then finish assembling the Call
787 * - If the ctxt completes a Receive, then construct the Call
790 * - If there are no Read chunks in this message, then finish
794 * - If there are Read chunks in this message, post Read WRs to
799 struct svc_xprt *xprt = rqstp->rq_xprt; in svc_rdma_recvfrom()
806 rqstp->rq_xprt_ctxt = NULL; in svc_rdma_recvfrom()
808 spin_lock(&rdma_xprt->sc_rq_dto_lock); in svc_rdma_recvfrom()
809 ctxt = svc_rdma_next_recv_ctxt(&rdma_xprt->sc_read_complete_q); in svc_rdma_recvfrom()
811 list_del(&ctxt->rc_list); in svc_rdma_recvfrom()
812 spin_unlock(&rdma_xprt->sc_rq_dto_lock); in svc_rdma_recvfrom()
816 ctxt = svc_rdma_next_recv_ctxt(&rdma_xprt->sc_rq_dto_q); in svc_rdma_recvfrom()
819 clear_bit(XPT_DATA, &xprt->xpt_flags); in svc_rdma_recvfrom()
820 spin_unlock(&rdma_xprt->sc_rq_dto_lock); in svc_rdma_recvfrom()
823 list_del(&ctxt->rc_list); in svc_rdma_recvfrom()
824 spin_unlock(&rdma_xprt->sc_rq_dto_lock); in svc_rdma_recvfrom()
833 rqstp->rq_respages = rqstp->rq_pages; in svc_rdma_recvfrom()
834 rqstp->rq_next_page = rqstp->rq_respages; in svc_rdma_recvfrom()
836 p = (__be32 *)rqstp->rq_arg.head[0].iov_base; in svc_rdma_recvfrom()
837 ret = svc_rdma_xdr_decode_req(&rqstp->rq_arg, ctxt); in svc_rdma_recvfrom()
842 rqstp->rq_xprt_hlen = ret; in svc_rdma_recvfrom()
854 rqstp->rq_xprt_ctxt = ctxt; in svc_rdma_recvfrom()
855 rqstp->rq_prot = IPPROTO_MAX; in svc_rdma_recvfrom()
857 return rqstp->rq_arg.len; in svc_rdma_recvfrom()
871 if (ret == -EINVAL) in svc_rdma_recvfrom()