Lines matching the identifier "rdma" in net/sunrpc/xprtrdma/svc_rdma_rw.c (server-side RPC-over-RDMA Read/Write chunk handling)
5 * Use the core R/W API to move RPC-over-RDMA Read and Write chunks.
8 #include <rdma/rw.h>
20 /* Each R/W context contains state for one chain of RDMA Read or
27 * Each WR chain handles only one R_key. Each RPC-over-RDMA segment
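The header fragments above (source lines 20-27) describe the central data structure of this file: one R/W context per chain of RDMA Read or Write Work Requests, and exactly one R_key per chain. As a rough sketch only, not the kernel's definition: the rw_ctx and rw_sg_table member names match fragments that appear later in this listing, while the other names and the exact layout are assumptions made for illustration.

#include <linux/list.h>
#include <linux/llist.h>
#include <linux/scatterlist.h>
#include <rdma/rw.h>

struct example_rw_ctxt {
        struct llist_node node;         /* free-list linkage; the transport caches idle contexts */
        struct list_head list;          /* links this context into one chunk's WR chain */
        unsigned int nents;             /* number of scatterlist entries currently in use */
        struct sg_table rw_sg_table;    /* local buffer described for the core R/W API */
        struct rdma_rw_ctx rw_ctx;      /* per-R_key state owned by <rdma/rw.h> */
        struct scatterlist first_sgl[]; /* inline SGEs, enough for the common case */
};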
55 svc_rdma_get_rw_ctxt(struct svcxprt_rdma *rdma, unsigned int sges) in svc_rdma_get_rw_ctxt() argument
57 struct ib_device *dev = rdma->sc_cm_id->device; in svc_rdma_get_rw_ctxt()
62 spin_lock(&rdma->sc_rw_ctxt_lock); in svc_rdma_get_rw_ctxt()
63 node = llist_del_first(&rdma->sc_rw_ctxts); in svc_rdma_get_rw_ctxt()
64 spin_unlock(&rdma->sc_rw_ctxt_lock); in svc_rdma_get_rw_ctxt()
87 trace_svcrdma_rwctx_empty(rdma, sges); in svc_rdma_get_rw_ctxt()
98 static void svc_rdma_put_rw_ctxt(struct svcxprt_rdma *rdma, in svc_rdma_put_rw_ctxt() argument
101 __svc_rdma_put_rw_ctxt(ctxt, &rdma->sc_rw_ctxts); in svc_rdma_put_rw_ctxt()
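Source lines 55-64 and 98-101 show those contexts being recycled through a spinlock-protected llist hanging off the transport (sc_rw_ctxts / sc_rw_ctxt_lock). A minimal sketch of that get/put pattern, building on the struct sketched above; scatterlist-table setup and the trace point at line 87 are deliberately omitted.

#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

static struct example_rw_ctxt *example_get_rw_ctxt(struct llist_head *cache,
                                                   spinlock_t *lock,
                                                   unsigned int sges)
{
        struct example_rw_ctxt *ctxt;
        struct llist_node *node;

        /* Fast path: pop a cached context. llist_del_first() needs the
         * lock because several threads may try to consume at once. */
        spin_lock(lock);
        node = llist_del_first(cache);
        spin_unlock(lock);
        if (node)
                return llist_entry(node, struct example_rw_ctxt, node);

        /* Slow path: allocate a context with room for @sges inline SGEs.
         * The source also prepares ctxt->rw_sg_table at this point. */
        ctxt = kzalloc(struct_size(ctxt, first_sgl, sges), GFP_KERNEL);
        return ctxt;
}

static void example_put_rw_ctxt(struct example_rw_ctxt *ctxt,
                                struct llist_head *cache)
{
        /* Producers may add concurrently without taking the lock */
        llist_add(&ctxt->node, cache);
}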
106 * @rdma: transport about to be destroyed
109 void svc_rdma_destroy_rw_ctxts(struct svcxprt_rdma *rdma) in svc_rdma_destroy_rw_ctxts() argument
114 while ((node = llist_del_first(&rdma->sc_rw_ctxts)) != NULL) { in svc_rdma_destroy_rw_ctxts()
122 * @rdma: controlling transport instance
124 * @offset: RDMA offset
125 * @handle: RDMA tag/handle
131 static int svc_rdma_rw_ctx_init(struct svcxprt_rdma *rdma, in svc_rdma_rw_ctx_init() argument
138 ret = rdma_rw_ctx_init(&ctxt->rw_ctx, rdma->sc_qp, rdma->sc_port_num, in svc_rdma_rw_ctx_init()
142 trace_svcrdma_dma_map_rw_err(rdma, offset, handle, in svc_rdma_rw_ctx_init()
144 svc_rdma_put_rw_ctxt(rdma, ctxt); in svc_rdma_rw_ctx_init()
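Source lines 131-144 wrap rdma_rw_ctx_init() so that a DMA-mapping failure is reported and the svc-level context goes straight back to the free list instead of leaking. A hedged sketch of that wrapper, reusing the illustrative helpers above (the trace call at line 142 is left out):

#include <linux/dma-direction.h>
#include <rdma/ib_verbs.h>

static int example_rw_ctx_init(struct ib_qp *qp, u32 port_num,
                               struct example_rw_ctxt *ctxt,
                               u64 offset, u32 handle,
                               enum dma_data_direction dir,
                               struct llist_head *cache)
{
        int ret;

        /* DMA-map ctxt's scatterlist and build the WRs that move this
         * one segment (one R_key). On success the return value is the
         * number of SQ entries the chain will consume; the caller
         * accumulates that into its cc_sqecount-style counter. */
        ret = rdma_rw_ctx_init(&ctxt->rw_ctx, qp, port_num,
                               ctxt->rw_sg_table.sgl, ctxt->nents,
                               0, offset, handle, dir);
        if (ret < 0)
                example_put_rw_ctxt(ctxt, cache);
        return ret;
}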
151 * @rdma: controlling transport instance
154 void svc_rdma_cc_init(struct svcxprt_rdma *rdma, in svc_rdma_cc_init() argument
160 svc_rdma_send_cid_init(rdma, cid); in svc_rdma_cc_init()
168 * @rdma: controlling transport instance
172 void svc_rdma_cc_release(struct svcxprt_rdma *rdma, in svc_rdma_cc_release() argument
186 rdma_rw_ctx_destroy(&ctxt->rw_ctx, rdma->sc_qp, in svc_rdma_cc_release()
187 rdma->sc_port_num, ctxt->rw_sg_table.sgl, in svc_rdma_cc_release()
197 llist_add_batch(first, last, &rdma->sc_rw_ctxts); in svc_rdma_cc_release()
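Source lines 172-197 release a chunk context: each R/W context on its chain returns its DMA mappings via rdma_rw_ctx_destroy(), and the contexts themselves are handed back to the transport with one llist_add_batch(). A sketch of that loop; in the source the chain and DMA direction come from the chunk context, here they are plain parameters.

static void example_cc_release(struct ib_qp *qp, u32 port_num,
                               struct list_head *chain,
                               enum dma_data_direction dir,
                               struct llist_head *cache)
{
        struct llist_node *first = NULL, *last = NULL;
        struct example_rw_ctxt *ctxt;

        while ((ctxt = list_first_entry_or_null(chain,
                                                struct example_rw_ctxt,
                                                list)) != NULL) {
                list_del(&ctxt->list);

                /* Unmap the local pages this WR chain referenced */
                rdma_rw_ctx_destroy(&ctxt->rw_ctx, qp, port_num,
                                    ctxt->rw_sg_table.sgl, ctxt->nents, dir);

                /* String the contexts together so they can be returned
                 * to the free list in a single batched operation. */
                ctxt->node.next = first;
                if (!first)
                        last = &ctxt->node;
                first = &ctxt->node;
        }
        if (first)
                llist_add_batch(first, last, cache);
}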
223 svc_rdma_write_info_alloc(struct svcxprt_rdma *rdma, in svc_rdma_write_info_alloc() argument
229 ibdev_to_node(rdma->sc_cm_id->device)); in svc_rdma_write_info_alloc()
233 info->wi_rdma = rdma; in svc_rdma_write_info_alloc()
235 svc_rdma_cc_init(rdma, &info->wi_cc); in svc_rdma_write_info_alloc()
264 struct svcxprt_rdma *rdma = cq->cq_context; in svc_rdma_write_done() local
282 svc_rdma_wake_send_waiters(rdma, cc->cc_sqecount); in svc_rdma_write_done()
285 svc_xprt_deferred_close(&rdma->sc_xprt); in svc_rdma_write_done()
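Source lines 264-285 are the Send Queue completion handler for posted Write chains: it returns the chain's SQ entries to the transport's accounting and, if the work request failed or flushed, forces the connection closed. A hedged sketch of that handler shape; cc_sqecount appears in the fragments, while the cc_cqe member and the container_of() plumbing are assumptions of this sketch, and per-chunk state teardown is omitted.

#include <rdma/ib_verbs.h>
#include <linux/sunrpc/svc_rdma.h>
#include <linux/sunrpc/svc_xprt.h>

static void example_write_done(struct ib_cq *cq, struct ib_wc *wc)
{
        struct svcxprt_rdma *rdma = cq->cq_context;
        struct ib_cqe *cqe = wc->wr_cqe;
        struct svc_rdma_chunk_ctxt *cc =
                container_of(cqe, struct svc_rdma_chunk_ctxt, cc_cqe);

        /* Give the Send Queue entries back first so that senders
         * blocked in the posting path can make progress. */
        svc_rdma_wake_send_waiters(rdma, cc->cc_sqecount);

        /* Any failure, including a flush, means the QP is unusable */
        if (wc->status != IB_WC_SUCCESS)
                svc_xprt_deferred_close(&rdma->sc_xprt);
}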
291 * svc_rdma_wc_read_done - Handle completion of an RDMA Read ctx
298 struct svcxprt_rdma *rdma = cq->cq_context; in svc_rdma_wc_read_done() local
304 svc_rdma_wake_send_waiters(rdma, cc->cc_sqecount); in svc_rdma_wc_read_done()
312 spin_lock(&rdma->sc_rq_dto_lock); in svc_rdma_wc_read_done()
313 list_add_tail(&ctxt->rc_list, &rdma->sc_read_complete_q); in svc_rdma_wc_read_done()
315 set_bit(XPT_DATA, &rdma->sc_xprt.xpt_flags); in svc_rdma_wc_read_done()
316 spin_unlock(&rdma->sc_rq_dto_lock); in svc_rdma_wc_read_done()
317 svc_xprt_enqueue(&rdma->sc_xprt); in svc_rdma_wc_read_done()
326 /* The RDMA Read has flushed, so the incoming RPC message in svc_rdma_wc_read_done()
330 svc_rdma_cc_release(rdma, cc, DMA_FROM_DEVICE); in svc_rdma_wc_read_done()
331 svc_rdma_recv_ctxt_put(rdma, ctxt); in svc_rdma_wc_read_done()
332 svc_xprt_deferred_close(&rdma->sc_xprt); in svc_rdma_wc_read_done()
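Source lines 291-332 handle Read completion. On success the receive context is queued on sc_read_complete_q under sc_rq_dto_lock and the transport is marked ready, so a server thread can pick the now-complete RPC message back up; on failure everything is released and the connection is closed. A sketch of both paths; it assumes the chunk context is embedded in the receive context (the rc_cc name is an assumption here, rc_list appears in the fragments).

static void example_read_done(struct ib_cq *cq, struct ib_wc *wc)
{
        struct svcxprt_rdma *rdma = cq->cq_context;
        struct ib_cqe *cqe = wc->wr_cqe;
        struct svc_rdma_chunk_ctxt *cc =
                container_of(cqe, struct svc_rdma_chunk_ctxt, cc_cqe);
        struct svc_rdma_recv_ctxt *ctxt;

        svc_rdma_wake_send_waiters(rdma, cc->cc_sqecount);
        ctxt = container_of(cc, struct svc_rdma_recv_ctxt, rc_cc);

        if (wc->status == IB_WC_SUCCESS) {
                /* The pulled Read payload is now in ctxt's pages: hand
                 * it to the next available server thread. */
                spin_lock(&rdma->sc_rq_dto_lock);
                list_add_tail(&ctxt->rc_list, &rdma->sc_read_complete_q);
                set_bit(XPT_DATA, &rdma->sc_xprt.xpt_flags);
                spin_unlock(&rdma->sc_rq_dto_lock);
                svc_xprt_enqueue(&rdma->sc_xprt);
                return;
        }

        /* The Read flushed: the incoming RPC message cannot be
         * assembled, so drop it and shut the connection down. */
        svc_rdma_cc_release(rdma, cc, DMA_FROM_DEVICE);
        svc_rdma_recv_ctxt_put(rdma, ctxt);
        svc_xprt_deferred_close(&rdma->sc_xprt);
}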
341 static int svc_rdma_post_chunk_ctxt(struct svcxprt_rdma *rdma, in svc_rdma_post_chunk_ctxt() argument
352 if (cc->cc_sqecount > rdma->sc_sq_depth) in svc_rdma_post_chunk_ctxt()
361 first_wr = rdma_rw_ctx_wrs(&ctxt->rw_ctx, rdma->sc_qp, in svc_rdma_post_chunk_ctxt()
362 rdma->sc_port_num, cqe, first_wr); in svc_rdma_post_chunk_ctxt()
368 &rdma->sc_sq_avail) > 0) { in svc_rdma_post_chunk_ctxt()
370 ret = ib_post_send(rdma->sc_qp, first_wr, &bad_wr); in svc_rdma_post_chunk_ctxt()
377 trace_svcrdma_sq_full(rdma, &cc->cc_cid); in svc_rdma_post_chunk_ctxt()
378 atomic_add(cc->cc_sqecount, &rdma->sc_sq_avail); in svc_rdma_post_chunk_ctxt()
379 wait_event(rdma->sc_send_wait, in svc_rdma_post_chunk_ctxt()
380 atomic_read(&rdma->sc_sq_avail) > cc->cc_sqecount); in svc_rdma_post_chunk_ctxt()
381 trace_svcrdma_sq_retry(rdma, &cc->cc_cid); in svc_rdma_post_chunk_ctxt()
384 trace_svcrdma_sq_post_err(rdma, &cc->cc_cid, ret); in svc_rdma_post_chunk_ctxt()
385 svc_xprt_deferred_close(&rdma->sc_xprt); in svc_rdma_post_chunk_ctxt()
391 atomic_add(cc->cc_sqecount, &rdma->sc_sq_avail); in svc_rdma_post_chunk_ctxt()
392 wake_up(&rdma->sc_send_wait); in svc_rdma_post_chunk_ctxt()
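Source lines 341-392 implement the Send Queue flow control used when posting a whole WR chain: the chain's SQ entry count is reserved up front with one atomic subtraction, the caller sleeps on sc_send_wait when the SQ is congested, and the reservation is returned (and other senders woken) if posting fails. In the source, first_wr is the head of a chain built by calling rdma_rw_ctx_wrs() once per R/W context (lines 361-362). A sketch of that pattern, with tracing, statistics, and partially-posted-chain recovery omitted:

static int example_post_chain(struct svcxprt_rdma *rdma,
                              struct ib_send_wr *first_wr,
                              unsigned int sqecount)
{
        const struct ib_send_wr *bad_wr;
        int ret;

        do {
                /* Reserve enough SQ entries for the whole chain */
                if (atomic_sub_return(sqecount, &rdma->sc_sq_avail) > 0) {
                        ret = ib_post_send(rdma->sc_qp, first_wr, &bad_wr);
                        if (ret)
                                break;
                        return 0;
                }

                /* SQ is full: undo the reservation, wait for credits
                 * that completions will return, then try again. */
                atomic_add(sqecount, &rdma->sc_sq_avail);
                wait_event(rdma->sc_send_wait,
                           atomic_read(&rdma->sc_sq_avail) > sqecount);
        } while (1);

        /* ib_post_send() failed: give the entries back, wake anyone
         * waiting for them, and close the transport. */
        atomic_add(sqecount, &rdma->sc_sq_avail);
        wake_up(&rdma->sc_send_wait);
        svc_xprt_deferred_close(&rdma->sc_xprt);
        return ret;
}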
443 /* Construct RDMA Write WRs to send a portion of an xdr_buf containing
454 struct svcxprt_rdma *rdma = info->wi_rdma; in svc_rdma_build_writes() local
470 ctxt = svc_rdma_get_rw_ctxt(rdma, in svc_rdma_build_writes()
477 ret = svc_rdma_rw_ctx_init(rdma, ctxt, offset, seg->rs_handle, in svc_rdma_build_writes()
503 * svc_rdma_iov_write - Construct RDMA Writes from an iov
511 * %-EIO if an rdma-rw error occurred
522 * svc_rdma_pages_write - Construct RDMA Writes from pages
532 * %-EIO if an rdma-rw error occurred
546 * svc_rdma_xb_write - Construct RDMA Writes to write an xdr_buf
554 * %-EIO if an rdma-rw error occurred
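The three helpers documented at source lines 503-554 split one xdr_buf into up to three Write spans: the head iovec, the page list, and the tail iovec. A hedged sketch of how such a dispatcher can be put together; svc_rdma_iov_write(), svc_rdma_pages_write(), and struct svc_rdma_write_info are internal to this file, their argument lists here are assumptions inferred from the kernel-doc above, and the sketch reports success with 0 rather than mirroring the source's exact return convention.

#include <linux/sunrpc/xdr.h>

static int example_xb_write(struct svc_rdma_write_info *info,
                            const struct xdr_buf *xdr)
{
        int ret;

        if (xdr->head[0].iov_len) {
                ret = svc_rdma_iov_write(info, &xdr->head[0]);
                if (ret < 0)
                        return ret;
        }

        if (xdr->page_len) {
                ret = svc_rdma_pages_write(info, xdr, xdr->head[0].iov_len,
                                           xdr->page_len);
                if (ret < 0)
                        return ret;
        }

        if (xdr->tail[0].iov_len) {
                ret = svc_rdma_iov_write(info, &xdr->tail[0]);
                if (ret < 0)
                        return ret;
        }

        return 0;
}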
585 * @rdma: controlling RDMA transport
596 int svc_rdma_send_write_chunk(struct svcxprt_rdma *rdma, in svc_rdma_send_write_chunk() argument
604 info = svc_rdma_write_info_alloc(rdma, chunk); in svc_rdma_send_write_chunk()
614 ret = svc_rdma_post_chunk_ctxt(rdma, cc); in svc_rdma_send_write_chunk()
626 * @rdma: controlling RDMA transport
637 int svc_rdma_send_reply_chunk(struct svcxprt_rdma *rdma, in svc_rdma_send_reply_chunk() argument
650 info = svc_rdma_write_info_alloc(rdma, chunk); in svc_rdma_send_reply_chunk()
661 ret = svc_rdma_post_chunk_ctxt(rdma, cc); in svc_rdma_send_reply_chunk()
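Source lines 596-661 show that svc_rdma_send_write_chunk() and svc_rdma_send_reply_chunk() share one shape: allocate per-chunk write state, convert the relevant part of the reply's xdr_buf into a chain of Write WRs, then post the chain through svc_rdma_post_chunk_ctxt(). A hedged sketch of that shape; example_xb_write() stands in for the build step, example_write_info_free() is a hypothetical teardown helper, and the real functions' return-value conventions are not reproduced here.

static int example_send_chunk(struct svcxprt_rdma *rdma,
                              const struct svc_rdma_chunk *chunk,
                              const struct xdr_buf *xdr)
{
        struct svc_rdma_write_info *info;
        int ret;

        /* Per-chunk state; carries an embedded chunk context (wi_cc)
         * that tracks the WR chain and its SQ entry count. */
        info = svc_rdma_write_info_alloc(rdma, chunk);
        if (!info)
                return -ENOMEM;

        /* Build the Write WR chain covering @xdr */
        ret = example_xb_write(info, xdr);
        if (ret < 0)
                goto out_err;

        /* Post the chain; completion is handled asynchronously by the
         * Write completion handler sketched earlier. */
        ret = svc_rdma_post_chunk_ctxt(rdma, &info->wi_cc);
        if (ret < 0)
                goto out_err;
        return 0;

out_err:
        example_write_info_free(info);  /* hypothetical cleanup helper */
        return ret;
}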
673 * svc_rdma_build_read_segment - Build RDMA Read WQEs to pull one RDMA segment
688 struct svcxprt_rdma *rdma = svc_rdma_rqst_rdma(rqstp); in svc_rdma_build_read_segment() local
697 ctxt = svc_rdma_get_rw_ctxt(rdma, sge_no); in svc_rdma_build_read_segment()
725 ret = svc_rdma_rw_ctx_init(rdma, ctxt, segment->rs_offset, in svc_rdma_build_read_segment()
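Source lines 673-725 build the Read WRs for one RDMA segment: each page that will receive data becomes one scatterlist entry, and the filled context is then handed to the rdma_rw_ctx_init() wrapper with DMA_FROM_DEVICE. A simplified sketch of the page-to-scatterlist step; first-page offset handling (rc_pageoff), page allocation, and error checking are omitted, and the helper name is illustrative.

#include <linux/minmax.h>
#include <linux/mm.h>

static void example_map_read_segment(struct example_rw_ctxt *ctxt,
                                     struct page **pages,
                                     unsigned int len)
{
        struct scatterlist *sg = ctxt->rw_sg_table.sgl;
        unsigned int sge_no = 0;

        while (len) {
                unsigned int seg_len = min_t(unsigned int, len, PAGE_SIZE);

                /* One SGE per page of Read payload */
                sg_set_page(sg, pages[sge_no], seg_len, 0);
                sg = sg_next(sg);
                len -= seg_len;
                sge_no++;
        }
        ctxt->nents = sge_no;
}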
741 * svc_rdma_build_read_chunk - Build RDMA Read WQEs to pull one RDMA chunk
778 * head->rc_curpage and head->rc_pageoff so that the next RDMA Read
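The fragment at source line 778 notes that rc_curpage and rc_pageoff are advanced so the next RDMA Read continues exactly where the previous one ended. A tiny sketch of that cursor arithmetic (the helper and its parameter names are illustrative):

static void example_advance_read_cursor(unsigned int *curpage,
                                        unsigned int *pageoff,
                                        unsigned int len)
{
        /* Consume @len bytes: fold whole pages into the page index,
         * keep the remainder as the offset within the current page. */
        *pageoff += len;
        *curpage += *pageoff >> PAGE_SHIFT;
        *pageoff &= PAGE_SIZE - 1;
}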
820 * svc_rdma_read_multiple_chunks - Construct RDMA Reads to pull data item Read chunks
828 * %0: RDMA Read WQEs were successfully built
872 * svc_rdma_read_data_item - Construct RDMA Reads to pull data item Read chunks
883 * %0: RDMA Read WQEs were successfully built
897 * svc_rdma_read_chunk_range - Build RDMA Read WRs for portion of a chunk
905 * %0: RDMA Read WQEs were successfully built
944 * svc_rdma_read_call_chunk - Build RDMA Read WQEs to pull a Long Message
949 * %0: RDMA Read WQEs were successfully built
1000 * svc_rdma_read_special - Build RDMA Read WQEs to pull a Long Message
1012 * %0: RDMA Read WQEs were successfully built
1028 * of two different RDMA segments.
1047 * @rdma: controlling RDMA transport
1051 * The RPC/RDMA protocol assumes that the upper layer's XDR decoders
1059 * RDMA Reads have completed.
1062 * %1: all needed RDMA Reads were posted successfully,
1068 int svc_rdma_process_read_list(struct svcxprt_rdma *rdma, in svc_rdma_process_read_list() argument
1093 ret = svc_rdma_post_chunk_ctxt(rdma, cc); in svc_rdma_process_read_list()
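Finally, the kernel-doc fragments at source lines 1047-1068 spell out the caller's contract for svc_rdma_process_read_list(): the RPC message must not be decoded until the posted Reads have completed, and a return value of %1 means all needed Reads were posted. A hedged sketch of a caller honoring that contract; the surrounding receive-path logic is assumed, not shown in this listing.

static int example_handle_read_list(struct svcxprt_rdma *rdma,
                                    struct svc_rqst *rqstp,
                                    struct svc_rdma_recv_ctxt *head)
{
        int ret;

        ret = svc_rdma_process_read_list(rdma, rqstp, head);
        if (ret < 0)
                return ret;     /* posting failed; the receive path drops the message */

        /* ret == 1: the Reads are in flight. Do not touch the RPC's
         * xdr_buf yet; svc_rdma_wc_read_done() will queue @head on
         * sc_read_complete_q and re-enqueue the transport once the
         * payload has actually arrived. */
        return 0;
}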