1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
2 /*
3 * Copyright (c) 2016-2018 Oracle. All rights reserved.
4 * Copyright (c) 2014 Open Grid Computing, Inc. All rights reserved.
5 * Copyright (c) 2005-2006 Network Appliance, Inc. All rights reserved.
6 *
7 * This software is available to you under a choice of one of two
8 * licenses. You may choose to be licensed under the terms of the GNU
9 * General Public License (GPL) Version 2, available from the file
10 * COPYING in the main directory of this source tree, or the BSD-type
11 * license below:
12 *
13 * Redistribution and use in source and binary forms, with or without
14 * modification, are permitted provided that the following conditions
15 * are met:
16 *
17 * Redistributions of source code must retain the above copyright
18 * notice, this list of conditions and the following disclaimer.
19 *
20 * Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials provided
23 * with the distribution.
24 *
25 * Neither the name of the Network Appliance, Inc. nor the names of
26 * its contributors may be used to endorse or promote products
27 * derived from this software without specific prior written
28 * permission.
29 *
30 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
31 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
32 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
33 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
34 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
35 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
36 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
37 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
38 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
39 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
40 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
41 *
42 * Author: Tom Tucker <tom@opengridcomputing.com>
43 */
44
45 /* Operation
46 *
47 * The main entry point is svc_rdma_sendto. This is called by the
48 * RPC server when an RPC Reply is ready to be transmitted to a client.
49 *
50 * The passed-in svc_rqst contains a struct xdr_buf which holds an
51 * XDR-encoded RPC Reply message. sendto must construct the RPC-over-RDMA
52 * transport header, post all Write WRs needed for this Reply, then post
53 * a Send WR conveying the transport header and the RPC message itself to
54 * the client.
55 *
56 * svc_rdma_sendto must fully transmit the Reply before returning, as
57 * the svc_rqst will be recycled as soon as sendto returns. Remaining
58 * resources referred to by the svc_rqst are also recycled at that time.
59 * Therefore any resources that must remain longer must be detached
60 * from the svc_rqst and released later.
61 *
62 * Page Management
63 *
64 * The I/O that performs Reply transmission is asynchronous, and may
65 * complete well after sendto returns. Thus pages under I/O must be
66 * removed from the svc_rqst before sendto returns.
67 *
68 * The logic here depends on Send Queue and completion ordering. Since
69 * the Send WR is always posted last, it will always complete last. Thus
70 * when it completes, it is guaranteed that all previous Write WRs have
71 * also completed.
72 *
73 * Write WRs are constructed and posted. Each Write segment gets its own
74 * svc_rdma_rw_ctxt, allowing the Write completion handler to find and
75 * DMA-unmap the pages under I/O for that Write segment. The Write
76 * completion handler does not release any pages.
77 *
78 * When the Send WR is constructed, it also gets its own svc_rdma_send_ctxt.
79 * Ownership of all the Reply's pages is transferred into that
80 * ctxt, the Send WR is posted, and sendto returns.
81 *
82 * The svc_rdma_send_ctxt is presented when the Send WR completes. The
83 * Send completion handler finally releases the Reply's pages.
84 *
85 * This mechanism also assumes that completions on the transport's Send
86 * Completion Queue do not run in parallel. Otherwise a Write completion
87 * and Send completion running at the same time could release pages that
88 * are still DMA-mapped.
89 *
90 * Error Handling
91 *
92 * - If the Send WR is posted successfully, it will either complete
93 * successfully, or get flushed. Either way, the Send completion
94 * handler releases the Reply's pages.
95 * - If the Send WR cannot be posted, the forward path releases
96 * the Reply's pages.
97 *
98 * This handles the case, without the use of page reference counting,
99 * where two different Write segments send portions of the same page.
100 */
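/* As a rough orientation, the forward path described above boils down to
 * the following call sequence (a sketch only; see svc_rdma_sendto() below
 * for the authoritative flow, including the Write-chunk and error paths):
 *
 *    sctxt = svc_rdma_send_ctxt_get(rdma);
 *    ... encode the RPC-over-RDMA header into sctxt->sc_stream ...
 *    svc_rdma_map_reply_msg(rdma, sctxt, ...);  // build the Send SGE list
 *    svc_rdma_save_io_pages(rqstp, sctxt);      // detach pages from rqstp
 *    svc_rdma_post_send(rdma, sctxt);           // Send WR is posted last
 */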
101
102 #include <linux/spinlock.h>
103 #include <linux/unaligned.h>
104
105 #include <rdma/ib_verbs.h>
106 #include <rdma/rdma_cm.h>
107
108 #include <linux/sunrpc/debug.h>
109 #include <linux/sunrpc/svc_rdma.h>
110
111 #include "xprt_rdma.h"
112 #include <trace/events/rpcrdma.h>
113
114 static void svc_rdma_wc_send(struct ib_cq *cq, struct ib_wc *wc);
115
116 static struct svc_rdma_send_ctxt *
117 svc_rdma_send_ctxt_alloc(struct svcxprt_rdma *rdma)
118 {
119 int node = ibdev_to_node(rdma->sc_cm_id->device);
120 struct svc_rdma_send_ctxt *ctxt;
121 unsigned long pages;
122 dma_addr_t addr;
123 void *buffer;
124 int i;
125
126 ctxt = kzalloc_node(struct_size(ctxt, sc_sges, rdma->sc_max_send_sges),
127 GFP_KERNEL, node);
128 if (!ctxt)
129 goto fail0;
130 pages = svc_serv_maxpages(rdma->sc_xprt.xpt_server);
131 ctxt->sc_pages = kcalloc_node(pages, sizeof(struct page *),
132 GFP_KERNEL, node);
133 if (!ctxt->sc_pages)
134 goto fail1;
135 ctxt->sc_maxpages = pages;
136 buffer = kmalloc_node(rdma->sc_max_req_size, GFP_KERNEL, node);
137 if (!buffer)
138 goto fail2;
139 addr = ib_dma_map_single(rdma->sc_pd->device, buffer,
140 rdma->sc_max_req_size, DMA_TO_DEVICE);
141 if (ib_dma_mapping_error(rdma->sc_pd->device, addr))
142 goto fail3;
143
144 svc_rdma_send_cid_init(rdma, &ctxt->sc_cid);
145
146 ctxt->sc_rdma = rdma;
147 ctxt->sc_send_wr.next = NULL;
148 ctxt->sc_send_wr.wr_cqe = &ctxt->sc_cqe;
149 ctxt->sc_send_wr.sg_list = ctxt->sc_sges;
150 ctxt->sc_send_wr.send_flags = IB_SEND_SIGNALED;
151 ctxt->sc_cqe.done = svc_rdma_wc_send;
152 ctxt->sc_xprt_buf = buffer;
153 xdr_buf_init(&ctxt->sc_hdrbuf, ctxt->sc_xprt_buf,
154 rdma->sc_max_req_size);
155 ctxt->sc_sges[0].addr = addr;
156
157 for (i = 0; i < rdma->sc_max_send_sges; i++)
158 ctxt->sc_sges[i].lkey = rdma->sc_pd->local_dma_lkey;
159 return ctxt;
160
161 fail3:
162 kfree(buffer);
163 fail2:
164 kfree(ctxt->sc_pages);
165 fail1:
166 kfree(ctxt);
167 fail0:
168 return NULL;
169 }
170
171 /**
172 * svc_rdma_send_ctxts_destroy - Release all send_ctxt's for an xprt
173 * @rdma: svcxprt_rdma being torn down
174 *
175 */
176 void svc_rdma_send_ctxts_destroy(struct svcxprt_rdma *rdma)
177 {
178 struct svc_rdma_send_ctxt *ctxt;
179 struct llist_node *node;
180
181 while ((node = llist_del_first(&rdma->sc_send_ctxts)) != NULL) {
182 ctxt = llist_entry(node, struct svc_rdma_send_ctxt, sc_node);
183 ib_dma_unmap_single(rdma->sc_pd->device,
184 ctxt->sc_sges[0].addr,
185 rdma->sc_max_req_size,
186 DMA_TO_DEVICE);
187 kfree(ctxt->sc_xprt_buf);
188 kfree(ctxt->sc_pages);
189 kfree(ctxt);
190 }
191 }
192
193 /**
194 * svc_rdma_send_ctxt_get - Get a free send_ctxt
195 * @rdma: controlling svcxprt_rdma
196 *
197 * Returns a ready-to-use send_ctxt, or NULL if none are
198 * available and a fresh one cannot be allocated.
199 */
200 struct svc_rdma_send_ctxt *svc_rdma_send_ctxt_get(struct svcxprt_rdma *rdma)
201 {
202 struct svc_rdma_send_ctxt *ctxt;
203 struct llist_node *node;
204
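/* Consumers of sc_send_ctxts must be serialized: llist_del_first()
 * may be called by only one consumer at a time, hence sc_send_lock.
 */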
205 spin_lock(&rdma->sc_send_lock);
206 node = llist_del_first(&rdma->sc_send_ctxts);
207 spin_unlock(&rdma->sc_send_lock);
208 if (!node)
209 goto out_empty;
210
211 ctxt = llist_entry(node, struct svc_rdma_send_ctxt, sc_node);
212
213 out:
214 rpcrdma_set_xdrlen(&ctxt->sc_hdrbuf, 0);
215 xdr_init_encode(&ctxt->sc_stream, &ctxt->sc_hdrbuf,
216 ctxt->sc_xprt_buf, NULL);
217
218 svc_rdma_cc_init(rdma, &ctxt->sc_reply_info.wi_cc);
219 ctxt->sc_send_wr.num_sge = 0;
220 ctxt->sc_cur_sge_no = 0;
221 ctxt->sc_page_count = 0;
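/* By default the WR chain holds only the Send WR and consumes a single
 * SQE. Callers that post RDMA Writes for a Reply chunk may prepend WRs
 * to sc_wr_chain and raise sc_sqecount accordingly.
 */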
222 ctxt->sc_wr_chain = &ctxt->sc_send_wr;
223 ctxt->sc_sqecount = 1;
224
225 return ctxt;
226
227 out_empty:
228 ctxt = svc_rdma_send_ctxt_alloc(rdma);
229 if (!ctxt)
230 return NULL;
231 goto out;
232 }
233
234 static void svc_rdma_send_ctxt_release(struct svcxprt_rdma *rdma,
235 struct svc_rdma_send_ctxt *ctxt)
236 {
237 struct ib_device *device = rdma->sc_cm_id->device;
238 unsigned int i;
239
240 svc_rdma_reply_chunk_release(rdma, ctxt);
241
242 if (ctxt->sc_page_count)
243 release_pages(ctxt->sc_pages, ctxt->sc_page_count);
244
245 /* The first SGE contains the transport header, which
246 * remains mapped until @ctxt is destroyed.
247 */
248 for (i = 1; i < ctxt->sc_send_wr.num_sge; i++) {
249 trace_svcrdma_dma_unmap_page(&ctxt->sc_cid,
250 ctxt->sc_sges[i].addr,
251 ctxt->sc_sges[i].length);
252 ib_dma_unmap_page(device,
253 ctxt->sc_sges[i].addr,
254 ctxt->sc_sges[i].length,
255 DMA_TO_DEVICE);
256 }
257
258 llist_add(&ctxt->sc_node, &rdma->sc_send_ctxts);
259 }
260
261 static void svc_rdma_send_ctxt_put_async(struct work_struct *work)
262 {
263 struct svc_rdma_send_ctxt *ctxt;
264
265 ctxt = container_of(work, struct svc_rdma_send_ctxt, sc_work);
266 svc_rdma_send_ctxt_release(ctxt->sc_rdma, ctxt);
267 }
268
269 /**
270 * svc_rdma_send_ctxt_put - Return send_ctxt to free list
271 * @rdma: controlling svcxprt_rdma
272 * @ctxt: object to return to the free list
273 *
274 * Pages left in sc_pages are DMA unmapped and released.
275 */
276 void svc_rdma_send_ctxt_put(struct svcxprt_rdma *rdma,
277 struct svc_rdma_send_ctxt *ctxt)
278 {
279 INIT_WORK(&ctxt->sc_work, svc_rdma_send_ctxt_put_async);
280 queue_work(svcrdma_wq, &ctxt->sc_work);
281 }
282
283 /**
284 * svc_rdma_wake_send_waiters - manage Send Queue accounting
285 * @rdma: controlling transport
286 * @avail: Number of additional SQEs that are now available
287 *
288 */
289 void svc_rdma_wake_send_waiters(struct svcxprt_rdma *rdma, int avail)
290 {
291 atomic_add(avail, &rdma->sc_sq_avail);
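/* This barrier pairs with the one implied by prepare_to_wait() in
 * wait_event(): the update to sc_sq_avail must be visible before
 * waitqueue_active() is tested (see the waitqueue_active() rules in
 * include/linux/wait.h).
 */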
292 smp_mb__after_atomic();
293 if (unlikely(waitqueue_active(&rdma->sc_send_wait)))
294 wake_up(&rdma->sc_send_wait);
295 }
296
297 /**
298 * svc_rdma_wc_send - Invoked by RDMA provider for each polled Send WC
299 * @cq: Completion Queue context
300 * @wc: Work Completion object
301 *
302 * NB: The svc_xprt/svcxprt_rdma is pinned whenever it's possible that
303 * the Send completion handler could be running.
304 */
305 static void svc_rdma_wc_send(struct ib_cq *cq, struct ib_wc *wc)
306 {
307 struct svcxprt_rdma *rdma = cq->cq_context;
308 struct ib_cqe *cqe = wc->wr_cqe;
309 struct svc_rdma_send_ctxt *ctxt =
310 container_of(cqe, struct svc_rdma_send_ctxt, sc_cqe);
311
312 svc_rdma_wake_send_waiters(rdma, ctxt->sc_sqecount);
313
314 if (unlikely(wc->status != IB_WC_SUCCESS))
315 goto flushed;
316
317 trace_svcrdma_wc_send(&ctxt->sc_cid);
318 svc_rdma_send_ctxt_put(rdma, ctxt);
319 return;
320
321 flushed:
322 if (wc->status != IB_WC_WR_FLUSH_ERR)
323 trace_svcrdma_wc_send_err(wc, &ctxt->sc_cid);
324 else
325 trace_svcrdma_wc_send_flush(wc, &ctxt->sc_cid);
326 svc_rdma_send_ctxt_put(rdma, ctxt);
327 svc_xprt_deferred_close(&rdma->sc_xprt);
328 }
329
330 /**
331 * svc_rdma_post_send - Post a WR chain to the Send Queue
332 * @rdma: transport context
333 * @ctxt: WR chain to post
334 *
335 * Copy fields in @ctxt to stack variables in order to guarantee
336 * that these values remain available after the ib_post_send() call.
337 * In some error flow cases, svc_rdma_wc_send() releases @ctxt.
338 *
339 * Note there is potential for starvation when the Send Queue is
340 * full, because waiting threads are awoken in no particular order.
341 * The transport is typically provisioned with a deep
342 * enough Send Queue that SQ exhaustion should be a rare event.
343 *
344 * Return values:
345 * %0: @ctxt's WR chain was posted successfully
346 * %-ENOTCONN: The connection was lost
347 */
348 int svc_rdma_post_send(struct svcxprt_rdma *rdma,
349 struct svc_rdma_send_ctxt *ctxt)
350 {
351 struct ib_send_wr *first_wr = ctxt->sc_wr_chain;
352 struct ib_send_wr *send_wr = &ctxt->sc_send_wr;
353 const struct ib_send_wr *bad_wr = first_wr;
354 struct rpc_rdma_cid cid = ctxt->sc_cid;
355 int ret, sqecount = ctxt->sc_sqecount;
356
357 might_sleep();
358
359 /* Sync the transport header buffer */
360 ib_dma_sync_single_for_device(rdma->sc_pd->device,
361 send_wr->sg_list[0].addr,
362 send_wr->sg_list[0].length,
363 DMA_TO_DEVICE);
364
365 /* If the SQ is full, wait until an SQ entry is available */
366 while (!test_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags)) {
367 if (atomic_sub_return(sqecount, &rdma->sc_sq_avail) < 0) {
368 svc_rdma_wake_send_waiters(rdma, sqecount);
369
370 /* When the transport is torn down, assume
371 * ib_drain_sq() will trigger enough Send
372 * completions to wake us. The XPT_CLOSE test
373 * above should then cause the while loop to
374 * exit.
375 */
376 percpu_counter_inc(&svcrdma_stat_sq_starve);
377 trace_svcrdma_sq_full(rdma, &cid);
378 wait_event(rdma->sc_send_wait,
379 atomic_read(&rdma->sc_sq_avail) > 0);
380 trace_svcrdma_sq_retry(rdma, &cid);
381 continue;
382 }
383
384 trace_svcrdma_post_send(ctxt);
385 ret = ib_post_send(rdma->sc_qp, first_wr, &bad_wr);
386 if (ret) {
387 trace_svcrdma_sq_post_err(rdma, &cid, ret);
388 svc_xprt_deferred_close(&rdma->sc_xprt);
389
390 /* If even one WR was posted, there will be a
391 * Send completion that bumps sc_sq_avail.
392 */
393 if (bad_wr == first_wr) {
394 svc_rdma_wake_send_waiters(rdma, sqecount);
395 break;
396 }
397 }
398 return 0;
399 }
400 return -ENOTCONN;
401 }
402
403 /**
404 * svc_rdma_encode_read_list - Encode RPC Reply's Read chunk list
405 * @sctxt: Send context for the RPC Reply
406 *
407 * Return values:
408 * On success, returns length in bytes of the Reply XDR buffer
409 * that was consumed by the Reply Read list
410 * %-EMSGSIZE on XDR buffer overflow
411 */
412 static ssize_t svc_rdma_encode_read_list(struct svc_rdma_send_ctxt *sctxt)
413 {
414 /* RPC-over-RDMA version 1 replies never have a Read list. */
415 return xdr_stream_encode_item_absent(&sctxt->sc_stream);
416 }
417
418 /**
419 * svc_rdma_encode_write_segment - Encode one Write segment
420 * @sctxt: Send context for the RPC Reply
421 * @chunk: Write chunk to push
422 * @remaining: remaining bytes of the payload left in the Write chunk
423 * @segno: which segment in the chunk
424 *
425 * Return values:
426 * On success, returns length in bytes of the Reply XDR buffer
427 * that was consumed by the Write segment, and updates @remaining
428 * %-EMSGSIZE on XDR buffer overflow
429 */
430 static ssize_t svc_rdma_encode_write_segment(struct svc_rdma_send_ctxt *sctxt,
431 const struct svc_rdma_chunk *chunk,
432 u32 *remaining, unsigned int segno)
433 {
434 const struct svc_rdma_segment *segment = &chunk->ch_segments[segno];
435 const size_t len = rpcrdma_segment_maxsz * sizeof(__be32);
436 u32 length;
437 __be32 *p;
438
439 p = xdr_reserve_space(&sctxt->sc_stream, len);
440 if (!p)
441 return -EMSGSIZE;
442
443 length = min_t(u32, *remaining, segment->rs_length);
444 *remaining -= length;
445 xdr_encode_rdma_segment(p, segment->rs_handle, length,
446 segment->rs_offset);
447 trace_svcrdma_encode_wseg(sctxt, segno, segment->rs_handle, length,
448 segment->rs_offset);
449 return len;
450 }
451
452 /**
453 * svc_rdma_encode_write_chunk - Encode one Write chunk
454 * @sctxt: Send context for the RPC Reply
455 * @chunk: Write chunk to push
456 *
457 * Copy a Write chunk from the Call transport header to the
458 * Reply transport header. Update each segment's length field
459 * to reflect the number of bytes written in that segment.
460 *
461 * Return values:
462 * On success, returns length in bytes of the Reply XDR buffer
463 * that was consumed by the Write chunk
464 * %-EMSGSIZE on XDR buffer overflow
465 */
466 static ssize_t svc_rdma_encode_write_chunk(struct svc_rdma_send_ctxt *sctxt,
467 const struct svc_rdma_chunk *chunk)
468 {
469 u32 remaining = chunk->ch_payload_length;
470 unsigned int segno;
471 ssize_t len, ret;
472
473 len = 0;
474 ret = xdr_stream_encode_item_present(&sctxt->sc_stream);
475 if (ret < 0)
476 return ret;
477 len += ret;
478
479 ret = xdr_stream_encode_u32(&sctxt->sc_stream, chunk->ch_segcount);
480 if (ret < 0)
481 return ret;
482 len += ret;
483
484 for (segno = 0; segno < chunk->ch_segcount; segno++) {
485 ret = svc_rdma_encode_write_segment(sctxt, chunk, &remaining, segno);
486 if (ret < 0)
487 return ret;
488 len += ret;
489 }
490
491 return len;
492 }
493
494 /**
495 * svc_rdma_encode_write_list - Encode RPC Reply's Write chunk list
496 * @rctxt: Reply context with information about the RPC Call
497 * @sctxt: Send context for the RPC Reply
498 *
499 * Return values:
500 * On success, returns length in bytes of the Reply XDR buffer
501 * that was consumed by the Reply's Write list
502 * %-EMSGSIZE on XDR buffer overflow
503 */
504 static ssize_t svc_rdma_encode_write_list(struct svc_rdma_recv_ctxt *rctxt,
505 struct svc_rdma_send_ctxt *sctxt)
506 {
507 struct svc_rdma_chunk *chunk;
508 ssize_t len, ret;
509
510 len = 0;
511 pcl_for_each_chunk(chunk, &rctxt->rc_write_pcl) {
512 ret = svc_rdma_encode_write_chunk(sctxt, chunk);
513 if (ret < 0)
514 return ret;
515 len += ret;
516 }
517
518 /* Terminate the Write list */
519 ret = xdr_stream_encode_item_absent(&sctxt->sc_stream);
520 if (ret < 0)
521 return ret;
522
523 return len + ret;
524 }
525
526 /**
527 * svc_rdma_encode_reply_chunk - Encode RPC Reply's Reply chunk
528 * @rctxt: Reply context with information about the RPC Call
529 * @sctxt: Send context for the RPC Reply
530 * @length: size in bytes of the payload in the Reply chunk
531 *
532 * Return values:
533 * On success, returns length in bytes of the Reply XDR buffer
534 * that was consumed by the Reply's Reply chunk
535 * %-EMSGSIZE on XDR buffer overflow
536 * %-E2BIG if the RPC message is larger than the Reply chunk
537 */
538 static ssize_t
539 svc_rdma_encode_reply_chunk(struct svc_rdma_recv_ctxt *rctxt,
540 struct svc_rdma_send_ctxt *sctxt,
541 unsigned int length)
542 {
543 struct svc_rdma_chunk *chunk;
544
545 if (pcl_is_empty(&rctxt->rc_reply_pcl))
546 return xdr_stream_encode_item_absent(&sctxt->sc_stream);
547
548 chunk = pcl_first_chunk(&rctxt->rc_reply_pcl);
549 if (length > chunk->ch_length)
550 return -E2BIG;
551
552 chunk->ch_payload_length = length;
553 return svc_rdma_encode_write_chunk(sctxt, chunk);
554 }
555
556 struct svc_rdma_map_data {
557 struct svcxprt_rdma *md_rdma;
558 struct svc_rdma_send_ctxt *md_ctxt;
559 };
560
561 /**
562 * svc_rdma_page_dma_map - DMA map one page
563 * @data: pointer to arguments
564 * @page: struct page to DMA map
565 * @offset: offset into the page
566 * @len: number of bytes to map
567 *
568 * Returns:
569 * %0 if DMA mapping was successful
570 * %-EIO if the page cannot be DMA mapped
571 */
572 static int svc_rdma_page_dma_map(void *data, struct page *page,
573 unsigned long offset, unsigned int len)
574 {
575 struct svc_rdma_map_data *args = data;
576 struct svcxprt_rdma *rdma = args->md_rdma;
577 struct svc_rdma_send_ctxt *ctxt = args->md_ctxt;
578 struct ib_device *dev = rdma->sc_cm_id->device;
579 dma_addr_t dma_addr;
580
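/* sc_sges[0] is reserved for the transport header, so advance to the
 * next free SGE before mapping this page.
 */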
581 ++ctxt->sc_cur_sge_no;
582
583 dma_addr = ib_dma_map_page(dev, page, offset, len, DMA_TO_DEVICE);
584 if (ib_dma_mapping_error(dev, dma_addr))
585 goto out_maperr;
586
587 trace_svcrdma_dma_map_page(&ctxt->sc_cid, dma_addr, len);
588 ctxt->sc_sges[ctxt->sc_cur_sge_no].addr = dma_addr;
589 ctxt->sc_sges[ctxt->sc_cur_sge_no].length = len;
590 ctxt->sc_send_wr.num_sge++;
591 return 0;
592
593 out_maperr:
594 trace_svcrdma_dma_map_err(&ctxt->sc_cid, dma_addr, len);
595 return -EIO;
596 }
597
598 /**
599 * svc_rdma_iov_dma_map - DMA map an iovec
600 * @data: pointer to arguments
601 * @iov: kvec to DMA map
602 *
603 * ib_dma_map_page() is used here because svc_rdma_send_ctxt_release()
604 * handles the DMA unmap, and it uses ib_dma_unmap_page() exclusively.
605 *
606 * Returns:
607 * %0 if DMA mapping was successful
608 * %-EIO if the iovec cannot be DMA mapped
609 */
610 static int svc_rdma_iov_dma_map(void *data, const struct kvec *iov)
611 {
612 if (!iov->iov_len)
613 return 0;
614 return svc_rdma_page_dma_map(data, virt_to_page(iov->iov_base),
615 offset_in_page(iov->iov_base),
616 iov->iov_len);
617 }
618
619 /**
620 * svc_rdma_xb_dma_map - DMA map all segments of an xdr_buf
621 * @xdr: xdr_buf containing portion of an RPC message to transmit
622 * @data: pointer to arguments
623 *
624 * Returns:
625 * %0 if DMA mapping was successful
626 * %-EIO if DMA mapping failed
627 *
628 * On failure, any DMA mappings that have been already done must be
629 * unmapped by the caller.
630 */
631 static int svc_rdma_xb_dma_map(const struct xdr_buf *xdr, void *data)
632 {
633 unsigned int len, remaining;
634 unsigned long pageoff;
635 struct page **ppages;
636 int ret;
637
638 ret = svc_rdma_iov_dma_map(data, &xdr->head[0]);
639 if (ret < 0)
640 return ret;
641
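/* Map the xdr_buf's page list one page at a time; the first page may
 * start at a non-zero offset and the last page may be partial.
 */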
642 ppages = xdr->pages + (xdr->page_base >> PAGE_SHIFT);
643 pageoff = offset_in_page(xdr->page_base);
644 remaining = xdr->page_len;
645 while (remaining) {
646 len = min_t(u32, PAGE_SIZE - pageoff, remaining);
647
648 ret = svc_rdma_page_dma_map(data, *ppages++, pageoff, len);
649 if (ret < 0)
650 return ret;
651
652 remaining -= len;
653 pageoff = 0;
654 }
655
656 ret = svc_rdma_iov_dma_map(data, &xdr->tail[0]);
657 if (ret < 0)
658 return ret;
659
660 return xdr->len;
661 }
662
663 struct svc_rdma_pullup_data {
664 u8 *pd_dest;
665 unsigned int pd_length;
666 unsigned int pd_num_sges;
667 };
668
669 /**
670 * svc_rdma_xb_count_sges - Count how many SGEs will be needed
671 * @xdr: xdr_buf containing portion of an RPC message to transmit
672 * @data: pointer to arguments
673 *
674 * Returns:
675 * Number of SGEs needed to Send the contents of @xdr inline
676 */
677 static int svc_rdma_xb_count_sges(const struct xdr_buf *xdr,
678 void *data)
679 {
680 struct svc_rdma_pullup_data *args = data;
681 unsigned int remaining;
682 unsigned long offset;
683
684 if (xdr->head[0].iov_len)
685 ++args->pd_num_sges;
686
687 offset = offset_in_page(xdr->page_base);
688 remaining = xdr->page_len;
689 while (remaining) {
690 ++args->pd_num_sges;
691 remaining -= min_t(u32, PAGE_SIZE - offset, remaining);
692 offset = 0;
693 }
694
695 if (xdr->tail[0].iov_len)
696 ++args->pd_num_sges;
697
698 args->pd_length += xdr->len;
699 return 0;
700 }
701
702 /**
703 * svc_rdma_pull_up_needed - Determine whether to use pull-up
704 * @rdma: controlling transport
705 * @sctxt: send_ctxt for the Send WR
706 * @write_pcl: Write chunk list provided by client
707 * @xdr: xdr_buf containing RPC message to transmit
708 *
709 * Returns:
710 * %true if pull-up must be used
711 * %false otherwise
712 */
713 static bool svc_rdma_pull_up_needed(const struct svcxprt_rdma *rdma,
714 const struct svc_rdma_send_ctxt *sctxt,
715 const struct svc_rdma_pcl *write_pcl,
716 const struct xdr_buf *xdr)
717 {
718 /* Resources needed for the transport header */
719 struct svc_rdma_pullup_data args = {
720 .pd_length = sctxt->sc_hdrbuf.len,
721 .pd_num_sges = 1,
722 };
723 int ret;
724
725 ret = pcl_process_nonpayloads(write_pcl, xdr,
726 svc_rdma_xb_count_sges, &args);
727 if (ret < 0)
728 return false;
729
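/* Pulling the message up into the header buffer is preferred when the
 * Reply is small (copying is cheaper than DMA-mapping each element) or
 * when it would need more SGEs than the device supports.
 */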
730 if (args.pd_length < RPCRDMA_PULLUP_THRESH)
731 return true;
732 return args.pd_num_sges >= rdma->sc_max_send_sges;
733 }
734
735 /**
736 * svc_rdma_xb_linearize - Copy region of xdr_buf to flat buffer
737 * @xdr: xdr_buf containing portion of an RPC message to copy
738 * @data: pointer to arguments
739 *
740 * Returns:
741 * Always zero.
742 */
743 static int svc_rdma_xb_linearize(const struct xdr_buf *xdr,
744 void *data)
745 {
746 struct svc_rdma_pullup_data *args = data;
747 unsigned int len, remaining;
748 unsigned long pageoff;
749 struct page **ppages;
750
751 if (xdr->head[0].iov_len) {
752 memcpy(args->pd_dest, xdr->head[0].iov_base, xdr->head[0].iov_len);
753 args->pd_dest += xdr->head[0].iov_len;
754 }
755
756 ppages = xdr->pages + (xdr->page_base >> PAGE_SHIFT);
757 pageoff = offset_in_page(xdr->page_base);
758 remaining = xdr->page_len;
759 while (remaining) {
760 len = min_t(u32, PAGE_SIZE - pageoff, remaining);
761 memcpy(args->pd_dest, page_address(*ppages) + pageoff, len);
762 remaining -= len;
763 args->pd_dest += len;
764 pageoff = 0;
765 ppages++;
766 }
767
768 if (xdr->tail[0].iov_len) {
769 memcpy(args->pd_dest, xdr->tail[0].iov_base, xdr->tail[0].iov_len);
770 args->pd_dest += xdr->tail[0].iov_len;
771 }
772
773 args->pd_length += xdr->len;
774 return 0;
775 }
776
777 /**
778 * svc_rdma_pull_up_reply_msg - Copy Reply into a single buffer
779 * @rdma: controlling transport
780 * @sctxt: send_ctxt for the Send WR; xprt hdr is already prepared
781 * @write_pcl: Write chunk list provided by client
782 * @xdr: prepared xdr_buf containing RPC message
783 *
784 * The device is not capable of sending the reply directly.
785 * Assemble the elements of @xdr into the transport header buffer.
786 *
787 * Assumptions:
788 * pull_up_needed has determined that @xdr will fit in the buffer.
789 *
790 * Returns:
791 * %0 if pull-up was successful
792 * %-EMSGSIZE if a buffer manipulation problem occurred
793 */
794 static int svc_rdma_pull_up_reply_msg(const struct svcxprt_rdma *rdma,
795 struct svc_rdma_send_ctxt *sctxt,
796 const struct svc_rdma_pcl *write_pcl,
797 const struct xdr_buf *xdr)
798 {
799 struct svc_rdma_pullup_data args = {
800 .pd_dest = sctxt->sc_xprt_buf + sctxt->sc_hdrbuf.len,
801 };
802 int ret;
803
804 ret = pcl_process_nonpayloads(write_pcl, xdr,
805 svc_rdma_xb_linearize, &args);
806 if (ret < 0)
807 return ret;
808
809 sctxt->sc_sges[0].length = sctxt->sc_hdrbuf.len + args.pd_length;
810 trace_svcrdma_send_pullup(sctxt, args.pd_length);
811 return 0;
812 }
813
814 /* svc_rdma_map_reply_msg - DMA map the buffer holding RPC message
815 * @rdma: controlling transport
816 * @sctxt: send_ctxt for the Send WR
817 * @write_pcl: Write chunk list provided by client
818 * @reply_pcl: Reply chunk provided by client
819 * @xdr: prepared xdr_buf containing RPC message
820 *
821 * Returns:
822 * %0 if DMA mapping was successful.
823 * %-EMSGSIZE if a buffer manipulation problem occurred
824 * %-EIO if DMA mapping failed
825 *
826 * The Send WR's num_sge field is set in all cases.
827 */
828 int svc_rdma_map_reply_msg(struct svcxprt_rdma *rdma,
829 struct svc_rdma_send_ctxt *sctxt,
830 const struct svc_rdma_pcl *write_pcl,
831 const struct svc_rdma_pcl *reply_pcl,
832 const struct xdr_buf *xdr)
833 {
834 struct svc_rdma_map_data args = {
835 .md_rdma = rdma,
836 .md_ctxt = sctxt,
837 };
838
839 /* Set up the (persistently-mapped) transport header SGE. */
840 sctxt->sc_send_wr.num_sge = 1;
841 sctxt->sc_sges[0].length = sctxt->sc_hdrbuf.len;
842
843 /* If there is a Reply chunk, nothing follows the transport
844 * header, so there is nothing to map.
845 */
846 if (!pcl_is_empty(reply_pcl))
847 return 0;
848
849 /* For pull-up, svc_rdma_post_send() will sync the transport header.
850 * No additional DMA mapping is necessary.
851 */
852 if (svc_rdma_pull_up_needed(rdma, sctxt, write_pcl, xdr))
853 return svc_rdma_pull_up_reply_msg(rdma, sctxt, write_pcl, xdr);
854
855 return pcl_process_nonpayloads(write_pcl, xdr,
856 svc_rdma_xb_dma_map, &args);
857 }
858
859 /* The svc_rqst and all resources it owns are released as soon as
860 * svc_rdma_sendto returns. Transfer pages under I/O to the ctxt
861 * so they are released by the Send completion handler.
862 */
863 static void svc_rdma_save_io_pages(struct svc_rqst *rqstp,
864 struct svc_rdma_send_ctxt *ctxt)
865 {
866 int i, pages = rqstp->rq_next_page - rqstp->rq_respages;
867
868 ctxt->sc_page_count += pages;
869 for (i = 0; i < pages; i++) {
870 ctxt->sc_pages[i] = rqstp->rq_respages[i];
871 rqstp->rq_respages[i] = NULL;
872 }
873
874 /* Prevent svc_xprt_release from releasing pages in rq_pages */
875 rqstp->rq_next_page = rqstp->rq_respages;
876 }
877
878 /* Prepare the portion of the RPC Reply that will be transmitted
879 * via RDMA Send. The RPC-over-RDMA transport header is prepared
880 * in sc_sges[0], and the RPC xdr_buf is prepared in following sges.
881 *
882 * Depending on whether a Write list or Reply chunk is present,
883 * the server may Send all, a portion of, or none of the xdr_buf.
884 * In the latter case, only the transport header (sc_sges[0]) is
885 * transmitted.
886 *
887 * Assumptions:
888 * - The Reply's transport header will never be larger than a page.
889 */
890 static int svc_rdma_send_reply_msg(struct svcxprt_rdma *rdma,
891 struct svc_rdma_send_ctxt *sctxt,
892 const struct svc_rdma_recv_ctxt *rctxt,
893 struct svc_rqst *rqstp)
894 {
895 struct ib_send_wr *send_wr = &sctxt->sc_send_wr;
896 int ret;
897
898 ret = svc_rdma_map_reply_msg(rdma, sctxt, &rctxt->rc_write_pcl,
899 &rctxt->rc_reply_pcl, &rqstp->rq_res);
900 if (ret < 0)
901 return ret;
902
903 /* Transfer pages involved in RDMA Writes to the sctxt's
904 * page array. Completion handling releases these pages.
905 */
906 svc_rdma_save_io_pages(rqstp, sctxt);
907
908 if (rctxt->rc_inv_rkey) {
909 send_wr->opcode = IB_WR_SEND_WITH_INV;
910 send_wr->ex.invalidate_rkey = rctxt->rc_inv_rkey;
911 } else {
912 send_wr->opcode = IB_WR_SEND;
913 }
914
915 return svc_rdma_post_send(rdma, sctxt);
916 }
917
918 /**
919 * svc_rdma_send_error_msg - Send an RPC/RDMA v1 error response
920 * @rdma: controlling transport context
921 * @sctxt: Send context for the response
922 * @rctxt: Receive context for incoming bad message
923 * @status: negative errno indicating error that occurred
924 *
925 * Given the client-provided Read, Write, and Reply chunks, the
926 * server was not able to parse the Call or form a complete Reply.
927 * Return an RDMA_ERROR message so the client can retire the RPC
928 * transaction.
929 *
930 * The caller does not have to release @sctxt. It is released by
931 * Send completion, or by this function on error.
932 */
933 void svc_rdma_send_error_msg(struct svcxprt_rdma *rdma,
934 struct svc_rdma_send_ctxt *sctxt,
935 struct svc_rdma_recv_ctxt *rctxt,
936 int status)
937 {
938 __be32 *rdma_argp = rctxt->rc_recv_buf;
939 __be32 *p;
940
941 rpcrdma_set_xdrlen(&sctxt->sc_hdrbuf, 0);
942 xdr_init_encode(&sctxt->sc_stream, &sctxt->sc_hdrbuf,
943 sctxt->sc_xprt_buf, NULL);
944
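/* Fixed portion of the RDMA_ERROR header: echo the Call's XID and
 * RPC-over-RDMA version, advertise our credit limit, then mark the
 * message as an error response.
 */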
945 p = xdr_reserve_space(&sctxt->sc_stream,
946 rpcrdma_fixed_maxsz * sizeof(*p));
947 if (!p)
948 goto put_ctxt;
949
950 *p++ = *rdma_argp;
951 *p++ = *(rdma_argp + 1);
952 *p++ = rdma->sc_fc_credits;
953 *p = rdma_error;
954
955 switch (status) {
956 case -EPROTONOSUPPORT:
957 p = xdr_reserve_space(&sctxt->sc_stream, 3 * sizeof(*p));
958 if (!p)
959 goto put_ctxt;
960
961 *p++ = err_vers;
962 *p++ = rpcrdma_version;
963 *p = rpcrdma_version;
964 trace_svcrdma_err_vers(*rdma_argp);
965 break;
966 default:
967 p = xdr_reserve_space(&sctxt->sc_stream, sizeof(*p));
968 if (!p)
969 goto put_ctxt;
970
971 *p = err_chunk;
972 trace_svcrdma_err_chunk(*rdma_argp);
973 }
974
975 /* Remote Invalidation is skipped for simplicity. */
976 sctxt->sc_send_wr.num_sge = 1;
977 sctxt->sc_send_wr.opcode = IB_WR_SEND;
978 sctxt->sc_sges[0].length = sctxt->sc_hdrbuf.len;
979 if (svc_rdma_post_send(rdma, sctxt))
980 goto put_ctxt;
981 return;
982
983 put_ctxt:
984 svc_rdma_send_ctxt_put(rdma, sctxt);
985 }
986
987 /**
988 * svc_rdma_sendto - Transmit an RPC reply
989 * @rqstp: processed RPC request, reply XDR already in ::rq_res
990 *
991 * Any resources still associated with @rqstp are released upon return.
992 * If no reply message was possible, the connection is closed.
993 *
994 * Returns:
995 * %0 if an RPC reply has been successfully posted,
996 * %-ENOMEM if a resource shortage occurred (connection is lost),
997 * %-ENOTCONN if posting failed (connection is lost).
998 */
999 int svc_rdma_sendto(struct svc_rqst *rqstp)
1000 {
1001 struct svc_xprt *xprt = rqstp->rq_xprt;
1002 struct svcxprt_rdma *rdma =
1003 container_of(xprt, struct svcxprt_rdma, sc_xprt);
1004 struct svc_rdma_recv_ctxt *rctxt = rqstp->rq_xprt_ctxt;
1005 __be32 *rdma_argp = rctxt->rc_recv_buf;
1006 struct svc_rdma_send_ctxt *sctxt;
1007 unsigned int rc_size;
1008 __be32 *p;
1009 int ret;
1010
1011 ret = -ENOTCONN;
1012 if (svc_xprt_is_dead(xprt))
1013 goto drop_connection;
1014
1015 ret = -ENOMEM;
1016 sctxt = svc_rdma_send_ctxt_get(rdma);
1017 if (!sctxt)
1018 goto drop_connection;
1019
1020 ret = -EMSGSIZE;
1021 p = xdr_reserve_space(&sctxt->sc_stream,
1022 rpcrdma_fixed_maxsz * sizeof(*p));
1023 if (!p)
1024 goto put_ctxt;
1025
1026 ret = svc_rdma_send_write_list(rdma, rctxt, &rqstp->rq_res);
1027 if (ret < 0)
1028 goto put_ctxt;
1029
1030 rc_size = 0;
1031 if (!pcl_is_empty(&rctxt->rc_reply_pcl)) {
1032 ret = svc_rdma_prepare_reply_chunk(rdma, &rctxt->rc_write_pcl,
1033 &rctxt->rc_reply_pcl, sctxt,
1034 &rqstp->rq_res);
1035 if (ret < 0)
1036 goto reply_chunk;
1037 rc_size = ret;
1038 }
1039
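/* Fixed transport header fields: the Call's XID and version are echoed,
 * credits are granted, and the final word selects rdma_msg (RPC message
 * follows inline) or rdma_nomsg (the entire RPC message is conveyed via
 * the Reply chunk).
 */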
1040 *p++ = *rdma_argp;
1041 *p++ = *(rdma_argp + 1);
1042 *p++ = rdma->sc_fc_credits;
1043 *p = pcl_is_empty(&rctxt->rc_reply_pcl) ? rdma_msg : rdma_nomsg;
1044
1045 ret = svc_rdma_encode_read_list(sctxt);
1046 if (ret < 0)
1047 goto put_ctxt;
1048 ret = svc_rdma_encode_write_list(rctxt, sctxt);
1049 if (ret < 0)
1050 goto put_ctxt;
1051 ret = svc_rdma_encode_reply_chunk(rctxt, sctxt, rc_size);
1052 if (ret < 0)
1053 goto put_ctxt;
1054
1055 ret = svc_rdma_send_reply_msg(rdma, sctxt, rctxt, rqstp);
1056 if (ret < 0)
1057 goto put_ctxt;
1058 return 0;
1059
1060 reply_chunk:
1061 if (ret != -E2BIG && ret != -EINVAL)
1062 goto put_ctxt;
1063
1064 /* Send completion releases payload pages that were part
1065 * of previously posted RDMA Writes.
1066 */
1067 svc_rdma_save_io_pages(rqstp, sctxt);
1068 svc_rdma_send_error_msg(rdma, sctxt, rctxt, ret);
1069 return 0;
1070
1071 put_ctxt:
1072 svc_rdma_send_ctxt_put(rdma, sctxt);
1073 drop_connection:
1074 trace_svcrdma_send_err(rqstp, ret);
1075 svc_xprt_deferred_close(&rdma->sc_xprt);
1076 return -ENOTCONN;
1077 }
1078
1079 /**
1080 * svc_rdma_result_payload - special processing for a result payload
1081 * @rqstp: RPC transaction context
1082 * @offset: payload's byte offset in @rqstp->rq_res
1083 * @length: size of payload, in bytes
1084 *
1085 * Assign the passed-in result payload to the current Write chunk,
1086 * and advance rc_cur_result_payload to the next Write chunk, if
1087 * there is one.
1088 *
1089 * Return values:
1090 * %0 if successful or nothing needed to be done
1091 * %-E2BIG if the payload was larger than the Write chunk
1092 */
1093 int svc_rdma_result_payload(struct svc_rqst *rqstp, unsigned int offset,
1094 unsigned int length)
1095 {
1096 struct svc_rdma_recv_ctxt *rctxt = rqstp->rq_xprt_ctxt;
1097 struct svc_rdma_chunk *chunk;
1098
1099 chunk = rctxt->rc_cur_result_payload;
1100 if (!length || !chunk)
1101 return 0;
1102 rctxt->rc_cur_result_payload =
1103 pcl_next_chunk(&rctxt->rc_write_pcl, chunk);
1104
1105 if (length > chunk->ch_length)
1106 return -E2BIG;
1107 chunk->ch_position = offset;
1108 chunk->ch_payload_length = length;
1109 return 0;
1110 }
1111