// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (c) 2016-2018 Oracle. All rights reserved.
 * Copyright (c) 2014 Open Grid Computing, Inc. All rights reserved.
 * Copyright (c) 2005-2006 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Author: Tom Tucker <tom@opengridcomputing.com>
 */

/* Operation
 *
 * The main entry point is svc_rdma_sendto. This is called by the
 * RPC server when an RPC Reply is ready to be transmitted to a client.
 *
 * The passed-in svc_rqst contains a struct xdr_buf which holds an
 * XDR-encoded RPC Reply message. sendto must construct the RPC-over-RDMA
 * transport header, post all Write WRs needed for this Reply, then post
 * a Send WR conveying the transport header and the RPC message itself to
 * the client.
 *
 * svc_rdma_sendto must fully transmit the Reply before returning, as
 * the svc_rqst will be recycled as soon as sendto returns. Remaining
 * resources referred to by the svc_rqst are also recycled at that time.
 * Therefore any resources that must remain longer must be detached
 * from the svc_rqst and released later.
 *
 * Page Management
 *
 * The I/O that performs Reply transmission is asynchronous, and may
 * complete well after sendto returns. Thus pages under I/O must be
 * removed from the svc_rqst before sendto returns.
 *
 * The logic here depends on Send Queue and completion ordering. Since
 * the Send WR is always posted last, it will always complete last. Thus
 * when it completes, it is guaranteed that all previous Write WRs have
 * also completed.
 *
 * Write WRs are constructed and posted. Each Write segment gets its own
 * svc_rdma_rw_ctxt, allowing the Write completion handler to find and
 * DMA-unmap the pages under I/O for that Write segment. The Write
 * completion handler does not release any pages.
 *
 * When the Send WR is constructed, it also gets its own svc_rdma_send_ctxt.
 * Ownership of all of the Reply's pages is transferred into that
 * ctxt, the Send WR is posted, and sendto returns.
 *
 * The svc_rdma_send_ctxt is presented when the Send WR completes. The
 * Send completion handler finally releases the Reply's pages.
 *
 * This mechanism also assumes that completions on the transport's Send
 * Completion Queue do not run in parallel. Otherwise a Write completion
 * and Send completion running at the same time could release pages that
 * are still DMA-mapped.
 *
 * Error Handling
 *
 * - If the Send WR is posted successfully, it will either complete
 *   successfully, or get flushed. Either way, the Send completion
 *   handler releases the Reply's pages.
 * - If the Send WR cannot be posted, the forward path releases
 *   the Reply's pages.
 *
 * This handles the case, without the use of page reference counting,
 * where two different Write segments send portions of the same page.
 */

#include <linux/spinlock.h>
#include <asm/unaligned.h>

#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>

#include <linux/sunrpc/debug.h>
#include <linux/sunrpc/svc_rdma.h>

#include "xprt_rdma.h"
#include <trace/events/rpcrdma.h>

static void svc_rdma_wc_send(struct ib_cq *cq, struct ib_wc *wc);

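/* Allocate a send_ctxt with room for the maximum number of Send SGEs
 * this transport supports, plus a persistently DMA-mapped buffer that
 * carries the RPC-over-RDMA transport header. Returns NULL if memory
 * allocation or DMA mapping fails.
 */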
static struct svc_rdma_send_ctxt *
svc_rdma_send_ctxt_alloc(struct svcxprt_rdma *rdma)
{
        int node = ibdev_to_node(rdma->sc_cm_id->device);
        struct svc_rdma_send_ctxt *ctxt;
        dma_addr_t addr;
        void *buffer;
        int i;

        ctxt = kzalloc_node(struct_size(ctxt, sc_sges, rdma->sc_max_send_sges),
                            GFP_KERNEL, node);
        if (!ctxt)
                goto fail0;
        buffer = kmalloc_node(rdma->sc_max_req_size, GFP_KERNEL, node);
        if (!buffer)
                goto fail1;
        addr = ib_dma_map_single(rdma->sc_pd->device, buffer,
                                 rdma->sc_max_req_size, DMA_TO_DEVICE);
        if (ib_dma_mapping_error(rdma->sc_pd->device, addr))
                goto fail2;

        svc_rdma_send_cid_init(rdma, &ctxt->sc_cid);

        ctxt->sc_rdma = rdma;
        ctxt->sc_send_wr.next = NULL;
        ctxt->sc_send_wr.wr_cqe = &ctxt->sc_cqe;
        ctxt->sc_send_wr.sg_list = ctxt->sc_sges;
        ctxt->sc_send_wr.send_flags = IB_SEND_SIGNALED;
        ctxt->sc_cqe.done = svc_rdma_wc_send;
        ctxt->sc_xprt_buf = buffer;
        xdr_buf_init(&ctxt->sc_hdrbuf, ctxt->sc_xprt_buf,
                     rdma->sc_max_req_size);
        ctxt->sc_sges[0].addr = addr;

        for (i = 0; i < rdma->sc_max_send_sges; i++)
                ctxt->sc_sges[i].lkey = rdma->sc_pd->local_dma_lkey;
        return ctxt;

fail2:
        kfree(buffer);
fail1:
        kfree(ctxt);
fail0:
        return NULL;
}

/**
 * svc_rdma_send_ctxts_destroy - Release all send_ctxt's for an xprt
 * @rdma: svcxprt_rdma being torn down
 *
 */
void svc_rdma_send_ctxts_destroy(struct svcxprt_rdma *rdma)
{
        struct svc_rdma_send_ctxt *ctxt;
        struct llist_node *node;

        while ((node = llist_del_first(&rdma->sc_send_ctxts)) != NULL) {
                ctxt = llist_entry(node, struct svc_rdma_send_ctxt, sc_node);
                ib_dma_unmap_single(rdma->sc_pd->device,
                                    ctxt->sc_sges[0].addr,
                                    rdma->sc_max_req_size,
                                    DMA_TO_DEVICE);
                kfree(ctxt->sc_xprt_buf);
                kfree(ctxt);
        }
}

/**
 * svc_rdma_send_ctxt_get - Get a free send_ctxt
 * @rdma: controlling svcxprt_rdma
 *
 * Returns a ready-to-use send_ctxt, or NULL if none are
 * available and a fresh one cannot be allocated.
 */
struct svc_rdma_send_ctxt *svc_rdma_send_ctxt_get(struct svcxprt_rdma *rdma)
{
        struct svc_rdma_send_ctxt *ctxt;
        struct llist_node *node;

        spin_lock(&rdma->sc_send_lock);
        node = llist_del_first(&rdma->sc_send_ctxts);
        spin_unlock(&rdma->sc_send_lock);
        if (!node)
                goto out_empty;

        ctxt = llist_entry(node, struct svc_rdma_send_ctxt, sc_node);

out:
        rpcrdma_set_xdrlen(&ctxt->sc_hdrbuf, 0);
        xdr_init_encode(&ctxt->sc_stream, &ctxt->sc_hdrbuf,
                        ctxt->sc_xprt_buf, NULL);

        ctxt->sc_send_wr.num_sge = 0;
        ctxt->sc_cur_sge_no = 0;
        ctxt->sc_page_count = 0;
        return ctxt;

out_empty:
        ctxt = svc_rdma_send_ctxt_alloc(rdma);
        if (!ctxt)
                return NULL;
        goto out;
}

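/* Release the Reply pages and DMA mappings held by a send_ctxt, then
 * return the ctxt to the transport's free list for reuse.
 */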
static void svc_rdma_send_ctxt_release(struct svcxprt_rdma *rdma,
                                       struct svc_rdma_send_ctxt *ctxt)
{
        struct ib_device *device = rdma->sc_cm_id->device;
        unsigned int i;

        if (ctxt->sc_page_count)
                release_pages(ctxt->sc_pages, ctxt->sc_page_count);

        /* The first SGE contains the transport header, which
         * remains mapped until @ctxt is destroyed.
         */
        for (i = 1; i < ctxt->sc_send_wr.num_sge; i++) {
                trace_svcrdma_dma_unmap_page(&ctxt->sc_cid,
                                             ctxt->sc_sges[i].addr,
                                             ctxt->sc_sges[i].length);
                ib_dma_unmap_page(device,
                                  ctxt->sc_sges[i].addr,
                                  ctxt->sc_sges[i].length,
                                  DMA_TO_DEVICE);
        }

        llist_add(&ctxt->sc_node, &rdma->sc_send_ctxts);
}

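/* Work handler: performs the actual send_ctxt release in process
 * context rather than directly in the Send completion handler.
 */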
static void svc_rdma_send_ctxt_put_async(struct work_struct *work)
{
        struct svc_rdma_send_ctxt *ctxt;

        ctxt = container_of(work, struct svc_rdma_send_ctxt, sc_work);
        svc_rdma_send_ctxt_release(ctxt->sc_rdma, ctxt);
}

/**
 * svc_rdma_send_ctxt_put - Return send_ctxt to free list
 * @rdma: controlling svcxprt_rdma
 * @ctxt: object to return to the free list
 *
 * Pages left in sc_pages are DMA unmapped and released.
 */
void svc_rdma_send_ctxt_put(struct svcxprt_rdma *rdma,
                            struct svc_rdma_send_ctxt *ctxt)
{
        INIT_WORK(&ctxt->sc_work, svc_rdma_send_ctxt_put_async);
        queue_work(svcrdma_wq, &ctxt->sc_work);
}

/**
 * svc_rdma_wake_send_waiters - manage Send Queue accounting
 * @rdma: controlling transport
 * @avail: Number of additional SQEs that are now available
 *
 */
void svc_rdma_wake_send_waiters(struct svcxprt_rdma *rdma, int avail)
{
        atomic_add(avail, &rdma->sc_sq_avail);
        smp_mb__after_atomic();
        if (unlikely(waitqueue_active(&rdma->sc_send_wait)))
                wake_up(&rdma->sc_send_wait);
}

/**
 * svc_rdma_wc_send - Invoked by RDMA provider for each polled Send WC
 * @cq: Completion Queue context
 * @wc: Work Completion object
 *
 * NB: The svc_xprt/svcxprt_rdma is pinned whenever it's possible that
 * the Send completion handler could be running.
 */
static void svc_rdma_wc_send(struct ib_cq *cq, struct ib_wc *wc)
{
        struct svcxprt_rdma *rdma = cq->cq_context;
        struct ib_cqe *cqe = wc->wr_cqe;
        struct svc_rdma_send_ctxt *ctxt =
                container_of(cqe, struct svc_rdma_send_ctxt, sc_cqe);

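        /* This SQE is available again regardless of the completion
         * status, so replenish the SQ accounting before examining
         * wc->status.
         */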
        svc_rdma_wake_send_waiters(rdma, 1);

        if (unlikely(wc->status != IB_WC_SUCCESS))
                goto flushed;

        trace_svcrdma_wc_send(&ctxt->sc_cid);
        svc_rdma_send_ctxt_put(rdma, ctxt);
        return;

flushed:
        if (wc->status != IB_WC_WR_FLUSH_ERR)
                trace_svcrdma_wc_send_err(wc, &ctxt->sc_cid);
        else
                trace_svcrdma_wc_send_flush(wc, &ctxt->sc_cid);
        svc_rdma_send_ctxt_put(rdma, ctxt);
        svc_xprt_deferred_close(&rdma->sc_xprt);
}

/**
 * svc_rdma_send - Post a single Send WR
 * @rdma: transport on which to post the WR
 * @ctxt: send ctxt with a Send WR ready to post
 *
 * Returns zero if the Send WR was posted successfully. Otherwise, a
 * negative errno is returned.
 */
int svc_rdma_send(struct svcxprt_rdma *rdma, struct svc_rdma_send_ctxt *ctxt)
{
        struct ib_send_wr *wr = &ctxt->sc_send_wr;
        int ret;

        might_sleep();

        /* Sync the transport header buffer */
        ib_dma_sync_single_for_device(rdma->sc_pd->device,
                                      wr->sg_list[0].addr,
                                      wr->sg_list[0].length,
                                      DMA_TO_DEVICE);

        /* If the SQ is full, wait until an SQ entry is available */
        while (1) {
                if ((atomic_dec_return(&rdma->sc_sq_avail) < 0)) {
                        percpu_counter_inc(&svcrdma_stat_sq_starve);
                        trace_svcrdma_sq_full(rdma, &ctxt->sc_cid);
                        atomic_inc(&rdma->sc_sq_avail);
                        wait_event(rdma->sc_send_wait,
                                   atomic_read(&rdma->sc_sq_avail) > 1);
                        if (test_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags))
                                return -ENOTCONN;
                        trace_svcrdma_sq_retry(rdma, &ctxt->sc_cid);
                        continue;
                }

                trace_svcrdma_post_send(ctxt);
                ret = ib_post_send(rdma->sc_qp, wr, NULL);
                if (ret)
                        break;
                return 0;
        }

        trace_svcrdma_sq_post_err(rdma, &ctxt->sc_cid, ret);
        svc_xprt_deferred_close(&rdma->sc_xprt);
        wake_up(&rdma->sc_send_wait);
        return ret;
}

/**
 * svc_rdma_encode_read_list - Encode RPC Reply's Read chunk list
 * @sctxt: Send context for the RPC Reply
 *
 * Return values:
 *   On success, returns length in bytes of the Reply XDR buffer
 *   that was consumed by the Reply Read list
 *   %-EMSGSIZE on XDR buffer overflow
 */
static ssize_t svc_rdma_encode_read_list(struct svc_rdma_send_ctxt *sctxt)
{
        /* RPC-over-RDMA version 1 replies never have a Read list. */
        return xdr_stream_encode_item_absent(&sctxt->sc_stream);
}

/**
 * svc_rdma_encode_write_segment - Encode one Write segment
 * @sctxt: Send context for the RPC Reply
 * @chunk: Write chunk to push
 * @remaining: remaining bytes of the payload left in the Write chunk
 * @segno: which segment in the chunk
 *
 * Return values:
 *   On success, returns length in bytes of the Reply XDR buffer
 *   that was consumed by the Write segment, and updates @remaining
 *   %-EMSGSIZE on XDR buffer overflow
 */
static ssize_t svc_rdma_encode_write_segment(struct svc_rdma_send_ctxt *sctxt,
                                             const struct svc_rdma_chunk *chunk,
                                             u32 *remaining, unsigned int segno)
{
        const struct svc_rdma_segment *segment = &chunk->ch_segments[segno];
        const size_t len = rpcrdma_segment_maxsz * sizeof(__be32);
        u32 length;
        __be32 *p;

        p = xdr_reserve_space(&sctxt->sc_stream, len);
        if (!p)
                return -EMSGSIZE;

        length = min_t(u32, *remaining, segment->rs_length);
        *remaining -= length;
        xdr_encode_rdma_segment(p, segment->rs_handle, length,
                                segment->rs_offset);
        trace_svcrdma_encode_wseg(sctxt, segno, segment->rs_handle, length,
                                  segment->rs_offset);
        return len;
}

/**
 * svc_rdma_encode_write_chunk - Encode one Write chunk
 * @sctxt: Send context for the RPC Reply
 * @chunk: Write chunk to push
 *
 * Copy a Write chunk from the Call transport header to the
 * Reply transport header. Update each segment's length field
 * to reflect the number of bytes written in that segment.
 *
 * Return values:
 *   On success, returns length in bytes of the Reply XDR buffer
 *   that was consumed by the Write chunk
 *   %-EMSGSIZE on XDR buffer overflow
 */
static ssize_t svc_rdma_encode_write_chunk(struct svc_rdma_send_ctxt *sctxt,
                                           const struct svc_rdma_chunk *chunk)
{
        u32 remaining = chunk->ch_payload_length;
        unsigned int segno;
        ssize_t len, ret;

        len = 0;
        ret = xdr_stream_encode_item_present(&sctxt->sc_stream);
        if (ret < 0)
                return ret;
        len += ret;

        ret = xdr_stream_encode_u32(&sctxt->sc_stream, chunk->ch_segcount);
        if (ret < 0)
                return ret;
        len += ret;

        for (segno = 0; segno < chunk->ch_segcount; segno++) {
                ret = svc_rdma_encode_write_segment(sctxt, chunk, &remaining, segno);
                if (ret < 0)
                        return ret;
                len += ret;
        }

        return len;
}

/**
 * svc_rdma_encode_write_list - Encode RPC Reply's Write chunk list
 * @rctxt: Reply context with information about the RPC Call
 * @sctxt: Send context for the RPC Reply
 *
 * Return values:
 *   On success, returns length in bytes of the Reply XDR buffer
 *   that was consumed by the Reply's Write list
 *   %-EMSGSIZE on XDR buffer overflow
 */
static ssize_t svc_rdma_encode_write_list(struct svc_rdma_recv_ctxt *rctxt,
                                          struct svc_rdma_send_ctxt *sctxt)
{
        struct svc_rdma_chunk *chunk;
        ssize_t len, ret;

        len = 0;
        pcl_for_each_chunk(chunk, &rctxt->rc_write_pcl) {
                ret = svc_rdma_encode_write_chunk(sctxt, chunk);
                if (ret < 0)
                        return ret;
                len += ret;
        }

        /* Terminate the Write list */
        ret = xdr_stream_encode_item_absent(&sctxt->sc_stream);
        if (ret < 0)
                return ret;

        return len + ret;
}

/**
 * svc_rdma_encode_reply_chunk - Encode RPC Reply's Reply chunk
 * @rctxt: Reply context with information about the RPC Call
 * @sctxt: Send context for the RPC Reply
 * @length: size in bytes of the payload in the Reply chunk
 *
 * Return values:
 *   On success, returns length in bytes of the Reply XDR buffer
 *   that was consumed by the Reply's Reply chunk
 *   %-EMSGSIZE on XDR buffer overflow
 *   %-E2BIG if the RPC message is larger than the Reply chunk
 */
static ssize_t
svc_rdma_encode_reply_chunk(struct svc_rdma_recv_ctxt *rctxt,
                            struct svc_rdma_send_ctxt *sctxt,
                            unsigned int length)
{
        struct svc_rdma_chunk *chunk;

        if (pcl_is_empty(&rctxt->rc_reply_pcl))
                return xdr_stream_encode_item_absent(&sctxt->sc_stream);

        chunk = pcl_first_chunk(&rctxt->rc_reply_pcl);
        if (length > chunk->ch_length)
                return -E2BIG;

        chunk->ch_payload_length = length;
        return svc_rdma_encode_write_chunk(sctxt, chunk);
}

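/* Argument bundle passed through pcl_process_nonpayloads() to the
 * per-page and per-iovec DMA-mapping helpers below.
 */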
struct svc_rdma_map_data {
        struct svcxprt_rdma *md_rdma;
        struct svc_rdma_send_ctxt *md_ctxt;
};

/**
 * svc_rdma_page_dma_map - DMA map one page
 * @data: pointer to arguments
 * @page: struct page to DMA map
 * @offset: offset into the page
 * @len: number of bytes to map
 *
 * Returns:
 *   %0 if DMA mapping was successful
 *   %-EIO if the page cannot be DMA mapped
 */
static int svc_rdma_page_dma_map(void *data, struct page *page,
                                 unsigned long offset, unsigned int len)
{
        struct svc_rdma_map_data *args = data;
        struct svcxprt_rdma *rdma = args->md_rdma;
        struct svc_rdma_send_ctxt *ctxt = args->md_ctxt;
        struct ib_device *dev = rdma->sc_cm_id->device;
        dma_addr_t dma_addr;

        ++ctxt->sc_cur_sge_no;

        dma_addr = ib_dma_map_page(dev, page, offset, len, DMA_TO_DEVICE);
        if (ib_dma_mapping_error(dev, dma_addr))
                goto out_maperr;

        trace_svcrdma_dma_map_page(&ctxt->sc_cid, dma_addr, len);
        ctxt->sc_sges[ctxt->sc_cur_sge_no].addr = dma_addr;
        ctxt->sc_sges[ctxt->sc_cur_sge_no].length = len;
        ctxt->sc_send_wr.num_sge++;
        return 0;

out_maperr:
        trace_svcrdma_dma_map_err(&ctxt->sc_cid, dma_addr, len);
        return -EIO;
}

/**
 * svc_rdma_iov_dma_map - DMA map an iovec
 * @data: pointer to arguments
 * @iov: kvec to DMA map
 *
 * ib_dma_map_page() is used here because the Send completion path
 * (svc_rdma_send_ctxt_release()) undoes these mappings using
 * ib_dma_unmap_page() exclusively.
 *
 * Returns:
 *   %0 if DMA mapping was successful
 *   %-EIO if the iovec cannot be DMA mapped
 */
static int svc_rdma_iov_dma_map(void *data, const struct kvec *iov)
{
        if (!iov->iov_len)
                return 0;
        return svc_rdma_page_dma_map(data, virt_to_page(iov->iov_base),
                                     offset_in_page(iov->iov_base),
                                     iov->iov_len);
}

/**
 * svc_rdma_xb_dma_map - DMA map all segments of an xdr_buf
 * @xdr: xdr_buf containing portion of an RPC message to transmit
 * @data: pointer to arguments
 *
 * Returns:
 *   On success, returns the total length in bytes of @xdr
 *   %-EIO if DMA mapping failed
 *
 * On failure, any DMA mappings that have been already done must be
 * unmapped by the caller.
 */
static int svc_rdma_xb_dma_map(const struct xdr_buf *xdr, void *data)
{
        unsigned int len, remaining;
        unsigned long pageoff;
        struct page **ppages;
        int ret;

        ret = svc_rdma_iov_dma_map(data, &xdr->head[0]);
        if (ret < 0)
                return ret;

        ppages = xdr->pages + (xdr->page_base >> PAGE_SHIFT);
        pageoff = offset_in_page(xdr->page_base);
        remaining = xdr->page_len;
        while (remaining) {
                len = min_t(u32, PAGE_SIZE - pageoff, remaining);

                ret = svc_rdma_page_dma_map(data, *ppages++, pageoff, len);
                if (ret < 0)
                        return ret;

                remaining -= len;
                pageoff = 0;
        }

        ret = svc_rdma_iov_dma_map(data, &xdr->tail[0]);
        if (ret < 0)
                return ret;

        return xdr->len;
}

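/* Scratch state shared by the pull-up helpers: the destination for
 * linearized bytes, a running byte count, and the number of SGEs the
 * message would otherwise require.
 */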
struct svc_rdma_pullup_data {
        u8 *pd_dest;
        unsigned int pd_length;
        unsigned int pd_num_sges;
};

/**
 * svc_rdma_xb_count_sges - Count how many SGEs will be needed
 * @xdr: xdr_buf containing portion of an RPC message to transmit
 * @data: pointer to arguments
 *
 * Returns:
 *   Always zero. The number of SGEs needed to Send the contents
 *   of @xdr inline is accumulated in @data.
 */
static int svc_rdma_xb_count_sges(const struct xdr_buf *xdr,
                                  void *data)
{
        struct svc_rdma_pullup_data *args = data;
        unsigned int remaining;
        unsigned long offset;

        if (xdr->head[0].iov_len)
                ++args->pd_num_sges;

        offset = offset_in_page(xdr->page_base);
        remaining = xdr->page_len;
        while (remaining) {
                ++args->pd_num_sges;
                remaining -= min_t(u32, PAGE_SIZE - offset, remaining);
                offset = 0;
        }

        if (xdr->tail[0].iov_len)
                ++args->pd_num_sges;

        args->pd_length += xdr->len;
        return 0;
}

/**
 * svc_rdma_pull_up_needed - Determine whether to use pull-up
 * @rdma: controlling transport
 * @sctxt: send_ctxt for the Send WR
 * @write_pcl: Write chunk list provided by client
 * @xdr: xdr_buf containing RPC message to transmit
 *
 * Returns:
 *   %true if pull-up must be used
 *   %false otherwise
 */
static bool svc_rdma_pull_up_needed(const struct svcxprt_rdma *rdma,
                                    const struct svc_rdma_send_ctxt *sctxt,
                                    const struct svc_rdma_pcl *write_pcl,
                                    const struct xdr_buf *xdr)
{
        /* Resources needed for the transport header */
        struct svc_rdma_pullup_data args = {
                .pd_length = sctxt->sc_hdrbuf.len,
                .pd_num_sges = 1,
        };
        int ret;

        ret = pcl_process_nonpayloads(write_pcl, xdr,
                                      svc_rdma_xb_count_sges, &args);
        if (ret < 0)
                return false;

        if (args.pd_length < RPCRDMA_PULLUP_THRESH)
                return true;
        return args.pd_num_sges >= rdma->sc_max_send_sges;
}

/**
 * svc_rdma_xb_linearize - Copy region of xdr_buf to flat buffer
 * @xdr: xdr_buf containing portion of an RPC message to copy
 * @data: pointer to arguments
 *
 * Returns:
 *   Always zero.
 */
static int svc_rdma_xb_linearize(const struct xdr_buf *xdr,
                                 void *data)
{
        struct svc_rdma_pullup_data *args = data;
        unsigned int len, remaining;
        unsigned long pageoff;
        struct page **ppages;

        if (xdr->head[0].iov_len) {
                memcpy(args->pd_dest, xdr->head[0].iov_base, xdr->head[0].iov_len);
                args->pd_dest += xdr->head[0].iov_len;
        }

        ppages = xdr->pages + (xdr->page_base >> PAGE_SHIFT);
        pageoff = offset_in_page(xdr->page_base);
        remaining = xdr->page_len;
        while (remaining) {
                len = min_t(u32, PAGE_SIZE - pageoff, remaining);
                memcpy(args->pd_dest, page_address(*ppages) + pageoff, len);
                remaining -= len;
                args->pd_dest += len;
                pageoff = 0;
                ppages++;
        }

        if (xdr->tail[0].iov_len) {
                memcpy(args->pd_dest, xdr->tail[0].iov_base, xdr->tail[0].iov_len);
                args->pd_dest += xdr->tail[0].iov_len;
        }

        args->pd_length += xdr->len;
        return 0;
}

/**
 * svc_rdma_pull_up_reply_msg - Copy Reply into a single buffer
 * @rdma: controlling transport
 * @sctxt: send_ctxt for the Send WR; xprt hdr is already prepared
 * @write_pcl: Write chunk list provided by client
 * @xdr: prepared xdr_buf containing RPC message
 *
 * The device is not capable of sending the reply directly.
 * Assemble the elements of @xdr into the transport header buffer.
 *
 * Assumptions:
 *   pull_up_needed has determined that @xdr will fit in the buffer.
 *
 * Returns:
 *   %0 if pull-up was successful
 *   %-EMSGSIZE if a buffer manipulation problem occurred
 */
static int svc_rdma_pull_up_reply_msg(const struct svcxprt_rdma *rdma,
                                      struct svc_rdma_send_ctxt *sctxt,
                                      const struct svc_rdma_pcl *write_pcl,
                                      const struct xdr_buf *xdr)
{
        struct svc_rdma_pullup_data args = {
                .pd_dest = sctxt->sc_xprt_buf + sctxt->sc_hdrbuf.len,
        };
        int ret;

        ret = pcl_process_nonpayloads(write_pcl, xdr,
                                      svc_rdma_xb_linearize, &args);
        if (ret < 0)
                return ret;

        sctxt->sc_sges[0].length = sctxt->sc_hdrbuf.len + args.pd_length;
        trace_svcrdma_send_pullup(sctxt, args.pd_length);
        return 0;
}

/**
 * svc_rdma_map_reply_msg - DMA map the buffer holding RPC message
 * @rdma: controlling transport
 * @sctxt: send_ctxt for the Send WR
 * @write_pcl: Write chunk list provided by client
 * @reply_pcl: Reply chunk provided by client
 * @xdr: prepared xdr_buf containing RPC message
 *
 * Returns:
 *   %0 if DMA mapping was successful.
 *   %-EMSGSIZE if a buffer manipulation problem occurred
 *   %-EIO if DMA mapping failed
 *
 * The Send WR's num_sge field is set in all cases.
 */
int svc_rdma_map_reply_msg(struct svcxprt_rdma *rdma,
                           struct svc_rdma_send_ctxt *sctxt,
                           const struct svc_rdma_pcl *write_pcl,
                           const struct svc_rdma_pcl *reply_pcl,
                           const struct xdr_buf *xdr)
{
        struct svc_rdma_map_data args = {
                .md_rdma = rdma,
                .md_ctxt = sctxt,
        };

        /* Set up the (persistently-mapped) transport header SGE. */
        sctxt->sc_send_wr.num_sge = 1;
        sctxt->sc_sges[0].length = sctxt->sc_hdrbuf.len;

        /* If there is a Reply chunk, nothing follows the transport
         * header, so there is nothing to map.
         */
        if (!pcl_is_empty(reply_pcl))
                return 0;

        /* For pull-up, svc_rdma_send() will sync the transport header.
         * No additional DMA mapping is necessary.
         */
        if (svc_rdma_pull_up_needed(rdma, sctxt, write_pcl, xdr))
                return svc_rdma_pull_up_reply_msg(rdma, sctxt, write_pcl, xdr);

        return pcl_process_nonpayloads(write_pcl, xdr,
                                       svc_rdma_xb_dma_map, &args);
}

/* The svc_rqst and all resources it owns are released as soon as
 * svc_rdma_sendto returns. Transfer pages under I/O to the ctxt
 * so they are released by the Send completion handler.
 */
static void svc_rdma_save_io_pages(struct svc_rqst *rqstp,
                                   struct svc_rdma_send_ctxt *ctxt)
{
        int i, pages = rqstp->rq_next_page - rqstp->rq_respages;

        ctxt->sc_page_count += pages;
        for (i = 0; i < pages; i++) {
                ctxt->sc_pages[i] = rqstp->rq_respages[i];
                rqstp->rq_respages[i] = NULL;
        }

        /* Prevent svc_xprt_release from releasing pages in rq_pages */
        rqstp->rq_next_page = rqstp->rq_respages;
}

/* Prepare the portion of the RPC Reply that will be transmitted
 * via RDMA Send. The RPC-over-RDMA transport header is prepared
 * in sc_sges[0], and the RPC xdr_buf is prepared in following sges.
 *
 * Depending on whether a Write list or Reply chunk is present,
 * the server may send all, a portion of, or none of the xdr_buf.
 * In the latter case, only the transport header (sc_sges[0]) is
 * transmitted.
 *
 * RDMA Send is the last step of transmitting an RPC reply. Pages
 * involved in the earlier RDMA Writes are here transferred out
 * of the rqstp and into the sctxt's page array. These pages are
 * DMA unmapped by each Write completion, but the subsequent Send
 * completion finally releases these pages.
 *
 * Assumptions:
 * - The Reply's transport header will never be larger than a page.
 */
static int svc_rdma_send_reply_msg(struct svcxprt_rdma *rdma,
                                   struct svc_rdma_send_ctxt *sctxt,
                                   const struct svc_rdma_recv_ctxt *rctxt,
                                   struct svc_rqst *rqstp)
{
        int ret;

        ret = svc_rdma_map_reply_msg(rdma, sctxt, &rctxt->rc_write_pcl,
                                     &rctxt->rc_reply_pcl, &rqstp->rq_res);
        if (ret < 0)
                return ret;

        svc_rdma_save_io_pages(rqstp, sctxt);

        if (rctxt->rc_inv_rkey) {
                sctxt->sc_send_wr.opcode = IB_WR_SEND_WITH_INV;
                sctxt->sc_send_wr.ex.invalidate_rkey = rctxt->rc_inv_rkey;
        } else {
                sctxt->sc_send_wr.opcode = IB_WR_SEND;
        }

        return svc_rdma_send(rdma, sctxt);
}

/**
 * svc_rdma_send_error_msg - Send an RPC/RDMA v1 error response
 * @rdma: controlling transport context
 * @sctxt: Send context for the response
 * @rctxt: Receive context for incoming bad message
 * @status: negative errno indicating error that occurred
 *
 * Given the client-provided Read, Write, and Reply chunks, the
 * server was not able to parse the Call or form a complete Reply.
 * Return an RDMA_ERROR message so the client can retire the RPC
 * transaction.
 *
 * The caller does not have to release @sctxt. It is released by
 * Send completion, or by this function on error.
 */
void svc_rdma_send_error_msg(struct svcxprt_rdma *rdma,
                             struct svc_rdma_send_ctxt *sctxt,
                             struct svc_rdma_recv_ctxt *rctxt,
                             int status)
{
        __be32 *rdma_argp = rctxt->rc_recv_buf;
        __be32 *p;

        rpcrdma_set_xdrlen(&sctxt->sc_hdrbuf, 0);
        xdr_init_encode(&sctxt->sc_stream, &sctxt->sc_hdrbuf,
                        sctxt->sc_xprt_buf, NULL);

        p = xdr_reserve_space(&sctxt->sc_stream,
                              rpcrdma_fixed_maxsz * sizeof(*p));
        if (!p)
                goto put_ctxt;

        *p++ = *rdma_argp;
        *p++ = *(rdma_argp + 1);
        *p++ = rdma->sc_fc_credits;
        *p = rdma_error;

        switch (status) {
        case -EPROTONOSUPPORT:
                p = xdr_reserve_space(&sctxt->sc_stream, 3 * sizeof(*p));
                if (!p)
                        goto put_ctxt;

                *p++ = err_vers;
                *p++ = rpcrdma_version;
                *p = rpcrdma_version;
                trace_svcrdma_err_vers(*rdma_argp);
                break;
        default:
                p = xdr_reserve_space(&sctxt->sc_stream, sizeof(*p));
                if (!p)
                        goto put_ctxt;

                *p = err_chunk;
                trace_svcrdma_err_chunk(*rdma_argp);
        }

        /* Remote Invalidation is skipped for simplicity. */
        sctxt->sc_send_wr.num_sge = 1;
        sctxt->sc_send_wr.opcode = IB_WR_SEND;
        sctxt->sc_sges[0].length = sctxt->sc_hdrbuf.len;
        if (svc_rdma_send(rdma, sctxt))
                goto put_ctxt;
        return;

put_ctxt:
        svc_rdma_send_ctxt_put(rdma, sctxt);
}

/**
 * svc_rdma_sendto - Transmit an RPC reply
 * @rqstp: processed RPC request, reply XDR already in ::rq_res
 *
 * Any resources still associated with @rqstp are released upon return.
 * If no reply message was possible, the connection is closed.
 *
 * Returns:
 *   %0 if an RPC reply has been successfully posted,
 *   %-ENOMEM if a resource shortage occurred (connection is lost),
 *   %-ENOTCONN if posting failed (connection is lost).
 */
int svc_rdma_sendto(struct svc_rqst *rqstp)
{
        struct svc_xprt *xprt = rqstp->rq_xprt;
        struct svcxprt_rdma *rdma =
                container_of(xprt, struct svcxprt_rdma, sc_xprt);
        struct svc_rdma_recv_ctxt *rctxt = rqstp->rq_xprt_ctxt;
        __be32 *rdma_argp = rctxt->rc_recv_buf;
        struct svc_rdma_send_ctxt *sctxt;
        unsigned int rc_size;
        __be32 *p;
        int ret;

        ret = -ENOTCONN;
        if (svc_xprt_is_dead(xprt))
                goto drop_connection;

        ret = -ENOMEM;
        sctxt = svc_rdma_send_ctxt_get(rdma);
        if (!sctxt)
                goto drop_connection;

        ret = -EMSGSIZE;
        p = xdr_reserve_space(&sctxt->sc_stream,
                              rpcrdma_fixed_maxsz * sizeof(*p));
        if (!p)
                goto put_ctxt;

        ret = svc_rdma_send_reply_chunk(rdma, rctxt, &rqstp->rq_res);
        if (ret < 0)
                goto reply_chunk;
        rc_size = ret;

        *p++ = *rdma_argp;
        *p++ = *(rdma_argp + 1);
        *p++ = rdma->sc_fc_credits;
        *p = pcl_is_empty(&rctxt->rc_reply_pcl) ? rdma_msg : rdma_nomsg;

        ret = svc_rdma_encode_read_list(sctxt);
        if (ret < 0)
                goto put_ctxt;
        ret = svc_rdma_encode_write_list(rctxt, sctxt);
        if (ret < 0)
                goto put_ctxt;
        ret = svc_rdma_encode_reply_chunk(rctxt, sctxt, rc_size);
        if (ret < 0)
                goto put_ctxt;

        ret = svc_rdma_send_reply_msg(rdma, sctxt, rctxt, rqstp);
        if (ret < 0)
                goto put_ctxt;
        return 0;

reply_chunk:
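        /* Only chunk encoding problems (-E2BIG, -EINVAL) can be
         * reported to the client as an RDMA_ERROR response; any other
         * failure drops the connection.
         */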
        if (ret != -E2BIG && ret != -EINVAL)
                goto put_ctxt;

        /* Send completion releases payload pages that were part
         * of previously posted RDMA Writes.
         */
        svc_rdma_save_io_pages(rqstp, sctxt);
        svc_rdma_send_error_msg(rdma, sctxt, rctxt, ret);
        return 0;

put_ctxt:
        svc_rdma_send_ctxt_put(rdma, sctxt);
drop_connection:
        trace_svcrdma_send_err(rqstp, ret);
        svc_xprt_deferred_close(&rdma->sc_xprt);
        return -ENOTCONN;
}

/**
 * svc_rdma_result_payload - special processing for a result payload
 * @rqstp: svc_rqst to operate on
 * @offset: payload's byte offset in @rqstp->rq_res
 * @length: size of payload, in bytes
 *
 * Return values:
 *   %0 if successful or nothing needed to be done
 *   %-EMSGSIZE on XDR buffer overflow
 *   %-E2BIG if the payload was larger than the Write chunk
 *   %-EINVAL if client provided too many segments
 *   %-ENOMEM if rdma_rw context pool was exhausted
 *   %-ENOTCONN if posting failed (connection is lost)
 *   %-EIO if rdma_rw initialization failed (DMA mapping, etc)
 */
int svc_rdma_result_payload(struct svc_rqst *rqstp, unsigned int offset,
                            unsigned int length)
{
        struct svc_rdma_recv_ctxt *rctxt = rqstp->rq_xprt_ctxt;
        struct svc_rdma_chunk *chunk;
        struct svcxprt_rdma *rdma;
        struct xdr_buf subbuf;
        int ret;

        chunk = rctxt->rc_cur_result_payload;
        if (!length || !chunk)
                return 0;
        rctxt->rc_cur_result_payload =
                pcl_next_chunk(&rctxt->rc_write_pcl, chunk);
        if (length > chunk->ch_length)
                return -E2BIG;

        chunk->ch_position = offset;
        chunk->ch_payload_length = length;

        if (xdr_buf_subsegment(&rqstp->rq_res, &subbuf, offset, length))
                return -EMSGSIZE;

        rdma = container_of(rqstp->rq_xprt, struct svcxprt_rdma, sc_xprt);
        ret = svc_rdma_send_write_chunk(rdma, chunk, &subbuf);
        if (ret < 0)
                return ret;
        return 0;
}