// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (c) 2016-2018 Oracle. All rights reserved.
 * Copyright (c) 2014 Open Grid Computing, Inc. All rights reserved.
 * Copyright (c) 2005-2006 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Author: Tom Tucker <tom@opengridcomputing.com>
 */

/* Operation
 *
 * The main entry point is svc_rdma_recvfrom. This is called from
 * svc_recv when the transport indicates there is incoming data to
 * be read. "Data Ready" is signaled when an RDMA Receive completes,
 * or when a set of RDMA Reads complete.
 *
 * An svc_rqst is passed in. This structure contains an array of
 * free pages (rq_pages) that will contain the incoming RPC message.
 *
 * Short messages are moved directly into svc_rqst::rq_arg, and
 * the RPC Call is ready to be processed by the Upper Layer.
 * svc_rdma_recvfrom returns the length of the RPC Call message,
 * completing the reception of the RPC Call.
 *
 * However, when an incoming message has Read chunks,
 * svc_rdma_recvfrom must post RDMA Reads to pull the RPC Call's
 * data payload from the client. svc_rdma_recvfrom sets up the
 * RDMA Reads using pages in svc_rqst::rq_pages, which are
 * transferred to an svc_rdma_recv_ctxt for the duration of the
 * I/O. svc_rdma_recvfrom then returns zero, since the RPC message
 * is not yet ready.
 *
 * When the Read chunk payloads have become available on the
 * server, "Data Ready" is raised again, and svc_recv calls
 * svc_rdma_recvfrom again. This second call may use a different
 * svc_rqst than the first one, so any information that needs
 * to be preserved across these two calls is kept in an
 * svc_rdma_recv_ctxt.
 *
 * The second call to svc_rdma_recvfrom performs final assembly
 * of the RPC Call message, using the RDMA Read sink pages kept in
 * the svc_rdma_recv_ctxt. The xdr_buf is copied from the
 * svc_rdma_recv_ctxt to the second svc_rqst. The second call returns
 * the length of the completed RPC Call message.
 *
 * Page Management
 *
 * Pages under I/O must be transferred from the first svc_rqst to an
 * svc_rdma_recv_ctxt before the first svc_rdma_recvfrom call returns.
 *
 * The first svc_rqst supplies pages for RDMA Reads. These are moved
 * from rqstp::rq_pages into ctxt::pages. The consumed elements of
 * the rq_pages array are set to NULL and refilled before the first
 * svc_rdma_recvfrom call returns.
 *
 * During the second svc_rdma_recvfrom call, RDMA Read sink pages
 * are transferred from the svc_rdma_recv_ctxt to the second svc_rqst.
 */

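/* A rough sketch of the two svc_rdma_recvfrom calls needed for a
 * Call that carries Read chunks (illustrative only; error paths
 * are elided):
 *
 *	svc_recv
 *	  svc_rdma_recvfrom              <- RDMA Receive completed
 *	    svc_rdma_process_read_list   (posts RDMA Reads, returns 0)
 *		. . .
 *	svc_recv
 *	  svc_rdma_recvfrom              <- RDMA Reads completed
 *	    svc_rdma_read_complete       (assembles rq_arg)
 *	  returns rq_arg.len
 */
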
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/unaligned.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>

#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/debug.h>
#include <linux/sunrpc/rpc_rdma.h>
#include <linux/sunrpc/svc_rdma.h>

#include "xprt_rdma.h"
#include <trace/events/rpcrdma.h>

static void svc_rdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc);

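/* Return the first recv_ctxt queued on @list without removing it,
 * or NULL if @list is empty.
 */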
static inline struct svc_rdma_recv_ctxt *
svc_rdma_next_recv_ctxt(struct list_head *list)
{
	return list_first_entry_or_null(list, struct svc_rdma_recv_ctxt,
					rc_list);
}

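/* Allocate one svc_rdma_recv_ctxt: the ctxt itself, including its
 * rc_pages array, plus a DMA-mapped buffer of sc_max_req_size bytes
 * that catches the inline portion of each ingress RPC/RDMA message.
 * Both allocations are placed on the NUMA node closest to the device.
 */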
static struct svc_rdma_recv_ctxt *
svc_rdma_recv_ctxt_alloc(struct svcxprt_rdma *rdma)
{
	int node = ibdev_to_node(rdma->sc_cm_id->device);
	struct svc_rdma_recv_ctxt *ctxt;
	unsigned long pages;
	dma_addr_t addr;
	void *buffer;

	pages = svc_serv_maxpages(rdma->sc_xprt.xpt_server);
	ctxt = kzalloc_node(struct_size(ctxt, rc_pages, pages),
			    GFP_KERNEL, node);
	if (!ctxt)
		goto fail0;
	ctxt->rc_maxpages = pages;
	buffer = kmalloc_node(rdma->sc_max_req_size, GFP_KERNEL, node);
	if (!buffer)
		goto fail1;
	addr = ib_dma_map_single(rdma->sc_pd->device, buffer,
				 rdma->sc_max_req_size, DMA_FROM_DEVICE);
	if (ib_dma_mapping_error(rdma->sc_pd->device, addr))
		goto fail2;

	svc_rdma_recv_cid_init(rdma, &ctxt->rc_cid);
	pcl_init(&ctxt->rc_call_pcl);
	pcl_init(&ctxt->rc_read_pcl);
	pcl_init(&ctxt->rc_write_pcl);
	pcl_init(&ctxt->rc_reply_pcl);

	ctxt->rc_recv_wr.next = NULL;
	ctxt->rc_recv_wr.wr_cqe = &ctxt->rc_cqe;
	ctxt->rc_recv_wr.sg_list = &ctxt->rc_recv_sge;
	ctxt->rc_recv_wr.num_sge = 1;
	ctxt->rc_cqe.done = svc_rdma_wc_receive;
	ctxt->rc_recv_sge.addr = addr;
	ctxt->rc_recv_sge.length = rdma->sc_max_req_size;
	ctxt->rc_recv_sge.lkey = rdma->sc_pd->local_dma_lkey;
	ctxt->rc_recv_buf = buffer;
	svc_rdma_cc_init(rdma, &ctxt->rc_cc);
	return ctxt;

fail2:
	kfree(buffer);
fail1:
	kfree(ctxt);
fail0:
	return NULL;
}

static void svc_rdma_recv_ctxt_destroy(struct svcxprt_rdma *rdma,
				       struct svc_rdma_recv_ctxt *ctxt)
{
	ib_dma_unmap_single(rdma->sc_pd->device, ctxt->rc_recv_sge.addr,
			    ctxt->rc_recv_sge.length, DMA_FROM_DEVICE);
	kfree(ctxt->rc_recv_buf);
	kfree(ctxt);
}

/**
 * svc_rdma_recv_ctxts_destroy - Release all recv_ctxts for an xprt
 * @rdma: svcxprt_rdma being torn down
 *
 */
void svc_rdma_recv_ctxts_destroy(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_recv_ctxt *ctxt;
	struct llist_node *node;

	while ((node = llist_del_first(&rdma->sc_recv_ctxts))) {
		ctxt = llist_entry(node, struct svc_rdma_recv_ctxt, rc_node);
		svc_rdma_recv_ctxt_destroy(rdma, ctxt);
	}
}

/**
 * svc_rdma_recv_ctxt_get - Allocate a recv_ctxt
 * @rdma: controlling svcxprt_rdma
 *
 * Returns a recv_ctxt or (rarely) NULL if none are available.
 */
struct svc_rdma_recv_ctxt *svc_rdma_recv_ctxt_get(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_recv_ctxt *ctxt;
	struct llist_node *node;

	node = llist_del_first(&rdma->sc_recv_ctxts);
	if (!node)
		return NULL;

	ctxt = llist_entry(node, struct svc_rdma_recv_ctxt, rc_node);
	ctxt->rc_page_count = 0;
	return ctxt;
}

/**
 * svc_rdma_recv_ctxt_put - Return recv_ctxt to free list
 * @rdma: controlling svcxprt_rdma
 * @ctxt: object to return to the free list
 *
 * The ctxt is cleaned up and cached on the sc_recv_ctxts free
 * list for reuse, rather than being freed.
 */
void svc_rdma_recv_ctxt_put(struct svcxprt_rdma *rdma,
			    struct svc_rdma_recv_ctxt *ctxt)
{
	svc_rdma_cc_release(rdma, &ctxt->rc_cc, DMA_FROM_DEVICE);

	/* @rc_page_count is normally zero here, but error flows
	 * can leave pages in @rc_pages.
	 */
	release_pages(ctxt->rc_pages, ctxt->rc_page_count);

	pcl_free(&ctxt->rc_call_pcl);
	pcl_free(&ctxt->rc_read_pcl);
	pcl_free(&ctxt->rc_write_pcl);
	pcl_free(&ctxt->rc_reply_pcl);

	llist_add(&ctxt->rc_node, &rdma->sc_recv_ctxts);
}

/**
 * svc_rdma_release_ctxt - Release transport-specific per-rqst resources
 * @xprt: the transport which owned the context
 * @vctxt: the context from rqstp->rq_xprt_ctxt or dr->xprt_ctxt
 *
 * Ensure that the recv_ctxt is released whether or not a Reply
 * was sent. For example, the client could close the connection,
 * or svc_process could drop an RPC, before the Reply is sent.
 */
void svc_rdma_release_ctxt(struct svc_xprt *xprt, void *vctxt)
{
	struct svc_rdma_recv_ctxt *ctxt = vctxt;
	struct svcxprt_rdma *rdma =
		container_of(xprt, struct svcxprt_rdma, sc_xprt);

	if (ctxt)
		svc_rdma_recv_ctxt_put(rdma, ctxt);
}

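/* Post up to @wanted Receive WRs. The WRs are linked into a single
 * chain so that only one ib_post_recv() call is needed. On failure,
 * any ctxts whose WRs were not posted are returned to the free list.
 */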
static bool svc_rdma_refresh_recvs(struct svcxprt_rdma *rdma,
				   unsigned int wanted)
{
	const struct ib_recv_wr *bad_wr = NULL;
	struct svc_rdma_recv_ctxt *ctxt;
	struct ib_recv_wr *recv_chain;
	int ret;

	if (test_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags))
		return false;

	recv_chain = NULL;
	while (wanted--) {
		ctxt = svc_rdma_recv_ctxt_get(rdma);
		if (!ctxt)
			break;

		trace_svcrdma_post_recv(&ctxt->rc_cid);
		ctxt->rc_recv_wr.next = recv_chain;
		recv_chain = &ctxt->rc_recv_wr;
		rdma->sc_pending_recvs++;
	}
	if (!recv_chain)
		return true;

	ret = ib_post_recv(rdma->sc_qp, recv_chain, &bad_wr);
	if (ret)
		goto err_free;
	return true;

err_free:
	trace_svcrdma_rq_post_err(rdma, ret);
	while (bad_wr) {
		ctxt = container_of(bad_wr, struct svc_rdma_recv_ctxt,
				    rc_recv_wr);
		bad_wr = bad_wr->next;
		svc_rdma_recv_ctxt_put(rdma, ctxt);
	}
	/* Since we're destroying the xprt, no need to reset
	 * sc_pending_recvs.
	 */
	return false;
}

/**
 * svc_rdma_post_recvs - Post initial set of Recv WRs
 * @rdma: fresh svcxprt_rdma
 *
 * Return values:
 *   %true: Receive Queue initialization successful
 *   %false: memory allocation or DMA error
 */
bool svc_rdma_post_recvs(struct svcxprt_rdma *rdma)
{
	unsigned int total;

	/* For each credit, allocate enough recv_ctxts for one
	 * posted Receive and one RPC in process.
	 */
	total = (rdma->sc_max_requests * 2) + rdma->sc_recv_batch;
	while (total--) {
		struct svc_rdma_recv_ctxt *ctxt;

		ctxt = svc_rdma_recv_ctxt_alloc(rdma);
		if (!ctxt)
			return false;
		llist_add(&ctxt->rc_node, &rdma->sc_recv_ctxts);
	}

	return svc_rdma_refresh_recvs(rdma, rdma->sc_max_requests);
}

/**
 * svc_rdma_wc_receive - Invoked by RDMA provider for each polled Receive WC
 * @cq: Completion Queue context
 * @wc: Work Completion object
 *
 */
static void svc_rdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc)
{
	struct svcxprt_rdma *rdma = cq->cq_context;
	struct ib_cqe *cqe = wc->wr_cqe;
	struct svc_rdma_recv_ctxt *ctxt;

	rdma->sc_pending_recvs--;

	/* WARNING: Only wc->wr_cqe and wc->status are reliable */
	ctxt = container_of(cqe, struct svc_rdma_recv_ctxt, rc_cqe);

	if (wc->status != IB_WC_SUCCESS)
		goto flushed;
	trace_svcrdma_wc_recv(wc, &ctxt->rc_cid);

	/* If receive posting fails, the connection is about to be
	 * lost anyway. The server will not be able to send a reply
	 * for this RPC, and the client will retransmit this RPC
	 * anyway when it reconnects.
	 *
	 * Therefore we drop the Receive, even if status was SUCCESS,
	 * to reduce the likelihood of replayed requests once the
	 * client reconnects.
	 */
	if (rdma->sc_pending_recvs < rdma->sc_max_requests)
		if (!svc_rdma_refresh_recvs(rdma, rdma->sc_recv_batch))
			goto dropped;

	/* All wc fields are now known to be valid */
	ctxt->rc_byte_len = wc->byte_len;

	spin_lock(&rdma->sc_rq_dto_lock);
	list_add_tail(&ctxt->rc_list, &rdma->sc_rq_dto_q);
	/* Note the unlock pairs with the smp_rmb in svc_xprt_ready: */
	set_bit(XPT_DATA, &rdma->sc_xprt.xpt_flags);
	spin_unlock(&rdma->sc_rq_dto_lock);
	if (!test_bit(RDMAXPRT_CONN_PENDING, &rdma->sc_flags))
		svc_xprt_enqueue(&rdma->sc_xprt);
	return;

flushed:
	if (wc->status == IB_WC_WR_FLUSH_ERR)
		trace_svcrdma_wc_recv_flush(wc, &ctxt->rc_cid);
	else
		trace_svcrdma_wc_recv_err(wc, &ctxt->rc_cid);
dropped:
	svc_rdma_recv_ctxt_put(rdma, ctxt);
	svc_xprt_deferred_close(&rdma->sc_xprt);
}

/**
 * svc_rdma_flush_recv_queues - Drain pending Receive work
 * @rdma: svcxprt_rdma being shut down
 *
 */
void svc_rdma_flush_recv_queues(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_recv_ctxt *ctxt;

	while ((ctxt = svc_rdma_next_recv_ctxt(&rdma->sc_read_complete_q))) {
		list_del(&ctxt->rc_list);
		svc_rdma_recv_ctxt_put(rdma, ctxt);
	}
	while ((ctxt = svc_rdma_next_recv_ctxt(&rdma->sc_rq_dto_q))) {
		list_del(&ctxt->rc_list);
		svc_rdma_recv_ctxt_put(rdma, ctxt);
	}
}

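/* Point rq_arg's head iovec directly at the receive buffer; an
 * inline RPC message is thus consumed in place, with no data copy.
 */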
static void svc_rdma_build_arg_xdr(struct svc_rqst *rqstp,
				   struct svc_rdma_recv_ctxt *ctxt)
{
	struct xdr_buf *arg = &rqstp->rq_arg;

	arg->head[0].iov_base = ctxt->rc_recv_buf;
	arg->head[0].iov_len = ctxt->rc_byte_len;
	arg->tail[0].iov_base = NULL;
	arg->tail[0].iov_len = 0;
	arg->page_len = 0;
	arg->page_base = 0;
	arg->buflen = ctxt->rc_byte_len;
	arg->len = ctxt->rc_byte_len;
}

/**
 * xdr_count_read_segments - Count number of Read segments in Read list
 * @rctxt: Ingress receive context
 * @p: Start of an un-decoded Read list
 *
 * Before allocating anything, ensure the ingress Read list is safe
 * to use.
 *
 * The segment count is limited to how many segments can fit in the
 * transport header without overflowing the buffer. That's about 40
 * Read segments for a 1KB inline threshold.
 *
 * Return values:
 *   %true: Read list is valid. @rctxt's xdr_stream is updated to point
 *	    to the first byte past the Read list. rc_read_pcl and
 *	    rc_call_pcl cl_count fields are set to the number of
 *	    Read segments in the list.
 *  %false: Read list is corrupt. @rctxt's xdr_stream is left in an
 *	    unknown state.
 */
static bool xdr_count_read_segments(struct svc_rdma_recv_ctxt *rctxt, __be32 *p)
{
	rctxt->rc_call_pcl.cl_count = 0;
	rctxt->rc_read_pcl.cl_count = 0;
	while (xdr_item_is_present(p)) {
		u32 position, handle, length;
		u64 offset;

		p = xdr_inline_decode(&rctxt->rc_stream,
				      rpcrdma_readseg_maxsz * sizeof(*p));
		if (!p)
			return false;

		xdr_decode_read_segment(p, &position, &handle,
					&length, &offset);
		if (position) {
			/* Read chunk positions must be XDR (4-byte) aligned */
			if (position & 3)
				return false;
			++rctxt->rc_read_pcl.cl_count;
		} else {
			/* Position-zero segments convey the RPC message itself */
			++rctxt->rc_call_pcl.cl_count;
		}

		p = xdr_inline_decode(&rctxt->rc_stream, sizeof(*p));
		if (!p)
			return false;
	}
	return true;
}

/* Sanity check the Read list.
 *
 * Sanity checks:
 * - Read list does not overflow Receive buffer.
 * - Chunk size limited by largest NFS data payload.
 *
 * Return values:
 *   %true: Read list is valid. @rctxt's xdr_stream is updated
 *	    to point to the first byte past the Read list.
 *  %false: Read list is corrupt. @rctxt's xdr_stream is left
 *	    in an unknown state.
 */
static bool xdr_check_read_list(struct svc_rdma_recv_ctxt *rctxt)
{
	__be32 *p;

	p = xdr_inline_decode(&rctxt->rc_stream, sizeof(*p));
	if (!p)
		return false;
	if (!xdr_count_read_segments(rctxt, p))
		return false;
	if (!pcl_alloc_call(rctxt, p))
		return false;
	return pcl_alloc_read(rctxt, p);
}

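/* Check one ingress Write chunk: bound the segment count before
 * using it to size the xdr_inline_decode, then consume the chunk's
 * segment array.
 */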
static bool xdr_check_write_chunk(struct svc_rdma_recv_ctxt *rctxt)
{
	u32 segcount;
	__be32 *p;

	if (xdr_stream_decode_u32(&rctxt->rc_stream, &segcount))
		return false;

	/* Before trusting the segcount value enough to use it in
	 * a computation, perform a simple range check. This is an
	 * arbitrary but sensible limit (i.e., not architectural).
	 */
	if (unlikely(segcount > rctxt->rc_maxpages))
		return false;

	p = xdr_inline_decode(&rctxt->rc_stream,
			      segcount * rpcrdma_segment_maxsz * sizeof(*p));
	return p != NULL;
}

/**
 * xdr_count_write_chunks - Count number of Write chunks in Write list
 * @rctxt: Received header and decoding state
 * @p: start of an un-decoded Write list
 *
 * Before allocating anything, ensure the ingress Write list is
 * safe to use.
 *
 * Return values:
 *       %true: Write list is valid. @rctxt's xdr_stream is updated
 *		to point to the first byte past the Write list, and
 *		the number of Write chunks is in rc_write_pcl.cl_count.
 *      %false: Write list is corrupt. @rctxt's xdr_stream is left
 *		in an indeterminate state.
 */
static bool xdr_count_write_chunks(struct svc_rdma_recv_ctxt *rctxt, __be32 *p)
{
	rctxt->rc_write_pcl.cl_count = 0;
	while (xdr_item_is_present(p)) {
		if (!xdr_check_write_chunk(rctxt))
			return false;
		++rctxt->rc_write_pcl.cl_count;
		p = xdr_inline_decode(&rctxt->rc_stream, sizeof(*p));
		if (!p)
			return false;
	}
	return true;
}

/* Sanity check the Write list.
 *
 * Implementation limits:
 * - This implementation currently supports only one Write chunk.
 *
 * Sanity checks:
 * - Write list does not overflow Receive buffer.
 * - Chunk size limited by largest NFS data payload.
 *
 * Return values:
 *       %true: Write list is valid. @rctxt's xdr_stream is updated
 *		to point to the first byte past the Write list.
 *      %false: Write list is corrupt. @rctxt's xdr_stream is left
 *		in an unknown state.
 */
static bool xdr_check_write_list(struct svc_rdma_recv_ctxt *rctxt)
{
	__be32 *p;

	p = xdr_inline_decode(&rctxt->rc_stream, sizeof(*p));
	if (!p)
		return false;
	if (!xdr_count_write_chunks(rctxt, p))
		return false;
	if (!pcl_alloc_write(rctxt, &rctxt->rc_write_pcl, p))
		return false;

	rctxt->rc_cur_result_payload = pcl_first_chunk(&rctxt->rc_write_pcl);
	return true;
}

/* Sanity check the Reply chunk.
 *
 * Sanity checks:
 * - Reply chunk does not overflow Receive buffer.
 * - Chunk size limited by largest NFS data payload.
 *
 * Return values:
 *       %true: Reply chunk is valid. @rctxt's xdr_stream is updated
 *		to point to the first byte past the Reply chunk.
 *      %false: Reply chunk is corrupt. @rctxt's xdr_stream is left
 *		in an unknown state.
 */
static bool xdr_check_reply_chunk(struct svc_rdma_recv_ctxt *rctxt)
{
	__be32 *p;

	p = xdr_inline_decode(&rctxt->rc_stream, sizeof(*p));
	if (!p)
		return false;

	if (!xdr_item_is_present(p))
		return true;
	if (!xdr_check_write_chunk(rctxt))
		return false;

	rctxt->rc_reply_pcl.cl_count = 1;
	return pcl_alloc_write(rctxt, &rctxt->rc_reply_pcl, p);
}

/* RPC-over-RDMA Version One private extension: Remote Invalidation.
 * Responder's choice: requester signals it can handle Send With
 * Invalidate, and responder chooses one R_key to invalidate.
 *
 * If there is exactly one distinct R_key in the received transport
 * header, set rc_inv_rkey to that R_key. Otherwise, set it to zero.
 */
static void svc_rdma_get_inv_rkey(struct svcxprt_rdma *rdma,
				  struct svc_rdma_recv_ctxt *ctxt)
{
	struct svc_rdma_segment *segment;
	struct svc_rdma_chunk *chunk;
	u32 inv_rkey;

	ctxt->rc_inv_rkey = 0;

	if (!rdma->sc_snd_w_inv)
		return;

	inv_rkey = 0;
	pcl_for_each_chunk(chunk, &ctxt->rc_call_pcl) {
		pcl_for_each_segment(segment, chunk) {
			if (inv_rkey == 0)
				inv_rkey = segment->rs_handle;
			else if (inv_rkey != segment->rs_handle)
				return;
		}
	}
	pcl_for_each_chunk(chunk, &ctxt->rc_read_pcl) {
		pcl_for_each_segment(segment, chunk) {
			if (inv_rkey == 0)
				inv_rkey = segment->rs_handle;
			else if (inv_rkey != segment->rs_handle)
				return;
		}
	}
	pcl_for_each_chunk(chunk, &ctxt->rc_write_pcl) {
		pcl_for_each_segment(segment, chunk) {
			if (inv_rkey == 0)
				inv_rkey = segment->rs_handle;
			else if (inv_rkey != segment->rs_handle)
				return;
		}
	}
	pcl_for_each_chunk(chunk, &ctxt->rc_reply_pcl) {
		pcl_for_each_segment(segment, chunk) {
			if (inv_rkey == 0)
				inv_rkey = segment->rs_handle;
			else if (inv_rkey != segment->rs_handle)
				return;
		}
	}
	ctxt->rc_inv_rkey = inv_rkey;
}

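/* For reference, the fixed portion of an RPC-over-RDMA version 1
 * transport header (see RFC 8166), as it is decoded below:
 *
 *	p[0] : rdma_xid
 *	p[1] : rdma_vers
 *	p[2] : rdma_credit
 *	p[3] : rdma_proc (rdma_msg, rdma_nomsg, etc.)
 *
 * followed by the Read list, the Write list, and the Reply chunk.
 */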
/**
 * svc_rdma_xdr_decode_req - Decode the transport header
 * @rq_arg: xdr_buf containing ingress RPC/RDMA message
 * @rctxt: state of decoding
 *
 * On entry, rq_arg->head[0].iov_base points to first byte of the
 * RPC-over-RDMA transport header.
 *
 * On successful exit, head[0] points to first byte past the
 * RPC-over-RDMA header. For RDMA_MSG, this is the RPC message.
 *
 * The length of the RPC-over-RDMA header is returned.
 *
 * Assumptions:
 * - The transport header is entirely contained in the head iovec.
 */
static int svc_rdma_xdr_decode_req(struct xdr_buf *rq_arg,
				   struct svc_rdma_recv_ctxt *rctxt)
{
	__be32 *p, *rdma_argp;
	unsigned int hdr_len;

	rdma_argp = rq_arg->head[0].iov_base;
	xdr_init_decode(&rctxt->rc_stream, rq_arg, rdma_argp, NULL);

	p = xdr_inline_decode(&rctxt->rc_stream,
			      rpcrdma_fixed_maxsz * sizeof(*p));
	if (unlikely(!p))
		goto out_short;
	p++;			/* advance past rdma_xid */
	if (*p != rpcrdma_version)
		goto out_version;
	p += 2;			/* advance past rdma_vers and rdma_credit */
	rctxt->rc_msgtype = *p;
	switch (rctxt->rc_msgtype) {
	case rdma_msg:
		break;
	case rdma_nomsg:
		break;
	case rdma_done:
		goto out_drop;
	case rdma_error:
		goto out_drop;
	default:
		goto out_proc;
	}

	if (!xdr_check_read_list(rctxt))
		goto out_inval;
	if (!xdr_check_write_list(rctxt))
		goto out_inval;
	if (!xdr_check_reply_chunk(rctxt))
		goto out_inval;

	rq_arg->head[0].iov_base = rctxt->rc_stream.p;
	hdr_len = xdr_stream_pos(&rctxt->rc_stream);
	rq_arg->head[0].iov_len -= hdr_len;
	rq_arg->len -= hdr_len;
	trace_svcrdma_decode_rqst(rctxt, rdma_argp, hdr_len);
	return hdr_len;

out_short:
	trace_svcrdma_decode_short_err(rctxt, rq_arg->len);
	return -EINVAL;

out_version:
	trace_svcrdma_decode_badvers_err(rctxt, rdma_argp);
	return -EPROTONOSUPPORT;

out_drop:
	trace_svcrdma_decode_drop_err(rctxt, rdma_argp);
	return 0;

out_proc:
	trace_svcrdma_decode_badproc_err(rctxt, rdma_argp);
	return -EINVAL;

out_inval:
	trace_svcrdma_decode_parse_err(rctxt, rdma_argp);
	return -EINVAL;
}

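/* Send an RDMA_ERROR reply for the ingress message in @rctxt.
 * If no send ctxt is available, the error reply is simply dropped.
 */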
static void svc_rdma_send_error(struct svcxprt_rdma *rdma,
				struct svc_rdma_recv_ctxt *rctxt,
				int status)
{
	struct svc_rdma_send_ctxt *sctxt;

	sctxt = svc_rdma_send_ctxt_get(rdma);
	if (!sctxt)
		return;
	svc_rdma_send_error_msg(rdma, sctxt, rctxt, status);
}

/* By convention, backchannel calls arrive via rdma_msg type
 * messages, and never populate the chunk lists. This makes
 * the RPC/RDMA header small and fixed in size, so it is
 * straightforward to check the RPC header's direction field.
 */
static bool svc_rdma_is_reverse_direction_reply(struct svc_xprt *xprt,
						struct svc_rdma_recv_ctxt *rctxt)
{
	__be32 *p = rctxt->rc_recv_buf;

	if (!xprt->xpt_bc_xprt)
		return false;

	if (rctxt->rc_msgtype != rdma_msg)
		return false;

	if (!pcl_is_empty(&rctxt->rc_call_pcl))
		return false;
	if (!pcl_is_empty(&rctxt->rc_read_pcl))
		return false;
	if (!pcl_is_empty(&rctxt->rc_write_pcl))
		return false;
	if (!pcl_is_empty(&rctxt->rc_reply_pcl))
		return false;

	/* The chunk lists are empty, so the transport header is seven
	 * XDR words long, the RPC XID is at word 7, and the RPC call
	 * direction field is at word 8.
	 */
	if (*(p + 8) == cpu_to_be32(RPC_CALL))
		return false;

	return true;
}

/* Finish constructing the RPC Call message in rqstp::rq_arg.
 *
 * The incoming RPC/RDMA message is an RDMA_MSG type message
 * with a single Read chunk (only the upper layer data payload
 * was conveyed via RDMA Read).
 */
static void svc_rdma_read_complete_one(struct svc_rqst *rqstp,
				       struct svc_rdma_recv_ctxt *ctxt)
{
	struct svc_rdma_chunk *chunk = pcl_first_chunk(&ctxt->rc_read_pcl);
	struct xdr_buf *buf = &rqstp->rq_arg;
	unsigned int length;

	/* Split the Receive buffer between the head and tail
	 * buffers at the Read chunk's position. XDR roundup of the
	 * chunk is not included in either the pagelist or in
	 * the tail.
	 */
	buf->tail[0].iov_base = buf->head[0].iov_base + chunk->ch_position;
	buf->tail[0].iov_len = buf->head[0].iov_len - chunk->ch_position;
	buf->head[0].iov_len = chunk->ch_position;

	/* Read chunk may need XDR roundup (see RFC 8166, s. 3.4.5.2).
	 *
	 * If the client already rounded up the chunk length, the
	 * length does not change. Otherwise, the length of the page
	 * list is increased to include XDR round-up.
	 *
	 * Currently these chunks always start at page offset 0,
	 * thus the rounded-up length never crosses a page boundary.
	 */
	buf->pages = &rqstp->rq_pages[0];
	length = xdr_align_size(chunk->ch_length);
	buf->page_len = length;
	buf->len += length;
	buf->buflen += length;
}

/* Finish constructing the RPC Call message in rqstp::rq_arg.
 *
 * The incoming RPC/RDMA message is an RDMA_MSG type message
 * with payload in multiple Read chunks and no Position Zero
 * Read chunk (PZRC).
 */
static void svc_rdma_read_complete_multiple(struct svc_rqst *rqstp,
					    struct svc_rdma_recv_ctxt *ctxt)
{
	struct xdr_buf *buf = &rqstp->rq_arg;

	buf->len += ctxt->rc_readbytes;
	buf->buflen += ctxt->rc_readbytes;

	buf->head[0].iov_base = page_address(rqstp->rq_pages[0]);
	buf->head[0].iov_len = min_t(size_t, PAGE_SIZE, ctxt->rc_readbytes);
	buf->pages = &rqstp->rq_pages[1];
	buf->page_len = ctxt->rc_readbytes - buf->head[0].iov_len;
}

/* Finish constructing the RPC Call message in rqstp::rq_arg.
 *
 * The incoming RPC/RDMA message is an RDMA_NOMSG type message
 * (the RPC message body was conveyed via RDMA Read).
 */
static void svc_rdma_read_complete_pzrc(struct svc_rqst *rqstp,
					struct svc_rdma_recv_ctxt *ctxt)
{
	struct xdr_buf *buf = &rqstp->rq_arg;

	buf->len += ctxt->rc_readbytes;
	buf->buflen += ctxt->rc_readbytes;

	buf->head[0].iov_base = page_address(rqstp->rq_pages[0]);
	buf->head[0].iov_len = min_t(size_t, PAGE_SIZE, ctxt->rc_readbytes);
	buf->pages = &rqstp->rq_pages[1];
	buf->page_len = ctxt->rc_readbytes - buf->head[0].iov_len;
}

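/* Second-call completion: adopt the Read sink pages from @ctxt into
 * @rqstp and finish assembling the RPC Call message in rq_arg. This
 * path is taken only when the ingress message carried Read chunks.
 */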
static noinline void svc_rdma_read_complete(struct svc_rqst *rqstp,
					    struct svc_rdma_recv_ctxt *ctxt)
{
	unsigned int i;

	/* Transfer the Read chunk pages into rqstp::rq_pages, replacing
	 * the rq_pages that were already allocated for this rqstp.
	 */
	release_pages(rqstp->rq_respages, ctxt->rc_page_count);
	for (i = 0; i < ctxt->rc_page_count; i++)
		rqstp->rq_pages[i] = ctxt->rc_pages[i];

	/* Update @rqstp's result send buffer to start after the
	 * last page in the RDMA Read payload.
	 */
	rqstp->rq_respages = &rqstp->rq_pages[ctxt->rc_page_count];
	rqstp->rq_next_page = rqstp->rq_respages + 1;

	/* Prevent svc_rdma_recv_ctxt_put() from releasing the
	 * pages in ctxt::rc_pages a second time.
	 */
	ctxt->rc_page_count = 0;

	/* Finish constructing the RPC Call message. The exact
	 * procedure for that depends on what kind of RPC/RDMA
	 * chunks were provided by the client.
	 */
	rqstp->rq_arg = ctxt->rc_saved_arg;
	if (pcl_is_empty(&ctxt->rc_call_pcl)) {
		if (ctxt->rc_read_pcl.cl_count == 1)
			svc_rdma_read_complete_one(rqstp, ctxt);
		else
			svc_rdma_read_complete_multiple(rqstp, ctxt);
	} else {
		svc_rdma_read_complete_pzrc(rqstp, ctxt);
	}

	trace_svcrdma_read_finished(&ctxt->rc_cid);
}

/**
 * svc_rdma_recvfrom - Receive an RPC call
 * @rqstp: request structure into which to receive an RPC Call
 *
 * Returns:
 *	The positive number of bytes in the RPC Call message,
 *	%0 if there were no Calls ready to return,
 *	%-EINVAL if the Read chunk data is too large,
 *	%-ENOMEM if rdma_rw context pool was exhausted,
 *	%-ENOTCONN if posting failed (connection is lost),
 *	%-EIO if rdma_rw initialization failed (DMA mapping, etc).
 *
 * Called in a loop when XPT_DATA is set. XPT_DATA is cleared only
 * when there are no remaining ctxts to process.
 *
 * The next ctxt is removed from the "receive" lists.
 *
 * - If the ctxt completes a Receive, then construct the Call
 *   message from the contents of the Receive buffer.
 *
 *   - If there are no Read chunks in this message, then finish
 *     assembling the Call message and return the number of bytes
 *     in the message.
 *
 *   - If there are Read chunks in this message, post Read WRs to
 *     pull that payload. When the Read WRs complete, build the
 *     full message and return the number of bytes in it.
 */
int svc_rdma_recvfrom(struct svc_rqst *rqstp)
{
	struct svc_xprt *xprt = rqstp->rq_xprt;
	struct svcxprt_rdma *rdma_xprt =
		container_of(xprt, struct svcxprt_rdma, sc_xprt);
	struct svc_rdma_recv_ctxt *ctxt;
	int ret;

	/* Prevent svc_xprt_release() from releasing pages in rq_pages
	 * when returning 0 or an error.
	 */
	rqstp->rq_respages = rqstp->rq_pages;
	rqstp->rq_next_page = rqstp->rq_respages;

	rqstp->rq_xprt_ctxt = NULL;

	spin_lock(&rdma_xprt->sc_rq_dto_lock);
	ctxt = svc_rdma_next_recv_ctxt(&rdma_xprt->sc_read_complete_q);
	if (ctxt) {
		list_del(&ctxt->rc_list);
		spin_unlock(&rdma_xprt->sc_rq_dto_lock);
		svc_xprt_received(xprt);
		svc_rdma_read_complete(rqstp, ctxt);
		goto complete;
	}
	ctxt = svc_rdma_next_recv_ctxt(&rdma_xprt->sc_rq_dto_q);
	if (ctxt)
		list_del(&ctxt->rc_list);
	else
		/* No new incoming requests, terminate the loop */
		clear_bit(XPT_DATA, &xprt->xpt_flags);
	spin_unlock(&rdma_xprt->sc_rq_dto_lock);

	/* Unblock the transport for the next receive */
	svc_xprt_received(xprt);
	if (!ctxt)
		return 0;

	percpu_counter_inc(&svcrdma_stat_recv);
	ib_dma_sync_single_for_cpu(rdma_xprt->sc_pd->device,
				   ctxt->rc_recv_sge.addr, ctxt->rc_byte_len,
				   DMA_FROM_DEVICE);
	svc_rdma_build_arg_xdr(rqstp, ctxt);

	ret = svc_rdma_xdr_decode_req(&rqstp->rq_arg, ctxt);
	if (ret < 0)
		goto out_err;
	if (ret == 0)
		goto out_drop;

	if (svc_rdma_is_reverse_direction_reply(xprt, ctxt))
		goto out_backchannel;

	svc_rdma_get_inv_rkey(rdma_xprt, ctxt);

	if (!pcl_is_empty(&ctxt->rc_read_pcl) ||
	    !pcl_is_empty(&ctxt->rc_call_pcl))
		goto out_readlist;

complete:
	rqstp->rq_xprt_ctxt = ctxt;
	rqstp->rq_prot = IPPROTO_MAX;
	svc_xprt_copy_addrs(rqstp, xprt);
	set_bit(RQ_SECURE, &rqstp->rq_flags);
	return rqstp->rq_arg.len;

out_err:
	svc_rdma_send_error(rdma_xprt, ctxt, ret);
	svc_rdma_recv_ctxt_put(rdma_xprt, ctxt);
	return 0;

out_readlist:
	/* This @rqstp is about to be recycled. Save the work
	 * already done constructing the Call message in rq_arg
	 * so it can be restored when the RDMA Reads have
	 * completed.
	 */
	ctxt->rc_saved_arg = rqstp->rq_arg;

	ret = svc_rdma_process_read_list(rdma_xprt, rqstp, ctxt);
	if (ret < 0) {
		if (ret == -EINVAL)
			svc_rdma_send_error(rdma_xprt, ctxt, ret);
		svc_rdma_recv_ctxt_put(rdma_xprt, ctxt);
		svc_xprt_deferred_close(xprt);
		return ret;
	}
	return 0;

out_backchannel:
	svc_rdma_handle_bc_reply(rqstp, ctxt);
out_drop:
	svc_rdma_recv_ctxt_put(rdma_xprt, ctxt);
	return 0;
}