1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * linux/net/sunrpc/xdr.c
4 *
5 * Generic XDR support.
6 *
7 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
8 */
9
10 #include <linux/module.h>
11 #include <linux/slab.h>
12 #include <linux/types.h>
13 #include <linux/string.h>
14 #include <linux/kernel.h>
15 #include <linux/pagemap.h>
16 #include <linux/errno.h>
17 #include <linux/sunrpc/xdr.h>
18 #include <linux/sunrpc/msg_prot.h>
19 #include <linux/bvec.h>
20 #include <trace/events/sunrpc.h>
21
22 static void _copy_to_pages(struct page **, size_t, const char *, size_t);
23
24
25 /*
26 * XDR functions for basic NFS types
27 */
28 __be32 *
29 xdr_encode_netobj(__be32 *p, const struct xdr_netobj *obj)
30 {
31 unsigned int quadlen = XDR_QUADLEN(obj->len);
32
33 p[quadlen] = 0; /* zero trailing bytes */
34 *p++ = cpu_to_be32(obj->len);
35 memcpy(p, obj->data, obj->len);
36 return p + XDR_QUADLEN(obj->len);
37 }
38 EXPORT_SYMBOL_GPL(xdr_encode_netobj);
39
40 /**
41 * xdr_encode_opaque_fixed - Encode fixed length opaque data
42 * @p: pointer to current position in XDR buffer.
43 * @ptr: pointer to data to encode (or NULL)
44 * @nbytes: size of data.
45 *
46 * Copy the array of data of length nbytes at ptr to the XDR buffer
47 * at position p, then align to the next 32-bit boundary by padding
48 * with zero bytes (see RFC1832).
49 * Note: if ptr is NULL, only the padding is performed.
50 *
51 * Returns the updated current XDR buffer position
52 *
53 */
54 __be32 *xdr_encode_opaque_fixed(__be32 *p, const void *ptr, unsigned int nbytes)
55 {
56 if (likely(nbytes != 0)) {
57 unsigned int quadlen = XDR_QUADLEN(nbytes);
58 unsigned int padding = (quadlen << 2) - nbytes;
59
60 if (ptr != NULL)
61 memcpy(p, ptr, nbytes);
62 if (padding != 0)
63 memset((char *)p + nbytes, 0, padding);
64 p += quadlen;
65 }
66 return p;
67 }
68 EXPORT_SYMBOL_GPL(xdr_encode_opaque_fixed);
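/*
 * Example (illustrative sketch, not taken from any caller in this file):
 * encoding a 6-byte verifier with xdr_encode_opaque_fixed().  "p" is
 * assumed to point at enough free space for XDR_QUADLEN(6) quad words:
 *
 *	u8 verf[6] = { 1, 2, 3, 4, 5, 6 };
 *
 *	p = xdr_encode_opaque_fixed(p, verf, sizeof(verf));
 *
 * This copies the 6 data bytes, writes 2 zero pad bytes, and returns a
 * pointer advanced by 2 quad words (8 bytes).
 */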
69
70 /**
71 * xdr_encode_opaque - Encode variable length opaque data
72 * @p: pointer to current position in XDR buffer.
73 * @ptr: pointer to data to encode (or NULL)
74 * @nbytes: size of data.
75 *
76 * Returns the updated current XDR buffer position
77 */
78 __be32 *xdr_encode_opaque(__be32 *p, const void *ptr, unsigned int nbytes)
79 {
80 *p++ = cpu_to_be32(nbytes);
81 return xdr_encode_opaque_fixed(p, ptr, nbytes);
82 }
83 EXPORT_SYMBOL_GPL(xdr_encode_opaque);
84
85 __be32 *
86 xdr_encode_string(__be32 *p, const char *string)
87 {
88 return xdr_encode_array(p, string, strlen(string));
89 }
90 EXPORT_SYMBOL_GPL(xdr_encode_string);
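/*
 * Example (sketch): xdr_encode_string() emits the 4-byte length followed
 * by the bytes of the string and any zero padding.  Encoding "nfs"
 * consumes 2 quad words in total: the length word (3), 3 data bytes, and
 * 1 pad byte:
 *
 *	p = xdr_encode_string(p, "nfs");
 */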
91
92 /**
93 * xdr_terminate_string - '\0'-terminate a string residing in an xdr_buf
94 * @buf: XDR buffer where string resides
95 * @len: length of string, in bytes
96 *
97 */
98 void xdr_terminate_string(const struct xdr_buf *buf, const u32 len)
99 {
100 char *kaddr;
101
102 kaddr = kmap_atomic(buf->pages[0]);
103 kaddr[buf->page_base + len] = '\0';
104 kunmap_atomic(kaddr);
105 }
106 EXPORT_SYMBOL_GPL(xdr_terminate_string);
107
108 size_t xdr_buf_pagecount(const struct xdr_buf *buf)
109 {
110 if (!buf->page_len)
111 return 0;
112 return (buf->page_base + buf->page_len + PAGE_SIZE - 1) >> PAGE_SHIFT;
113 }
114
115 int
116 xdr_alloc_bvec(struct xdr_buf *buf, gfp_t gfp)
117 {
118 size_t i, n = xdr_buf_pagecount(buf);
119
120 if (n != 0 && buf->bvec == NULL) {
121 buf->bvec = kmalloc_array(n, sizeof(buf->bvec[0]), gfp);
122 if (!buf->bvec)
123 return -ENOMEM;
124 for (i = 0; i < n; i++) {
125 bvec_set_page(&buf->bvec[i], buf->pages[i], PAGE_SIZE,
126 0);
127 }
128 }
129 return 0;
130 }
131
132 void
133 xdr_free_bvec(struct xdr_buf *buf)
134 {
135 kfree(buf->bvec);
136 buf->bvec = NULL;
137 }
138
139 /**
140 * xdr_buf_to_bvec - Copy components of an xdr_buf into a bio_vec array
141 * @bvec: bio_vec array to populate
142 * @bvec_size: element count of @bvec
143 * @xdr: xdr_buf to be copied
144 *
145 * Returns the number of entries consumed in @bvec.
146 */
147 unsigned int xdr_buf_to_bvec(struct bio_vec *bvec, unsigned int bvec_size,
148 const struct xdr_buf *xdr)
149 {
150 const struct kvec *head = xdr->head;
151 const struct kvec *tail = xdr->tail;
152 unsigned int count = 0;
153
154 if (head->iov_len) {
155 bvec_set_virt(bvec++, head->iov_base, head->iov_len);
156 ++count;
157 }
158
159 if (xdr->page_len) {
160 unsigned int offset, len, remaining;
161 struct page **pages = xdr->pages;
162
163 offset = offset_in_page(xdr->page_base);
164 remaining = xdr->page_len;
165 while (remaining > 0) {
166 len = min_t(unsigned int, remaining,
167 PAGE_SIZE - offset);
168 bvec_set_page(bvec++, *pages++, len, offset);
169 remaining -= len;
170 offset = 0;
171 if (unlikely(++count > bvec_size))
172 goto bvec_overflow;
173 }
174 }
175
176 if (tail->iov_len) {
177 bvec_set_virt(bvec, tail->iov_base, tail->iov_len);
178 if (unlikely(++count > bvec_size))
179 goto bvec_overflow;
180 }
181
182 return count;
183
184 bvec_overflow:
185 pr_warn_once("%s: bio_vec array overflow\n", __func__);
186 return count - 1;
187 }
188 EXPORT_SYMBOL_GPL(xdr_buf_to_bvec);
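/*
 * Example (sketch): mapping an xdr_buf onto a caller-provided bio_vec
 * array before handing it to a socket or similar transport.  The array
 * and its size are placeholders chosen for this illustration:
 *
 *	struct bio_vec bvecs[16];
 *	unsigned int nvecs;
 *
 *	nvecs = xdr_buf_to_bvec(bvecs, ARRAY_SIZE(bvecs), xdr);
 *
 * On return, the first nvecs entries describe the head, page data and
 * tail of the xdr_buf, in that order.
 */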
189
190 /**
191 * xdr_inline_pages - Prepare receive buffer for a large reply
192 * @xdr: xdr_buf into which reply will be placed
193 * @offset: expected offset where data payload will start, in bytes
194 * @pages: vector of struct page pointers
195 * @base: offset in first page where receive should start, in bytes
196 * @len: expected size of the upper layer data payload, in bytes
197 *
198 */
199 void
200 xdr_inline_pages(struct xdr_buf *xdr, unsigned int offset,
201 struct page **pages, unsigned int base, unsigned int len)
202 {
203 struct kvec *head = xdr->head;
204 struct kvec *tail = xdr->tail;
205 char *buf = (char *)head->iov_base;
206 unsigned int buflen = head->iov_len;
207
208 head->iov_len = offset;
209
210 xdr->pages = pages;
211 xdr->page_base = base;
212 xdr->page_len = len;
213
214 tail->iov_base = buf + offset;
215 tail->iov_len = buflen - offset;
216 xdr->buflen += len;
217 }
218 EXPORT_SYMBOL_GPL(xdr_inline_pages);
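/*
 * Example (sketch): a client-side encoder preparing a receive buffer so
 * that the first hdrsize bytes of the reply land in the head kvec and
 * the payload lands directly in the caller's pages.  hdrsize, pages and
 * count stand in for caller state:
 *
 *	xdr_inline_pages(&req->rq_rcv_buf, hdrsize, pages, 0, count);
 */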
219
220 /*
221 * Helper routines for doing 'memmove' like operations on a struct xdr_buf
222 */
223
224 /**
225 * _shift_data_left_pages
226 * @pages: vector of pages containing both the source and dest memory area.
227 * @pgto_base: page vector address of destination
228 * @pgfrom_base: page vector address of source
229 * @len: number of bytes to copy
230 *
231 * Note: the addresses pgto_base and pgfrom_base are both calculated in
232 * the same way:
233 * if a memory area starts at byte 'base' in page 'pages[i]',
234 * then its address is given as (i << PAGE_SHIFT) + base
235 * Also note: pgto_base must be < pgfrom_base, but the memory areas
236 * they point to may overlap.
237 */
238 static void
239 _shift_data_left_pages(struct page **pages, size_t pgto_base,
240 size_t pgfrom_base, size_t len)
241 {
242 struct page **pgfrom, **pgto;
243 char *vfrom, *vto;
244 size_t copy;
245
246 BUG_ON(pgfrom_base <= pgto_base);
247
248 if (!len)
249 return;
250
251 pgto = pages + (pgto_base >> PAGE_SHIFT);
252 pgfrom = pages + (pgfrom_base >> PAGE_SHIFT);
253
254 pgto_base &= ~PAGE_MASK;
255 pgfrom_base &= ~PAGE_MASK;
256
257 do {
258 if (pgto_base >= PAGE_SIZE) {
259 pgto_base = 0;
260 pgto++;
261 }
262 if (pgfrom_base >= PAGE_SIZE) {
263 pgfrom_base = 0;
264 pgfrom++;
265 }
266
267 copy = len;
268 if (copy > (PAGE_SIZE - pgto_base))
269 copy = PAGE_SIZE - pgto_base;
270 if (copy > (PAGE_SIZE - pgfrom_base))
271 copy = PAGE_SIZE - pgfrom_base;
272
273 vto = kmap_atomic(*pgto);
274 if (*pgto != *pgfrom) {
275 vfrom = kmap_atomic(*pgfrom);
276 memcpy(vto + pgto_base, vfrom + pgfrom_base, copy);
277 kunmap_atomic(vfrom);
278 } else
279 memmove(vto + pgto_base, vto + pgfrom_base, copy);
280 flush_dcache_page(*pgto);
281 kunmap_atomic(vto);
282
283 pgto_base += copy;
284 pgfrom_base += copy;
285
286 } while ((len -= copy) != 0);
287 }
288
289 /**
290 * _shift_data_right_pages
291 * @pages: vector of pages containing both the source and dest memory area.
292 * @pgto_base: page vector address of destination
293 * @pgfrom_base: page vector address of source
294 * @len: number of bytes to copy
295 *
296 * Note: the addresses pgto_base and pgfrom_base are both calculated in
297 * the same way:
298 * if a memory area starts at byte 'base' in page 'pages[i]',
299 * then its address is given as (i << PAGE_SHIFT) + base
300 * Also note: pgfrom_base must be < pgto_base, but the memory areas
301 * they point to may overlap.
302 */
303 static void
304 _shift_data_right_pages(struct page **pages, size_t pgto_base,
305 size_t pgfrom_base, size_t len)
306 {
307 struct page **pgfrom, **pgto;
308 char *vfrom, *vto;
309 size_t copy;
310
311 BUG_ON(pgto_base <= pgfrom_base);
312
313 if (!len)
314 return;
315
316 pgto_base += len;
317 pgfrom_base += len;
318
319 pgto = pages + (pgto_base >> PAGE_SHIFT);
320 pgfrom = pages + (pgfrom_base >> PAGE_SHIFT);
321
322 pgto_base &= ~PAGE_MASK;
323 pgfrom_base &= ~PAGE_MASK;
324
325 do {
326 /* Are any pointers crossing a page boundary? */
327 if (pgto_base == 0) {
328 pgto_base = PAGE_SIZE;
329 pgto--;
330 }
331 if (pgfrom_base == 0) {
332 pgfrom_base = PAGE_SIZE;
333 pgfrom--;
334 }
335
336 copy = len;
337 if (copy > pgto_base)
338 copy = pgto_base;
339 if (copy > pgfrom_base)
340 copy = pgfrom_base;
341 pgto_base -= copy;
342 pgfrom_base -= copy;
343
344 vto = kmap_atomic(*pgto);
345 if (*pgto != *pgfrom) {
346 vfrom = kmap_atomic(*pgfrom);
347 memcpy(vto + pgto_base, vfrom + pgfrom_base, copy);
348 kunmap_atomic(vfrom);
349 } else
350 memmove(vto + pgto_base, vto + pgfrom_base, copy);
351 flush_dcache_page(*pgto);
352 kunmap_atomic(vto);
353
354 } while ((len -= copy) != 0);
355 }
356
357 /**
358 * _copy_to_pages
359 * @pages: array of pages
360 * @pgbase: page vector address of destination
361 * @p: pointer to source data
362 * @len: length
363 *
364 * Copies data from an arbitrary memory location into an array of pages
365 * The copy is assumed to be non-overlapping.
366 */
367 static void
368 _copy_to_pages(struct page **pages, size_t pgbase, const char *p, size_t len)
369 {
370 struct page **pgto;
371 char *vto;
372 size_t copy;
373
374 if (!len)
375 return;
376
377 pgto = pages + (pgbase >> PAGE_SHIFT);
378 pgbase &= ~PAGE_MASK;
379
380 for (;;) {
381 copy = PAGE_SIZE - pgbase;
382 if (copy > len)
383 copy = len;
384
385 vto = kmap_atomic(*pgto);
386 memcpy(vto + pgbase, p, copy);
387 kunmap_atomic(vto);
388
389 len -= copy;
390 if (len == 0)
391 break;
392
393 pgbase += copy;
394 if (pgbase == PAGE_SIZE) {
395 flush_dcache_page(*pgto);
396 pgbase = 0;
397 pgto++;
398 }
399 p += copy;
400 }
401 flush_dcache_page(*pgto);
402 }
403
404 /**
405 * _copy_from_pages
406 * @p: pointer to destination
407 * @pages: array of pages
408 * @pgbase: offset of source data
409 * @len: length
410 *
411 * Copies data into an arbitrary memory location from an array of pages
412 * The copy is assumed to be non-overlapping.
413 */
414 void
415 _copy_from_pages(char *p, struct page **pages, size_t pgbase, size_t len)
416 {
417 struct page **pgfrom;
418 char *vfrom;
419 size_t copy;
420
421 if (!len)
422 return;
423
424 pgfrom = pages + (pgbase >> PAGE_SHIFT);
425 pgbase &= ~PAGE_MASK;
426
427 do {
428 copy = PAGE_SIZE - pgbase;
429 if (copy > len)
430 copy = len;
431
432 vfrom = kmap_atomic(*pgfrom);
433 memcpy(p, vfrom + pgbase, copy);
434 kunmap_atomic(vfrom);
435
436 pgbase += copy;
437 if (pgbase == PAGE_SIZE) {
438 pgbase = 0;
439 pgfrom++;
440 }
441 p += copy;
442
443 } while ((len -= copy) != 0);
444 }
445 EXPORT_SYMBOL_GPL(_copy_from_pages);
446
447 static void xdr_buf_iov_zero(const struct kvec *iov, unsigned int base,
448 unsigned int len)
449 {
450 if (base >= iov->iov_len)
451 return;
452 if (len > iov->iov_len - base)
453 len = iov->iov_len - base;
454 memset(iov->iov_base + base, 0, len);
455 }
456
457 /**
458 * xdr_buf_pages_zero
459 * @buf: xdr_buf
460 * @pgbase: beginning offset
461 * @len: length
462 */
463 static void xdr_buf_pages_zero(const struct xdr_buf *buf, unsigned int pgbase,
464 unsigned int len)
465 {
466 struct page **pages = buf->pages;
467 struct page **page;
468 char *vpage;
469 unsigned int zero;
470
471 if (!len)
472 return;
473 if (pgbase >= buf->page_len) {
474 xdr_buf_iov_zero(buf->tail, pgbase - buf->page_len, len);
475 return;
476 }
477 if (pgbase + len > buf->page_len) {
478 xdr_buf_iov_zero(buf->tail, 0, pgbase + len - buf->page_len);
479 len = buf->page_len - pgbase;
480 }
481
482 pgbase += buf->page_base;
483
484 page = pages + (pgbase >> PAGE_SHIFT);
485 pgbase &= ~PAGE_MASK;
486
487 do {
488 zero = PAGE_SIZE - pgbase;
489 if (zero > len)
490 zero = len;
491
492 vpage = kmap_atomic(*page);
493 memset(vpage + pgbase, 0, zero);
494 kunmap_atomic(vpage);
495
496 flush_dcache_page(*page);
497 pgbase = 0;
498 page++;
499
500 } while ((len -= zero) != 0);
501 }
502
503 static unsigned int xdr_buf_pages_fill_sparse(const struct xdr_buf *buf,
504 unsigned int buflen, gfp_t gfp)
505 {
506 unsigned int i, npages, pagelen;
507
508 if (!(buf->flags & XDRBUF_SPARSE_PAGES))
509 return buflen;
510 if (buflen <= buf->head->iov_len)
511 return buflen;
512 pagelen = buflen - buf->head->iov_len;
513 if (pagelen > buf->page_len)
514 pagelen = buf->page_len;
515 npages = (pagelen + buf->page_base + PAGE_SIZE - 1) >> PAGE_SHIFT;
516 for (i = 0; i < npages; i++) {
517 if (buf->pages[i])
518 continue;
519 buf->pages[i] = alloc_page(gfp);
520 if (likely(buf->pages[i]))
521 continue;
522 buflen -= pagelen;
523 pagelen = i << PAGE_SHIFT;
524 if (pagelen > buf->page_base)
525 buflen += pagelen - buf->page_base;
526 break;
527 }
528 return buflen;
529 }
530
531 static void xdr_buf_try_expand(struct xdr_buf *buf, unsigned int len)
532 {
533 struct kvec *head = buf->head;
534 struct kvec *tail = buf->tail;
535 unsigned int sum = head->iov_len + buf->page_len + tail->iov_len;
536 unsigned int free_space, newlen;
537
538 if (sum > buf->len) {
539 free_space = min_t(unsigned int, sum - buf->len, len);
540 newlen = xdr_buf_pages_fill_sparse(buf, buf->len + free_space,
541 GFP_KERNEL);
542 free_space = newlen - buf->len;
543 buf->len = newlen;
544 len -= free_space;
545 if (!len)
546 return;
547 }
548
549 if (buf->buflen > sum) {
550 /* Expand the tail buffer */
551 free_space = min_t(unsigned int, buf->buflen - sum, len);
552 tail->iov_len += free_space;
553 buf->len += free_space;
554 }
555 }
556
557 static void xdr_buf_tail_copy_right(const struct xdr_buf *buf,
558 unsigned int base, unsigned int len,
559 unsigned int shift)
560 {
561 const struct kvec *tail = buf->tail;
562 unsigned int to = base + shift;
563
564 if (to >= tail->iov_len)
565 return;
566 if (len + to > tail->iov_len)
567 len = tail->iov_len - to;
568 memmove(tail->iov_base + to, tail->iov_base + base, len);
569 }
570
571 static void xdr_buf_pages_copy_right(const struct xdr_buf *buf,
572 unsigned int base, unsigned int len,
573 unsigned int shift)
574 {
575 const struct kvec *tail = buf->tail;
576 unsigned int to = base + shift;
577 unsigned int pglen = 0;
578 unsigned int talen = 0, tato = 0;
579
580 if (base >= buf->page_len)
581 return;
582 if (len > buf->page_len - base)
583 len = buf->page_len - base;
584 if (to >= buf->page_len) {
585 tato = to - buf->page_len;
586 if (tail->iov_len >= len + tato)
587 talen = len;
588 else if (tail->iov_len > tato)
589 talen = tail->iov_len - tato;
590 } else if (len + to >= buf->page_len) {
591 pglen = buf->page_len - to;
592 talen = len - pglen;
593 if (talen > tail->iov_len)
594 talen = tail->iov_len;
595 } else
596 pglen = len;
597
598 _copy_from_pages(tail->iov_base + tato, buf->pages,
599 buf->page_base + base + pglen, talen);
600 _shift_data_right_pages(buf->pages, buf->page_base + to,
601 buf->page_base + base, pglen);
602 }
603
604 static void xdr_buf_head_copy_right(const struct xdr_buf *buf,
605 unsigned int base, unsigned int len,
606 unsigned int shift)
607 {
608 const struct kvec *head = buf->head;
609 const struct kvec *tail = buf->tail;
610 unsigned int to = base + shift;
611 unsigned int pglen = 0, pgto = 0;
612 unsigned int talen = 0, tato = 0;
613
614 if (base >= head->iov_len)
615 return;
616 if (len > head->iov_len - base)
617 len = head->iov_len - base;
618 if (to >= buf->page_len + head->iov_len) {
619 tato = to - buf->page_len - head->iov_len;
620 talen = len;
621 } else if (to >= head->iov_len) {
622 pgto = to - head->iov_len;
623 pglen = len;
624 if (pgto + pglen > buf->page_len) {
625 talen = pgto + pglen - buf->page_len;
626 pglen -= talen;
627 }
628 } else {
629 pglen = len - to;
630 if (pglen > buf->page_len) {
631 talen = pglen - buf->page_len;
632 pglen = buf->page_len;
633 }
634 }
635
636 len -= talen;
637 base += len;
638 if (talen + tato > tail->iov_len)
639 talen = tail->iov_len > tato ? tail->iov_len - tato : 0;
640 memcpy(tail->iov_base + tato, head->iov_base + base, talen);
641
642 len -= pglen;
643 base -= pglen;
644 _copy_to_pages(buf->pages, buf->page_base + pgto, head->iov_base + base,
645 pglen);
646
647 base -= len;
648 memmove(head->iov_base + to, head->iov_base + base, len);
649 }
650
651 static void xdr_buf_tail_shift_right(const struct xdr_buf *buf,
652 unsigned int base, unsigned int len,
653 unsigned int shift)
654 {
655 const struct kvec *tail = buf->tail;
656
657 if (base >= tail->iov_len || !shift || !len)
658 return;
659 xdr_buf_tail_copy_right(buf, base, len, shift);
660 }
661
662 static void xdr_buf_pages_shift_right(const struct xdr_buf *buf,
663 unsigned int base, unsigned int len,
664 unsigned int shift)
665 {
666 if (!shift || !len)
667 return;
668 if (base >= buf->page_len) {
669 xdr_buf_tail_shift_right(buf, base - buf->page_len, len, shift);
670 return;
671 }
672 if (base + len > buf->page_len)
673 xdr_buf_tail_shift_right(buf, 0, base + len - buf->page_len,
674 shift);
675 xdr_buf_pages_copy_right(buf, base, len, shift);
676 }
677
678 static void xdr_buf_head_shift_right(const struct xdr_buf *buf,
679 unsigned int base, unsigned int len,
680 unsigned int shift)
681 {
682 const struct kvec *head = buf->head;
683
684 if (!shift)
685 return;
686 if (base >= head->iov_len) {
687 xdr_buf_pages_shift_right(buf, base - head->iov_len, len,
688 shift);
689 return;
690 }
691 if (base + len > head->iov_len)
692 xdr_buf_pages_shift_right(buf, 0, base + len - head->iov_len,
693 shift);
694 xdr_buf_head_copy_right(buf, base, len, shift);
695 }
696
697 static void xdr_buf_tail_copy_left(const struct xdr_buf *buf, unsigned int base,
698 unsigned int len, unsigned int shift)
699 {
700 const struct kvec *tail = buf->tail;
701
702 if (base >= tail->iov_len)
703 return;
704 if (len > tail->iov_len - base)
705 len = tail->iov_len - base;
706 /* Shift data into head */
707 if (shift > buf->page_len + base) {
708 const struct kvec *head = buf->head;
709 unsigned int hdto =
710 head->iov_len + buf->page_len + base - shift;
711 unsigned int hdlen = len;
712
713 if (WARN_ONCE(shift > head->iov_len + buf->page_len + base,
714 "SUNRPC: Misaligned data.\n"))
715 return;
716 if (hdto + hdlen > head->iov_len)
717 hdlen = head->iov_len - hdto;
718 memcpy(head->iov_base + hdto, tail->iov_base + base, hdlen);
719 base += hdlen;
720 len -= hdlen;
721 if (!len)
722 return;
723 }
724 /* Shift data into pages */
725 if (shift > base) {
726 unsigned int pgto = buf->page_len + base - shift;
727 unsigned int pglen = len;
728
729 if (pgto + pglen > buf->page_len)
730 pglen = buf->page_len - pgto;
731 _copy_to_pages(buf->pages, buf->page_base + pgto,
732 tail->iov_base + base, pglen);
733 base += pglen;
734 len -= pglen;
735 if (!len)
736 return;
737 }
738 memmove(tail->iov_base + base - shift, tail->iov_base + base, len);
739 }
740
741 static void xdr_buf_pages_copy_left(const struct xdr_buf *buf,
742 unsigned int base, unsigned int len,
743 unsigned int shift)
744 {
745 unsigned int pgto;
746
747 if (base >= buf->page_len)
748 return;
749 if (len > buf->page_len - base)
750 len = buf->page_len - base;
751 /* Shift data into head */
752 if (shift > base) {
753 const struct kvec *head = buf->head;
754 unsigned int hdto = head->iov_len + base - shift;
755 unsigned int hdlen = len;
756
757 if (WARN_ONCE(shift > head->iov_len + base,
758 "SUNRPC: Misaligned data.\n"))
759 return;
760 if (hdto + hdlen > head->iov_len)
761 hdlen = head->iov_len - hdto;
762 _copy_from_pages(head->iov_base + hdto, buf->pages,
763 buf->page_base + base, hdlen);
764 base += hdlen;
765 len -= hdlen;
766 if (!len)
767 return;
768 }
769 pgto = base - shift;
770 _shift_data_left_pages(buf->pages, buf->page_base + pgto,
771 buf->page_base + base, len);
772 }
773
774 static void xdr_buf_tail_shift_left(const struct xdr_buf *buf,
775 unsigned int base, unsigned int len,
776 unsigned int shift)
777 {
778 if (!shift || !len)
779 return;
780 xdr_buf_tail_copy_left(buf, base, len, shift);
781 }
782
783 static void xdr_buf_pages_shift_left(const struct xdr_buf *buf,
784 unsigned int base, unsigned int len,
785 unsigned int shift)
786 {
787 if (!shift || !len)
788 return;
789 if (base >= buf->page_len) {
790 xdr_buf_tail_shift_left(buf, base - buf->page_len, len, shift);
791 return;
792 }
793 xdr_buf_pages_copy_left(buf, base, len, shift);
794 len += base;
795 if (len <= buf->page_len)
796 return;
797 xdr_buf_tail_copy_left(buf, 0, len - buf->page_len, shift);
798 }
799
800 static void xdr_buf_head_shift_left(const struct xdr_buf *buf,
801 unsigned int base, unsigned int len,
802 unsigned int shift)
803 {
804 const struct kvec *head = buf->head;
805 unsigned int bytes;
806
807 if (!shift || !len)
808 return;
809
810 if (shift > base) {
811 bytes = (shift - base);
812 if (bytes >= len)
813 return;
814 base += bytes;
815 len -= bytes;
816 }
817
818 if (base < head->iov_len) {
819 bytes = min_t(unsigned int, len, head->iov_len - base);
820 memmove(head->iov_base + (base - shift),
821 head->iov_base + base, bytes);
822 base += bytes;
823 len -= bytes;
824 }
825 xdr_buf_pages_shift_left(buf, base - head->iov_len, len, shift);
826 }
827
828 /**
829 * xdr_shrink_bufhead
830 * @buf: xdr_buf
831 * @len: new length of buf->head[0]
832 *
833 * Shrinks XDR buffer's header kvec buf->head[0], setting it to
834 * 'len' bytes. The extra data is not lost, but is instead
835 * moved into the inlined pages and/or the tail.
836 */
837 static unsigned int xdr_shrink_bufhead(struct xdr_buf *buf, unsigned int len)
838 {
839 struct kvec *head = buf->head;
840 unsigned int shift, buflen = max(buf->len, len);
841
842 WARN_ON_ONCE(len > head->iov_len);
843 if (head->iov_len > buflen) {
844 buf->buflen -= head->iov_len - buflen;
845 head->iov_len = buflen;
846 }
847 if (len >= head->iov_len)
848 return 0;
849 shift = head->iov_len - len;
850 xdr_buf_try_expand(buf, shift);
851 xdr_buf_head_shift_right(buf, len, buflen - len, shift);
852 head->iov_len = len;
853 buf->buflen -= shift;
854 buf->len -= shift;
855 return shift;
856 }
857
858 /**
859 * xdr_shrink_pagelen - shrinks buf->pages to @len bytes
860 * @buf: xdr_buf
861 * @len: new page buffer length
862 *
863 * The extra data is not lost, but is instead moved into buf->tail.
864 * Returns the actual number of bytes moved.
865 */
866 static unsigned int xdr_shrink_pagelen(struct xdr_buf *buf, unsigned int len)
867 {
868 unsigned int shift, buflen = buf->len - buf->head->iov_len;
869
870 WARN_ON_ONCE(len > buf->page_len);
871 if (buf->head->iov_len >= buf->len || len > buflen)
872 buflen = len;
873 if (buf->page_len > buflen) {
874 buf->buflen -= buf->page_len - buflen;
875 buf->page_len = buflen;
876 }
877 if (len >= buf->page_len)
878 return 0;
879 shift = buf->page_len - len;
880 xdr_buf_try_expand(buf, shift);
881 xdr_buf_pages_shift_right(buf, len, buflen - len, shift);
882 buf->page_len = len;
883 buf->len -= shift;
884 buf->buflen -= shift;
885 return shift;
886 }
887
888 /**
889 * xdr_stream_pos - Return the current offset from the start of the xdr_stream
890 * @xdr: pointer to struct xdr_stream
891 */
892 unsigned int xdr_stream_pos(const struct xdr_stream *xdr)
893 {
894 return (unsigned int)(XDR_QUADLEN(xdr->buf->len) - xdr->nwords) << 2;
895 }
896 EXPORT_SYMBOL_GPL(xdr_stream_pos);
897
898 static void xdr_stream_set_pos(struct xdr_stream *xdr, unsigned int pos)
899 {
900 unsigned int blen = xdr->buf->len;
901
902 xdr->nwords = blen > pos ? XDR_QUADLEN(blen) - XDR_QUADLEN(pos) : 0;
903 }
904
905 static void xdr_stream_page_set_pos(struct xdr_stream *xdr, unsigned int pos)
906 {
907 xdr_stream_set_pos(xdr, pos + xdr->buf->head[0].iov_len);
908 }
909
910 /**
911 * xdr_page_pos - Return the current offset from the start of the xdr pages
912 * @xdr: pointer to struct xdr_stream
913 */
914 unsigned int xdr_page_pos(const struct xdr_stream *xdr)
915 {
916 unsigned int pos = xdr_stream_pos(xdr);
917
918 WARN_ON(pos < xdr->buf->head[0].iov_len);
919 return pos - xdr->buf->head[0].iov_len;
920 }
921 EXPORT_SYMBOL_GPL(xdr_page_pos);
922
923 /**
924 * xdr_init_encode - Initialize a struct xdr_stream for sending data.
925 * @xdr: pointer to xdr_stream struct
926 * @buf: pointer to XDR buffer in which to encode data
927 * @p: current pointer inside XDR buffer
928 * @rqst: pointer to controlling rpc_rqst, for debugging
929 *
930 * Note: at the moment the RPC client only passes the length of our
931 * scratch buffer in the xdr_buf's header kvec. Previously this
932 * meant we needed to call xdr_adjust_iovec() after encoding the
933 * data. With the new scheme, the xdr_stream manages the details
934 * of the buffer length, and takes care of adjusting the kvec
935 * length for us.
936 */
937 void xdr_init_encode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p,
938 struct rpc_rqst *rqst)
939 {
940 struct kvec *iov = buf->head;
941 int scratch_len = buf->buflen - buf->page_len - buf->tail[0].iov_len;
942
943 xdr_reset_scratch_buffer(xdr);
944 BUG_ON(scratch_len < 0);
945 xdr->buf = buf;
946 xdr->iov = iov;
947 xdr->p = (__be32 *)((char *)iov->iov_base + iov->iov_len);
948 xdr->end = (__be32 *)((char *)iov->iov_base + scratch_len);
949 BUG_ON(iov->iov_len > scratch_len);
950
951 if (p != xdr->p && p != NULL) {
952 size_t len;
953
954 BUG_ON(p < xdr->p || p > xdr->end);
955 len = (char *)p - (char *)xdr->p;
956 xdr->p = p;
957 buf->len += len;
958 iov->iov_len += len;
959 }
960 xdr->rqst = rqst;
961 }
962 EXPORT_SYMBOL_GPL(xdr_init_encode);
963
964 /**
965 * xdr_init_encode_pages - Initialize an xdr_stream for encoding into pages
966 * @xdr: pointer to xdr_stream struct
967 * @buf: pointer to XDR buffer into which to encode data
968 *
969 */
970 void xdr_init_encode_pages(struct xdr_stream *xdr, struct xdr_buf *buf)
971 {
972 xdr_reset_scratch_buffer(xdr);
973
974 xdr->buf = buf;
975 xdr->page_ptr = buf->pages;
976 xdr->iov = NULL;
977 xdr->p = page_address(*xdr->page_ptr);
978 xdr->end = (void *)xdr->p + min_t(u32, buf->buflen, PAGE_SIZE);
979 xdr->rqst = NULL;
980 }
981 EXPORT_SYMBOL_GPL(xdr_init_encode_pages);
982
983 /**
984 * __xdr_commit_encode - Ensure all data is written to buffer
985 * @xdr: pointer to xdr_stream
986 *
987 * We handle encoding across page boundaries by giving the caller a
988 * temporary location to write to, then later copying the data into
989 * place; xdr_commit_encode does that copying.
990 *
991 * Normally the caller doesn't need to call this directly, as the
992 * following xdr_reserve_space will do it. But an explicit call may be
993 * required at the end of encoding, or any other time when the xdr_buf
994 * data might be read.
995 */
996 void __xdr_commit_encode(struct xdr_stream *xdr)
997 {
998 size_t shift = xdr->scratch.iov_len;
999 void *page;
1000
1001 page = page_address(*xdr->page_ptr);
1002 memcpy(xdr->scratch.iov_base, page, shift);
1003 memmove(page, page + shift, (void *)xdr->p - page);
1004 xdr_reset_scratch_buffer(xdr);
1005 }
1006 EXPORT_SYMBOL_GPL(__xdr_commit_encode);
1007
1008 /*
1009 * The buffer space to be reserved crosses the boundary between
1010 * xdr->buf->head and xdr->buf->pages, or between two pages
1011 * in xdr->buf->pages.
1012 */
1013 static noinline __be32 *xdr_get_next_encode_buffer(struct xdr_stream *xdr,
1014 size_t nbytes)
1015 {
1016 int space_left;
1017 int frag1bytes, frag2bytes;
1018 void *p;
1019
1020 if (nbytes > PAGE_SIZE)
1021 goto out_overflow; /* Bigger buffers require special handling */
1022 if (xdr->buf->len + nbytes > xdr->buf->buflen)
1023 goto out_overflow; /* Sorry, we're totally out of space */
1024 frag1bytes = (xdr->end - xdr->p) << 2;
1025 frag2bytes = nbytes - frag1bytes;
1026 if (xdr->iov)
1027 xdr->iov->iov_len += frag1bytes;
1028 else
1029 xdr->buf->page_len += frag1bytes;
1030 xdr->page_ptr++;
1031 xdr->iov = NULL;
1032
1033 /*
1034 * If the last encode didn't end exactly on a page boundary, the
1035 * next one will straddle boundaries. Encode into the next
1036 * page, then copy it back later in xdr_commit_encode. We use
1037 * the "scratch" iov to track any temporarily unused fragment of
1038 * space at the end of the previous buffer:
1039 */
1040 xdr_set_scratch_buffer(xdr, xdr->p, frag1bytes);
1041
1042 /*
1043 * xdr->p is where the next encode will start after
1044 * xdr_commit_encode() has shifted this one back:
1045 */
1046 p = page_address(*xdr->page_ptr);
1047 xdr->p = p + frag2bytes;
1048 space_left = xdr->buf->buflen - xdr->buf->len;
1049 if (space_left - frag1bytes >= PAGE_SIZE)
1050 xdr->end = p + PAGE_SIZE;
1051 else
1052 xdr->end = p + space_left - frag1bytes;
1053
1054 xdr->buf->page_len += frag2bytes;
1055 xdr->buf->len += nbytes;
1056 return p;
1057 out_overflow:
1058 trace_rpc_xdr_overflow(xdr, nbytes);
1059 return NULL;
1060 }
1061
1062 /**
1063 * xdr_reserve_space - Reserve buffer space for sending
1064 * @xdr: pointer to xdr_stream
1065 * @nbytes: number of bytes to reserve
1066 *
1067 * Checks that we have enough buffer space to encode 'nbytes' more
1068 * bytes of data. If so, update the total xdr_buf length, and
1069 * adjust the length of the current kvec.
1070 *
1071 * The returned pointer is valid only until the next call to
1072 * xdr_reserve_space() or xdr_commit_encode() on @xdr. The current
1073 * implementation of this API guarantees that space reserved for a
1074 * four-byte data item remains valid until @xdr is destroyed, but
1075 * that might not always be true in the future.
1076 */
1077 __be32 * xdr_reserve_space(struct xdr_stream *xdr, size_t nbytes)
1078 {
1079 __be32 *p = xdr->p;
1080 __be32 *q;
1081
1082 xdr_commit_encode(xdr);
1083 /* align nbytes on the next 32-bit boundary */
1084 nbytes += 3;
1085 nbytes &= ~3;
1086 q = p + (nbytes >> 2);
1087 if (unlikely(q > xdr->end || q < p))
1088 return xdr_get_next_encode_buffer(xdr, nbytes);
1089 xdr->p = q;
1090 if (xdr->iov)
1091 xdr->iov->iov_len += nbytes;
1092 else
1093 xdr->buf->page_len += nbytes;
1094 xdr->buf->len += nbytes;
1095 return p;
1096 }
1097 EXPORT_SYMBOL_GPL(xdr_reserve_space);
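/*
 * Example (sketch): reserving room for a 32-bit length followed by an
 * opaque body.  "data" and "len" stand in for caller state; a NULL
 * return means the buffer is out of space:
 *
 *	p = xdr_reserve_space(xdr, 4 + xdr_align_size(len));
 *	if (!p)
 *		return -EMSGSIZE;
 *	*p++ = cpu_to_be32(len);
 *	xdr_encode_opaque_fixed(p, data, len);
 */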
1098
1099 /**
1100 * xdr_reserve_space_vec - Reserves a large amount of buffer space for sending
1101 * @xdr: pointer to xdr_stream
1102 * @nbytes: number of bytes to reserve
1103 *
1104 * The size argument passed to xdr_reserve_space() is determined based
1105 * on the number of bytes remaining in the current page to avoid
1106 * invalidating iov_base pointers when xdr_commit_encode() is called.
1107 *
1108 * Return values:
1109 * %0: success
1110 * %-EMSGSIZE: not enough space is available in @xdr
1111 */
1112 int xdr_reserve_space_vec(struct xdr_stream *xdr, size_t nbytes)
1113 {
1114 size_t thislen;
1115 __be32 *p;
1116
1117 /*
1118 * svcrdma requires every READ payload to start somewhere
1119 * in xdr->pages.
1120 */
1121 if (xdr->iov == xdr->buf->head) {
1122 xdr->iov = NULL;
1123 xdr->end = xdr->p;
1124 }
1125
1126 /* XXX: Let's find a way to make this more efficient */
1127 while (nbytes) {
1128 thislen = xdr->buf->page_len % PAGE_SIZE;
1129 thislen = min_t(size_t, nbytes, PAGE_SIZE - thislen);
1130
1131 p = xdr_reserve_space(xdr, thislen);
1132 if (!p)
1133 return -EMSGSIZE;
1134
1135 nbytes -= thislen;
1136 }
1137
1138 return 0;
1139 }
1140 EXPORT_SYMBOL_GPL(xdr_reserve_space_vec);
1141
1142 /**
1143 * xdr_truncate_encode - truncate an encode buffer
1144 * @xdr: pointer to xdr_stream
1145 * @len: new length of buffer
1146 *
1147 * Truncates the xdr stream, so that xdr->buf->len == len,
1148 * and xdr->p points at offset len from the start of the buffer, and
1149 * head, tail, and page lengths are adjusted to correspond.
1150 *
1151 * If this means moving xdr->p to a different buffer, we assume that
1152 * the end pointer should be set to the end of the current page,
1153 * except in the case of the head buffer when we assume the head
1154 * buffer's current length represents the end of the available buffer.
1155 *
1156 * This is *not* safe to use on a buffer that already has inlined page
1157 * cache pages (as in a zero-copy server read reply), except for the
1158 * simple case of truncating from one position in the tail to another.
1159 *
1160 */
1161 void xdr_truncate_encode(struct xdr_stream *xdr, size_t len)
1162 {
1163 struct xdr_buf *buf = xdr->buf;
1164 struct kvec *head = buf->head;
1165 struct kvec *tail = buf->tail;
1166 int fraglen;
1167 int new;
1168
1169 if (len > buf->len) {
1170 WARN_ON_ONCE(1);
1171 return;
1172 }
1173 xdr_commit_encode(xdr);
1174
1175 fraglen = min_t(int, buf->len - len, tail->iov_len);
1176 tail->iov_len -= fraglen;
1177 buf->len -= fraglen;
1178 if (tail->iov_len) {
1179 xdr->p = tail->iov_base + tail->iov_len;
1180 WARN_ON_ONCE(!xdr->end);
1181 WARN_ON_ONCE(!xdr->iov);
1182 return;
1183 }
1184 WARN_ON_ONCE(fraglen);
1185 fraglen = min_t(int, buf->len - len, buf->page_len);
1186 buf->page_len -= fraglen;
1187 buf->len -= fraglen;
1188
1189 new = buf->page_base + buf->page_len;
1190
1191 xdr->page_ptr = buf->pages + (new >> PAGE_SHIFT);
1192
1193 if (buf->page_len) {
1194 xdr->p = page_address(*xdr->page_ptr);
1195 xdr->end = (void *)xdr->p + PAGE_SIZE;
1196 xdr->p = (void *)xdr->p + (new % PAGE_SIZE);
1197 WARN_ON_ONCE(xdr->iov);
1198 return;
1199 }
1200 if (fraglen)
1201 xdr->end = head->iov_base + head->iov_len;
1202 /* (otherwise assume xdr->end is already set) */
1203 xdr->page_ptr--;
1204 head->iov_len = len;
1205 buf->len = len;
1206 xdr->p = head->iov_base + head->iov_len;
1207 xdr->iov = buf->head;
1208 }
1209 EXPORT_SYMBOL(xdr_truncate_encode);
1210
1211 /**
1212 * xdr_truncate_decode - Truncate a decoding stream
1213 * @xdr: pointer to struct xdr_stream
1214 * @len: Number of bytes to remove
1215 *
1216 */
1217 void xdr_truncate_decode(struct xdr_stream *xdr, size_t len)
1218 {
1219 unsigned int nbytes = xdr_align_size(len);
1220
1221 xdr->buf->len -= nbytes;
1222 xdr->nwords -= XDR_QUADLEN(nbytes);
1223 }
1224 EXPORT_SYMBOL_GPL(xdr_truncate_decode);
1225
1226 /**
1227 * xdr_restrict_buflen - decrease available buffer space
1228 * @xdr: pointer to xdr_stream
1229 * @newbuflen: new maximum number of bytes available
1230 *
1231 * Adjust our idea of how much space is available in the buffer.
1232 * If we've already used too much space in the buffer, returns -1.
1233 * If the available space is already smaller than newbuflen, returns 0
1234 * and does nothing. Otherwise, adjusts xdr->buf->buflen to newbuflen
1235 * and ensures xdr->end is set at most offset newbuflen from the start
1236 * of the buffer.
1237 */
1238 int xdr_restrict_buflen(struct xdr_stream *xdr, int newbuflen)
1239 {
1240 struct xdr_buf *buf = xdr->buf;
1241 int left_in_this_buf = (void *)xdr->end - (void *)xdr->p;
1242 int end_offset = buf->len + left_in_this_buf;
1243
1244 if (newbuflen < 0 || newbuflen < buf->len)
1245 return -1;
1246 if (newbuflen > buf->buflen)
1247 return 0;
1248 if (newbuflen < end_offset)
1249 xdr->end = (void *)xdr->end + newbuflen - end_offset;
1250 buf->buflen = newbuflen;
1251 return 0;
1252 }
1253 EXPORT_SYMBOL(xdr_restrict_buflen);
1254
1255 /**
1256 * xdr_write_pages - Insert a list of pages into an XDR buffer for sending
1257 * @xdr: pointer to xdr_stream
1258 * @pages: array of pages to insert
1259 * @base: starting offset of first data byte in @pages
1260 * @len: number of data bytes in @pages to insert
1261 *
1262 * After the @pages are added, the tail iovec is instantiated pointing to
1263 * end of the head buffer, and the stream is set up to encode subsequent
1264 * items into the tail.
1265 */
1266 void xdr_write_pages(struct xdr_stream *xdr, struct page **pages, unsigned int base,
1267 unsigned int len)
1268 {
1269 struct xdr_buf *buf = xdr->buf;
1270 struct kvec *tail = buf->tail;
1271
1272 buf->pages = pages;
1273 buf->page_base = base;
1274 buf->page_len = len;
1275
1276 tail->iov_base = xdr->p;
1277 tail->iov_len = 0;
1278 xdr->iov = tail;
1279
1280 if (len & 3) {
1281 unsigned int pad = 4 - (len & 3);
1282
1283 BUG_ON(xdr->p >= xdr->end);
1284 tail->iov_base = (char *)xdr->p + (len & 3);
1285 tail->iov_len += pad;
1286 len += pad;
1287 *xdr->p++ = 0;
1288 }
1289 buf->buflen += len;
1290 buf->len += len;
1291 }
1292 EXPORT_SYMBOL_GPL(xdr_write_pages);
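/*
 * Example (sketch): a WRITE-style encoder typically encodes its fixed
 * arguments into the head first, then attaches the payload pages.  Any
 * later xdr_reserve_space() calls then encode into the tail.  The names
 * below are placeholders for caller state:
 *
 *	xdr_write_pages(xdr, payload_pages, 0, payload_len);
 */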
1293
1294 static unsigned int xdr_set_iov(struct xdr_stream *xdr, struct kvec *iov,
1295 unsigned int base, unsigned int len)
1296 {
1297 if (len > iov->iov_len)
1298 len = iov->iov_len;
1299 if (unlikely(base > len))
1300 base = len;
1301 xdr->p = (__be32*)(iov->iov_base + base);
1302 xdr->end = (__be32*)(iov->iov_base + len);
1303 xdr->iov = iov;
1304 xdr->page_ptr = NULL;
1305 return len - base;
1306 }
1307
1308 static unsigned int xdr_set_tail_base(struct xdr_stream *xdr,
1309 unsigned int base, unsigned int len)
1310 {
1311 struct xdr_buf *buf = xdr->buf;
1312
1313 xdr_stream_set_pos(xdr, base + buf->page_len + buf->head->iov_len);
1314 return xdr_set_iov(xdr, buf->tail, base, len);
1315 }
1316
1317 static void xdr_stream_unmap_current_page(struct xdr_stream *xdr)
1318 {
1319 if (xdr->page_kaddr) {
1320 kunmap_local(xdr->page_kaddr);
1321 xdr->page_kaddr = NULL;
1322 }
1323 }
1324
1325 static unsigned int xdr_set_page_base(struct xdr_stream *xdr,
1326 unsigned int base, unsigned int len)
1327 {
1328 unsigned int pgnr;
1329 unsigned int maxlen;
1330 unsigned int pgoff;
1331 unsigned int pgend;
1332 void *kaddr;
1333
1334 maxlen = xdr->buf->page_len;
1335 if (base >= maxlen)
1336 return 0;
1337 else
1338 maxlen -= base;
1339 if (len > maxlen)
1340 len = maxlen;
1341
1342 xdr_stream_unmap_current_page(xdr);
1343 xdr_stream_page_set_pos(xdr, base);
1344 base += xdr->buf->page_base;
1345
1346 pgnr = base >> PAGE_SHIFT;
1347 xdr->page_ptr = &xdr->buf->pages[pgnr];
1348
1349 if (PageHighMem(*xdr->page_ptr)) {
1350 xdr->page_kaddr = kmap_local_page(*xdr->page_ptr);
1351 kaddr = xdr->page_kaddr;
1352 } else
1353 kaddr = page_address(*xdr->page_ptr);
1354
1355 pgoff = base & ~PAGE_MASK;
1356 xdr->p = (__be32*)(kaddr + pgoff);
1357
1358 pgend = pgoff + len;
1359 if (pgend > PAGE_SIZE)
1360 pgend = PAGE_SIZE;
1361 xdr->end = (__be32*)(kaddr + pgend);
1362 xdr->iov = NULL;
1363 return len;
1364 }
1365
1366 static void xdr_set_page(struct xdr_stream *xdr, unsigned int base,
1367 unsigned int len)
1368 {
1369 if (xdr_set_page_base(xdr, base, len) == 0) {
1370 base -= xdr->buf->page_len;
1371 xdr_set_tail_base(xdr, base, len);
1372 }
1373 }
1374
1375 static void xdr_set_next_page(struct xdr_stream *xdr)
1376 {
1377 unsigned int newbase;
1378
1379 newbase = (1 + xdr->page_ptr - xdr->buf->pages) << PAGE_SHIFT;
1380 newbase -= xdr->buf->page_base;
1381 if (newbase < xdr->buf->page_len)
1382 xdr_set_page_base(xdr, newbase, xdr_stream_remaining(xdr));
1383 else
1384 xdr_set_tail_base(xdr, 0, xdr_stream_remaining(xdr));
1385 }
1386
1387 static bool xdr_set_next_buffer(struct xdr_stream *xdr)
1388 {
1389 if (xdr->page_ptr != NULL)
1390 xdr_set_next_page(xdr);
1391 else if (xdr->iov == xdr->buf->head)
1392 xdr_set_page(xdr, 0, xdr_stream_remaining(xdr));
1393 return xdr->p != xdr->end;
1394 }
1395
1396 /**
1397 * xdr_init_decode - Initialize an xdr_stream for decoding data.
1398 * @xdr: pointer to xdr_stream struct
1399 * @buf: pointer to XDR buffer from which to decode data
1400 * @p: current pointer inside XDR buffer
1401 * @rqst: pointer to controlling rpc_rqst, for debugging
1402 */
1403 void xdr_init_decode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p,
1404 struct rpc_rqst *rqst)
1405 {
1406 xdr->buf = buf;
1407 xdr->page_kaddr = NULL;
1408 xdr_reset_scratch_buffer(xdr);
1409 xdr->nwords = XDR_QUADLEN(buf->len);
1410 if (xdr_set_iov(xdr, buf->head, 0, buf->len) == 0 &&
1411 xdr_set_page_base(xdr, 0, buf->len) == 0)
1412 xdr_set_iov(xdr, buf->tail, 0, buf->len);
1413 if (p != NULL && p > xdr->p && xdr->end >= p) {
1414 xdr->nwords -= p - xdr->p;
1415 xdr->p = p;
1416 }
1417 xdr->rqst = rqst;
1418 }
1419 EXPORT_SYMBOL_GPL(xdr_init_decode);
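/*
 * Example (sketch): decoding a received message.  "rcvbuf" is the
 * xdr_buf holding the reply and "p" points just past the words already
 * consumed by the RPC layer; both are assumptions of this sketch:
 *
 *	struct xdr_stream xdr;
 *
 *	xdr_init_decode(&xdr, rcvbuf, p, rqst);
 *	(decode with xdr_inline_decode(), xdr_read_pages(), ...)
 *	xdr_finish_decode(&xdr);
 */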
1420
1421 /**
1422 * xdr_init_decode_pages - Initialize an xdr_stream for decoding into pages
1423 * @xdr: pointer to xdr_stream struct
1424 * @buf: pointer to XDR buffer from which to decode data
1425 * @pages: list of pages to decode into
1426 * @len: length in bytes of buffer in pages
1427 */
1428 void xdr_init_decode_pages(struct xdr_stream *xdr, struct xdr_buf *buf,
1429 struct page **pages, unsigned int len)
1430 {
1431 memset(buf, 0, sizeof(*buf));
1432 buf->pages = pages;
1433 buf->page_len = len;
1434 buf->buflen = len;
1435 buf->len = len;
1436 xdr_init_decode(xdr, buf, NULL, NULL);
1437 }
1438 EXPORT_SYMBOL_GPL(xdr_init_decode_pages);
1439
1440 /**
1441 * xdr_finish_decode - Clean up the xdr_stream after decoding data.
1442 * @xdr: pointer to xdr_stream struct
1443 */
1444 void xdr_finish_decode(struct xdr_stream *xdr)
1445 {
1446 xdr_stream_unmap_current_page(xdr);
1447 }
1448 EXPORT_SYMBOL(xdr_finish_decode);
1449
1450 static __be32 * __xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes)
1451 {
1452 unsigned int nwords = XDR_QUADLEN(nbytes);
1453 __be32 *p = xdr->p;
1454 __be32 *q = p + nwords;
1455
1456 if (unlikely(nwords > xdr->nwords || q > xdr->end || q < p))
1457 return NULL;
1458 xdr->p = q;
1459 xdr->nwords -= nwords;
1460 return p;
1461 }
1462
1463 static __be32 *xdr_copy_to_scratch(struct xdr_stream *xdr, size_t nbytes)
1464 {
1465 __be32 *p;
1466 char *cpdest = xdr->scratch.iov_base;
1467 size_t cplen = (char *)xdr->end - (char *)xdr->p;
1468
1469 if (nbytes > xdr->scratch.iov_len)
1470 goto out_overflow;
1471 p = __xdr_inline_decode(xdr, cplen);
1472 if (p == NULL)
1473 return NULL;
1474 memcpy(cpdest, p, cplen);
1475 if (!xdr_set_next_buffer(xdr))
1476 goto out_overflow;
1477 cpdest += cplen;
1478 nbytes -= cplen;
1479 p = __xdr_inline_decode(xdr, nbytes);
1480 if (p == NULL)
1481 return NULL;
1482 memcpy(cpdest, p, nbytes);
1483 return xdr->scratch.iov_base;
1484 out_overflow:
1485 trace_rpc_xdr_overflow(xdr, nbytes);
1486 return NULL;
1487 }
1488
1489 /**
1490 * xdr_inline_decode - Retrieve XDR data to decode
1491 * @xdr: pointer to xdr_stream struct
1492 * @nbytes: number of bytes of data to decode
1493 *
1494 * Check if the input buffer is long enough to enable us to decode
1495 * 'nbytes' more bytes of data starting at the current position.
1496 * If so return the current pointer, then update the current
1497 * pointer position.
1498 */
1499 __be32 * xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes)
1500 {
1501 __be32 *p;
1502
1503 if (unlikely(nbytes == 0))
1504 return xdr->p;
1505 if (xdr->p == xdr->end && !xdr_set_next_buffer(xdr))
1506 goto out_overflow;
1507 p = __xdr_inline_decode(xdr, nbytes);
1508 if (p != NULL)
1509 return p;
1510 return xdr_copy_to_scratch(xdr, nbytes);
1511 out_overflow:
1512 trace_rpc_xdr_overflow(xdr, nbytes);
1513 return NULL;
1514 }
1515 EXPORT_SYMBOL_GPL(xdr_inline_decode);
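/*
 * Example (sketch): pulling a single 32-bit word out of the stream.  A
 * NULL return means the buffer ended before the requested bytes:
 *
 *	p = xdr_inline_decode(xdr, 4);
 *	if (!p)
 *		return -EIO;
 *	status = be32_to_cpup(p);
 */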
1516
1517 static void xdr_realign_pages(struct xdr_stream *xdr)
1518 {
1519 struct xdr_buf *buf = xdr->buf;
1520 struct kvec *iov = buf->head;
1521 unsigned int cur = xdr_stream_pos(xdr);
1522 unsigned int copied;
1523
1524 /* Realign pages to current pointer position */
1525 if (iov->iov_len > cur) {
1526 copied = xdr_shrink_bufhead(buf, cur);
1527 trace_rpc_xdr_alignment(xdr, cur, copied);
1528 xdr_set_page(xdr, 0, buf->page_len);
1529 }
1530 }
1531
1532 static unsigned int xdr_align_pages(struct xdr_stream *xdr, unsigned int len)
1533 {
1534 struct xdr_buf *buf = xdr->buf;
1535 unsigned int nwords = XDR_QUADLEN(len);
1536 unsigned int copied;
1537
1538 if (xdr->nwords == 0)
1539 return 0;
1540
1541 xdr_realign_pages(xdr);
1542 if (nwords > xdr->nwords) {
1543 nwords = xdr->nwords;
1544 len = nwords << 2;
1545 }
1546 if (buf->page_len <= len)
1547 len = buf->page_len;
1548 else if (nwords < xdr->nwords) {
1549 /* Truncate page data and move it into the tail */
1550 copied = xdr_shrink_pagelen(buf, len);
1551 trace_rpc_xdr_alignment(xdr, len, copied);
1552 }
1553 return len;
1554 }
1555
1556 /**
1557 * xdr_read_pages - align page-based XDR data to current pointer position
1558 * @xdr: pointer to xdr_stream struct
1559 * @len: number of bytes of page data
1560 *
1561 * Moves data beyond the current pointer position from the XDR head[] buffer
1562 * into the page list. Any data that lies beyond current position + @len
1563 * bytes is moved into the XDR tail[]. The xdr_stream current position is
1564 * then advanced past that data to align to the next XDR object in the tail.
1565 *
1566 * Returns the number of XDR encoded bytes now contained in the pages
1567 */
1568 unsigned int xdr_read_pages(struct xdr_stream *xdr, unsigned int len)
1569 {
1570 unsigned int nwords = XDR_QUADLEN(len);
1571 unsigned int base, end, pglen;
1572
1573 pglen = xdr_align_pages(xdr, nwords << 2);
1574 if (pglen == 0)
1575 return 0;
1576
1577 base = (nwords << 2) - pglen;
1578 end = xdr_stream_remaining(xdr) - pglen;
1579
1580 xdr_set_tail_base(xdr, base, end);
1581 return len <= pglen ? len : pglen;
1582 }
1583 EXPORT_SYMBOL_GPL(xdr_read_pages);
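/*
 * Example (sketch): a READ-style decoder that has just decoded the
 * on-the-wire byte count and wants the payload aligned into the page
 * list before decoding whatever follows it in the tail:
 *
 *	count = be32_to_cpup(p);
 *	count = xdr_read_pages(xdr, count);
 *
 * The value returned is clamped to the page data actually received.
 */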
1584
1585 /**
1586 * xdr_set_pagelen - Sets the length of the XDR pages
1587 * @xdr: pointer to xdr_stream struct
1588 * @len: new length of the XDR page data
1589 *
1590 * Either grows or shrinks the length of the xdr pages by setting pagelen to
1591 * @len bytes. When shrinking, any extra data is moved into buf->tail, whereas
1592 * when growing any data beyond the current pointer is moved into the tail.
1595 */
1596 void xdr_set_pagelen(struct xdr_stream *xdr, unsigned int len)
1597 {
1598 struct xdr_buf *buf = xdr->buf;
1599 size_t remaining = xdr_stream_remaining(xdr);
1600 size_t base = 0;
1601
1602 if (len < buf->page_len) {
1603 base = buf->page_len - len;
1604 xdr_shrink_pagelen(buf, len);
1605 } else {
1606 xdr_buf_head_shift_right(buf, xdr_stream_pos(xdr),
1607 buf->page_len, remaining);
1608 if (len > buf->page_len)
1609 xdr_buf_try_expand(buf, len - buf->page_len);
1610 }
1611 xdr_set_tail_base(xdr, base, remaining);
1612 }
1613 EXPORT_SYMBOL_GPL(xdr_set_pagelen);
1614
1615 /**
1616 * xdr_enter_page - decode data from the XDR page
1617 * @xdr: pointer to xdr_stream struct
1618 * @len: number of bytes of page data
1619 *
1620 * Moves data beyond the current pointer position from the XDR head[] buffer
1621 * into the page list. Any data that lies beyond current position + "len"
1622 * bytes is moved into the XDR tail[]. The current pointer is then
1623 * repositioned at the beginning of the first XDR page.
1624 */
1625 void xdr_enter_page(struct xdr_stream *xdr, unsigned int len)
1626 {
1627 len = xdr_align_pages(xdr, len);
1628 /*
1629 * Position current pointer at beginning of tail, and
1630 * set remaining message length.
1631 */
1632 if (len != 0)
1633 xdr_set_page_base(xdr, 0, len);
1634 }
1635 EXPORT_SYMBOL_GPL(xdr_enter_page);
1636
1637 static const struct kvec empty_iov = {.iov_base = NULL, .iov_len = 0};
1638
1639 void xdr_buf_from_iov(const struct kvec *iov, struct xdr_buf *buf)
1640 {
1641 buf->head[0] = *iov;
1642 buf->tail[0] = empty_iov;
1643 buf->page_len = 0;
1644 buf->buflen = buf->len = iov->iov_len;
1645 }
1646 EXPORT_SYMBOL_GPL(xdr_buf_from_iov);
1647
1648 /**
1649 * xdr_buf_subsegment - set subbuf to a portion of buf
1650 * @buf: an xdr buffer
1651 * @subbuf: the result buffer
1652 * @base: beginning of range in bytes
1653 * @len: length of range in bytes
1654 *
1655 * sets @subbuf to an xdr buffer representing the portion of @buf of
1656 * length @len starting at offset @base.
1657 *
1658 * @buf and @subbuf may be pointers to the same struct xdr_buf.
1659 *
1660 * Returns -1 if base or length are out of bounds.
1661 */
1662 int xdr_buf_subsegment(const struct xdr_buf *buf, struct xdr_buf *subbuf,
1663 unsigned int base, unsigned int len)
1664 {
1665 subbuf->buflen = subbuf->len = len;
1666 if (base < buf->head[0].iov_len) {
1667 subbuf->head[0].iov_base = buf->head[0].iov_base + base;
1668 subbuf->head[0].iov_len = min_t(unsigned int, len,
1669 buf->head[0].iov_len - base);
1670 len -= subbuf->head[0].iov_len;
1671 base = 0;
1672 } else {
1673 base -= buf->head[0].iov_len;
1674 subbuf->head[0].iov_base = buf->head[0].iov_base;
1675 subbuf->head[0].iov_len = 0;
1676 }
1677
1678 if (base < buf->page_len) {
1679 subbuf->page_len = min(buf->page_len - base, len);
1680 base += buf->page_base;
1681 subbuf->page_base = base & ~PAGE_MASK;
1682 subbuf->pages = &buf->pages[base >> PAGE_SHIFT];
1683 len -= subbuf->page_len;
1684 base = 0;
1685 } else {
1686 base -= buf->page_len;
1687 subbuf->pages = buf->pages;
1688 subbuf->page_base = 0;
1689 subbuf->page_len = 0;
1690 }
1691
1692 if (base < buf->tail[0].iov_len) {
1693 subbuf->tail[0].iov_base = buf->tail[0].iov_base + base;
1694 subbuf->tail[0].iov_len = min_t(unsigned int, len,
1695 buf->tail[0].iov_len - base);
1696 len -= subbuf->tail[0].iov_len;
1697 base = 0;
1698 } else {
1699 base -= buf->tail[0].iov_len;
1700 subbuf->tail[0].iov_base = buf->tail[0].iov_base;
1701 subbuf->tail[0].iov_len = 0;
1702 }
1703
1704 if (base || len)
1705 return -1;
1706 return 0;
1707 }
1708 EXPORT_SYMBOL_GPL(xdr_buf_subsegment);
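/*
 * Example (sketch): carving out a sub-range of a buffer, for instance
 * the region occupied by a security token, so it can be processed in
 * place.  "offset" and "token_len" are placeholders:
 *
 *	struct xdr_buf subbuf;
 *
 *	if (xdr_buf_subsegment(buf, &subbuf, offset, token_len) < 0)
 *		return -EINVAL;
 */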
1709
1710 /**
1711 * xdr_stream_subsegment - set @subbuf to a portion of @xdr
1712 * @xdr: an xdr_stream set up for decoding
1713 * @subbuf: the result buffer
1714 * @nbytes: length of @xdr to extract, in bytes
1715 *
1716 * Sets up @subbuf to represent a portion of @xdr. The portion
1717 * starts at the current offset in @xdr, and extends for a length
1718 * of @nbytes. If this is successful, @xdr is advanced to the next
1719 * XDR data item following that portion.
1720 *
1721 * Return values:
1722 * %true: @subbuf has been initialized, and @xdr has been advanced.
1723 * %false: a bounds error has occurred
1724 */
1725 bool xdr_stream_subsegment(struct xdr_stream *xdr, struct xdr_buf *subbuf,
1726 unsigned int nbytes)
1727 {
1728 unsigned int start = xdr_stream_pos(xdr);
1729 unsigned int remaining, len;
1730
1731 /* Extract @subbuf and bounds-check the fn arguments */
1732 if (xdr_buf_subsegment(xdr->buf, subbuf, start, nbytes))
1733 return false;
1734
1735 /* Advance @xdr by @nbytes */
1736 for (remaining = nbytes; remaining;) {
1737 if (xdr->p == xdr->end && !xdr_set_next_buffer(xdr))
1738 return false;
1739
1740 len = (char *)xdr->end - (char *)xdr->p;
1741 if (remaining <= len) {
1742 xdr->p = (__be32 *)((char *)xdr->p +
1743 (remaining + xdr_pad_size(nbytes)));
1744 break;
1745 }
1746
1747 xdr->p = (__be32 *)((char *)xdr->p + len);
1748 xdr->end = xdr->p;
1749 remaining -= len;
1750 }
1751
1752 xdr_stream_set_pos(xdr, start + nbytes);
1753 return true;
1754 }
1755 EXPORT_SYMBOL_GPL(xdr_stream_subsegment);
1756
1757 /**
1758 * xdr_stream_move_subsegment - Move part of a stream to another position
1759 * @xdr: the source xdr_stream
1760 * @offset: the source offset of the segment
1761 * @target: the target offset of the segment
1762 * @length: the number of bytes to move
1763 *
1764 * Moves @length bytes from @offset to @target in the xdr_stream, overwriting
1765 * anything in its space. Returns the number of bytes in the segment.
1766 */
1767 unsigned int xdr_stream_move_subsegment(struct xdr_stream *xdr, unsigned int offset,
1768 unsigned int target, unsigned int length)
1769 {
1770 struct xdr_buf buf;
1771 unsigned int shift;
1772
1773 if (offset < target) {
1774 shift = target - offset;
1775 if (xdr_buf_subsegment(xdr->buf, &buf, offset, shift + length) < 0)
1776 return 0;
1777 xdr_buf_head_shift_right(&buf, 0, length, shift);
1778 } else if (offset > target) {
1779 shift = offset - target;
1780 if (xdr_buf_subsegment(xdr->buf, &buf, target, shift + length) < 0)
1781 return 0;
1782 xdr_buf_head_shift_left(&buf, shift, length, shift);
1783 }
1784 return length;
1785 }
1786 EXPORT_SYMBOL_GPL(xdr_stream_move_subsegment);
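
/*
 * Illustrative sketch: shifting an already-encoded segment 4 bytes later so
 * that a 32-bit word whose value was not known at encode time can be written
 * in front of it (for instance with xdr_encode_word() on xdr->buf). The
 * offsets are hypothetical and tracked by the caller.
 */
static void __maybe_unused example_make_room_for_word(struct xdr_stream *xdr,
						      unsigned int seg_offset,
						      unsigned int seg_len)
{
	xdr_stream_move_subsegment(xdr, seg_offset, seg_offset + 4, seg_len);
}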
1787
1788 /**
1789 * xdr_stream_zero - zero out a portion of an xdr_stream
1790 * @xdr: an xdr_stream to zero out
1791 * @offset: the starting point in the stream
1792 * @length: the number of bytes to zero
 *
 * Returns the number of bytes zeroed, or 0 if the range is out of bounds.
1793 */
1794 unsigned int xdr_stream_zero(struct xdr_stream *xdr, unsigned int offset,
1795 unsigned int length)
1796 {
1797 struct xdr_buf buf;
1798
1799 if (xdr_buf_subsegment(xdr->buf, &buf, offset, length) < 0)
1800 return 0;
1801 if (buf.head[0].iov_len)
1802 xdr_buf_iov_zero(buf.head, 0, buf.head[0].iov_len);
1803 if (buf.page_len > 0)
1804 xdr_buf_pages_zero(&buf, 0, buf.page_len);
1805 if (buf.tail[0].iov_len)
1806 xdr_buf_iov_zero(buf.tail, 0, buf.tail[0].iov_len);
1807 return length;
1808 }
1809 EXPORT_SYMBOL_GPL(xdr_stream_zero);
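
/*
 * Illustrative sketch: after a short read, clear the part of the reply that
 * was reserved but never filled so stale data cannot leak to the peer.
 * @offset, @reserved and @filled are hypothetical values kept by the caller.
 */
static void __maybe_unused example_clear_unfilled(struct xdr_stream *xdr,
						  unsigned int offset,
						  unsigned int reserved,
						  unsigned int filled)
{
	if (filled < reserved)
		xdr_stream_zero(xdr, offset + filled, reserved - filled);
}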
1810
1811 /**
1812 * xdr_buf_trim - lop at most "len" bytes off the end of "buf"
1813 * @buf: buf to be trimmed
1814 * @len: number of bytes to reduce "buf" by
1815 *
1816 * Trim an xdr_buf by the given number of bytes by fixing up the lengths. Note
1817 * that it's possible that we'll trim less than that amount if the xdr_buf is
1818 * too small, or if (for instance) it's all in the head and the parser has
1819 * already read too far into it.
1820 */
1821 void xdr_buf_trim(struct xdr_buf *buf, unsigned int len)
1822 {
1823 size_t cur;
1824 unsigned int trim = len;
1825
1826 if (buf->tail[0].iov_len) {
1827 cur = min_t(size_t, buf->tail[0].iov_len, trim);
1828 buf->tail[0].iov_len -= cur;
1829 trim -= cur;
1830 if (!trim)
1831 goto fix_len;
1832 }
1833
1834 if (buf->page_len) {
1835 cur = min_t(unsigned int, buf->page_len, trim);
1836 buf->page_len -= cur;
1837 trim -= cur;
1838 if (!trim)
1839 goto fix_len;
1840 }
1841
1842 if (buf->head[0].iov_len) {
1843 cur = min_t(size_t, buf->head[0].iov_len, trim);
1844 buf->head[0].iov_len -= cur;
1845 trim -= cur;
1846 }
1847 fix_len:
1848 buf->len -= (len - trim);
1849 }
1850 EXPORT_SYMBOL_GPL(xdr_buf_trim);
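
/*
 * Illustrative sketch: copying a trailing checksum out of a received buffer
 * and then trimming it off, roughly the shape of what an integrity-checking
 * consumer would do. The helper name and checksum handling are hypothetical.
 */
static int __maybe_unused example_strip_trailer(struct xdr_buf *buf,
						void *csum, unsigned int csum_len)
{
	int err;

	if (buf->len < csum_len)
		return -EINVAL;
	err = read_bytes_from_xdr_buf(buf, buf->len - csum_len, csum, csum_len);
	if (err)
		return err;
	xdr_buf_trim(buf, csum_len);
	return 0;
}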
1851
1852 static void __read_bytes_from_xdr_buf(const struct xdr_buf *subbuf,
1853 void *obj, unsigned int len)
1854 {
1855 unsigned int this_len;
1856
1857 this_len = min_t(unsigned int, len, subbuf->head[0].iov_len);
1858 memcpy(obj, subbuf->head[0].iov_base, this_len);
1859 len -= this_len;
1860 obj += this_len;
1861 this_len = min_t(unsigned int, len, subbuf->page_len);
1862 _copy_from_pages(obj, subbuf->pages, subbuf->page_base, this_len);
1863 len -= this_len;
1864 obj += this_len;
1865 this_len = min_t(unsigned int, len, subbuf->tail[0].iov_len);
1866 memcpy(obj, subbuf->tail[0].iov_base, this_len);
1867 }
1868
1869 /* obj is assumed to point to allocated memory of size at least len: */
1870 int read_bytes_from_xdr_buf(const struct xdr_buf *buf, unsigned int base,
1871 void *obj, unsigned int len)
1872 {
1873 struct xdr_buf subbuf;
1874 int status;
1875
1876 status = xdr_buf_subsegment(buf, &subbuf, base, len);
1877 if (status != 0)
1878 return status;
1879 __read_bytes_from_xdr_buf(&subbuf, obj, len);
1880 return 0;
1881 }
1882 EXPORT_SYMBOL_GPL(read_bytes_from_xdr_buf);
1883
1884 static void __write_bytes_to_xdr_buf(const struct xdr_buf *subbuf,
1885 void *obj, unsigned int len)
1886 {
1887 unsigned int this_len;
1888
1889 this_len = min_t(unsigned int, len, subbuf->head[0].iov_len);
1890 memcpy(subbuf->head[0].iov_base, obj, this_len);
1891 len -= this_len;
1892 obj += this_len;
1893 this_len = min_t(unsigned int, len, subbuf->page_len);
1894 _copy_to_pages(subbuf->pages, subbuf->page_base, obj, this_len);
1895 len -= this_len;
1896 obj += this_len;
1897 this_len = min_t(unsigned int, len, subbuf->tail[0].iov_len);
1898 memcpy(subbuf->tail[0].iov_base, obj, this_len);
1899 }
1900
1901 /* obj is assumed to point to allocated memory of size at least len: */
1902 int write_bytes_to_xdr_buf(const struct xdr_buf *buf, unsigned int base,
1903 void *obj, unsigned int len)
1904 {
1905 struct xdr_buf subbuf;
1906 int status;
1907
1908 status = xdr_buf_subsegment(buf, &subbuf, base, len);
1909 if (status != 0)
1910 return status;
1911 __write_bytes_to_xdr_buf(&subbuf, obj, len);
1912 return 0;
1913 }
1914 EXPORT_SYMBOL_GPL(write_bytes_to_xdr_buf);
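
/*
 * Illustrative sketch: reading a fixed-size cookie at a known offset,
 * updating it, and writing it back in place. Offset and cookie layout are
 * hypothetical; both helpers copy across head, pages and tail as needed.
 */
static int __maybe_unused example_bump_cookie(const struct xdr_buf *buf,
					      unsigned int offset)
{
	__be64 cookie;
	int err;

	err = read_bytes_from_xdr_buf(buf, offset, &cookie, sizeof(cookie));
	if (err)
		return err;
	cookie = cpu_to_be64(be64_to_cpu(cookie) + 1);
	return write_bytes_to_xdr_buf(buf, offset, &cookie, sizeof(cookie));
}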
1915
1916 int xdr_decode_word(const struct xdr_buf *buf, unsigned int base, u32 *obj)
1917 {
1918 __be32 raw;
1919 int status;
1920
1921 status = read_bytes_from_xdr_buf(buf, base, &raw, sizeof(*obj));
1922 if (status)
1923 return status;
1924 *obj = be32_to_cpu(raw);
1925 return 0;
1926 }
1927 EXPORT_SYMBOL_GPL(xdr_decode_word);
1928
1929 int xdr_encode_word(const struct xdr_buf *buf, unsigned int base, u32 obj)
1930 {
1931 __be32 raw = cpu_to_be32(obj);
1932
1933 return write_bytes_to_xdr_buf(buf, base, &raw, sizeof(obj));
1934 }
1935 EXPORT_SYMBOL_GPL(xdr_encode_word);
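
/*
 * Illustrative sketch: backfilling a 32-bit count whose value was not known
 * when encoding began, by writing it at an offset the caller recorded
 * earlier. @count_offset is hypothetical.
 */
static int __maybe_unused example_backfill_count(const struct xdr_buf *buf,
						 unsigned int count_offset,
						 u32 count)
{
	return xdr_encode_word(buf, count_offset, count);
}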
1936
1937 /*
 * Common worker for xdr_encode_array2() and xdr_decode_array2(): walk the
 * array elements across head, pages and tail, invoking desc->xcode on each
 * element and buffering any element that straddles a segment boundary.
 * Returns 0 on success, or else a negative error code.
 */
1938 static int xdr_xcode_array2(const struct xdr_buf *buf, unsigned int base,
1939 struct xdr_array2_desc *desc, int encode)
1940 {
1941 char *elem = NULL, *c;
1942 unsigned int copied = 0, todo, avail_here;
1943 struct page **ppages = NULL;
1944 int err;
1945
1946 if (encode) {
1947 if (xdr_encode_word(buf, base, desc->array_len) != 0)
1948 return -EINVAL;
1949 } else {
1950 if (xdr_decode_word(buf, base, &desc->array_len) != 0 ||
1951 desc->array_len > desc->array_maxlen ||
1952 (unsigned long) base + 4 + desc->array_len *
1953 desc->elem_size > buf->len)
1954 return -EINVAL;
1955 }
1956 base += 4;
1957
1958 if (!desc->xcode)
1959 return 0;
1960
1961 todo = desc->array_len * desc->elem_size;
1962
1963 /* process head */
1964 if (todo && base < buf->head->iov_len) {
1965 c = buf->head->iov_base + base;
1966 avail_here = min_t(unsigned int, todo,
1967 buf->head->iov_len - base);
1968 todo -= avail_here;
1969
1970 while (avail_here >= desc->elem_size) {
1971 err = desc->xcode(desc, c);
1972 if (err)
1973 goto out;
1974 c += desc->elem_size;
1975 avail_here -= desc->elem_size;
1976 }
1977 if (avail_here) {
1978 if (!elem) {
1979 elem = kmalloc(desc->elem_size, GFP_KERNEL);
1980 err = -ENOMEM;
1981 if (!elem)
1982 goto out;
1983 }
1984 if (encode) {
1985 err = desc->xcode(desc, elem);
1986 if (err)
1987 goto out;
1988 memcpy(c, elem, avail_here);
1989 } else
1990 memcpy(elem, c, avail_here);
1991 copied = avail_here;
1992 }
1993 base = buf->head->iov_len; /* align to start of pages */
1994 }
1995
1996 /* process pages array */
1997 base -= buf->head->iov_len;
1998 if (todo && base < buf->page_len) {
1999 unsigned int avail_page;
2000
2001 avail_here = min(todo, buf->page_len - base);
2002 todo -= avail_here;
2003
2004 base += buf->page_base;
2005 ppages = buf->pages + (base >> PAGE_SHIFT);
2006 base &= ~PAGE_MASK;
2007 avail_page = min_t(unsigned int, PAGE_SIZE - base,
2008 avail_here);
2009 c = kmap(*ppages) + base;
2010
2011 while (avail_here) {
2012 avail_here -= avail_page;
2013 if (copied || avail_page < desc->elem_size) {
2014 unsigned int l = min(avail_page,
2015 desc->elem_size - copied);
2016 if (!elem) {
2017 elem = kmalloc(desc->elem_size,
2018 GFP_KERNEL);
2019 err = -ENOMEM;
2020 if (!elem)
2021 goto out;
2022 }
2023 if (encode) {
2024 if (!copied) {
2025 err = desc->xcode(desc, elem);
2026 if (err)
2027 goto out;
2028 }
2029 memcpy(c, elem + copied, l);
2030 copied += l;
2031 if (copied == desc->elem_size)
2032 copied = 0;
2033 } else {
2034 memcpy(elem + copied, c, l);
2035 copied += l;
2036 if (copied == desc->elem_size) {
2037 err = desc->xcode(desc, elem);
2038 if (err)
2039 goto out;
2040 copied = 0;
2041 }
2042 }
2043 avail_page -= l;
2044 c += l;
2045 }
2046 while (avail_page >= desc->elem_size) {
2047 err = desc->xcode(desc, c);
2048 if (err)
2049 goto out;
2050 c += desc->elem_size;
2051 avail_page -= desc->elem_size;
2052 }
2053 if (avail_page) {
2054 unsigned int l = min(avail_page,
2055 desc->elem_size - copied);
2056 if (!elem) {
2057 elem = kmalloc(desc->elem_size,
2058 GFP_KERNEL);
2059 err = -ENOMEM;
2060 if (!elem)
2061 goto out;
2062 }
2063 if (encode) {
2064 if (!copied) {
2065 err = desc->xcode(desc, elem);
2066 if (err)
2067 goto out;
2068 }
2069 memcpy(c, elem + copied, l);
2070 copied += l;
2071 if (copied == desc->elem_size)
2072 copied = 0;
2073 } else {
2074 memcpy(elem + copied, c, l);
2075 copied += l;
2076 if (copied == desc->elem_size) {
2077 err = desc->xcode(desc, elem);
2078 if (err)
2079 goto out;
2080 copied = 0;
2081 }
2082 }
2083 }
2084 if (avail_here) {
2085 kunmap(*ppages);
2086 ppages++;
2087 c = kmap(*ppages);
2088 }
2089
2090 avail_page = min(avail_here,
2091 (unsigned int) PAGE_SIZE);
2092 }
2093 base = buf->page_len; /* align to start of tail */
2094 }
2095
2096 /* process tail */
2097 base -= buf->page_len;
2098 if (todo) {
2099 c = buf->tail->iov_base + base;
2100 if (copied) {
2101 unsigned int l = desc->elem_size - copied;
2102
2103 if (encode)
2104 memcpy(c, elem + copied, l);
2105 else {
2106 memcpy(elem + copied, c, l);
2107 err = desc->xcode(desc, elem);
2108 if (err)
2109 goto out;
2110 }
2111 todo -= l;
2112 c += l;
2113 }
2114 while (todo) {
2115 err = desc->xcode(desc, c);
2116 if (err)
2117 goto out;
2118 c += desc->elem_size;
2119 todo -= desc->elem_size;
2120 }
2121 }
2122 err = 0;
2123
2124 out:
2125 kfree(elem);
2126 if (ppages)
2127 kunmap(*ppages);
2128 return err;
2129 }
2130
2131 int xdr_decode_array2(const struct xdr_buf *buf, unsigned int base,
2132 struct xdr_array2_desc *desc)
2133 {
2134 if (base >= buf->len)
2135 return -EINVAL;
2136
2137 return xdr_xcode_array2(buf, base, desc, 0);
2138 }
2139 EXPORT_SYMBOL_GPL(xdr_decode_array2);
2140
2141 int xdr_encode_array2(const struct xdr_buf *buf, unsigned int base,
2142 struct xdr_array2_desc *desc)
2143 {
2144 if ((unsigned long) base + 4 + desc->array_len * desc->elem_size >
2145 buf->head->iov_len + buf->page_len + buf->tail->iov_len)
2146 return -EINVAL;
2147
2148 return xdr_xcode_array2(buf, base, desc, 1);
2149 }
2150 EXPORT_SYMBOL_GPL(xdr_encode_array2);
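
/*
 * Illustrative sketch: decoding a flat array of 32-bit IDs with the array2
 * machinery. The descriptor is embedded in a caller context so the xcode
 * callback can reach its output buffer; every name here is hypothetical.
 */
struct example_id_ctx {
	struct xdr_array2_desc desc;
	u32 *ids;
	unsigned int count;
};

static int example_decode_one_id(struct xdr_array2_desc *desc, void *elem)
{
	struct example_id_ctx *ctx =
		container_of(desc, struct example_id_ctx, desc);

	ctx->ids[ctx->count++] = be32_to_cpup(elem);
	return 0;
}

static int __maybe_unused example_decode_ids(const struct xdr_buf *buf,
					     unsigned int base,
					     u32 *ids, unsigned int max)
{
	struct example_id_ctx ctx = {
		.desc = {
			.elem_size	= sizeof(u32),
			.array_maxlen	= max,
			.xcode		= example_decode_one_id,
		},
		.ids = ids,
	};
	int err;

	err = xdr_decode_array2(buf, base, &ctx.desc);
	return err ? err : ctx.count;
}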
2151
2152 int xdr_process_buf(const struct xdr_buf *buf, unsigned int offset,
2153 unsigned int len,
2154 int (*actor)(struct scatterlist *, void *), void *data)
2155 {
2156 int i, ret = 0;
2157 unsigned int page_len, thislen, page_offset;
2158 struct scatterlist sg[1];
2159
2160 sg_init_table(sg, 1);
2161
2162 if (offset >= buf->head[0].iov_len) {
2163 offset -= buf->head[0].iov_len;
2164 } else {
2165 thislen = buf->head[0].iov_len - offset;
2166 if (thislen > len)
2167 thislen = len;
2168 sg_set_buf(sg, buf->head[0].iov_base + offset, thislen);
2169 ret = actor(sg, data);
2170 if (ret)
2171 goto out;
2172 offset = 0;
2173 len -= thislen;
2174 }
2175 if (len == 0)
2176 goto out;
2177
2178 if (offset >= buf->page_len) {
2179 offset -= buf->page_len;
2180 } else {
2181 page_len = buf->page_len - offset;
2182 if (page_len > len)
2183 page_len = len;
2184 len -= page_len;
2185 page_offset = (offset + buf->page_base) & (PAGE_SIZE - 1);
2186 i = (offset + buf->page_base) >> PAGE_SHIFT;
2187 thislen = PAGE_SIZE - page_offset;
2188 do {
2189 if (thislen > page_len)
2190 thislen = page_len;
2191 sg_set_page(sg, buf->pages[i], thislen, page_offset);
2192 ret = actor(sg, data);
2193 if (ret)
2194 goto out;
2195 page_len -= thislen;
2196 i++;
2197 page_offset = 0;
2198 thislen = PAGE_SIZE;
2199 } while (page_len != 0);
2200 offset = 0;
2201 }
2202 if (len == 0)
2203 goto out;
2204 if (offset < buf->tail[0].iov_len) {
2205 thislen = buf->tail[0].iov_len - offset;
2206 if (thislen > len)
2207 thislen = len;
2208 sg_set_buf(sg, buf->tail[0].iov_base + offset, thislen);
2209 ret = actor(sg, data);
2210 len -= thislen;
2211 }
2212 if (len != 0)
2213 ret = -EINVAL;
2214 out:
2215 return ret;
2216 }
2217 EXPORT_SYMBOL_GPL(xdr_process_buf);
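
/*
 * Illustrative sketch: xdr_process_buf() presents head, pages and tail to
 * the actor one scatterlist entry at a time; real callers typically hand the
 * entries to the crypto layer. This hypothetical actor merely counts bytes.
 */
static int example_count_actor(struct scatterlist *sg, void *data)
{
	unsigned int *total = data;

	*total += sg->length;
	return 0;	/* a non-zero return would abort the walk */
}

static int __maybe_unused example_count_bytes(const struct xdr_buf *buf,
					      unsigned int offset,
					      unsigned int len,
					      unsigned int *total)
{
	*total = 0;
	return xdr_process_buf(buf, offset, len, example_count_actor, total);
}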
2218
2219 /**
2220 * xdr_stream_decode_string_dup - Decode and duplicate variable length string
2221 * @xdr: pointer to xdr_stream
2222 * @str: location to store pointer to string
2223 * @maxlen: maximum acceptable string length
2224 * @gfp_flags: GFP mask to use
2225 *
2226 * Return values:
2227 * On success, returns the length of the NUL-terminated string stored in *@str
2228 * %-EBADMSG on XDR buffer overflow
2229 * %-EMSGSIZE if the size of the string would exceed @maxlen
2230 * %-ENOMEM on memory allocation failure
2231 */
2232 ssize_t xdr_stream_decode_string_dup(struct xdr_stream *xdr, char **str,
2233 size_t maxlen, gfp_t gfp_flags)
2234 {
2235 void *p;
2236 ssize_t ret;
2237
2238 ret = xdr_stream_decode_opaque_inline(xdr, &p, maxlen);
2239 if (ret > 0) {
2240 char *s = kmemdup_nul(p, ret, gfp_flags);
2241 if (s != NULL) {
2242 *str = s;
2243 return strlen(s);
2244 }
2245 ret = -ENOMEM;
2246 }
2247 *str = NULL;
2248 return ret;
2249 }
2250 EXPORT_SYMBOL_GPL(xdr_stream_decode_string_dup);
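
/*
 * Illustrative sketch: duplicating a counted string from the stream into a
 * freshly allocated, NUL-terminated buffer that the caller later kfree()s.
 * The 255-byte limit and the helper name are hypothetical.
 */
static ssize_t __maybe_unused example_dup_name(struct xdr_stream *xdr,
					       char **name)
{
	return xdr_stream_decode_string_dup(xdr, name, 255, GFP_KERNEL);
}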
2251
2252 /**
2253 * xdr_stream_decode_opaque_auth - Decode struct opaque_auth (RFC5531 S8.2)
2254 * @xdr: pointer to xdr_stream
2255 * @flavor: location to store decoded flavor
2256 * @body: location to store a pointer to the decoded body
2257 * @body_len: location to store length of decoded body
2258 *
2259 * Return values:
2260 * On success, returns the number of buffer bytes consumed
2261 * %-EBADMSG on XDR buffer overflow
2262 * %-EMSGSIZE if the decoded size of the body field exceeds 400 octets
2263 */
2264 ssize_t xdr_stream_decode_opaque_auth(struct xdr_stream *xdr, u32 *flavor,
2265 void **body, unsigned int *body_len)
2266 {
2267 ssize_t ret, len;
2268
2269 len = xdr_stream_decode_u32(xdr, flavor);
2270 if (unlikely(len < 0))
2271 return len;
2272 ret = xdr_stream_decode_opaque_inline(xdr, body, RPC_MAX_AUTH_SIZE);
2273 if (unlikely(ret < 0))
2274 return ret;
2275 *body_len = ret;
2276 return len + ret;
2277 }
2278 EXPORT_SYMBOL_GPL(xdr_stream_decode_opaque_auth);
2279
2280 /**
2281 * xdr_stream_encode_opaque_auth - Encode struct opaque_auth (RFC5531 S8.2)
2282 * @xdr: pointer to xdr_stream
2283 * @flavor: verifier flavor to encode
2284 * @body: content of body to encode
2285 * @body_len: length of body to encode
2286 *
2287 * Return values:
2288 * On success, returns length in bytes of XDR buffer consumed
2289 * %-EBADMSG on XDR buffer overflow
2290 * %-EMSGSIZE if the size of @body exceeds 400 octets
2291 */
2292 ssize_t xdr_stream_encode_opaque_auth(struct xdr_stream *xdr, u32 flavor,
2293 void *body, unsigned int body_len)
2294 {
2295 ssize_t ret, len;
2296
2297 if (unlikely(body_len > RPC_MAX_AUTH_SIZE))
2298 return -EMSGSIZE;
2299 len = xdr_stream_encode_u32(xdr, flavor);
2300 if (unlikely(len < 0))
2301 return len;
2302 ret = xdr_stream_encode_opaque(xdr, body, body_len);
2303 if (unlikely(ret < 0))
2304 return ret;
2305 return len + ret;
2306 }
2307 EXPORT_SYMBOL_GPL(xdr_stream_encode_opaque_auth);
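
/*
 * Illustrative sketch: emitting a NULL verifier (flavor RPC_AUTH_NULL with
 * an empty body), the opaque_auth an RPC message carries when no security
 * flavor is in effect. The helper name is hypothetical.
 */
static ssize_t __maybe_unused example_encode_null_verf(struct xdr_stream *xdr)
{
	return xdr_stream_encode_opaque_auth(xdr, RPC_AUTH_NULL, NULL, 0);
}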
2308