Excerpts from the Linux kernel's lib/iov_iter.c
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/fault-inject-usercopy.h>

/**
 * fault_in_iov_iter_readable - fault in iov iterator for reading
 * @i: iterator
 * ...
 * Always returns 0 for non-userspace iterators.
 */
size_t fault_in_iov_iter_readable(const struct iov_iter *i, size_t size)
{
        if (iter_is_ubuf(i)) {
                size_t n = min(size, iov_iter_count(i));
                n -= fault_in_readable(i->ubuf + i->iov_offset, n);
                return size - n;
        } else if (iter_is_iovec(i)) {
                size_t count = min(size, iov_iter_count(i));
                const struct iovec *p;
                size_t skip;

                size -= count;
                for (p = iter_iov(i), skip = i->iov_offset; count; p++, skip = 0) {
                        size_t len = min(count, p->iov_len - skip);
                        size_t ret;

                        if (unlikely(!len))
                                continue;
                        ret = fault_in_readable(p->iov_base + skip, len);
                        count -= len - ret;
                        if (ret)
                                break;
                }
                return count + size;
        }
        return 0;
}
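/*
 * Editor's sketch, not part of iov_iter.c: the classic buffered-write
 * pattern pairs fault_in_iov_iter_readable() with an atomic copy, so the
 * copy itself can run with page faults disabled. Assumes a page is already
 * locked/available; example_write_loop() is a hypothetical name.
 */
static ssize_t example_write_loop(struct page *page, struct iov_iter *from)
{
        size_t copied = 0;

        while (iov_iter_count(from)) {
                size_t bytes = min_t(size_t, iov_iter_count(from), PAGE_SIZE);

                /* A return equal to @bytes means nothing could be faulted in. */
                if (unlikely(fault_in_iov_iter_readable(from, bytes) == bytes))
                        return copied ? copied : -EFAULT;

                /* Copies and advances @from; may copy less than @bytes. */
                copied += copy_page_from_iter_atomic(page, 0, bytes, from);
        }
        return copied;
}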
/**
 * fault_in_iov_iter_writeable - fault in iov iterator for writing
 * @i: iterator
 * ...
 * Faults in the iterator using get_user_pages(), i.e., without triggering
 * hardware page faults.  This is primarily useful when we already know that
 * some or all of the pages in @i aren't in memory.
 * ...
 * Always returns 0 for non-user-space iterators.
 */
size_t fault_in_iov_iter_writeable(const struct iov_iter *i, size_t size)
{
        if (iter_is_ubuf(i)) {
                size_t n = min(size, iov_iter_count(i));
                n -= fault_in_safe_writeable(i->ubuf + i->iov_offset, n);
                return size - n;
        } else if (iter_is_iovec(i)) {
                size_t count = min(size, iov_iter_count(i));
                const struct iovec *p;
                size_t skip;

                size -= count;
                for (p = iter_iov(i), skip = i->iov_offset; count; p++, skip = 0) {
                        size_t len = min(count, p->iov_len - skip);
                        size_t ret;

                        if (unlikely(!len))
                                continue;
                        ret = fault_in_safe_writeable(p->iov_base + skip, len);
                        count -= len - ret;
                        if (ret)
                                break;
                }
                return count + size;
        }
        return 0;
}
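/*
 * Editor's sketch: fault_in_iov_iter_writeable() is the destination-side
 * variant, useful when a read path hit -EFAULT while holding locks that
 * forbid page faults. Real callers also save/restore the iterator state
 * around the first attempt; do_read() is a hypothetical callback.
 */
static ssize_t example_dio_read_retry(struct kiocb *iocb, struct iov_iter *to,
                                      ssize_t (*do_read)(struct kiocb *,
                                                         struct iov_iter *))
{
        ssize_t ret = do_read(iocb, to);

        if (ret == -EFAULT && iov_iter_count(to)) {
                /* Fault the pages in via get_user_pages(), then retry once. */
                size_t left = fault_in_iov_iter_writeable(to, iov_iter_count(to));

                if (left != iov_iter_count(to))
                        ret = do_read(iocb, to);
        }
        return ret;
}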
void iov_iter_init(struct iov_iter *i, unsigned int direction,
                   const struct iovec *iov, unsigned long nr_segs,
                   size_t count)
{
        WARN_ON(direction & ~(READ | WRITE));
        *i = (struct iov_iter) {
                .iter_type = ITER_IOVEC,
                /* ... */
        };
}
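/*
 * Editor's sketch: iov_iter_init() wraps an iovec array that is already in
 * kernel memory (e.g. the result of import_iovec(), further down). ITER_DEST
 * marks the iterator as a destination (the old READ direction).
 */
static void example_init_dest(struct iov_iter *iter, const struct iovec *kiov,
                              unsigned long nr_segs, size_t total)
{
        iov_iter_init(iter, ITER_DEST, kiov, nr_segs, total);
}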
size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
        if (WARN_ON_ONCE(i->data_source))
                return 0;
        if (user_backed_iter(i))
                might_fault();
        return iterate_and_advance(i, bytes, (void *)addr,
                                   /* ... */);
}
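/*
 * Editor's sketch: a minimal ->read_iter()-style producer. copy_to_iter()
 * (the checking wrapper around _copy_to_iter() from <linux/uio.h>) copies
 * kernel bytes into whatever the iterator describes and advances it.
 */
static ssize_t example_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
        static const char msg[] = "hello\n";
        size_t n = min(iov_iter_count(to), sizeof(msg));

        n = copy_to_iter(msg, n, to);
        return n ? n : -EFAULT;        /* 0 copied means the dest faulted */
}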
/**
 * _copy_mc_to_iter - copy to iter with source memory error exception handling
 * ...
 * @i: destination iterator
 * ...
 * (dax_copy_to_iter()) for dax reads (bypass page-cache and the
 * block-layer). Upon #MC read(2) aborts and returns EIO or the bytes
 * ...
 * byte-by-byte until the fault happens again. Re-triggering machine
 * checks is potentially fatal so the implementation uses source
 * alignment and poison alignment assumptions to avoid re-triggering
 * hardware exceptions.
 * ...
 */
size_t _copy_mc_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
        if (WARN_ON_ONCE(i->data_source))
                return 0;
        if (user_backed_iter(i))
                might_fault();
        return iterate_and_advance(i, bytes, (void *)addr,
                                   /* ... */);
}

size_t __copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{
        return iterate_and_advance(i, bytes, addr,
                                   /* ... */);
}

size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{
        if (WARN_ON_ONCE(!i->data_source))
                return 0;

        if (user_backed_iter(i))
                might_fault();
        return __copy_from_iter(addr, bytes, i);
}
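/*
 * Editor's sketch: the mirror-image consumer. copy_from_iter() pulls bytes
 * out of a source iterator into a kernel buffer, returning how many were
 * actually copied.
 */
static ssize_t example_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
        char buf[64];
        size_t n = copy_from_iter(buf, min(iov_iter_count(from), sizeof(buf)),
                                  from);

        return n ? n : -EFAULT;
}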
size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
        if (WARN_ON_ONCE(!i->data_source))
                return 0;

        return iterate_and_advance(i, bytes, addr,
                                   /* ... */);
}

/**
 * _copy_from_iter_flushcache - write destination through cpu cache
 * ...
 * @i: source iterator
 * ...
 * The pmem driver arranges for filesystem-dax to use this facility via
 * ...
 * instructions that strand dirty-data in the cache.
 * ...
 */
size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
{
        if (WARN_ON_ONCE(!i->data_source))
                return 0;

        return iterate_and_advance(i, bytes, addr,
                                   /* ... */);
}

/* in page_copy_sane(): */
        /*
         * ...
         * However, we mostly deal with order-0 pages and thus can
         * ...
         */
        /* ... */
        v += (page - head) << PAGE_SHIFT;
        /* ... */

size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
                         struct iov_iter *i)
{
        size_t res = 0;

        if (!page_copy_sane(page, offset, bytes))
                return 0;
        if (WARN_ON_ONCE(i->data_source))
                return 0;
        /* ... */
        while (1) {
                void *kaddr = kmap_local_page(page);
                size_t n = min(bytes, (size_t)PAGE_SIZE - offset);

                n = _copy_to_iter(kaddr + offset, n, i);
                kunmap_local(kaddr);
                res += n;
                bytes -= n;
                if (!bytes || !n)
                        break;
                /* ... */
        }
        return res;
}
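/*
 * Editor's sketch: handing a page-cache page to the iterator, as filemap
 * read paths do. copy_page_to_iter() does the kmap'ing internally and
 * copes with compound pages, so the caller only tracks offset and length.
 */
static size_t example_send_page(struct page *page, size_t offset, size_t len,
                                struct iov_iter *to)
{
        size_t copied = copy_page_to_iter(page, offset, len, to);

        /* copied < len means the destination faulted part-way through. */
        return copied;
}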
size_t copy_page_to_iter_nofault(struct page *page, unsigned offset,
                                 size_t bytes, struct iov_iter *i)
{
        /* ... */
        if (WARN_ON_ONCE(i->data_source))
                return 0;
        /* ... */
        while (1) {
                void *kaddr = kmap_local_page(page);
                size_t n = min(bytes, (size_t)PAGE_SIZE - offset);

                n = iterate_and_advance(i, n, kaddr + offset,
                                        /* ... */);
                /* ... */
                bytes -= n;
                /* ... */
        }
        /* ... */
}

size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
                           struct iov_iter *i)
{
        /* ... */
        while (1) {
                void *kaddr = kmap_local_page(page);
                size_t n = min(bytes, (size_t)PAGE_SIZE - offset);

                n = _copy_from_iter(kaddr + offset, n, i);
                /* ... */
                bytes -= n;
                /* ... */
        }
        /* ... */
}

size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
{
        return iterate_and_advance(i, bytes, NULL,
                                   /* ... */);
}
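/*
 * Editor's sketch: iov_iter_zero() fills the destination with zeroes, the
 * usual way to satisfy a read of a hole or of the region past EOF within
 * a block.
 */
static size_t example_read_hole(struct iov_iter *to, size_t hole_len)
{
        return iov_iter_zero(min(hole_len, iov_iter_count(to)), to);
}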
size_t copy_page_from_iter_atomic(struct page *page, size_t offset,
                                  size_t bytes, struct iov_iter *i)
{
        size_t n, copied = 0;
        /* ... */
        if (WARN_ON_ONCE(!i->data_source))
                return 0;

        do {
                char *p;

                n = bytes - copied;
                /* ... */
                        n = min_t(size_t, n, PAGE_SIZE - offset);
                /* ... */
                n = __copy_from_iter(p, n, i);
                /* ... */
        } while (/* ... */);
        return copied;
}

static void iov_iter_bvec_advance(struct iov_iter *i, size_t size)
{
        const struct bio_vec *bvec, *end;

        if (!i->count)
                return;
        i->count -= size;

        size += i->iov_offset;

        for (bvec = i->bvec, end = bvec + i->nr_segs; bvec < end; bvec++) {
                if (likely(size < bvec->bv_len))
                        break;
                size -= bvec->bv_len;
        }
        i->iov_offset = size;
        i->nr_segs -= bvec - i->bvec;
        i->bvec = bvec;
}

static void iov_iter_iovec_advance(struct iov_iter *i, size_t size)
{
        const struct iovec *iov, *end;

        if (!i->count)
                return;
        i->count -= size;

        size += i->iov_offset; // from beginning of current segment
        for (iov = iter_iov(i), end = iov + i->nr_segs; iov < end; iov++) {
                if (likely(size < iov->iov_len))
                        break;
                size -= iov->iov_len;
        }
        i->iov_offset = size;
        i->nr_segs -= iov - iter_iov(i);
        i->__iov = iov;
}

void iov_iter_advance(struct iov_iter *i, size_t size)
{
        if (unlikely(i->count < size))
                size = i->count;
        if (likely(iter_is_ubuf(i)) || unlikely(iov_iter_is_xarray(i))) {
                i->iov_offset += size;
                i->count -= size;
        } else if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i))) {
                /* iovec and kvec have identical layouts */
                iov_iter_iovec_advance(i, size);
        } else if (iov_iter_is_bvec(i)) {
                iov_iter_bvec_advance(i, size);
        } else if (iov_iter_is_discard(i)) {
                i->count -= size;
        }
}

void iov_iter_revert(struct iov_iter *i, size_t unroll)
{
        /* ... */
        i->count += unroll;
        if (unlikely(iov_iter_is_discard(i)))
                return;
        if (unroll <= i->iov_offset) {
                i->iov_offset -= unroll;
                return;
        }
        unroll -= i->iov_offset;
        if (iov_iter_is_xarray(i) || iter_is_ubuf(i)) {
                BUG(); /* ... */
        } else if (iov_iter_is_bvec(i)) {
                const struct bio_vec *bvec = i->bvec;
                while (1) {
                        size_t n = (--bvec)->bv_len;
                        i->nr_segs++;
                        if (unroll <= n) {
                                i->bvec = bvec;
                                i->iov_offset = n - unroll;
                                return;
                        }
                        unroll -= n;
                }
        } else { /* same logics for iovec and kvec */
                const struct iovec *iov = iter_iov(i);
                while (1) {
                        size_t n = (--iov)->iov_len;
                        i->nr_segs++;
                        if (unroll <= n) {
                                i->__iov = iov;
                                i->iov_offset = n - unroll;
                                return;
                        }
                        unroll -= n;
                }
        }
}
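/*
 * Editor's sketch: the advance/revert pairing. Assuming submit() advanced
 * the iterator over the whole @want range but only completed @done bytes,
 * the caller hands the unconsumed tail back with iov_iter_revert().
 */
static ssize_t example_submit(struct iov_iter *from, size_t want,
                              ssize_t (*submit)(struct iov_iter *))
{
        ssize_t done = submit(from);

        if (done < 0)
                return done;
        if ((size_t)done < want)
                iov_iter_revert(from, want - done);
        return done;
}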
size_t iov_iter_single_seg_count(const struct iov_iter *i)
{
        if (i->nr_segs > 1) {
                if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
                        return min(i->count, iter_iov(i)->iov_len - i->iov_offset);
                if (iov_iter_is_bvec(i))
                        return min(i->count, i->bvec->bv_len - i->iov_offset);
        }
        return i->count;
}

void iov_iter_kvec(struct iov_iter *i, unsigned int direction,
                   const struct kvec *kvec, unsigned long nr_segs,
                   size_t count)
{
        WARN_ON(direction & ~(READ | WRITE));
        *i = (struct iov_iter){
                .iter_type = ITER_KVEC,
                /* ... */
        };
}

void iov_iter_bvec(struct iov_iter *i, unsigned int direction,
                   const struct bio_vec *bvec, unsigned long nr_segs,
                   size_t count)
{
        WARN_ON(direction & ~(READ | WRITE));
        *i = (struct iov_iter){
                .iter_type = ITER_BVEC,
                /* ... */
        };
}
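/*
 * Editor's sketch: wrapping a plain kernel buffer in a single-segment kvec
 * iterator, e.g. to call somebody's ->read_iter() from within the kernel.
 */
static void example_kvec_iter(struct iov_iter *iter, struct kvec *kv,
                              void *buf, size_t len)
{
        kv->iov_base = buf;
        kv->iov_len = len;
        iov_iter_kvec(iter, ITER_DEST, kv, 1, len);
}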
/**
 * iov_iter_xarray - Initialise an I/O iterator to use the pages in an xarray
 * @i: The iterator to initialise.
 * ...
 * @count: The size of the I/O buffer in bytes.
 *
 * Set up an I/O iterator to either draw data out of the pages attached to an
 * ...
 */
void iov_iter_xarray(struct iov_iter *i, unsigned int direction,
                     struct xarray *xarray, loff_t start, size_t count)
{
        /* ... */
        *i = (struct iov_iter) {
                .iter_type = ITER_XARRAY,
                /* ... */
        };
}

/**
 * iov_iter_discard - Initialise an I/O iterator that discards data
 * @i: The iterator to initialise.
 * ...
 * @count: The size of the I/O buffer in bytes.
 *
 * Set up an I/O iterator that just discards everything that's written to it.
 * ...
 */
void iov_iter_discard(struct iov_iter *i, unsigned int direction, size_t count)
{
        /* ... */
        *i = (struct iov_iter){
                .iter_type = ITER_DISCARD,
                /* ... */
        };
}
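/*
 * Editor's sketch: a discard iterator counts and drops everything written
 * to it, which makes it handy for skipping @len bytes of an incoming
 * stream without a bounce buffer.
 */
static void example_drain_setup(struct iov_iter *iter, size_t len)
{
        iov_iter_discard(iter, ITER_DEST, len);
        /* copy_to_iter()/copy_page_to_iter() now "succeed" into nowhere. */
}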
static bool iov_iter_aligned_iovec(const struct iov_iter *i, unsigned addr_mask,
                                   size_t len_mask)
{
        size_t size = i->count;
        size_t skip = i->iov_offset;
        unsigned k;

        for (k = 0; k < i->nr_segs; k++, skip = 0) {
                const struct iovec *iov = iter_iov(i) + k;
                size_t len = iov->iov_len - skip;
                /* ... */
                if ((unsigned long)(iov->iov_base + skip) & addr_mask)
                        return false;
                size -= len;
                /* ... */
        }
        return true;
}

static bool iov_iter_aligned_bvec(const struct iov_iter *i, unsigned addr_mask,
                                  size_t len_mask)
{
        size_t size = i->count;
        unsigned skip = i->iov_offset;
        unsigned k;

        for (k = 0; k < i->nr_segs; k++, skip = 0) {
                size_t len = i->bvec[k].bv_len - skip;
                /* ... */
                if ((unsigned long)(i->bvec[k].bv_offset + skip) & addr_mask)
                        return false;
                size -= len;
                /* ... */
        }
        return true;
}
/**
 * iov_iter_is_aligned() - Check if the addresses and lengths of each segment
 *	are aligned to the parameters.
 *
 * @i: &struct iov_iter to check
 * @addr_mask: bit mask to check against the iov element's addresses
 * @len_mask: bit mask to check against the iov element's lengths
 *
 * Return: false if any addresses or lengths intersect with the provided masks
 */
bool iov_iter_is_aligned(const struct iov_iter *i, unsigned addr_mask,
                         size_t len_mask)
{
        if (likely(iter_is_ubuf(i))) {
                if (i->count & len_mask)
                        return false;
                if ((unsigned long)(i->ubuf + i->iov_offset) & addr_mask)
                        return false;
                return true;
        }

        if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
                return iov_iter_aligned_iovec(i, addr_mask, len_mask);

        if (iov_iter_is_bvec(i))
                return iov_iter_aligned_bvec(i, addr_mask, len_mask);

        if (iov_iter_is_xarray(i)) {
                if (i->count & len_mask)
                        return false;
                if ((i->xarray_start + i->iov_offset) & addr_mask)
                        return false;
        }

        return true;
}
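/*
 * Editor's sketch: the typical direct-I/O gate. With a power-of-two
 * logical block size @lbs, every segment address and length must be
 * block-aligned, or the caller falls back to buffered I/O.
 */
static bool example_dio_aligned(const struct iov_iter *iter, unsigned int lbs)
{
        unsigned int mask = lbs - 1;

        return iov_iter_is_aligned(iter, mask, mask);
}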
static unsigned long iov_iter_alignment_iovec(const struct iov_iter *i)
{
        unsigned long res = 0;
        size_t size = i->count;
        size_t skip = i->iov_offset;
        unsigned k;

        for (k = 0; k < i->nr_segs; k++, skip = 0) {
                const struct iovec *iov = iter_iov(i) + k;
                size_t len = iov->iov_len - skip;
                /* ... */
                res |= (unsigned long)iov->iov_base + skip;
                /* ... */
                size -= len;
                /* ... */
        }
        return res;
}

static unsigned long iov_iter_alignment_bvec(const struct iov_iter *i)
{
        unsigned long res = 0;
        size_t size = i->count;
        unsigned skip = i->iov_offset;
        unsigned k;

        for (k = 0; k < i->nr_segs; k++, skip = 0) {
                size_t len = i->bvec[k].bv_len - skip;

                res |= (unsigned long)i->bvec[k].bv_offset + skip;
                /* ... */
                size -= len;
                /* ... */
        }
        return res;
}

unsigned long iov_iter_alignment(const struct iov_iter *i)
{
        if (likely(iter_is_ubuf(i))) {
                size_t size = i->count;

                if (size)
                        return ((unsigned long)i->ubuf + i->iov_offset) | size;
                return 0;
        }

        /* iovec and kvec have identical layouts */
        if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
                return iov_iter_alignment_iovec(i);

        if (iov_iter_is_bvec(i))
                return iov_iter_alignment_bvec(i);

        if (iov_iter_is_xarray(i))
                return (i->xarray_start + i->iov_offset) | i->count;

        return 0;
}

unsigned long iov_iter_gap_alignment(const struct iov_iter *i)
{
        unsigned long res = 0, v = 0;
        size_t size = i->count;
        unsigned k;

        if (iter_is_ubuf(i))
                return 0;

        if (WARN_ON(!iter_is_iovec(i)))
                return ~0U;

        for (k = 0; k < i->nr_segs; k++) {
                const struct iovec *iov = iter_iov(i) + k;

                if (iov->iov_len) {
                        unsigned long base = (unsigned long)iov->iov_base;

                        /* ... */
                        v = base + iov->iov_len;
                        if (size <= iov->iov_len)
                                break;
                        size -= iov->iov_len;
                }
        }
        return res;
}

static ssize_t iter_xarray_get_pages(struct iov_iter *i,
                                     struct page ***pages, size_t maxsize,
                                     unsigned maxpages, size_t *_start_offset)
{
        /* ... */
        pos = i->xarray_start + i->iov_offset;
        /* ... */
                return -ENOMEM;
        nr = iter_xarray_populate_pages(*pages, i->xarray, index, count);
        /* ... */
        maxsize = min_t(size_t, nr * PAGE_SIZE - offset, maxsize);
        i->iov_offset += maxsize;
        i->count -= maxsize;
        return maxsize;
}

/* must be done on non-empty ITER_UBUF or ITER_IOVEC one */
static unsigned long first_iovec_segment(const struct iov_iter *i, size_t *size)
{
        size_t skip;
        long k;

        if (iter_is_ubuf(i))
                return (unsigned long)i->ubuf + i->iov_offset;

        for (k = 0, skip = i->iov_offset; k < i->nr_segs; k++, skip = 0) {
                const struct iovec *iov = iter_iov(i) + k;
                size_t len = iov->iov_len - skip;
                /* ... */
                return (unsigned long)iov->iov_base + skip;
        }
        /* ... */
}

/* must be done on non-empty ITER_BVEC one */
static struct page *first_bvec_segment(const struct iov_iter *i,
                                       size_t *size, size_t *start)
{
        struct page *page;
        size_t skip = i->iov_offset, len;

        len = i->bvec->bv_len - skip;
        /* ... */
        skip += i->bvec->bv_offset;
        page = i->bvec->bv_page + skip / PAGE_SIZE;
        *start = skip % PAGE_SIZE;
        return page;
}

static ssize_t __iov_iter_get_pages_alloc(struct iov_iter *i,
                   struct page ***pages, size_t maxsize,
                   unsigned int maxpages, size_t *start)
{
        unsigned int n, gup_flags = 0;

        if (maxsize > i->count)
                maxsize = i->count;
        /* ... */
        if (likely(user_backed_iter(i))) {
                unsigned long addr;
                int res;

                if (iov_iter_rw(i) != WRITE)
                        gup_flags |= FOLL_WRITE;
                if (i->nofault)
                        gup_flags |= FOLL_NOFAULT;

                addr = first_iovec_segment(i, &maxsize);
                /* ... */
                        return -ENOMEM;
                /* ... */
                maxsize = min_t(size_t, maxsize, res * PAGE_SIZE - *start);
                iov_iter_advance(i, maxsize);
                return maxsize;
        }
        if (iov_iter_is_bvec(i)) {
                struct page *page;

                page = first_bvec_segment(i, &maxsize, start);
                /* ... */
                        return -ENOMEM;
                /* ... */
                maxsize = min_t(size_t, maxsize, n * PAGE_SIZE - *start);
                i->count -= maxsize;
                i->iov_offset += maxsize;
                if (i->iov_offset == i->bvec->bv_len) {
                        i->iov_offset = 0;
                        i->bvec++;
                        i->nr_segs--;
                }
                return maxsize;
        }
        if (iov_iter_is_xarray(i))
                return iter_xarray_get_pages(i, pages, maxsize, maxpages, start);
        return -EFAULT;
}

ssize_t iov_iter_get_pages2(struct iov_iter *i, struct page **pages,
                size_t maxsize, unsigned maxpages, size_t *start)
{
        /* ... */
        return __iov_iter_get_pages_alloc(i, &pages, maxsize, maxpages, start);
}

ssize_t iov_iter_get_pages_alloc2(struct iov_iter *i,
                struct page ***pages, size_t maxsize, size_t *start)
{
        ssize_t len;

        /* ... */
        len = __iov_iter_get_pages_alloc(i, pages, maxsize, ~0U, start);
        /* ... */
        return len;
}
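/*
 * Editor's sketch: grabbing page references for the front of the iterator
 * with iov_iter_get_pages2(). On success the iterator has been advanced
 * past the returned range; the caller owns one reference per page and must
 * drop each with put_page() when done.
 */
static ssize_t example_grab_pages(struct iov_iter *iter, struct page **pages,
                                  unsigned int maxpages)
{
        size_t off;
        ssize_t bytes = iov_iter_get_pages2(iter, pages, SIZE_MAX, maxpages,
                                            &off);

        /* On success, data starts at offset @off within pages[0]. */
        return bytes;
}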
static int iov_npages(const struct iov_iter *i, int maxpages)
{
        size_t skip = i->iov_offset, size = i->count;
        const struct iovec *p;
        int npages = 0;

        for (p = iter_iov(i); size; skip = 0, p++) {
                unsigned offs = offset_in_page(p->iov_base + skip);
                size_t len = min(p->iov_len - skip, size);
                /* ... */
                size -= len;
                npages += DIV_ROUND_UP(offs + len, PAGE_SIZE);
                /* ... */
        }
        return npages;
}

static int bvec_npages(const struct iov_iter *i, int maxpages)
{
        size_t skip = i->iov_offset, size = i->count;
        const struct bio_vec *p;
        int npages = 0;

        for (p = i->bvec; size; skip = 0, p++) {
                unsigned offs = (p->bv_offset + skip) % PAGE_SIZE;
                size_t len = min(p->bv_len - skip, size);

                size -= len;
                npages += DIV_ROUND_UP(offs + len, PAGE_SIZE);
                /* ... */
        }
        return npages;
}

int iov_iter_npages(const struct iov_iter *i, int maxpages)
{
        if (unlikely(!i->count))
                return 0;
        if (likely(iter_is_ubuf(i))) {
                unsigned offs = offset_in_page(i->ubuf + i->iov_offset);
                int npages = DIV_ROUND_UP(offs + i->count, PAGE_SIZE);

                return min(npages, maxpages);
        }
        /* iovec and kvec have identical layouts */
        if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
                return iov_npages(i, maxpages);
        if (iov_iter_is_bvec(i))
                return bvec_npages(i, maxpages);
        if (iov_iter_is_xarray(i)) {
                unsigned offset = (i->xarray_start + i->iov_offset) % PAGE_SIZE;
                int npages = DIV_ROUND_UP(offset + i->count, PAGE_SIZE);

                return min(npages, maxpages);
        }
        return 0;
}
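/*
 * Editor's sketch: iov_iter_npages() is how callers size a page array or
 * bio before pinning anything, capped at what they can handle per round.
 */
static int example_sizing(const struct iov_iter *iter)
{
        return iov_iter_npages(iter, BIO_MAX_VECS);
}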
/* in dup_iter(): */
        return new->bvec = kmemdup(new->bvec,
                                   new->nr_segs * sizeof(struct bio_vec),
                                   flags);
        /* ... */
        return new->__iov = kmemdup(new->__iov,
                                    new->nr_segs * sizeof(struct iovec),
                                    flags);

/* in copy_compat_iovec_from_user(): */
        int ret = -EFAULT, i;
        /* ... */
                return -EFAULT;

        for (i = 0; i < nr_segs; i++) {
                /* ... */
                unsafe_get_user(len, &uiov[i].iov_len, uaccess_end);
                unsafe_get_user(buf, &uiov[i].iov_base, uaccess_end);
                /* ... */
                        ret = -EINVAL;
                        /* ... */
                iov[i].iov_base = compat_ptr(buf);
                iov[i].iov_len = len;
        }
        /* ... */

/* in copy_iovec_from_user(): */
        int ret = -EFAULT;
        /* ... */
                return -EFAULT;

        do {
                /* ... */
                unsafe_get_user(len, &uiov->iov_len, uaccess_end);
                unsafe_get_user(buf, &uiov->iov_base, uaccess_end);
                /* ... */
                        ret = -EINVAL;
                        /* ... */
                iov->iov_base = buf;
                iov->iov_len = len;
                /* ... */
        } while (--nr_segs);
        /* ... */

/* in iovec_from_user(): */
        /* ... */
                return ERR_PTR(-EINVAL);
        /* ... */
                return ERR_PTR(-ENOMEM);

static ssize_t __import_iovec_ubuf(int type, const struct iovec __user *uvec,
                                   struct iovec **iovp, struct iov_iter *i,
                                   bool compat)
{
        struct iovec *iov = *iovp;
        ssize_t ret;
        /* ... */
        ret = import_ubuf(type, iov->iov_base, iov->iov_len, i);
        if (unlikely(ret))
                return ret;
        *iovp = NULL;
        return i->count;
}

ssize_t __import_iovec(int type, const struct iovec __user *uvec,
                       unsigned nr_segs, unsigned fast_segs,
                       struct iovec **iovp, struct iov_iter *i,
                       bool compat)
{
        /* ... */
        if (nr_segs == 1)
                return __import_iovec_ubuf(type, uvec, iovp, i, compat);
        /* ... */
                        return -EFAULT;
        /* ... */
                if (len > MAX_RW_COUNT - total_len) {
                        len = MAX_RW_COUNT - total_len;
                        /* ... */
                }
        /* ... */
        iov_iter_init(i, type, iov, nr_segs, total_len);
        /* ... */
}

/**
 * import_iovec() - Copy an array of &struct iovec from userspace
 * ...
 *     on-stack) kernel array.
 * @i: Pointer to iterator that will be initialized on success.
 * ...
 * on-stack array was used or not (and regardless of whether this function
 * ...
 */
ssize_t import_iovec(int type, const struct iovec __user *uvec,
                     unsigned nr_segs, unsigned fast_segs,
                     struct iovec **iovp, struct iov_iter *i)
{
        return __import_iovec(type, uvec, nr_segs, fast_segs, iovp, i,
                              in_compat_syscall());
}

int import_ubuf(int rw, void __user *buf, size_t len, struct iov_iter *i)
{
        if (len > MAX_RW_COUNT)
                len = MAX_RW_COUNT;
        if (unlikely(!access_ok(buf, len)))
                return -EFAULT;

        iov_iter_ubuf(i, rw, buf, len);
        return 0;
}
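/*
 * Editor's sketch: the canonical readv()-style entry pattern. import_iovec()
 * copies and validates the user iovec array (using the on-stack @iovstack
 * when it fits, in which case *iovp is set to NULL), so kfree(iov) at the
 * end is always safe.
 */
static ssize_t example_readv(const struct iovec __user *uvec,
                             unsigned int nr_segs, struct iov_iter *iter)
{
        struct iovec iovstack[UIO_FASTIOV];
        struct iovec *iov = iovstack;
        ssize_t ret;

        ret = import_iovec(ITER_DEST, uvec, nr_segs, ARRAY_SIZE(iovstack),
                           &iov, iter);
        if (ret < 0)
                return ret;

        /* ... perform the I/O through @iter ... */

        kfree(iov);
        return ret;
}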
/**
 * iov_iter_restore() - Restore a &struct iov_iter to the same state as when
 *	iov_iter_save_state() was called.
 *
 * @i: &struct iov_iter to restore
 * @state: state to restore from
 *
 * Used after iov_iter_save_state() to restore @i, if operations may have
 * advanced it.
 * ...
 */
void iov_iter_restore(struct iov_iter *i, struct iov_iter_state *state)
{
        if (WARN_ON_ONCE(!iov_iter_is_bvec(i) && !iter_is_iovec(i) &&
                         !iter_is_ubuf(i)) && !iov_iter_is_kvec(i))
                return;
        i->iov_offset = state->iov_offset;
        i->count = state->count;
        if (iter_is_ubuf(i))
                return;
        /*
         * For the *vec iters, nr_segs + iov is constant - if we increment
         * ...
         */
        if (iov_iter_is_bvec(i))
                i->bvec -= state->nr_segs - i->nr_segs;
        else
                i->__iov -= state->nr_segs - i->nr_segs;
        i->nr_segs = state->nr_segs;
}
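/*
 * Editor's sketch: save/restore around an operation that may consume part
 * of the iterator before failing, e.g. retrying a direct write as buffered
 * after the -ENOTBLK fallback convention.
 */
static ssize_t example_retry(struct iov_iter *from,
                             ssize_t (*try_direct)(struct iov_iter *),
                             ssize_t (*do_buffered)(struct iov_iter *))
{
        struct iov_iter_state state;
        ssize_t ret;

        iov_iter_save_state(from, &state);
        ret = try_direct(from);
        if (ret == -ENOTBLK) {
                iov_iter_restore(from, &state);
                ret = do_buffered(from);
        }
        return ret;
}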
static ssize_t iov_iter_extract_xarray_pages(struct iov_iter *i, /* ... */)
{
        /* ... */
        loff_t pos = i->xarray_start + i->iov_offset;
        /* ... */
        XA_STATE(xas, i->xarray, index);
        /* ... */
                return -ENOMEM;
        /* ... */
        maxsize = min_t(size_t, nr * PAGE_SIZE - offset, maxsize);
        iov_iter_advance(i, maxsize);
        return maxsize;
}

static ssize_t iov_iter_extract_bvec_pages(struct iov_iter *i, /* ... */)
{
        struct page **p, *page;
        size_t skip = i->iov_offset, offset, size;
        int n;

        for (;;) {
                if (i->nr_segs == 0)
                        return 0;
                size = min(maxsize, i->bvec->bv_len - skip);
                if (size)
                        break;
                i->iov_offset = 0;
                i->nr_segs--;
                i->bvec++;
                skip = 0;
        }

        skip += i->bvec->bv_offset;
        page = i->bvec->bv_page + skip / PAGE_SIZE;
        /* ... */
                return -ENOMEM;
        /* ... */
        size = min_t(size_t, size, maxpages * PAGE_SIZE - offset);
        iov_iter_advance(i, size);
        return size;
}

static ssize_t iov_iter_extract_kvec_pages(struct iov_iter *i, /* ... */)
{
        struct page **p, *page;
        const void *kaddr;
        size_t skip = i->iov_offset, offset, len, size;
        int k;

        for (;;) {
                if (i->nr_segs == 0)
                        return 0;
                size = min(maxsize, i->kvec->iov_len - skip);
                if (size)
                        break;
                i->iov_offset = 0;
                i->nr_segs--;
                i->kvec++;
                skip = 0;
        }

        kaddr = i->kvec->iov_base + skip;
        /* ... */
                return -ENOMEM;
        /* ... */
        kaddr -= offset;
        /* ... */
                len -= seg;
        /* ... */
        size = min_t(size_t, size, maxpages * PAGE_SIZE - offset);
        iov_iter_advance(i, size);
        return size;
}

/*
 * ...
 * each of them. This should only be used if the iterator is user-backed
 * ...
 */
static ssize_t iov_iter_extract_user_pages(struct iov_iter *i, /* ... */)
{
        unsigned long addr;
        unsigned int gup_flags = 0;
        /* ... */
        if (i->data_source == ITER_DEST)
                gup_flags |= FOLL_WRITE;
        /* ... */
        if (i->nofault)
                gup_flags |= FOLL_NOFAULT;

        addr = first_iovec_segment(i, &maxsize);
        /* ... */
                return -ENOMEM;
        /* ... */
        maxsize = min_t(size_t, maxsize, res * PAGE_SIZE - offset);
        iov_iter_advance(i, maxsize);
        return maxsize;
}

/**
 * iov_iter_extract_pages - Extract a list of contiguous pages from an iterator
 * @i: The iterator to extract from
 * ...
 * @extraction_flags can have ITER_ALLOW_P2PDMA set to request peer-to-peer DMA
 * ...
 * (*) If the iterator is user-backed (ITER_IOVEC/ITER_UBUF), pins will be
 * ...
 * It may also return -ENOMEM and -EFAULT.
 */
ssize_t iov_iter_extract_pages(struct iov_iter *i,
                               struct page ***pages,
                               size_t maxsize,
                               unsigned int maxpages,
                               iov_iter_extraction_t extraction_flags,
                               size_t *offset0)
{
        maxsize = min_t(size_t, min_t(size_t, maxsize, i->count), MAX_RW_COUNT);
        if (!maxsize)
                return 0;

        if (likely(user_backed_iter(i)))
                return iov_iter_extract_user_pages(i, pages, maxsize,
                                                   maxpages, extraction_flags,
                                                   offset0);
        if (iov_iter_is_kvec(i))
                return iov_iter_extract_kvec_pages(i, pages, maxsize,
                                                   maxpages, extraction_flags,
                                                   offset0);
        if (iov_iter_is_bvec(i))
                return iov_iter_extract_bvec_pages(i, pages, maxsize,
                                                   maxpages, extraction_flags,
                                                   offset0);
        if (iov_iter_is_xarray(i))
                return iov_iter_extract_xarray_pages(i, pages, maxsize,
                                                     maxpages, extraction_flags,
                                                     offset0);
        return -EFAULT;
}
1659 return -EFAULT; in iov_iter_extract_pages()