Lines matching the identifier "i" (full matches), from lib/iov_iter.c in the Linux kernel, grouped below by the function containing each hit. Except where noted, i names the struct iov_iter being operated on.
fault_in_iov_iter_readable():
    79   * @i: iterator
    90  size_t fault_in_iov_iter_readable(const struct iov_iter *i, size_t size)
    92      if (iter_is_ubuf(i)) {
    93          size_t n = min(size, iov_iter_count(i));
    94          n -= fault_in_readable(i->ubuf + i->iov_offset, n);
    96      } else if (iter_is_iovec(i)) {
    97          size_t count = min(size, iov_iter_count(i));
   102          for (p = iter_iov(i), skip = i->iov_offset; count; p++, skip = 0) {
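For context, a minimal sketch of the retry pattern this helper exists for, loosely modeled on the buffered-write path; toy_write() and its flat destination buffer are invented for illustration:

	#include <linux/uio.h>

	/*
	 * Fault the next chunk of user memory in before copying. Real
	 * callers do this because the copy itself runs with page faults
	 * disabled (e.g. under a page lock); a plain copy_from_iter()
	 * keeps this sketch short.
	 */
	static ssize_t toy_write(void *dst, struct iov_iter *from)
	{
		size_t total = 0;

		while (iov_iter_count(from)) {
			size_t bytes = min_t(size_t, PAGE_SIZE,
					     iov_iter_count(from));
			size_t copied;

			/* Returns the number of bytes NOT faulted in. */
			if (fault_in_iov_iter_readable(from, bytes) == bytes)
				return total ? total : -EFAULT;

			copied = copy_from_iter(dst + total, bytes, from);
			if (!copied)
				break;	/* lost a race; don't spin */
			total += copied;
		}
		return total;
	}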
fault_in_iov_iter_writeable():
   121   * @i: iterator
   124   * Faults in the iterator using get_user_pages(), i.e., without triggering
   126   * some or all of the pages in @i aren't in memory.
   133  size_t fault_in_iov_iter_writeable(const struct iov_iter *i, size_t size)
   135      if (iter_is_ubuf(i)) {
   136          size_t n = min(size, iov_iter_count(i));
   137          n -= fault_in_safe_writeable(i->ubuf + i->iov_offset, n);
   139      } else if (iter_is_iovec(i)) {
   140          size_t count = min(size, iov_iter_count(i));
   145          for (p = iter_iov(i), skip = i->iov_offset; count; p++, skip = 0) {
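A sketch of the direct-I/O read fallback this variant serves; toy_dio_worker() is an assumed helper, not from this listing. Because the pages are faulted in via get_user_pages() rather than by writing to them, this is safe to call where taking a hardware page fault could deadlock on filesystem locks:

	static ssize_t toy_dio_worker(struct kiocb *iocb, struct iov_iter *to);
							/* assumed worker */

	static ssize_t toy_dio_read(struct kiocb *iocb, struct iov_iter *to)
	{
		for (;;) {
			ssize_t ret = toy_dio_worker(iocb, to);

			if (ret != -EFAULT || !iov_iter_count(to))
				return ret;
			/* Give up if not a single byte could be faulted in. */
			if (fault_in_iov_iter_writeable(to, iov_iter_count(to)) ==
			    iov_iter_count(to))
				return -EFAULT;
		}
	}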
iov_iter_init():
   162  void iov_iter_init(struct iov_iter *i, unsigned int direction,
   167      *i = (struct iov_iter) {
_copy_to_iter():
   179  size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
   181      if (WARN_ON_ONCE(i->data_source))
   183      if (user_backed_iter(i))
   185      return iterate_and_advance(i, bytes, (void *)addr,
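A minimal sketch of the common consumer, a ->read_iter() handler built on the exported copy_to_iter() wrapper (toy_msg and the handler itself are invented):

	#include <linux/fs.h>
	#include <linux/uio.h>

	static const char toy_msg[] = "hello from the kernel\n";

	static ssize_t toy_read_iter(struct kiocb *iocb, struct iov_iter *to)
	{
		loff_t pos = iocb->ki_pos;
		size_t n;

		if (pos >= sizeof(toy_msg))
			return 0;
		/* Copies into whatever backs the iterator and advances it. */
		n = copy_to_iter(toy_msg + pos, sizeof(toy_msg) - pos, to);
		iocb->ki_pos += n;
		return n;
	}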
_copy_mc_to_iter():
   214   * @i: destination iterator
   234  size_t _copy_mc_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
   236      if (WARN_ON_ONCE(i->data_source))
   238      if (user_backed_iter(i))
   240      return iterate_and_advance(i, bytes, (void *)addr,
__copy_from_iter():
   247  size_t __copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
   249      return iterate_and_advance(i, bytes, addr,

_copy_from_iter():
   253  size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
   255      if (WARN_ON_ONCE(!i->data_source))
   258      if (user_backed_iter(i))
   260      return __copy_from_iter(addr, bytes, i);
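And the mirror-image consumer, a ->write_iter() handler using the exported copy_from_iter() wrapper (the handler and its scratch buffer are invented):

	static ssize_t toy_write_iter(struct kiocb *iocb, struct iov_iter *from)
	{
		char buf[64];
		size_t n = min(sizeof(buf), iov_iter_count(from));

		n = copy_from_iter(buf, n, from);
		if (!n)
			return -EFAULT;	/* user memory went away */
		/* ... act on buf ... */
		return n;
	}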
_copy_from_iter_nocache():
   271  size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
   273      if (WARN_ON_ONCE(!i->data_source))
   276      return iterate_and_advance(i, bytes, addr,

_copy_from_iter_flushcache():
   302   * @i: source iterator
   314  size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
   316      if (WARN_ON_ONCE(!i->data_source))
   319      return iterate_and_advance(i, bytes, addr,
copy_page_to_iter():
   350          struct iov_iter *i)
   355      if (WARN_ON_ONCE(i->data_source))
   362          n = _copy_to_iter(kaddr + offset, n, i);

copy_page_to_iter_nofault():
   379          struct iov_iter *i)
   385      if (WARN_ON_ONCE(i->data_source))
   393          n = iterate_and_advance(i, n, kaddr + offset,

copy_page_from_iter():
   412          struct iov_iter *i)
   422          n = _copy_from_iter(kaddr + offset, n, i);
iov_iter_zero():
   453  size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
   455      return iterate_and_advance(i, bytes, NULL,
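A small sketch of the usual use: satisfying a read over a hole in a sparse file by filling the caller's buffers with zeroes (the surrounding function is invented):

	static ssize_t toy_read_hole(struct iov_iter *to, size_t hole_len)
	{
		/* Fills the destination with zeroes and advances it. */
		size_t n = iov_iter_zero(min(hole_len, iov_iter_count(to)), to);

		return n ? n : -EFAULT;
	}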
copy_page_from_iter_atomic():
   461          size_t bytes, struct iov_iter *i)
   467      if (WARN_ON_ONCE(!i->data_source))
   481          n = __copy_from_iter(p, n, i);
iov_iter_bvec_advance():
   491  static void iov_iter_bvec_advance(struct iov_iter *i, size_t size)
   495      if (!i->count)
   497      i->count -= size;
   499      size += i->iov_offset;
   501      for (bvec = i->bvec, end = bvec + i->nr_segs; bvec < end; bvec++) {
   506      i->iov_offset = size;
   507      i->nr_segs -= bvec - i->bvec;
   508      i->bvec = bvec;

iov_iter_iovec_advance():
   511  static void iov_iter_iovec_advance(struct iov_iter *i, size_t size)
   515      if (!i->count)
   517      i->count -= size;
   519      size += i->iov_offset; // from beginning of current segment
   520      for (iov = iter_iov(i), end = iov + i->nr_segs; iov < end; iov++) {
   525      i->iov_offset = size;
   526      i->nr_segs -= iov - iter_iov(i);
   527      i->__iov = iov;
iov_iter_advance():
   530  void iov_iter_advance(struct iov_iter *i, size_t size)
   532      if (unlikely(i->count < size))
   533          size = i->count;
   534      if (likely(iter_is_ubuf(i)) || unlikely(iov_iter_is_xarray(i))) {
   535          i->iov_offset += size;
   536          i->count -= size;
   537      } else if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i))) {
   539          iov_iter_iovec_advance(i, size);
   540      } else if (iov_iter_is_bvec(i)) {
   541          iov_iter_bvec_advance(i, size);
   542      } else if (iov_iter_is_discard(i)) {
   543          i->count -= size;
iov_iter_revert():
   548  void iov_iter_revert(struct iov_iter *i, size_t unroll)
   554      i->count += unroll;
   555      if (unlikely(iov_iter_is_discard(i)))
   557      if (unroll <= i->iov_offset) {
   558          i->iov_offset -= unroll;
   561      unroll -= i->iov_offset;
   562      if (iov_iter_is_xarray(i) || iter_is_ubuf(i)) {
   567      } else if (iov_iter_is_bvec(i)) {
   568          const struct bio_vec *bvec = i->bvec;
   571              i->nr_segs++;
   573          i->bvec = bvec;
   574          i->iov_offset = n - unroll;
   580          const struct iovec *iov = iter_iov(i);
   583              i->nr_segs++;
   585          i->__iov = iov;
   586          i->iov_offset = n - unroll;
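Together, iov_iter_advance() and iov_iter_revert() allow non-destructive peeking; a minimal sketch (the header-peek helper is invented):

	static int toy_peek_header(struct iov_iter *from, void *hdr, size_t hlen)
	{
		/* copy_from_iter() advances the iterator ... */
		size_t n = copy_from_iter(hdr, hlen, from);

		/* ... and revert walks it back, across segment boundaries. */
		iov_iter_revert(from, n);
		return n == hlen ? 0 : -EFAULT;
	}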
iov_iter_single_seg_count():
   598  size_t iov_iter_single_seg_count(const struct iov_iter *i)
   600      if (i->nr_segs > 1) {
   601          if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
   602              return min(i->count, iter_iov(i)->iov_len - i->iov_offset);
   603          if (iov_iter_is_bvec(i))
   604              return min(i->count, i->bvec->bv_len - i->iov_offset);
   606      return i->count;
iov_iter_kvec():
   610  void iov_iter_kvec(struct iov_iter *i, unsigned int direction,
   615      *i = (struct iov_iter){
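A sketch of the most common use: wrapping a plain kernel buffer so it can be handed to code that only takes an iov_iter (the helper name is invented):

	static void toy_kvec_iter(struct iov_iter *iter, void *buf, size_t len,
				  struct kvec *kv)
	{
		kv->iov_base = buf;
		kv->iov_len = len;
		/* ITER_DEST: the buffer is to be read into. */
		iov_iter_kvec(iter, ITER_DEST, kv, 1, len);
	}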
iov_iter_bvec():
   626  void iov_iter_bvec(struct iov_iter *i, unsigned int direction,
   631      *i = (struct iov_iter){
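Similarly for page-backed data, as block and networking code does (assuming a kernel recent enough to have bvec_set_page(); the helper is invented):

	static void toy_bvec_iter(struct iov_iter *iter, struct page *page,
				  unsigned int off, unsigned int len,
				  struct bio_vec *bv)
	{
		bvec_set_page(bv, page, len, off);
		/* ITER_SOURCE: data will be copied out of these pages. */
		iov_iter_bvec(iter, ITER_SOURCE, bv, 1, len);
	}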
iov_iter_xarray():
   643   * iov_iter_xarray - Initialise an I/O iterator to use the pages in an xarray
   644   * @i: The iterator to initialise.
   648   * @count: The size of the I/O buffer in bytes.
   650   * Set up an I/O iterator to either draw data out of the pages attached to an
   655  void iov_iter_xarray(struct iov_iter *i, unsigned int direction,
   659      *i = (struct iov_iter) {
iov_iter_discard():
   671   * iov_iter_discard - Initialise an I/O iterator that discards data
   672   * @i: The iterator to initialise.
   674   * @count: The size of the I/O buffer in bytes.
   676   * Set up an I/O iterator that just discards everything that's written to it.
   679  void iov_iter_discard(struct iov_iter *i, unsigned int direction, size_t count)
   682      *i = (struct iov_iter){
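A sketch of the typical use, draining bytes that must be consumed but have nowhere to go (the helper is invented):

	static void toy_drain(size_t residue)
	{
		struct iov_iter junk;

		/* A discard iterator must be a destination (ITER_DEST);
		 * anything "copied" into it simply vanishes. */
		iov_iter_discard(&junk, ITER_DEST, residue);
	}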
iov_iter_aligned_iovec():
   691  static bool iov_iter_aligned_iovec(const struct iov_iter *i, unsigned addr_mask,
   694      size_t size = i->count;
   695      size_t skip = i->iov_offset;
   698      for (k = 0; k < i->nr_segs; k++, skip = 0) {
   699          const struct iovec *iov = iter_iov(i) + k;

iov_iter_aligned_bvec():
   716  static bool iov_iter_aligned_bvec(const struct iov_iter *i, unsigned addr_mask,
   719      size_t size = i->count;
   720      unsigned skip = i->iov_offset;
   723      for (k = 0; k < i->nr_segs; k++, skip = 0) {
   724          size_t len = i->bvec[k].bv_len - skip;
   730          if ((unsigned long)(i->bvec[k].bv_offset + skip) & addr_mask)
iov_iter_is_aligned():
   744   * @i: &struct iov_iter to check
   750  bool iov_iter_is_aligned(const struct iov_iter *i, unsigned addr_mask,
   753      if (likely(iter_is_ubuf(i))) {
   754          if (i->count & len_mask)
   756          if ((unsigned long)(i->ubuf + i->iov_offset) & addr_mask)
   761      if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
   762          return iov_iter_aligned_iovec(i, addr_mask, len_mask);
   764      if (iov_iter_is_bvec(i))
   765          return iov_iter_aligned_bvec(i, addr_mask, len_mask);
   767      if (iov_iter_is_xarray(i)) {
   768          if (i->count & len_mask)
   770          if ((i->xarray_start + i->iov_offset) & addr_mask)
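A sketch of the classic caller, a direct-I/O gate that requires every segment's address and length to be block-aligned (assuming a 512-byte logical block; the wrapper is invented):

	static bool toy_dio_ok(const struct iov_iter *iter)
	{
		/* The masks name the bits that must be zero. */
		return iov_iter_is_aligned(iter, 511, 511);
	}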
iov_iter_alignment_iovec():
   778  static unsigned long iov_iter_alignment_iovec(const struct iov_iter *i)
   781      size_t size = i->count;
   782      size_t skip = i->iov_offset;
   785      for (k = 0; k < i->nr_segs; k++, skip = 0) {
   786          const struct iovec *iov = iter_iov(i) + k;

iov_iter_alignment_bvec():
   801  static unsigned long iov_iter_alignment_bvec(const struct iov_iter *i)
   804      size_t size = i->count;
   805      unsigned skip = i->iov_offset;
   808      for (k = 0; k < i->nr_segs; k++, skip = 0) {
   809          size_t len = i->bvec[k].bv_len - skip;
   810          res |= (unsigned long)i->bvec[k].bv_offset + skip;

iov_iter_alignment():
   821  unsigned long iov_iter_alignment(const struct iov_iter *i)
   823      if (likely(iter_is_ubuf(i))) {
   824          size_t size = i->count;
   826          return ((unsigned long)i->ubuf + i->iov_offset) | size;
   831      if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
   832          return iov_iter_alignment_iovec(i);
   834      if (iov_iter_is_bvec(i))
   835          return iov_iter_alignment_bvec(i);
   837      if (iov_iter_is_xarray(i))
   838          return (i->xarray_start + i->iov_offset) | i->count;
iov_iter_gap_alignment():
   844  unsigned long iov_iter_gap_alignment(const struct iov_iter *i)
   848      size_t size = i->count;
   851      if (iter_is_ubuf(i))
   854      if (WARN_ON(!iter_is_iovec(i)))
   857      for (k = 0; k < i->nr_segs; k++) {
   858          const struct iovec *iov = iter_iov(i) + k;
iter_xarray_get_pages():
   916  static ssize_t iter_xarray_get_pages(struct iov_iter *i,
   924      pos = i->xarray_start + i->iov_offset;
   932      nr = iter_xarray_populate_pages(*pages, i->xarray, index, count);
   937      i->iov_offset += maxsize;
   938      i->count -= maxsize;
first_iovec_segment():
   943  static unsigned long first_iovec_segment(const struct iov_iter *i, size_t *size)
   948      if (iter_is_ubuf(i))
   949          return (unsigned long)i->ubuf + i->iov_offset;
   951      for (k = 0, skip = i->iov_offset; k < i->nr_segs; k++, skip = 0) {
   952          const struct iovec *iov = iter_iov(i) + k;

first_bvec_segment():
   965  static struct page *first_bvec_segment(const struct iov_iter *i,
   969      size_t skip = i->iov_offset, len;
   971      len = i->bvec->bv_len - skip;
   974      skip += i->bvec->bv_offset;
   975      page = i->bvec->bv_page + skip / PAGE_SIZE;
__iov_iter_get_pages_alloc():
   980  static ssize_t __iov_iter_get_pages_alloc(struct iov_iter *i,
   986      if (maxsize > i->count)
   987          maxsize = i->count;
   993      if (likely(user_backed_iter(i))) {
   997          if (iov_iter_rw(i) != WRITE)
   999          if (i->nofault)
  1002          addr = first_iovec_segment(i, &maxsize);
  1012          iov_iter_advance(i, maxsize);
  1015      if (iov_iter_is_bvec(i)) {
  1019          page = first_bvec_segment(i, &maxsize, start);
  1027          i->count -= maxsize;
  1028          i->iov_offset += maxsize;
  1029          if (i->iov_offset == i->bvec->bv_len) {
  1030              i->iov_offset = 0;
  1031              i->bvec++;
  1032              i->nr_segs--;
  1036      if (iov_iter_is_xarray(i))
  1037          return iter_xarray_get_pages(i, pages, maxsize, maxpages, start);
iov_iter_get_pages2():
  1041  ssize_t iov_iter_get_pages2(struct iov_iter *i, struct page **pages,
  1048      return __iov_iter_get_pages_alloc(i, &pages, maxsize, maxpages, start);
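A sketch of the caller-side contract: the caller supplies the page array, gets back how many bytes were covered, and owns a reference on each returned page (the helper and its fixed 16-page limit are invented):

	static ssize_t toy_grab_pages(struct iov_iter *iter,
				      struct page **pages, size_t *start)
	{
		/* Advances the iterator past whatever was returned;
		 * *start is the offset into the first page. */
		ssize_t got = iov_iter_get_pages2(iter, pages, SIZE_MAX,
						  16, start);

		if (got < 0)
			return got;
		/* ... use the pages, then put_page() each of them ... */
		return got;
	}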
iov_iter_get_pages_alloc2():
  1052  ssize_t iov_iter_get_pages_alloc2(struct iov_iter *i,
  1059      len = __iov_iter_get_pages_alloc(i, pages, maxsize, ~0U, start);
iov_npages():
  1068  static int iov_npages(const struct iov_iter *i, int maxpages)
  1070      size_t skip = i->iov_offset, size = i->count;
  1074      for (p = iter_iov(i); size; skip = 0, p++) {

bvec_npages():
  1088  static int bvec_npages(const struct iov_iter *i, int maxpages)
  1090      size_t skip = i->iov_offset, size = i->count;
  1094      for (p = i->bvec; size; skip = 0, p++) {
iov_iter_npages():
  1106  int iov_iter_npages(const struct iov_iter *i, int maxpages)
  1108      if (unlikely(!i->count))
  1110      if (likely(iter_is_ubuf(i))) {
  1111          unsigned offs = offset_in_page(i->ubuf + i->iov_offset);
  1112          int npages = DIV_ROUND_UP(offs + i->count, PAGE_SIZE);
  1116      if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
  1117          return iov_npages(i, maxpages);
  1118      if (iov_iter_is_bvec(i))
  1119          return bvec_npages(i, maxpages);
  1120      if (iov_iter_is_xarray(i)) {
  1121          unsigned offset = (i->xarray_start + i->iov_offset) % PAGE_SIZE;
  1122          int npages = DIV_ROUND_UP(offset + i->count, PAGE_SIZE);
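Typical use is sizing an allocation before mapping the iterator, e.g. deciding how many vectors a bio needs (a sketch; BIO_MAX_VECS comes from linux/bio.h):

	#include <linux/bio.h>

	static unsigned int toy_nr_vecs(const struct iov_iter *iter)
	{
		/* Capped at the second argument; never returns more. */
		return iov_iter_npages(iter, BIO_MAX_VECS);
	}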
copy_compat_iovec_from_user() (here i is a local loop index, not an iterator):
  1150      int ret = -EFAULT, i;
  1155      for (i = 0; i < nr_segs; i++) {
  1159          unsafe_get_user(len, &uiov[i].iov_len, uaccess_end);
  1160          unsafe_get_user(buf, &uiov[i].iov_base, uaccess_end);
  1167          iov[i].iov_base = compat_ptr(buf);
  1168          iov[i].iov_len = len;
__import_iovec_ubuf():
  1248          struct iovec **iovp, struct iov_iter *i,
  1261      ret = import_ubuf(type, iov->iov_base, iov->iov_len, i);
  1265      return i->count;

__import_iovec():
  1270          struct iov_iter *i, bool compat)
  1277          return __import_iovec_ubuf(type, uvec, iovp, i, compat);
  1310      iov_iter_init(i, type, iov, nr_segs, total_len);
import_iovec():
  1329   * @i: Pointer to iterator that will be initialized on success.
  1342          struct iovec **iovp, struct iov_iter *i)
  1344      return __import_iovec(type, uvec, nr_segs, fast_segs, iovp, i,
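A sketch of the canonical readv()-style caller. On success import_iovec() returns the total byte count and *iovp points at a heap copy of the vector, or NULL if the caller's on-stack array sufficed, so an unconditional kfree() is the idiom; do_read() stands in for the actual I/O:

	#include <linux/slab.h>
	#include <linux/uio.h>

	static ssize_t toy_readv(const struct iovec __user *uvec,
				 unsigned int nr_segs, struct kiocb *iocb,
				 ssize_t (*do_read)(struct kiocb *,
						    struct iov_iter *))
	{
		struct iovec stack[UIO_FASTIOV], *iov = stack;
		struct iov_iter iter;
		ssize_t ret;

		ret = import_iovec(ITER_DEST, uvec, nr_segs, UIO_FASTIOV,
				   &iov, &iter);
		if (ret < 0)
			return ret;
		ret = do_read(iocb, &iter);
		kfree(iov);	/* NULL (a no-op) when stack[] was used */
		return ret;
	}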
import_ubuf():
  1349  int import_ubuf(int rw, void __user *buf, size_t len, struct iov_iter *i)
  1356      iov_iter_ubuf(i, rw, buf, len);
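The single-buffer flavour used by plain read()/write() paths; a sketch (the syscall-style wrapper and scratch buffer are invented):

	static ssize_t toy_write_path(void __user *buf, size_t len)
	{
		struct iov_iter iter;
		char kbuf[128];
		int ret;

		/* Fails with -EFAULT if access_ok() rejects the range. */
		ret = import_ubuf(ITER_SOURCE, buf, len, &iter);
		if (ret)
			return ret;
		return copy_from_iter(kbuf, min(len, sizeof(kbuf)), &iter);
	}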
iov_iter_restore():
  1365   * @i: &struct iov_iter to restore
  1368   * Used after iov_iter_save_state() to restore @i, if operations may
  1373  void iov_iter_restore(struct iov_iter *i, struct iov_iter_state *state)
  1375      if (WARN_ON_ONCE(!iov_iter_is_bvec(i) && !iter_is_iovec(i) &&
  1376                       !iter_is_ubuf(i) && !iov_iter_is_kvec(i)))
  1378      i->iov_offset = state->iov_offset;
  1379      i->count = state->count;
  1380      if (iter_is_ubuf(i))
  1392      if (iov_iter_is_bvec(i))
  1393          i->bvec -= state->nr_segs - i->nr_segs;
  1395          i->__iov -= state->nr_segs - i->nr_segs;
  1396      i->nr_segs = state->nr_segs;
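A sketch of the save/restore pattern (as used by io_uring-style resubmission; names invented). It only works for the iovec, kvec, bvec, and ubuf flavours checked above:

	static ssize_t toy_try_twice(struct kiocb *iocb, struct iov_iter *iter,
				     ssize_t (*op)(struct kiocb *,
						   struct iov_iter *))
	{
		struct iov_iter_state state;
		ssize_t ret;

		iov_iter_save_state(iter, &state);
		ret = op(iocb, iter);
		if (ret == -EAGAIN) {
			/* The failed attempt may have advanced the iterator. */
			iov_iter_restore(iter, &state);
			ret = op(iocb, iter);
		}
		return ret;
	}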
iov_iter_extract_xarray_pages():
  1403  static ssize_t iov_iter_extract_xarray_pages(struct iov_iter *i,
  1411      loff_t pos = i->xarray_start + i->iov_offset;
  1413      XA_STATE(xas, i->xarray, index);
  1441      iov_iter_advance(i, maxsize);
iov_iter_extract_bvec_pages():
  1449  static ssize_t iov_iter_extract_bvec_pages(struct iov_iter *i,
  1456      size_t skip = i->iov_offset, offset, size;
  1460      if (i->nr_segs == 0)
  1462      size = min(maxsize, i->bvec->bv_len - skip);
  1465          i->iov_offset = 0;
  1466          i->nr_segs--;
  1467          i->bvec++;
  1471      skip += i->bvec->bv_offset;
  1472      page = i->bvec->bv_page + skip / PAGE_SIZE;
  1484      iov_iter_advance(i, size);
iov_iter_extract_kvec_pages():
  1492  static ssize_t iov_iter_extract_kvec_pages(struct iov_iter *i,
  1500      size_t skip = i->iov_offset, offset, len, size;
  1504      if (i->nr_segs == 0)
  1506      size = min(maxsize, i->kvec->iov_len - skip);
  1509          i->iov_offset = 0;
  1510          i->nr_segs--;
  1511          i->kvec++;
  1515      kaddr = i->kvec->iov_base + skip;
  1540      iov_iter_advance(i, size);
iov_iter_extract_user_pages():
  1556  static ssize_t iov_iter_extract_user_pages(struct iov_iter *i,
  1568      if (i->data_source == ITER_DEST)
  1572      if (i->nofault)
  1575      addr = first_iovec_segment(i, &maxsize);
  1585      iov_iter_advance(i, maxsize);
iov_iter_extract_pages():
  1591   * @i: The iterator to extract from
  1632  ssize_t iov_iter_extract_pages(struct iov_iter *i,
  1639      maxsize = min_t(size_t, min_t(size_t, maxsize, i->count), MAX_RW_COUNT);
  1643      if (likely(user_backed_iter(i)))
  1644          return iov_iter_extract_user_pages(i, pages, maxsize,
  1647      if (iov_iter_is_kvec(i))
  1648          return iov_iter_extract_kvec_pages(i, pages, maxsize,
  1651      if (iov_iter_is_bvec(i))
  1652          return iov_iter_extract_bvec_pages(i, pages, maxsize,
  1655      if (iov_iter_is_xarray(i))
  1656          return iov_iter_extract_xarray_pages(i, pages, maxsize,
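Finally, a sketch of the caller-side cleanup rule: user-backed pages come back pinned (FOLL_PIN) while kvec/bvec/xarray pages are returned without any reference taken, so release must be conditional (array size and names invented):

	#include <linux/mm.h>
	#include <linux/uio.h>

	static ssize_t toy_extract(struct iov_iter *iter, struct page **pages,
				   size_t *off)
	{
		ssize_t n = iov_iter_extract_pages(iter, &pages, SIZE_MAX,
						   8, 0, off);

		if (n <= 0)
			return n;
		/* ... set up DMA to the pages ... then release: */
		if (iov_iter_extract_will_pin(iter))
			unpin_user_pages(pages, DIV_ROUND_UP(*off + n,
							     PAGE_SIZE));
		return n;
	}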