Lines Matching +full:page +full:- +full:size
1 // SPDX-License-Identifier: GPL-2.0-only
4 #include <linux/fault-inject-usercopy.h>
78 * fault_in_iov_iter_readable - fault in iov iterator for reading
80 * @size: maximum length
83 * @size. For each iovec, fault in each page that constitutes the iovec.
88 * Always returns 0 for non-userspace iterators.
90 size_t fault_in_iov_iter_readable(const struct iov_iter *i, size_t size) in fault_in_iov_iter_readable() argument
93 size_t n = min(size, iov_iter_count(i)); in fault_in_iov_iter_readable()
94 n -= fault_in_readable(i->ubuf + i->iov_offset, n); in fault_in_iov_iter_readable()
95 return size - n; in fault_in_iov_iter_readable()
97 size_t count = min(size, iov_iter_count(i)); in fault_in_iov_iter_readable()
101 size -= count; in fault_in_iov_iter_readable()
102 for (p = iter_iov(i), skip = i->iov_offset; count; p++, skip = 0) { in fault_in_iov_iter_readable()
103 size_t len = min(count, p->iov_len - skip); in fault_in_iov_iter_readable()
108 ret = fault_in_readable(p->iov_base + skip, len); in fault_in_iov_iter_readable()
109 count -= len - ret; in fault_in_iov_iter_readable()
113 return count + size; in fault_in_iov_iter_readable()
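A hedged sketch of the usual caller pattern (modeled loosely on generic_perform_write(); the helper name and flat destination buffer are illustrative): fault the next chunk in while no locks are held, then copy with page faults disabled, looping on short copies.

#include <linux/uio.h>
#include <linux/uaccess.h>

static ssize_t sketch_copy_in(char *dst, struct iov_iter *from)
{
	ssize_t written = 0;

	while (iov_iter_count(from)) {
		size_t bytes = min_t(size_t, PAGE_SIZE, iov_iter_count(from));

		/* a return equal to @bytes means nothing could be faulted in */
		if (fault_in_iov_iter_readable(from, bytes) == bytes)
			return written ? written : -EFAULT;

		pagefault_disable();	/* e.g. because locks are held */
		written += copy_from_iter(dst + written, bytes, from);
		pagefault_enable();
		/* a short copy simply loops back and faults again */
	}
	return written;
}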
120 * fault_in_iov_iter_writeable - fault in iov iterator for writing
122 * @size: maximum length
125 * hardware page faults. This is primarily useful when we already know that
131 * Always returns 0 for non-userspace iterators.

133 size_t fault_in_iov_iter_writeable(const struct iov_iter *i, size_t size) in fault_in_iov_iter_writeable() argument
136 size_t n = min(size, iov_iter_count(i)); in fault_in_iov_iter_writeable()
137 n -= fault_in_safe_writeable(i->ubuf + i->iov_offset, n); in fault_in_iov_iter_writeable()
138 return size - n; in fault_in_iov_iter_writeable()
140 size_t count = min(size, iov_iter_count(i)); in fault_in_iov_iter_writeable()
144 size -= count; in fault_in_iov_iter_writeable()
145 for (p = iter_iov(i), skip = i->iov_offset; count; p++, skip = 0) { in fault_in_iov_iter_writeable()
146 size_t len = min(count, p->iov_len - skip); in fault_in_iov_iter_writeable()
151 ret = fault_in_safe_writeable(p->iov_base + skip, len); in fault_in_iov_iter_writeable()
152 count -= len - ret; in fault_in_iov_iter_writeable()
156 return count + size; in fault_in_iov_iter_writeable()
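The writeable variant supports the mirror-image read pattern (seen in, e.g., gfs2 and btrfs direct I/O; the callback here is illustrative): if a nofault copy-out hits -EFAULT, fault the destination in writably, without the copy a plain write would do, and retry.

#include <linux/uio.h>

static ssize_t sketch_read_retry(struct iov_iter *to,
				 ssize_t (*fill)(struct iov_iter *))
{
	ssize_t ret;

again:
	ret = fill(to);		/* runs with page faults disabled */
	if (ret == -EFAULT &&
	    fault_in_iov_iter_writeable(to, iov_iter_count(to)) == 0)
		goto again;	/* everything faulted in; worth retrying */
	return ret;
}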
181 if (WARN_ON_ONCE(i->data_source)) in _copy_to_iter()
211 * _copy_mc_to_iter - copy to iter with source memory error exception handling
217 * (dax_copy_to_iter()) for dax reads (bypass page-cache and the
218 * block-layer). Upon #MC read(2) aborts and returns EIO or the bytes
224 * byte-by-byte until the fault happens again. Re-triggering machine
226 * alignment and poison alignment assumptions to avoid re-triggering
236 if (WARN_ON_ONCE(i->data_source)) in _copy_mc_to_iter()
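A short return is how poison is reported, so a hedged caller sketch (illustrative name; assumes CONFIG_ARCH_HAS_COPY_MC) turns any shortfall into -EIO rather than retrying into the same poisoned cacheline:

#include <linux/uio.h>

static int sketch_mc_read(const void *src, size_t len, struct iov_iter *to)
{
	/* stops early, without an oops, if @src contains poison */
	if (_copy_mc_to_iter(src, len, to) != len)
		return -EIO;
	return 0;
}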
255 if (WARN_ON_ONCE(!i->data_source)) in _copy_from_iter()
273 if (WARN_ON_ONCE(!i->data_source)) in _copy_from_iter_nocache()
299 * _copy_from_iter_flushcache - write destination through cpu cache
304 * The pmem driver arranges for filesystem-dax to use this facility via
310 * instructions that strand dirty-data in the cache.
316 if (WARN_ON_ONCE(!i->data_source)) in _copy_from_iter_flushcache()
326 static inline bool page_copy_sane(struct page *page, size_t offset, size_t n) in page_copy_sane() argument
328 struct page *head; in page_copy_sane()
332 * The general case needs to access the page order in order in page_copy_sane()
333 * to compute the page size. in page_copy_sane()
334 * However, we mostly deal with order-0 pages and thus can in page_copy_sane()
336 * page orders. in page_copy_sane()
341 head = compound_head(page); in page_copy_sane()
342 v += (page - head) << PAGE_SHIFT; in page_copy_sane()
349 size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes, in copy_page_to_iter() argument
353 if (!page_copy_sane(page, offset, bytes)) in copy_page_to_iter()
355 if (WARN_ON_ONCE(i->data_source)) in copy_page_to_iter()
357 page += offset / PAGE_SIZE; // first subpage in copy_page_to_iter()
360 void *kaddr = kmap_local_page(page); in copy_page_to_iter()
361 size_t n = min(bytes, (size_t)PAGE_SIZE - offset); in copy_page_to_iter()
365 bytes -= n; in copy_page_to_iter()
370 page++; in copy_page_to_iter()
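A minimal, hedged use from a read path (the helper is illustrative and assumes @len stays within the folio): hand a page-cache folio to the iterator and let copy_page_to_iter() deal with highmem mapping, compound pages, and advancing the iterator.

#include <linux/mm.h>
#include <linux/uio.h>

static ssize_t sketch_read_folio(struct folio *folio, loff_t pos,
				 size_t len, struct iov_iter *to)
{
	size_t copied;

	/* copy_page_to_iter() maps subpages itself and advances @to */
	copied = copy_page_to_iter(&folio->page,
				   offset_in_folio(folio, pos), len, to);
	return copied ? copied : -EFAULT;  /* 0 here means a bad user address */
}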
378 size_t copy_page_to_iter_nofault(struct page *page, unsigned offset, size_t bytes, in copy_page_to_iter_nofault() argument
383 if (!page_copy_sane(page, offset, bytes)) in copy_page_to_iter_nofault()
385 if (WARN_ON_ONCE(i->data_source)) in copy_page_to_iter_nofault()
387 page += offset / PAGE_SIZE; // first subpage in copy_page_to_iter_nofault()
390 void *kaddr = kmap_local_page(page); in copy_page_to_iter_nofault()
391 size_t n = min(bytes, (size_t)PAGE_SIZE - offset); in copy_page_to_iter_nofault()
398 bytes -= n; in copy_page_to_iter_nofault()
403 page++; in copy_page_to_iter_nofault()
411 size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes, in copy_page_from_iter() argument
415 if (!page_copy_sane(page, offset, bytes)) in copy_page_from_iter()
417 page += offset / PAGE_SIZE; // first subpage in copy_page_from_iter()
420 void *kaddr = kmap_local_page(page); in copy_page_from_iter()
421 size_t n = min(bytes, (size_t)PAGE_SIZE - offset); in copy_page_from_iter()
425 bytes -= n; in copy_page_from_iter()
430 page++; in copy_page_from_iter()
460 size_t copy_page_from_iter_atomic(struct page *page, size_t offset, in copy_page_from_iter_atomic() argument
465 PageHighMem(page); in copy_page_from_iter_atomic()
467 if (!page_copy_sane(page, offset, bytes)) in copy_page_from_iter_atomic()
469 if (WARN_ON_ONCE(!i->data_source)) in copy_page_from_iter_atomic()
475 n = bytes - copied; in copy_page_from_iter_atomic()
477 page += offset / PAGE_SIZE; in copy_page_from_iter_atomic()
479 n = min_t(size_t, n, PAGE_SIZE - offset); in copy_page_from_iter_atomic()
482 p = kmap_atomic(page) + offset; in copy_page_from_iter_atomic()
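A hedged sketch of the buffered-write inner step (cf. generic_perform_write(); the helper name is illustrative): the atomic variant must not sleep, so it pairs with a fault-in loop like the one sketched earlier rather than faulting itself.

#include <linux/highmem.h>
#include <linux/uio.h>

static size_t sketch_fill_page(struct page *page, size_t offset,
			       size_t bytes, struct iov_iter *i)
{
	size_t copied = copy_page_from_iter_atomic(page, offset, bytes, i);

	flush_dcache_page(page);  /* data landed via a kernel mapping */
	return copied;            /* may be short; caller faults in and retries */
}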
493 static void iov_iter_bvec_advance(struct iov_iter *i, size_t size) in iov_iter_bvec_advance() argument
497 if (!i->count) in iov_iter_bvec_advance()
499 i->count -= size; in iov_iter_bvec_advance()
501 size += i->iov_offset; in iov_iter_bvec_advance()
503 for (bvec = i->bvec, end = bvec + i->nr_segs; bvec < end; bvec++) { in iov_iter_bvec_advance()
504 if (likely(size < bvec->bv_len)) in iov_iter_bvec_advance()
506 size -= bvec->bv_len; in iov_iter_bvec_advance()
508 i->iov_offset = size; in iov_iter_bvec_advance()
509 i->nr_segs -= bvec - i->bvec; in iov_iter_bvec_advance()
510 i->bvec = bvec; in iov_iter_bvec_advance()
513 static void iov_iter_iovec_advance(struct iov_iter *i, size_t size) in iov_iter_iovec_advance() argument
517 if (!i->count) in iov_iter_iovec_advance()
519 i->count -= size; in iov_iter_iovec_advance()
521 size += i->iov_offset; // from beginning of current segment in iov_iter_iovec_advance()
522 for (iov = iter_iov(i), end = iov + i->nr_segs; iov < end; iov++) { in iov_iter_iovec_advance()
523 if (likely(size < iov->iov_len)) in iov_iter_iovec_advance()
525 size -= iov->iov_len; in iov_iter_iovec_advance()
527 i->iov_offset = size; in iov_iter_iovec_advance()
528 i->nr_segs -= iov - iter_iov(i); in iov_iter_iovec_advance()
529 i->__iov = iov; in iov_iter_iovec_advance()
532 static void iov_iter_folioq_advance(struct iov_iter *i, size_t size) in iov_iter_folioq_advance() argument
534 const struct folio_queue *folioq = i->folioq; in iov_iter_folioq_advance()
535 unsigned int slot = i->folioq_slot; in iov_iter_folioq_advance()
537 if (!i->count) in iov_iter_folioq_advance()
539 i->count -= size; in iov_iter_folioq_advance()
542 folioq = folioq->next; in iov_iter_folioq_advance()
546 size += i->iov_offset; /* From beginning of current segment. */ in iov_iter_folioq_advance()
550 if (likely(size < fsize)) in iov_iter_folioq_advance()
552 size -= fsize; in iov_iter_folioq_advance()
554 if (slot >= folioq_nr_slots(folioq) && folioq->next) { in iov_iter_folioq_advance()
555 folioq = folioq->next; in iov_iter_folioq_advance()
558 } while (size); in iov_iter_folioq_advance()
560 i->iov_offset = size; in iov_iter_folioq_advance()
561 i->folioq_slot = slot; in iov_iter_folioq_advance()
562 i->folioq = folioq; in iov_iter_folioq_advance()
565 void iov_iter_advance(struct iov_iter *i, size_t size) in iov_iter_advance() argument
567 if (unlikely(i->count < size)) in iov_iter_advance()
568 size = i->count; in iov_iter_advance()
570 i->iov_offset += size; in iov_iter_advance()
571 i->count -= size; in iov_iter_advance()
574 iov_iter_iovec_advance(i, size); in iov_iter_advance()
576 iov_iter_bvec_advance(i, size); in iov_iter_advance()
578 iov_iter_folioq_advance(i, size); in iov_iter_advance()
580 i->count -= size; in iov_iter_advance()
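Advance and revert are designed as a pair; a hedged sketch (illustrative callback) of consuming bytes optimistically and rolling them back on failure:

#include <linux/uio.h>

static ssize_t sketch_consume(struct iov_iter *i, size_t size,
			      int (*commit)(void))
{
	iov_iter_advance(i, size);		/* mark @size bytes consumed */
	if (commit() < 0) {
		iov_iter_revert(i, size);	/* walk segments back on failure */
		return -EAGAIN;
	}
	return size;
}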
587 const struct folio_queue *folioq = i->folioq; in iov_iter_folioq_revert()
588 unsigned int slot = i->folioq_slot; in iov_iter_folioq_revert()
594 folioq = folioq->prev; in iov_iter_folioq_revert()
597 slot--; in iov_iter_folioq_revert()
601 i->iov_offset = fsize - unroll; in iov_iter_folioq_revert()
604 unroll -= fsize; in iov_iter_folioq_revert()
607 i->folioq_slot = slot; in iov_iter_folioq_revert()
608 i->folioq = folioq; in iov_iter_folioq_revert()
617 i->count += unroll; in iov_iter_revert()
620 if (unroll <= i->iov_offset) { in iov_iter_revert()
621 i->iov_offset -= unroll; in iov_iter_revert()
624 unroll -= i->iov_offset; in iov_iter_revert()
631 const struct bio_vec *bvec = i->bvec; in iov_iter_revert()
633 size_t n = (--bvec)->bv_len; in iov_iter_revert()
634 i->nr_segs++; in iov_iter_revert()
636 i->bvec = bvec; in iov_iter_revert()
637 i->iov_offset = n - unroll; in iov_iter_revert()
640 unroll -= n; in iov_iter_revert()
643 i->iov_offset = 0; in iov_iter_revert()
648 size_t n = (--iov)->iov_len; in iov_iter_revert()
649 i->nr_segs++; in iov_iter_revert()
651 i->__iov = iov; in iov_iter_revert()
652 i->iov_offset = n - unroll; in iov_iter_revert()
655 unroll -= n; in iov_iter_revert()
666 if (i->nr_segs > 1) { in iov_iter_single_seg_count()
668 return min(i->count, iter_iov(i)->iov_len - i->iov_offset); in iov_iter_single_seg_count()
670 return min(i->count, i->bvec->bv_len - i->iov_offset); in iov_iter_single_seg_count()
673 return !i->count ? 0 : in iov_iter_single_seg_count()
674 umin(folioq_folio_size(i->folioq, i->folioq_slot), i->count); in iov_iter_single_seg_count()
675 return i->count; in iov_iter_single_seg_count()
712 * iov_iter_folio_queue - Initialise an I/O iterator to use the folios in a folio queue
718 * @count: The size of the I/O buffer in bytes.
742 * iov_iter_xarray - Initialise an I/O iterator to use the pages in an xarray
747 * @count: The size of the I/O buffer in bytes.
770 * iov_iter_discard - Initialise an I/O iterator that discards data
773 * @count: The size of the I/O buffer in bytes.
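These initialisers all take the direction first; a hedged example of the discard flavour, which is handy for draining bytes that must be consumed but not stored (the consumer call is elided):

#include <linux/uio.h>

static void sketch_drain(size_t bytes)
{
	struct iov_iter sink;

	iov_iter_discard(&sink, ITER_DEST, bytes);  /* reads into nowhere */
	/* pass &sink to a ->read_iter()-style producer to swallow @bytes */
}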
794 size_t size = i->count; in iov_iter_aligned_iovec() local
795 size_t skip = i->iov_offset; in iov_iter_aligned_iovec()
798 size_t len = iov->iov_len - skip; in iov_iter_aligned_iovec()
800 if (len > size) in iov_iter_aligned_iovec()
801 len = size; in iov_iter_aligned_iovec()
804 if ((unsigned long)(iov->iov_base + skip) & addr_mask) in iov_iter_aligned_iovec()
808 size -= len; in iov_iter_aligned_iovec()
810 } while (size); in iov_iter_aligned_iovec()
818 const struct bio_vec *bvec = i->bvec; in iov_iter_aligned_bvec()
819 unsigned skip = i->iov_offset; in iov_iter_aligned_bvec()
820 size_t size = i->count; in iov_iter_aligned_bvec() local
823 size_t len = bvec->bv_len; in iov_iter_aligned_bvec()
825 if (len > size) in iov_iter_aligned_bvec()
826 len = size; in iov_iter_aligned_bvec()
829 if ((unsigned long)(bvec->bv_offset + skip) & addr_mask) in iov_iter_aligned_bvec()
833 size -= len; in iov_iter_aligned_bvec()
835 } while (size); in iov_iter_aligned_bvec()
841 * iov_iter_is_aligned() - Check if the addresses and lengths of each segments
854 if (i->count & len_mask) in iov_iter_is_aligned()
856 if ((unsigned long)(i->ubuf + i->iov_offset) & addr_mask) in iov_iter_is_aligned()
869 if (i->count & len_mask) in iov_iter_is_aligned()
871 if ((i->xarray_start + i->iov_offset) & addr_mask) in iov_iter_is_aligned()
875 if (i->count & len_mask) in iov_iter_is_aligned()
877 if (i->iov_offset & addr_mask) in iov_iter_is_aligned()
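A hedged direct-I/O style gate built on iov_iter_is_aligned() (the mask derivation is illustrative; block drivers would take it from queue limits):

#include <linux/uio.h>

static bool sketch_dio_ok(const struct iov_iter *iter, unsigned int blksize)
{
	/* every segment address and length must be a @blksize multiple */
	return iov_iter_is_aligned(iter, blksize - 1, blksize - 1);
}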
889 size_t size = i->count; in iov_iter_alignment_iovec() local
890 size_t skip = i->iov_offset; in iov_iter_alignment_iovec()
893 size_t len = iov->iov_len - skip; in iov_iter_alignment_iovec()
895 res |= (unsigned long)iov->iov_base + skip; in iov_iter_alignment_iovec()
896 if (len > size) in iov_iter_alignment_iovec()
897 len = size; in iov_iter_alignment_iovec()
899 size -= len; in iov_iter_alignment_iovec()
903 } while (size); in iov_iter_alignment_iovec()
909 const struct bio_vec *bvec = i->bvec; in iov_iter_alignment_bvec()
911 size_t size = i->count; in iov_iter_alignment_bvec() local
912 unsigned skip = i->iov_offset; in iov_iter_alignment_bvec()
915 size_t len = bvec->bv_len - skip; in iov_iter_alignment_bvec()
916 res |= (unsigned long)bvec->bv_offset + skip; in iov_iter_alignment_bvec()
917 if (len > size) in iov_iter_alignment_bvec()
918 len = size; in iov_iter_alignment_bvec()
921 size -= len; in iov_iter_alignment_bvec()
923 } while (size); in iov_iter_alignment_bvec()
931 size_t size = i->count; in iov_iter_alignment() local
932 if (size) in iov_iter_alignment()
933 return ((unsigned long)i->ubuf + i->iov_offset) | size; in iov_iter_alignment()
946 return i->iov_offset | i->count; in iov_iter_alignment()
948 return (i->xarray_start + i->iov_offset) | i->count; in iov_iter_alignment()
958 size_t size = i->count; in iov_iter_gap_alignment() local
967 for (k = 0; k < i->nr_segs; k++) { in iov_iter_gap_alignment()
969 if (iov->iov_len) { in iov_iter_gap_alignment()
970 unsigned long base = (unsigned long)iov->iov_base; in iov_iter_gap_alignment()
973 v = base + iov->iov_len; in iov_iter_gap_alignment()
974 if (size <= iov->iov_len) in iov_iter_gap_alignment()
976 size -= iov->iov_len; in iov_iter_gap_alignment()
983 static int want_pages_array(struct page ***res, size_t size, in want_pages_array() argument
986 unsigned int count = DIV_ROUND_UP(size + start, PAGE_SIZE); in want_pages_array()
992 *res = kvmalloc_array(count, sizeof(struct page *), GFP_KERNEL); in want_pages_array()
1000 struct page ***ppages, size_t maxsize, in iter_folioq_get_pages()
1003 const struct folio_queue *folioq = iter->folioq; in iter_folioq_get_pages()
1004 struct page **pages; in iter_folioq_get_pages()
1005 unsigned int slot = iter->folioq_slot; in iter_folioq_get_pages()
1006 size_t extracted = 0, count = iter->count, iov_offset = iter->iov_offset; in iter_folioq_get_pages()
1009 folioq = folioq->next; in iter_folioq_get_pages()
1012 return -EIO; in iter_folioq_get_pages()
1017 return -ENOMEM; in iter_folioq_get_pages()
1024 size_t part = PAGE_SIZE - offset % PAGE_SIZE; in iter_folioq_get_pages()
1027 part = umin(part, umin(maxsize - extracted, fsize - offset)); in iter_folioq_get_pages()
1028 count -= part; in iter_folioq_get_pages()
1035 maxpages--; in iter_folioq_get_pages()
1044 if (slot == folioq_nr_slots(folioq) && folioq->next) { in iter_folioq_get_pages()
1045 folioq = folioq->next; in iter_folioq_get_pages()
1051 iter->count = count; in iter_folioq_get_pages()
1052 iter->iov_offset = iov_offset; in iter_folioq_get_pages()
1053 iter->folioq = folioq; in iter_folioq_get_pages()
1054 iter->folioq_slot = slot; in iter_folioq_get_pages()
1058 static ssize_t iter_xarray_populate_pages(struct page **pages, struct xarray *xa, in iter_xarray_populate_pages()
1062 struct page *page; in iter_xarray_populate_pages() local
1066 for (page = xas_load(&xas); page; page = xas_next(&xas)) { in iter_xarray_populate_pages()
1067 if (xas_retry(&xas, page)) in iter_xarray_populate_pages()
1070 /* Has the page moved or been split? */ in iter_xarray_populate_pages()
1071 if (unlikely(page != xas_reload(&xas))) { in iter_xarray_populate_pages()
1076 pages[ret] = find_subpage(page, xas.xa_index); in iter_xarray_populate_pages()
1086 struct page ***pages, size_t maxsize, in iter_xarray_get_pages()
1093 pos = i->xarray_start + i->iov_offset; in iter_xarray_get_pages()
1100 return -ENOMEM; in iter_xarray_get_pages()
1101 nr = iter_xarray_populate_pages(*pages, i->xarray, index, count); in iter_xarray_get_pages()
1105 maxsize = min_t(size_t, nr * PAGE_SIZE - offset, maxsize); in iter_xarray_get_pages()
1106 i->iov_offset += maxsize; in iter_xarray_get_pages()
1107 i->count -= maxsize; in iter_xarray_get_pages()
1111 /* must be done on a non-empty ITER_UBUF or ITER_IOVEC iterator */
1112 static unsigned long first_iovec_segment(const struct iov_iter *i, size_t *size) in first_iovec_segment() argument
1118 return (unsigned long)i->ubuf + i->iov_offset; in first_iovec_segment()
1120 for (k = 0, skip = i->iov_offset; k < i->nr_segs; k++, skip = 0) { in first_iovec_segment()
1122 size_t len = iov->iov_len - skip; in first_iovec_segment()
1126 if (*size > len) in first_iovec_segment()
1127 *size = len; in first_iovec_segment()
1128 return (unsigned long)iov->iov_base + skip; in first_iovec_segment()
1133 /* must be done on a non-empty ITER_BVEC iterator */
1134 static struct page *first_bvec_segment(const struct iov_iter *i, in first_bvec_segment()
1135 size_t *size, size_t *start) in first_bvec_segment() argument
1137 struct page *page; in first_bvec_segment() local
1138 size_t skip = i->iov_offset, len; in first_bvec_segment()
1140 len = i->bvec->bv_len - skip; in first_bvec_segment()
1141 if (*size > len) in first_bvec_segment()
1142 *size = len; in first_bvec_segment()
1143 skip += i->bvec->bv_offset; in first_bvec_segment()
1144 page = i->bvec->bv_page + skip / PAGE_SIZE; in first_bvec_segment()
1146 return page; in first_bvec_segment()
1150 struct page ***pages, size_t maxsize, in __iov_iter_get_pages_alloc()
1155 if (maxsize > i->count) in __iov_iter_get_pages_alloc()
1156 maxsize = i->count; in __iov_iter_get_pages_alloc()
1168 if (i->nofault) in __iov_iter_get_pages_alloc()
1176 return -ENOMEM; in __iov_iter_get_pages_alloc()
1180 maxsize = min_t(size_t, maxsize, res * PAGE_SIZE - *start); in __iov_iter_get_pages_alloc()
1185 struct page **p; in __iov_iter_get_pages_alloc()
1186 struct page *page; in __iov_iter_get_pages_alloc() local
1188 page = first_bvec_segment(i, &maxsize, start); in __iov_iter_get_pages_alloc()
1191 return -ENOMEM; in __iov_iter_get_pages_alloc()
1194 get_page(p[k] = page + k); in __iov_iter_get_pages_alloc()
1195 maxsize = min_t(size_t, maxsize, n * PAGE_SIZE - *start); in __iov_iter_get_pages_alloc()
1196 i->count -= maxsize; in __iov_iter_get_pages_alloc()
1197 i->iov_offset += maxsize; in __iov_iter_get_pages_alloc()
1198 if (i->iov_offset == i->bvec->bv_len) { in __iov_iter_get_pages_alloc()
1199 i->iov_offset = 0; in __iov_iter_get_pages_alloc()
1200 i->bvec++; in __iov_iter_get_pages_alloc()
1201 i->nr_segs--; in __iov_iter_get_pages_alloc()
1209 return -EFAULT; in __iov_iter_get_pages_alloc()
1212 ssize_t iov_iter_get_pages2(struct iov_iter *i, struct page **pages, in iov_iter_get_pages2()
1224 struct page ***pages, size_t maxsize, size_t *start) in iov_iter_get_pages_alloc2()
1241 size_t skip = i->iov_offset, size = i->count; in iov_npages() local
1245 for (p = iter_iov(i); size; skip = 0, p++) { in iov_npages()
1246 unsigned offs = offset_in_page(p->iov_base + skip); in iov_npages()
1247 size_t len = min(p->iov_len - skip, size); in iov_npages()
1250 size -= len; in iov_npages()
1261 size_t skip = i->iov_offset, size = i->count; in bvec_npages() local
1265 for (p = i->bvec; size; skip = 0, p++) { in bvec_npages()
1266 unsigned offs = (p->bv_offset + skip) % PAGE_SIZE; in bvec_npages()
1267 size_t len = min(p->bv_len - skip, size); in bvec_npages()
1269 size -= len; in bvec_npages()
1279 if (unlikely(!i->count)) in iov_iter_npages()
1282 unsigned offs = offset_in_page(i->ubuf + i->iov_offset); in iov_iter_npages()
1283 int npages = DIV_ROUND_UP(offs + i->count, PAGE_SIZE); in iov_iter_npages()
1292 unsigned offset = i->iov_offset % PAGE_SIZE; in iov_iter_npages()
1293 int npages = DIV_ROUND_UP(offset + i->count, PAGE_SIZE); in iov_iter_npages()
1297 unsigned offset = (i->xarray_start + i->iov_offset) % PAGE_SIZE; in iov_iter_npages()
1298 int npages = DIV_ROUND_UP(offset + i->count, PAGE_SIZE); in iov_iter_npages()
1309 return new->bvec = kmemdup(new->bvec, in dup_iter()
1310 new->nr_segs * sizeof(struct bio_vec), in dup_iter()
1314 return new->__iov = kmemdup(new->__iov, in dup_iter()
1315 new->nr_segs * sizeof(struct iovec), in dup_iter()
1326 int ret = -EFAULT; in copy_compat_iovec_from_user()
1330 return -EFAULT; in copy_compat_iovec_from_user()
1341 ret = -EINVAL; in copy_compat_iovec_from_user()
1357 int ret = -EFAULT; in copy_iovec_from_user()
1360 return -EFAULT; in copy_iovec_from_user()
1366 unsafe_get_user(len, &uiov->iov_len, uaccess_end); in copy_iovec_from_user()
1367 unsafe_get_user(buf, &uiov->iov_base, uaccess_end); in copy_iovec_from_user()
1371 ret = -EINVAL; in copy_iovec_from_user()
1374 iov->iov_base = buf; in copy_iovec_from_user()
1375 iov->iov_len = len; in copy_iovec_from_user()
1378 } while (--nr_segs); in copy_iovec_from_user()
1401 return ERR_PTR(-EINVAL); in iovec_from_user()
1405 return ERR_PTR(-ENOMEM); in iovec_from_user()
1438 ret = import_ubuf(type, iov->iov_base, iov->iov_len, i); in __import_iovec_ubuf()
1442 return i->count; in __import_iovec_ubuf()
1477 return -EFAULT; in __import_iovec()
1480 if (len > MAX_RW_COUNT - total_len) { in __import_iovec()
1481 len = MAX_RW_COUNT - total_len; in __import_iovec()
1496 * import_iovec() - Copy an array of &struct iovec from userspace
1505 * on-stack) kernel array.
1512 * on-stack array was used or not (and regardless of whether this function
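A hedged sketch of the canonical import_iovec() caller (names are illustrative): a small on-stack array covers the common case, a heap array is allocated transparently for larger vectors, and kfree() is safe either way because *@iov is set to NULL when the stack array was used.

#include <linux/slab.h>
#include <linux/uio.h>

static ssize_t sketch_readv(int type, const struct iovec __user *uvec,
			    unsigned long nr_segs, struct iov_iter *iter)
{
	struct iovec iovstack[UIO_FASTIOV];
	struct iovec *iov = iovstack;
	ssize_t ret;

	/* @type is ITER_DEST for readv-style, ITER_SOURCE for writev-style */
	ret = import_iovec(type, uvec, nr_segs, ARRAY_SIZE(iovstack),
			   &iov, iter);
	if (ret < 0)
		return ret;

	/* ... perform the I/O against @iter ... */

	kfree(iov);	/* no-op if the stack array was used */
	return ret;
}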
1531 return -EFAULT; in import_ubuf()
1539 * iov_iter_restore() - Restore a &struct iov_iter to the same state as when
1555 i->iov_offset = state->iov_offset; in iov_iter_restore()
1556 i->count = state->count; in iov_iter_restore()
1560 * For the *vec iters, nr_segs + iov is constant - if we increment in iov_iter_restore()
1564 * size, so we can just increment the iov pointer as they are unionized. in iov_iter_restore()
1565 * ITER_BVEC _may_ be the same size on some archs, but on others it is in iov_iter_restore()
1570 i->bvec -= state->nr_segs - i->nr_segs; in iov_iter_restore()
1572 i->__iov -= state->nr_segs - i->nr_segs; in iov_iter_restore()
1573 i->nr_segs = state->nr_segs; in iov_iter_restore()
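A hedged sketch of the save/restore pairing (io_uring is the in-tree user; the callback is illustrative): snapshot the iterator before an operation that may partially consume it, and roll back wholesale on -EAGAIN.

#include <linux/uio.h>

static ssize_t sketch_retryable(struct iov_iter *iter,
				ssize_t (*op)(struct iov_iter *))
{
	struct iov_iter_state state;
	ssize_t ret;

	iov_iter_save_state(iter, &state);
	ret = op(iter);
	if (ret == -EAGAIN)
		iov_iter_restore(iter, &state);	/* back to the snapshot */
	return ret;
}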
1581 struct page ***pages, size_t maxsize, in iov_iter_extract_folioq_pages()
1586 const struct folio_queue *folioq = i->folioq; in iov_iter_extract_folioq_pages()
1587 struct page **p; in iov_iter_extract_folioq_pages()
1589 size_t extracted = 0, offset, slot = i->folioq_slot; in iov_iter_extract_folioq_pages()
1592 folioq = folioq->next; in iov_iter_extract_folioq_pages()
1594 if (WARN_ON(i->iov_offset != 0)) in iov_iter_extract_folioq_pages()
1595 return -EIO; in iov_iter_extract_folioq_pages()
1598 offset = i->iov_offset & ~PAGE_MASK; in iov_iter_extract_folioq_pages()
1603 return -ENOMEM; in iov_iter_extract_folioq_pages()
1608 size_t offset = i->iov_offset, fsize = folioq_folio_size(folioq, slot); in iov_iter_extract_folioq_pages()
1609 size_t part = PAGE_SIZE - offset % PAGE_SIZE; in iov_iter_extract_folioq_pages()
1612 part = umin(part, umin(maxsize - extracted, fsize - offset)); in iov_iter_extract_folioq_pages()
1613 i->count -= part; in iov_iter_extract_folioq_pages()
1614 i->iov_offset += part; in iov_iter_extract_folioq_pages()
1623 if (i->iov_offset >= fsize) { in iov_iter_extract_folioq_pages()
1624 i->iov_offset = 0; in iov_iter_extract_folioq_pages()
1626 if (slot == folioq_nr_slots(folioq) && folioq->next) { in iov_iter_extract_folioq_pages()
1627 folioq = folioq->next; in iov_iter_extract_folioq_pages()
1633 i->folioq = folioq; in iov_iter_extract_folioq_pages()
1634 i->folioq_slot = slot; in iov_iter_extract_folioq_pages()
1643 struct page ***pages, size_t maxsize, in iov_iter_extract_xarray_pages()
1648 struct page *page, **p; in iov_iter_extract_xarray_pages() local
1650 loff_t pos = i->xarray_start + i->iov_offset; in iov_iter_extract_xarray_pages()
1652 XA_STATE(xas, i->xarray, index); in iov_iter_extract_xarray_pages()
1659 return -ENOMEM; in iov_iter_extract_xarray_pages()
1663 for (page = xas_load(&xas); page; page = xas_next(&xas)) { in iov_iter_extract_xarray_pages()
1664 if (xas_retry(&xas, page)) in iov_iter_extract_xarray_pages()
1667 /* Has the page moved or been split? */ in iov_iter_extract_xarray_pages()
1668 if (unlikely(page != xas_reload(&xas))) { in iov_iter_extract_xarray_pages()
1673 p[nr++] = find_subpage(page, xas.xa_index); in iov_iter_extract_xarray_pages()
1679 maxsize = min_t(size_t, nr * PAGE_SIZE - offset, maxsize); in iov_iter_extract_xarray_pages()
1689 struct page ***pages, size_t maxsize, in iov_iter_extract_bvec_pages()
1694 struct page **p, *page; in iov_iter_extract_bvec_pages() local
1695 size_t skip = i->iov_offset, offset, size; in iov_iter_extract_bvec_pages() local
1699 if (i->nr_segs == 0) in iov_iter_extract_bvec_pages()
1701 size = min(maxsize, i->bvec->bv_len - skip); in iov_iter_extract_bvec_pages()
1702 if (size) in iov_iter_extract_bvec_pages()
1704 i->iov_offset = 0; in iov_iter_extract_bvec_pages()
1705 i->nr_segs--; in iov_iter_extract_bvec_pages()
1706 i->bvec++; in iov_iter_extract_bvec_pages()
1710 skip += i->bvec->bv_offset; in iov_iter_extract_bvec_pages()
1711 page = i->bvec->bv_page + skip / PAGE_SIZE; in iov_iter_extract_bvec_pages()
1715 maxpages = want_pages_array(pages, size, offset, maxpages); in iov_iter_extract_bvec_pages()
1717 return -ENOMEM; in iov_iter_extract_bvec_pages()
1720 p[k] = page + k; in iov_iter_extract_bvec_pages()
1722 size = min_t(size_t, size, maxpages * PAGE_SIZE - offset); in iov_iter_extract_bvec_pages()
1723 iov_iter_advance(i, size); in iov_iter_extract_bvec_pages()
1724 return size; in iov_iter_extract_bvec_pages()
1732 struct page ***pages, size_t maxsize, in iov_iter_extract_kvec_pages()
1737 struct page **p, *page; in iov_iter_extract_kvec_pages() local
1739 size_t skip = i->iov_offset, offset, len, size; in iov_iter_extract_kvec_pages() local
1743 if (i->nr_segs == 0) in iov_iter_extract_kvec_pages()
1745 size = min(maxsize, i->kvec->iov_len - skip); in iov_iter_extract_kvec_pages()
1746 if (size) in iov_iter_extract_kvec_pages()
1748 i->iov_offset = 0; in iov_iter_extract_kvec_pages()
1749 i->nr_segs--; in iov_iter_extract_kvec_pages()
1750 i->kvec++; in iov_iter_extract_kvec_pages()
1754 kaddr = i->kvec->iov_base + skip; in iov_iter_extract_kvec_pages()
1758 maxpages = want_pages_array(pages, size, offset, maxpages); in iov_iter_extract_kvec_pages()
1760 return -ENOMEM; in iov_iter_extract_kvec_pages()
1763 kaddr -= offset; in iov_iter_extract_kvec_pages()
1764 len = offset + size; in iov_iter_extract_kvec_pages()
1769 page = vmalloc_to_page(kaddr); in iov_iter_extract_kvec_pages()
1771 page = virt_to_page(kaddr); in iov_iter_extract_kvec_pages()
1773 p[k] = page; in iov_iter_extract_kvec_pages()
1774 len -= seg; in iov_iter_extract_kvec_pages()
1778 size = min_t(size_t, size, maxpages * PAGE_SIZE - offset); in iov_iter_extract_kvec_pages()
1779 iov_iter_advance(i, size); in iov_iter_extract_kvec_pages()
1780 return size; in iov_iter_extract_kvec_pages()
1785 * each of them. This should only be used if the iterator is user-backed
1793 * child a copy of the page.
1796 struct page ***pages, in iov_iter_extract_user_pages()
1807 if (i->data_source == ITER_DEST) in iov_iter_extract_user_pages()
1811 if (i->nofault) in iov_iter_extract_user_pages()
1819 return -ENOMEM; in iov_iter_extract_user_pages()
1823 maxsize = min_t(size_t, maxsize, res * PAGE_SIZE - offset); in iov_iter_extract_user_pages()
1829 * iov_iter_extract_pages - Extract a list of contiguous pages from an iterator
1833 * @maxpages: The maximum size of the list of pages
1839 * of page contents can be set.
1841 * If *@pages is NULL, a page list will be allocated to the required size and
1843 * that the caller allocated a page list at least @maxpages in size and this
1846 * @extraction_flags can have ITER_ALLOW_P2PDMA set to request peer-to-peer DMA
1854 * (*) If the iterator is user-backed (ITER_IOVEC/ITER_UBUF), pins will be
1867 * sets *offset0 to the offset into the first page.
1869 * It may also return -ENOMEM and -EFAULT.
1872 struct page ***pages, in iov_iter_extract_pages()
1878 maxsize = min_t(size_t, min_t(size_t, maxsize, i->count), MAX_RW_COUNT); in iov_iter_extract_pages()
1902 return -EFAULT; in iov_iter_extract_pages()
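Putting the extraction rules above together, a hedged end-to-end sketch (illustrative helper; error handling abbreviated): extract, use, then release, unpinning only when the iterator type actually took pins.

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/uio.h>

static ssize_t sketch_extract(struct iov_iter *iter, size_t maxsize,
			      unsigned int maxpages)
{
	struct page **pages = NULL;	/* NULL => allocate the array for us */
	size_t offset0;
	ssize_t n;

	n = iov_iter_extract_pages(iter, &pages, maxsize, maxpages, 0, &offset0);
	if (n <= 0)
		return n;

	/* ... @n bytes of data start @offset0 into pages[0] ... */

	if (iov_iter_extract_will_pin(iter))	/* only user-backed iters pin */
		unpin_user_pages(pages, DIV_ROUND_UP(offset0 + n, PAGE_SIZE));
	kvfree(pages);
	return n;
}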