1 // SPDX-License-Identifier: GPL-2.0-only
2 #include <crypto/hash.h>
3 #include <linux/export.h>
4 #include <linux/bvec.h>
5 #include <linux/fault-inject-usercopy.h>
7 #include <linux/pagemap.h>
8 #include <linux/highmem.h>
9 #include <linux/slab.h>
10 #include <linux/vmalloc.h>
11 #include <linux/splice.h>
12 #include <linux/compat.h>
13 #include <net/checksum.h>
14 #include <linux/scatterlist.h>
15 #include <linux/instrumented.h>
17 #define PIPE_PARANOIA /* for now */
19 #define iterate_iovec(i, n, __v, __p, skip, STEP) { \
24 __v.iov_len = min(n, __p->iov_len - skip); \
25 if (likely(__v.iov_len)) { \
26 __v.iov_base = __p->iov_base + skip; \
28 __v.iov_len -= left; \
29 skip += __v.iov_len; \
31 if (skip < __p->iov_len) \
40 #define iterate_kvec(i, n, __v, __p, skip, STEP) { \
44 __v.iov_len = min(n, __p->iov_len - skip); \
45 if (likely(__v.iov_len)) { \
46 __v.iov_base = __p->iov_base + skip; \
48 skip += __v.iov_len; \
50 if (skip < __p->iov_len) \
59 #define iterate_bvec(i, n, __v, __bi, skip, STEP) { \
60 struct bvec_iter __start; \
61 __start.bi_size = n; \
62 __start.bi_bvec_done = skip; \
64 for_each_bvec(__v, i->bvec, __bi, __start) { \
69 #define iterate_xarray(i, n, __v, skip, STEP) { \
70 struct page *head = NULL; \
71 size_t wanted = n, seg, offset; \
72 loff_t start = i->xarray_start + skip; \
73 pgoff_t index = start >> PAGE_SHIFT; \
76 XA_STATE(xas, i->xarray, index); \
79 xas_for_each(&xas, head, ULONG_MAX) { \
80 if (xas_retry(&xas, head)) \
82 if (WARN_ON(xa_is_value(head))) \
84 if (WARN_ON(PageHuge(head))) \
86 for (j = (head->index < index) ? index - head->index : 0; \
87 j < thp_nr_pages(head); j++) { \
88 __v.bv_page = head + j; \
89 offset = (i->xarray_start + skip) & ~PAGE_MASK; \
90 seg = PAGE_SIZE - offset; \
91 __v.bv_offset = offset; \
92 __v.bv_len = min(n, seg); \
106 #define iterate_and_advance(i, n, v, I, B, K, X) { \
107 if (unlikely(i->count < n)) \
110 size_t skip = i->iov_offset; \
111 if (likely(iter_is_iovec(i))) { \
112 const struct iovec *iov; \
114 iterate_iovec(i, n, v, iov, skip, (I)) \
115 i->nr_segs -= iov - i->iov; \
117 } else if (iov_iter_is_bvec(i)) { \
118 const struct bio_vec *bvec = i->bvec; \
120 struct bvec_iter __bi; \
121 iterate_bvec(i, n, v, __bi, skip, (B)) \
122 i->bvec = __bvec_iter_bvec(i->bvec, __bi); \
123 i->nr_segs -= i->bvec - bvec; \
124 skip = __bi.bi_bvec_done; \
125 } else if (iov_iter_is_kvec(i)) { \
126 const struct kvec *kvec; \
128 iterate_kvec(i, n, v, kvec, skip, (K)) \
129 i->nr_segs -= kvec - i->kvec; \
131 } else if (iov_iter_is_xarray(i)) { \
133 iterate_xarray(i, n, v, skip, (X)) \
136 i->iov_offset = skip; \
140 static int copyout(void __user *to, const void *from, size_t n)
142 if (should_fail_usercopy())
144 if (access_ok(to, n)) {
145 instrument_copy_to_user(to, from, n);
146 n = raw_copy_to_user(to, from, n);
151 static int copyin(void *to, const void __user *from, size_t n)
153 if (should_fail_usercopy())
155 if (access_ok(from, n)) {
156 instrument_copy_from_user(to, from, n);
157 n = raw_copy_from_user(to, from, n);
162 static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t bytes,
165 size_t skip, copy, left, wanted;
166 const struct iovec *iov;
170 if (unlikely(bytes > i->count))
173 if (unlikely(!bytes))
179 skip = i->iov_offset;
180 buf = iov->iov_base + skip;
181 copy = min(bytes, iov->iov_len - skip);
183 if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_pages_writeable(buf, copy)) {
184 kaddr = kmap_atomic(page);
185 from = kaddr + offset;
187 /* first chunk, usually the only one */
188 left = copyout(buf, from, copy);
194 while (unlikely(!left && bytes)) {
197 copy = min(bytes, iov->iov_len);
198 left = copyout(buf, from, copy);
204 if (likely(!bytes)) {
205 kunmap_atomic(kaddr);
208 offset = from - kaddr;
210 kunmap_atomic(kaddr);
211 copy = min(bytes, iov->iov_len - skip);
213 /* Too bad - revert to non-atomic kmap */
216 from = kaddr + offset;
217 left = copyout(buf, from, copy);
222 while (unlikely(!left && bytes)) {
225 copy = min(bytes, iov->iov_len);
226 left = copyout(buf, from, copy);
235 if (skip == iov->iov_len) {
239 i->count -= wanted - bytes;
240 i->nr_segs -= iov - i->iov;
242 i->iov_offset = skip;
243 return wanted - bytes;
246 static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t bytes,
249 size_t skip, copy, left, wanted;
250 const struct iovec *iov;
254 if (unlikely(bytes > i->count))
257 if (unlikely(!bytes))
263 skip = i->iov_offset;
264 buf = iov->iov_base + skip;
265 copy = min(bytes, iov->iov_len - skip);
267 if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_pages_readable(buf, copy)) {
268 kaddr = kmap_atomic(page);
271 /* first chunk, usually the only one */
272 left = copyin(to, buf, copy);
278 while (unlikely(!left && bytes)) {
281 copy = min(bytes, iov->iov_len);
282 left = copyin(to, buf, copy);
288 if (likely(!bytes)) {
289 kunmap_atomic(kaddr);
294 kunmap_atomic(kaddr);
295 copy = min(bytes, iov->iov_len - skip);
297 /* Too bad - revert to non-atomic kmap */
301 left = copyin(to, buf, copy);
306 while (unlikely(!left && bytes)) {
309 copy = min(bytes, iov->iov_len);
310 left = copyin(to, buf, copy);
319 if (skip == iov->iov_len) {
323 i->count -= wanted - bytes;
324 i->nr_segs -= iov - i->iov;
326 i->iov_offset = skip;
327 return wanted - bytes;
331 static bool sanity(const struct iov_iter *i)
333 struct pipe_inode_info *pipe = i->pipe;
334 unsigned int p_head = pipe->head;
335 unsigned int p_tail = pipe->tail;
336 unsigned int p_mask = pipe->ring_size - 1;
337 unsigned int p_occupancy = pipe_occupancy(p_head, p_tail);
338 unsigned int i_head = i->head;
342 struct pipe_buffer *p;
343 if (unlikely(p_occupancy == 0))
344 goto Bad; // pipe must be non-empty
345 if (unlikely(i_head != p_head - 1))
346 goto Bad; // must be at the last buffer...
348 p = &pipe->bufs[i_head & p_mask];
349 if (unlikely(p->offset + p->len != i->iov_offset))
350 goto Bad; // ... at the end of segment
352 if (i_head != p_head)
353 goto Bad; // must be right after the last buffer
357 printk(KERN_ERR "idx = %d, offset = %zd\n", i_head, i->iov_offset);
358 printk(KERN_ERR "head = %d, tail = %d, buffers = %d\n",
359 p_head, p_tail, pipe->ring_size);
360 for (idx = 0; idx < pipe->ring_size; idx++)
361 printk(KERN_ERR "[%p %p %d %d]\n",
363 pipe->bufs[idx].page,
364 pipe->bufs[idx].offset,
365 pipe->bufs[idx].len);
370 #define sanity(i) true
373 static size_t copy_page_to_iter_pipe(struct page *page, size_t offset, size_t bytes,
376 struct pipe_inode_info *pipe = i->pipe;
377 struct pipe_buffer *buf;
378 unsigned int p_tail = pipe->tail;
379 unsigned int p_mask = pipe->ring_size - 1;
380 unsigned int i_head = i->head;
383 if (unlikely(bytes > i->count))
386 if (unlikely(!bytes))
393 buf = &pipe->bufs[i_head & p_mask];
395 if (offset == off && buf->page == page) {
396 /* merge with the last one */
398 i->iov_offset += bytes;
402 buf = &pipe->bufs[i_head & p_mask];
404 if (pipe_full(i_head, p_tail, pipe->max_usage))
407 buf->ops = &page_cache_pipe_buf_ops;
410 buf->offset = offset;
413 pipe->head = i_head + 1;
414 i->iov_offset = offset + bytes;
422 * Fault in one or more iovecs of the given iov_iter, to a maximum length of
423 * @bytes. For each iovec, fault in each page that constitutes the iovec.
425 * Return 0 on success, or non-zero if the memory could not be accessed (i.e.
426 * because it is an invalid address).
428 int iov_iter_fault_in_readable(const struct iov_iter *i, size_t bytes)
430 if (iter_is_iovec(i)) {
431 const struct iovec *p;
434 if (bytes > i->count)
436 for (p = i->iov, skip = i->iov_offset; bytes; p++, skip = 0) {
437 size_t len = min(bytes, p->iov_len - skip);
442 err = fault_in_pages_readable(p->iov_base + skip, len);
450 EXPORT_SYMBOL(iov_iter_fault_in_readable);
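/*
 * Illustrative sketch (not part of the original file): the buffered-write
 * pattern this helper exists for.  The user pages are pre-faulted while no
 * page lock is held, then the copy is done with the non-faulting atomic
 * variant; a short copy from copy_page_from_iter_atomic() means "drop the
 * lock, fault again and retry".  The prepare/commit steps are hypothetical
 * placeholders for whatever locking the real caller does.
 */
#if 0	/* example only -- not compiled */
static ssize_t example_write_to_page(struct page *page, loff_t pos,
				     struct iov_iter *from)
{
	size_t offset = offset_in_page(pos);
	size_t bytes = min_t(size_t, PAGE_SIZE - offset, iov_iter_count(from));
	size_t copied;

	if (iov_iter_fault_in_readable(from, bytes))
		return -EFAULT;		/* source memory is not accessible */

	/* ... lock/prepare @page here ... */
	copied = copy_page_from_iter_atomic(page, offset, bytes, from);
	/* ... commit/unlock @page here; retry if @copied < @bytes ... */

	return copied;
}
#endif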
452 void iov_iter_init(struct iov_iter *i, unsigned int direction,
453 const struct iovec *iov, unsigned long nr_segs,
456 WARN_ON(direction & ~(READ | WRITE));
457 WARN_ON_ONCE(uaccess_kernel());
458 *i = (struct iov_iter) {
459 .iter_type = ITER_IOVEC,
460 .data_source = direction,
467 EXPORT_SYMBOL(iov_iter_init);
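/*
 * Illustrative sketch (not part of the original file): building an
 * ITER_IOVEC destination over a single user buffer and copying kernel data
 * into it.  The names below are hypothetical.
 */
#if 0	/* example only -- not compiled */
static ssize_t example_copy_result_to_user(void __user *ubuf, size_t len,
					   const void *result, size_t result_len)
{
	struct iovec iov = { .iov_base = ubuf, .iov_len = len };
	struct iov_iter to;

	/* READ: data flows from the kernel into the (user) iterator */
	iov_iter_init(&to, READ, &iov, 1, len);
	return copy_to_iter(result, min(result_len, len), &to);
}
#endif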
469 static inline bool allocated(struct pipe_buffer *buf)
471 return buf->ops == &default_pipe_buf_ops;
474 static inline void data_start(const struct iov_iter *i,
475 unsigned int *iter_headp, size_t *offp)
477 unsigned int p_mask = i->pipe->ring_size - 1;
478 unsigned int iter_head = i->head;
479 size_t off = i->iov_offset;
481 if (off && (!allocated(&i->pipe->bufs[iter_head & p_mask]) ||
486 *iter_headp = iter_head;
490 static size_t push_pipe(struct iov_iter *i, size_t size,
491 int *iter_headp, size_t *offp)
493 struct pipe_inode_info *pipe = i->pipe;
494 unsigned int p_tail = pipe->tail;
495 unsigned int p_mask = pipe->ring_size - 1;
496 unsigned int iter_head;
500 if (unlikely(size > i->count))
506 data_start(i, &iter_head, &off);
507 *iter_headp = iter_head;
510 left -= PAGE_SIZE - off;
512 pipe->bufs[iter_head & p_mask].len += size;
515 pipe->bufs[iter_head & p_mask].len = PAGE_SIZE;
518 while (!pipe_full(iter_head, p_tail, pipe->max_usage)) {
519 struct pipe_buffer *buf = &pipe->bufs[iter_head & p_mask];
520 struct page *page = alloc_page(GFP_USER);
524 buf->ops = &default_pipe_buf_ops;
527 buf->len = min_t(ssize_t, left, PAGE_SIZE);
530 pipe->head = iter_head;
538 static size_t copy_pipe_to_iter(const void *addr, size_t bytes,
541 struct pipe_inode_info *pipe = i->pipe;
542 unsigned int p_mask = pipe->ring_size - 1;
549 bytes = n = push_pipe(i, bytes, &i_head, &off);
553 size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
554 memcpy_to_page(pipe->bufs[i_head & p_mask].page, off, addr, chunk);
556 i->iov_offset = off + chunk;
566 static __wsum csum_and_memcpy(void *to, const void *from, size_t len,
567 __wsum sum, size_t off)
569 __wsum next = csum_partial_copy_nocheck(from, to, len);
570 return csum_block_add(sum, next, off);
573 static size_t csum_and_copy_to_pipe_iter(const void *addr, size_t bytes,
574 struct csum_state *csstate,
577 struct pipe_inode_info *pipe = i->pipe;
578 unsigned int p_mask = pipe->ring_size - 1;
579 __wsum sum = csstate->csum;
580 size_t off = csstate->off;
587 bytes = n = push_pipe(i, bytes, &i_head, &r);
591 size_t chunk = min_t(size_t, n, PAGE_SIZE - r);
592 char *p = kmap_atomic(pipe->bufs[i_head & p_mask].page);
593 sum = csum_and_memcpy(p + r, addr, chunk, sum, off);
596 i->iov_offset = r + chunk;
609 size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
611 const char *from = addr;
612 if (unlikely(iov_iter_is_pipe(i)))
613 return copy_pipe_to_iter(addr, bytes, i);
614 if (iter_is_iovec(i))
616 iterate_and_advance(i, bytes, v,
617 copyout(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len),
618 memcpy_to_page(v.bv_page, v.bv_offset,
619 (from += v.bv_len) - v.bv_len, v.bv_len),
620 memcpy(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len),
621 memcpy_to_page(v.bv_page, v.bv_offset,
622 (from += v.bv_len) - v.bv_len, v.bv_len)
627 EXPORT_SYMBOL(_copy_to_iter);
629 #ifdef CONFIG_ARCH_HAS_COPY_MC
630 static int copyout_mc(void __user *to, const void *from, size_t n)
632 if (access_ok(to, n)) {
633 instrument_copy_to_user(to, from, n);
634 n = copy_mc_to_user((__force void *) to, from, n);
639 static unsigned long copy_mc_to_page(struct page *page, size_t offset,
640 const char *from, size_t len)
645 to = kmap_atomic(page);
646 ret = copy_mc_to_kernel(to + offset, from, len);
652 static size_t copy_mc_pipe_to_iter(const void *addr, size_t bytes,
655 struct pipe_inode_info *pipe = i->pipe;
656 unsigned int p_mask = pipe->ring_size - 1;
658 size_t n, off, xfer = 0;
663 bytes = n = push_pipe(i, bytes, &i_head, &off);
667 size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
670 rem = copy_mc_to_page(pipe->bufs[i_head & p_mask].page,
673 i->iov_offset = off + chunk - rem;
687 * _copy_mc_to_iter - copy to iter with source memory error exception handling
688 * @addr: source kernel address
689 * @bytes: total transfer length
690 * @iter: destination iterator
692 * The pmem driver deploys this for the dax operation
693 * (dax_copy_to_iter()) for dax reads (bypass page-cache and the
694 * block-layer). Upon #MC, read(2) aborts and returns EIO or the bytes
695 * successfully copied.
697 * The main differences between this and typical _copy_to_iter() are:
699 * * Typical tail/residue handling after a fault retries the copy
700 * byte-by-byte until the fault happens again. Re-triggering machine
701 * checks is potentially fatal so the implementation uses source
702 * alignment and poison alignment assumptions to avoid re-triggering
703 * hardware exceptions.
705 * * ITER_KVEC, ITER_PIPE, and ITER_BVEC can return short copies.
706 * Compare to copy_to_iter() where only ITER_IOVEC attempts might return a short copy.
709 size_t _copy_mc_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
711 const char *from = addr;
712 unsigned long rem, curr_addr, s_addr = (unsigned long) addr;
714 if (unlikely(iov_iter_is_pipe(i)))
715 return copy_mc_pipe_to_iter(addr, bytes, i);
716 if (iter_is_iovec(i))
718 iterate_and_advance(i, bytes, v,
719 copyout_mc(v.iov_base, (from += v.iov_len) - v.iov_len,
722 rem = copy_mc_to_page(v.bv_page, v.bv_offset,
723 (from += v.bv_len) - v.bv_len, v.bv_len);
725 curr_addr = (unsigned long) from;
726 bytes = curr_addr - s_addr - rem;
731 rem = copy_mc_to_kernel(v.iov_base, (from += v.iov_len)
732 - v.iov_len, v.iov_len);
734 curr_addr = (unsigned long) from;
735 bytes = curr_addr - s_addr - rem;
740 rem = copy_mc_to_page(v.bv_page, v.bv_offset,
741 (from += v.bv_len) - v.bv_len, v.bv_len);
743 curr_addr = (unsigned long) from;
744 bytes = curr_addr - s_addr - rem;
746 i->iov_offset += bytes;
755 EXPORT_SYMBOL_GPL(_copy_mc_to_iter);
756 #endif /* CONFIG_ARCH_HAS_COPY_MC */
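/*
 * Illustrative sketch (not part of the original file): how a dax-style read
 * path might consume the short-copy semantics documented above.  A copy cut
 * short by a machine check is reported as -EIO only if nothing at all was
 * transferred; otherwise the bytes that made it are returned.
 */
#if 0	/* example only -- not compiled */
static ssize_t example_media_read(const void *kaddr, size_t len,
				  struct iov_iter *iter)
{
	size_t copied = _copy_mc_to_iter(kaddr, len, iter);

	if (!copied && len)
		return -EIO;	/* poison was hit before any byte was copied */
	return copied;
}
#endif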
758 size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
761 if (unlikely(iov_iter_is_pipe(i))) {
765 if (iter_is_iovec(i))
767 iterate_and_advance(i, bytes, v,
768 copyin((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
769 memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
770 v.bv_offset, v.bv_len),
771 memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
772 memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
773 v.bv_offset, v.bv_len)
778 EXPORT_SYMBOL(_copy_from_iter);
780 size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
783 if (unlikely(iov_iter_is_pipe(i))) {
787 iterate_and_advance(i, bytes, v,
788 __copy_from_user_inatomic_nocache((to += v.iov_len) - v.iov_len,
789 v.iov_base, v.iov_len),
790 memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
791 v.bv_offset, v.bv_len),
792 memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
793 memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
794 v.bv_offset, v.bv_len)
799 EXPORT_SYMBOL(_copy_from_iter_nocache);
801 #ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
803 * _copy_from_iter_flushcache - write destination through cpu cache
804 * @addr: destination kernel address
805 * @bytes: total transfer length
806 * @iter: source iterator
808 * The pmem driver arranges for filesystem-dax to use this facility via
809 * dax_copy_from_iter() to ensure that writes to persistent memory
810 * are flushed through the CPU cache. It is differentiated from
811 * _copy_from_iter_nocache() in that it guarantees all data is flushed for
812 * all iterator types. _copy_from_iter_nocache() only attempts to
813 * bypass the cache for the ITER_IOVEC case, and on some archs may use
814 * instructions that strand dirty-data in the cache.
816 size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
819 if (unlikely(iov_iter_is_pipe(i))) {
823 iterate_and_advance(i, bytes, v,
824 __copy_from_user_flushcache((to += v.iov_len) - v.iov_len,
825 v.iov_base, v.iov_len),
826 memcpy_page_flushcache((to += v.bv_len) - v.bv_len, v.bv_page,
827 v.bv_offset, v.bv_len),
828 memcpy_flushcache((to += v.iov_len) - v.iov_len, v.iov_base,
830 memcpy_page_flushcache((to += v.bv_len) - v.bv_len, v.bv_page,
831 v.bv_offset, v.bv_len)
836 EXPORT_SYMBOL_GPL(_copy_from_iter_flushcache);
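/*
 * Illustrative sketch (not part of the original file): a pmem-style write
 * path as described above -- user data is copied into a kernel mapping of
 * persistent memory with the stores pushed past the CPU cache.  @pmem_addr
 * is a hypothetical mapping obtained elsewhere.
 */
#if 0	/* example only -- not compiled */
static size_t example_pmem_write(void *pmem_addr, size_t len,
				 struct iov_iter *from)
{
	/* a short return here means the source faulted part-way through */
	return _copy_from_iter_flushcache(pmem_addr, len, from);
}
#endif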
839 static inline bool page_copy_sane(struct page *page, size_t offset, size_t n)
842 size_t v = n + offset;
845 * The general case needs to access the page order in order
846 * to compute the page size.
847 * However, we mostly deal with order-0 pages and thus can
848 * avoid a possible cache line miss for requests that fit all
851 if (n <= v && v <= PAGE_SIZE)
854 head = compound_head(page);
855 v += (page - head) << PAGE_SHIFT;
857 if (likely(n <= v && v <= (page_size(head))))
863 static size_t __copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
866 if (likely(iter_is_iovec(i)))
867 return copy_page_to_iter_iovec(page, offset, bytes, i);
868 if (iov_iter_is_bvec(i) || iov_iter_is_kvec(i) || iov_iter_is_xarray(i)) {
869 void *kaddr = kmap_atomic(page);
870 size_t wanted = copy_to_iter(kaddr + offset, bytes, i);
871 kunmap_atomic(kaddr);
874 if (iov_iter_is_pipe(i))
875 return copy_page_to_iter_pipe(page, offset, bytes, i);
876 if (unlikely(iov_iter_is_discard(i))) {
877 if (unlikely(i->count < bytes))
886 size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
890 if (unlikely(!page_copy_sane(page, offset, bytes)))
892 page += offset / PAGE_SIZE; // first subpage
895 size_t n = __copy_page_to_iter(page, offset,
896 min(bytes, (size_t)PAGE_SIZE - offset), i);
902 if (offset == PAGE_SIZE) {
909 EXPORT_SYMBOL(copy_page_to_iter);
911 size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
914 if (unlikely(!page_copy_sane(page, offset, bytes)))
916 if (likely(iter_is_iovec(i)))
917 return copy_page_from_iter_iovec(page, offset, bytes, i);
918 if (iov_iter_is_bvec(i) || iov_iter_is_kvec(i) || iov_iter_is_xarray(i)) {
919 void *kaddr = kmap_atomic(page);
920 size_t wanted = _copy_from_iter(kaddr + offset, bytes, i);
921 kunmap_atomic(kaddr);
927 EXPORT_SYMBOL(copy_page_from_iter);
929 static size_t pipe_zero(size_t bytes, struct iov_iter *i)
931 struct pipe_inode_info *pipe = i->pipe;
932 unsigned int p_mask = pipe->ring_size - 1;
939 bytes = n = push_pipe(i, bytes, &i_head, &off);
944 size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
945 memzero_page(pipe->bufs[i_head & p_mask].page, off, chunk);
947 i->iov_offset = off + chunk;
956 size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
958 if (unlikely(iov_iter_is_pipe(i)))
959 return pipe_zero(bytes, i);
960 iterate_and_advance(i, bytes, v,
961 clear_user(v.iov_base, v.iov_len),
962 memzero_page(v.bv_page, v.bv_offset, v.bv_len),
963 memset(v.iov_base, 0, v.iov_len),
964 memzero_page(v.bv_page, v.bv_offset, v.bv_len)
969 EXPORT_SYMBOL(iov_iter_zero);
971 size_t copy_page_from_iter_atomic(struct page *page, unsigned offset, size_t bytes,
974 char *kaddr = kmap_atomic(page), *p = kaddr + offset;
975 if (unlikely(!page_copy_sane(page, offset, bytes))) {
976 kunmap_atomic(kaddr);
979 if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
980 kunmap_atomic(kaddr);
984 iterate_and_advance(i, bytes, v,
985 copyin((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
986 memcpy_from_page((p += v.bv_len) - v.bv_len, v.bv_page,
987 v.bv_offset, v.bv_len),
988 memcpy((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
989 memcpy_from_page((p += v.bv_len) - v.bv_len, v.bv_page,
990 v.bv_offset, v.bv_len)
992 kunmap_atomic(kaddr);
995 EXPORT_SYMBOL(copy_page_from_iter_atomic);
997 static inline void pipe_truncate(struct iov_iter *i)
999 struct pipe_inode_info *pipe = i->pipe;
1000 unsigned int p_tail = pipe->tail;
1001 unsigned int p_head = pipe->head;
1002 unsigned int p_mask = pipe->ring_size - 1;
1004 if (!pipe_empty(p_head, p_tail)) {
1005 struct pipe_buffer *buf;
1006 unsigned int i_head = i->head;
1007 size_t off = i->iov_offset;
1010 buf = &pipe->bufs[i_head & p_mask];
1011 buf->len = off - buf->offset;
1014 while (p_head != i_head) {
1016 pipe_buf_release(pipe, &pipe->bufs[p_head & p_mask]);
1019 pipe->head = p_head;
1023 static void pipe_advance(struct iov_iter *i, size_t size)
1025 struct pipe_inode_info *pipe = i->pipe;
1027 struct pipe_buffer *buf;
1028 unsigned int p_mask = pipe->ring_size - 1;
1029 unsigned int i_head = i->head;
1030 size_t off = i->iov_offset, left = size;
1032 if (off) /* make it relative to the beginning of buffer */
1033 left += off - pipe->bufs[i_head & p_mask].offset;
1035 buf = &pipe->bufs[i_head & p_mask];
1036 if (left <= buf->len)
1042 i->iov_offset = buf->offset + left;
1045 /* ... and discard everything past that point */
1049 static void iov_iter_bvec_advance(struct iov_iter *i, size_t size)
1051 struct bvec_iter bi;
1053 bi.bi_size = i->count;
1054 bi.bi_bvec_done = i->iov_offset;
1056 bvec_iter_advance(i->bvec, &bi, size);
1058 i->bvec += bi.bi_idx;
1059 i->nr_segs -= bi.bi_idx;
1060 i->count = bi.bi_size;
1061 i->iov_offset = bi.bi_bvec_done;
1064 static void iov_iter_iovec_advance(struct iov_iter *i, size_t size)
1066 const struct iovec *iov, *end;
1072 size += i->iov_offset; // from beginning of current segment
1073 for (iov = i->iov, end = iov + i->nr_segs; iov < end; iov++) {
1074 if (likely(size < iov->iov_len))
1076 size -= iov->iov_len;
1078 i->iov_offset = size;
1079 i->nr_segs -= iov - i->iov;
1083 void iov_iter_advance(struct iov_iter *i, size_t size)
1085 if (unlikely(i->count < size))
1087 if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i))) {
1088 /* iovec and kvec have identical layouts */
1089 iov_iter_iovec_advance(i, size);
1090 } else if (iov_iter_is_bvec(i)) {
1091 iov_iter_bvec_advance(i, size);
1092 } else if (iov_iter_is_pipe(i)) {
1093 pipe_advance(i, size);
1094 } else if (unlikely(iov_iter_is_xarray(i))) {
1095 i->iov_offset += size;
1097 } else if (iov_iter_is_discard(i)) {
1101 EXPORT_SYMBOL(iov_iter_advance);
1103 void iov_iter_revert(struct iov_iter *i, size_t unroll)
1107 if (WARN_ON(unroll > MAX_RW_COUNT))
1110 if (unlikely(iov_iter_is_pipe(i))) {
1111 struct pipe_inode_info *pipe = i->pipe;
1112 unsigned int p_mask = pipe->ring_size - 1;
1113 unsigned int i_head = i->head;
1114 size_t off = i->iov_offset;
1116 struct pipe_buffer *b = &pipe->bufs[i_head & p_mask];
1117 size_t n = off - b->offset;
1123 if (!unroll && i_head == i->start_head) {
1128 b = &pipe->bufs[i_head & p_mask];
1129 off = b->offset + b->len;
1131 i->iov_offset = off;
1136 if (unlikely(iov_iter_is_discard(i)))
1138 if (unroll <= i->iov_offset) {
1139 i->iov_offset -= unroll;
1142 unroll -= i->iov_offset;
1143 if (iov_iter_is_xarray(i)) {
1144 BUG(); /* We should never go beyond the start of the specified
1145 * range since we might then be straying into pages that
1148 } else if (iov_iter_is_bvec(i)) {
1149 const struct bio_vec *bvec = i->bvec;
1151 size_t n = (--bvec)->bv_len;
1155 i->iov_offset = n - unroll;
1160 } else { /* same logic for iovec and kvec */
1161 const struct iovec *iov = i->iov;
1163 size_t n = (--iov)->iov_len;
1167 i->iov_offset = n - unroll;
1174 EXPORT_SYMBOL(iov_iter_revert);
1177 * Return the count of just the current iov_iter segment.
1179 size_t iov_iter_single_seg_count(const struct iov_iter *i)
1181 if (i->nr_segs > 1) {
1182 if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
1183 return min(i->count, i->iov->iov_len - i->iov_offset);
1184 if (iov_iter_is_bvec(i))
1185 return min(i->count, i->bvec->bv_len - i->iov_offset);
1189 EXPORT_SYMBOL(iov_iter_single_seg_count);
1191 void iov_iter_kvec(struct iov_iter *i, unsigned int direction,
1192 const struct kvec *kvec, unsigned long nr_segs,
1195 WARN_ON(direction & ~(READ | WRITE));
1196 *i = (struct iov_iter){
1197 .iter_type = ITER_KVEC,
1198 .data_source = direction,
1205 EXPORT_SYMBOL(iov_iter_kvec);
1207 void iov_iter_bvec(struct iov_iter *i, unsigned int direction,
1208 const struct bio_vec *bvec, unsigned long nr_segs,
1211 WARN_ON(direction & ~(READ | WRITE));
1212 *i = (struct iov_iter){
1213 .iter_type = ITER_BVEC,
1214 .data_source = direction,
1221 EXPORT_SYMBOL(iov_iter_bvec);
1223 void iov_iter_pipe(struct iov_iter *i, unsigned int direction,
1224 struct pipe_inode_info *pipe,
1227 BUG_ON(direction != READ);
1228 WARN_ON(pipe_full(pipe->head, pipe->tail, pipe->ring_size));
1229 *i = (struct iov_iter){
1230 .iter_type = ITER_PIPE,
1231 .data_source = false,
1234 .start_head = pipe->head,
1239 EXPORT_SYMBOL(iov_iter_pipe);
1242 * iov_iter_xarray - Initialise an I/O iterator to use the pages in an xarray
1243 * @i: The iterator to initialise.
1244 * @direction: The direction of the transfer.
1245 * @xarray: The xarray to access.
1246 * @start: The start file position.
1247 * @count: The size of the I/O buffer in bytes.
1249 * Set up an I/O iterator to either draw data out of the pages attached to an
1250 * inode or to inject data into those pages. The pages *must* be prevented
1251 * from evaporation, either by taking a ref on them or locking them by the
1254 void iov_iter_xarray(struct iov_iter *i, unsigned int direction,
1255 struct xarray *xarray, loff_t start, size_t count)
1257 BUG_ON(direction & ~1);
1258 *i = (struct iov_iter) {
1259 .iter_type = ITER_XARRAY,
1260 .data_source = direction,
1262 .xarray_start = start,
1267 EXPORT_SYMBOL(iov_iter_xarray);
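/*
 * Illustrative sketch (not part of the original file): wrapping a file's
 * page-cache xarray so that data produced elsewhere (e.g. received from a
 * network filesystem) can be copied straight into the cached pages.  As the
 * comment above requires, the caller must already hold references on (or
 * locks over) the pages covering [pos, pos + len).
 */
#if 0	/* example only -- not compiled */
static size_t example_fill_pagecache(struct address_space *mapping,
				     loff_t pos, const void *data, size_t len)
{
	struct iov_iter to;

	/* READ: the xarray pages are the destination of the copy */
	iov_iter_xarray(&to, READ, &mapping->i_pages, pos, len);
	return copy_to_iter(data, len, &to);
}
#endif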
1270 * iov_iter_discard - Initialise an I/O iterator that discards data
1271 * @i: The iterator to initialise.
1272 * @direction: The direction of the transfer.
1273 * @count: The size of the I/O buffer in bytes.
1275 * Set up an I/O iterator that just discards everything that's written to it.
1276 * It's only available as a READ iterator.
1278 void iov_iter_discard(struct iov_iter *i, unsigned int direction, size_t count)
1280 BUG_ON(direction != READ);
1281 *i = (struct iov_iter){
1282 .iter_type = ITER_DISCARD,
1283 .data_source = false,
1288 EXPORT_SYMBOL(iov_iter_discard);
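/*
 * Illustrative sketch (not part of the original file): using a discard
 * iterator as a sink for payload the caller wants to skip -- the "copy"
 * merely advances the count, nothing is stored anywhere.
 */
#if 0	/* example only -- not compiled */
static void example_drop_payload(const void *junk, size_t len)
{
	struct iov_iter sink;

	iov_iter_discard(&sink, READ, len);
	copy_to_iter(junk, len, &sink);	/* data is thrown away */
}
#endif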
1290 static unsigned long iov_iter_alignment_iovec(const struct iov_iter *i)
1292 unsigned long res = 0;
1293 size_t size = i->count;
1294 size_t skip = i->iov_offset;
1297 for (k = 0; k < i->nr_segs; k++, skip = 0) {
1298 size_t len = i->iov[k].iov_len - skip;
1300 res |= (unsigned long)i->iov[k].iov_base + skip;
1312 static unsigned long iov_iter_alignment_bvec(const struct iov_iter *i)
1315 size_t size = i->count;
1316 unsigned skip = i->iov_offset;
1319 for (k = 0; k < i->nr_segs; k++, skip = 0) {
1320 size_t len = i->bvec[k].bv_len - skip;
1321 res |= (unsigned long)i->bvec[k].bv_offset + skip;
1332 unsigned long iov_iter_alignment(const struct iov_iter *i)
1334 /* iovec and kvec have identical layouts */
1335 if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
1336 return iov_iter_alignment_iovec(i);
1338 if (iov_iter_is_bvec(i))
1339 return iov_iter_alignment_bvec(i);
1341 if (iov_iter_is_pipe(i)) {
1342 unsigned int p_mask = i->pipe->ring_size - 1;
1343 size_t size = i->count;
1345 if (size && i->iov_offset && allocated(&i->pipe->bufs[i->head & p_mask]))
1346 return size | i->iov_offset;
1350 if (iov_iter_is_xarray(i))
1351 return (i->xarray_start + i->iov_offset) | i->count;
1355 EXPORT_SYMBOL(iov_iter_alignment);
1357 unsigned long iov_iter_gap_alignment(const struct iov_iter *i)
1359 unsigned long res = 0;
1360 unsigned long v = 0;
1361 size_t size = i->count;
1364 if (WARN_ON(!iter_is_iovec(i)))
1367 for (k = 0; k < i->nr_segs; k++) {
1368 if (i->iov[k].iov_len) {
1369 unsigned long base = (unsigned long)i->iov[k].iov_base;
1370 if (v) // if not the first one
1371 res |= base | v; // this start | previous end
1372 v = base + i->iov[k].iov_len;
1373 if (size <= i->iov[k].iov_len)
1375 size -= i->iov[k].iov_len;
1380 EXPORT_SYMBOL(iov_iter_gap_alignment);
1382 static inline ssize_t __pipe_get_pages(struct iov_iter *i,
1384 struct page **pages,
1388 struct pipe_inode_info *pipe = i->pipe;
1389 unsigned int p_mask = pipe->ring_size - 1;
1390 ssize_t n = push_pipe(i, maxsize, &iter_head, start);
1397 get_page(*pages++ = pipe->bufs[iter_head & p_mask].page);
1405 static ssize_t pipe_get_pages(struct iov_iter *i,
1406 struct page **pages, size_t maxsize, unsigned maxpages,
1409 unsigned int iter_head, npages;
1415 data_start(i, &iter_head, start);
1416 /* Amount of free space: some of this one + all after this one */
1417 npages = pipe_space_for_user(iter_head, i->pipe->tail, i->pipe);
1418 capacity = min(npages, maxpages) * PAGE_SIZE - *start;
1420 return __pipe_get_pages(i, min(maxsize, capacity), pages, iter_head, start);
1423 static ssize_t iter_xarray_populate_pages(struct page **pages, struct xarray *xa,
1424 pgoff_t index, unsigned int nr_pages)
1426 XA_STATE(xas, xa, index);
1428 unsigned int ret = 0;
1431 for (page = xas_load(&xas); page; page = xas_next(&xas)) {
1432 if (xas_retry(&xas, page))
1435 /* Has the page moved or been split? */
1436 if (unlikely(page != xas_reload(&xas))) {
1441 pages[ret] = find_subpage(page, xas.xa_index);
1442 get_page(pages[ret]);
1443 if (++ret == nr_pages)
1450 static ssize_t iter_xarray_get_pages(struct iov_iter *i,
1451 struct page **pages, size_t maxsize,
1452 unsigned maxpages, size_t *_start_offset)
1454 unsigned nr, offset;
1455 pgoff_t index, count;
1456 size_t size = maxsize, actual;
1459 if (!size || !maxpages)
1462 pos = i->xarray_start + i->iov_offset;
1463 index = pos >> PAGE_SHIFT;
1464 offset = pos & ~PAGE_MASK;
1465 *_start_offset = offset;
1468 if (size > PAGE_SIZE - offset) {
1469 size -= PAGE_SIZE - offset;
1470 count += size >> PAGE_SHIFT;
1476 if (count > maxpages)
1479 nr = iter_xarray_populate_pages(pages, i->xarray, index, count);
1483 actual = PAGE_SIZE * nr;
1485 if (nr == count && size > 0) {
1486 unsigned last_offset = (nr > 1) ? 0 : offset;
1487 actual -= PAGE_SIZE - (last_offset + size);
1492 /* must be done on non-empty ITER_IOVEC one */
1493 static unsigned long first_iovec_segment(const struct iov_iter *i,
1494 size_t *size, size_t *start,
1495 size_t maxsize, unsigned maxpages)
1500 for (k = 0, skip = i->iov_offset; k < i->nr_segs; k++, skip = 0) {
1501 unsigned long addr = (unsigned long)i->iov[k].iov_base + skip;
1502 size_t len = i->iov[k].iov_len - skip;
1508 len += (*start = addr % PAGE_SIZE);
1509 if (len > maxpages * PAGE_SIZE)
1510 len = maxpages * PAGE_SIZE;
1512 return addr & PAGE_MASK;
1514 BUG(); // if it had been empty, we wouldn't get called
1517 /* must be done on non-empty ITER_BVEC one */
1518 static struct page *first_bvec_segment(const struct iov_iter *i,
1519 size_t *size, size_t *start,
1520 size_t maxsize, unsigned maxpages)
1523 size_t skip = i->iov_offset, len;
1525 len = i->bvec->bv_len - skip;
1528 skip += i->bvec->bv_offset;
1529 page = i->bvec->bv_page + skip / PAGE_SIZE;
1530 len += (*start = skip % PAGE_SIZE);
1531 if (len > maxpages * PAGE_SIZE)
1532 len = maxpages * PAGE_SIZE;
1537 ssize_t iov_iter_get_pages(struct iov_iter *i,
1538 struct page **pages, size_t maxsize, unsigned maxpages,
1544 if (maxsize > i->count)
1549 if (likely(iter_is_iovec(i))) {
1552 addr = first_iovec_segment(i, &len, start, maxsize, maxpages);
1553 n = DIV_ROUND_UP(len, PAGE_SIZE);
1554 res = get_user_pages_fast(addr, n,
1555 iov_iter_rw(i) != WRITE ? FOLL_WRITE : 0,
1557 if (unlikely(res < 0))
1559 return (res == n ? len : res * PAGE_SIZE) - *start;
1561 if (iov_iter_is_bvec(i)) {
1564 page = first_bvec_segment(i, &len, start, maxsize, maxpages);
1565 n = DIV_ROUND_UP(len, PAGE_SIZE);
1567 get_page(*pages++ = page++);
1568 return len - *start;
1570 if (iov_iter_is_pipe(i))
1571 return pipe_get_pages(i, pages, maxsize, maxpages, start);
1572 if (iov_iter_is_xarray(i))
1573 return iter_xarray_get_pages(i, pages, maxsize, maxpages, start);
1576 EXPORT_SYMBOL(iov_iter_get_pages);
1578 static struct page **get_pages_array(size_t n)
1580 return kvmalloc_array(n, sizeof(struct page *), GFP_KERNEL);
1583 static ssize_t pipe_get_pages_alloc(struct iov_iter *i,
1584 struct page ***pages, size_t maxsize,
1588 unsigned int iter_head, npages;
1594 data_start(i, &iter_head, start);
1595 /* Amount of free space: some of this one + all after this one */
1596 npages = pipe_space_for_user(iter_head, i->pipe->tail, i->pipe);
1597 n = npages * PAGE_SIZE - *start;
1601 npages = DIV_ROUND_UP(maxsize + *start, PAGE_SIZE);
1602 p = get_pages_array(npages);
1605 n = __pipe_get_pages(i, maxsize, p, iter_head, start);
1613 static ssize_t iter_xarray_get_pages_alloc(struct iov_iter *i,
1614 struct page ***pages, size_t maxsize,
1615 size_t *_start_offset)
1618 unsigned nr, offset;
1619 pgoff_t index, count;
1620 size_t size = maxsize, actual;
1626 pos = i->xarray_start + i->iov_offset;
1627 index = pos >> PAGE_SHIFT;
1628 offset = pos & ~PAGE_MASK;
1629 *_start_offset = offset;
1632 if (size > PAGE_SIZE - offset) {
1633 size -= PAGE_SIZE - offset;
1634 count += size >> PAGE_SHIFT;
1640 p = get_pages_array(count);
1645 nr = iter_xarray_populate_pages(p, i->xarray, index, count);
1649 actual = PAGE_SIZE * nr;
1651 if (nr == count && size > 0) {
1652 unsigned last_offset = (nr > 1) ? 0 : offset;
1653 actual -= PAGE_SIZE - (last_offset + size);
1658 ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
1659 struct page ***pages, size_t maxsize,
1666 if (maxsize > i->count)
1671 if (likely(iter_is_iovec(i))) {
1674 addr = first_iovec_segment(i, &len, start, maxsize, ~0U);
1675 n = DIV_ROUND_UP(len, PAGE_SIZE);
1676 p = get_pages_array(n);
1679 res = get_user_pages_fast(addr, n,
1680 iov_iter_rw(i) != WRITE ? FOLL_WRITE : 0, p);
1681 if (unlikely(res < 0)) {
1686 return (res == n ? len : res * PAGE_SIZE) - *start;
1688 if (iov_iter_is_bvec(i)) {
1691 page = first_bvec_segment(i, &len, start, maxsize, ~0U);
1692 n = DIV_ROUND_UP(len, PAGE_SIZE);
1693 *pages = p = get_pages_array(n);
1697 get_page(*p++ = page++);
1698 return len - *start;
1700 if (iov_iter_is_pipe(i))
1701 return pipe_get_pages_alloc(i, pages, maxsize, start);
1702 if (iov_iter_is_xarray(i))
1703 return iter_xarray_get_pages_alloc(i, pages, maxsize, start);
1706 EXPORT_SYMBOL(iov_iter_get_pages_alloc);
1708 size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum,
1715 if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
1719 iterate_and_advance(i, bytes, v, ({
1720 next = csum_and_copy_from_user(v.iov_base,
1721 (to += v.iov_len) - v.iov_len,
1724 sum = csum_block_add(sum, next, off);
1727 next ? 0 : v.iov_len;
1729 char *p = kmap_atomic(v.bv_page);
1730 sum = csum_and_memcpy((to += v.bv_len) - v.bv_len,
1731 p + v.bv_offset, v.bv_len,
1736 sum = csum_and_memcpy((to += v.iov_len) - v.iov_len,
1737 v.iov_base, v.iov_len,
1741 char *p = kmap_atomic(v.bv_page);
1742 sum = csum_and_memcpy((to += v.bv_len) - v.bv_len,
1743 p + v.bv_offset, v.bv_len,
1752 EXPORT_SYMBOL(csum_and_copy_from_iter);
1754 size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *_csstate,
1757 struct csum_state *csstate = _csstate;
1758 const char *from = addr;
1762 if (unlikely(iov_iter_is_pipe(i)))
1763 return csum_and_copy_to_pipe_iter(addr, bytes, _csstate, i);
1765 sum = csum_shift(csstate->csum, csstate->off);
1767 if (unlikely(iov_iter_is_discard(i))) {
1768 WARN_ON(1); /* for now */
1771 iterate_and_advance(i, bytes, v, ({
1772 next = csum_and_copy_to_user((from += v.iov_len) - v.iov_len,
1776 sum = csum_block_add(sum, next, off);
1779 next ? 0 : v.iov_len;
1781 char *p = kmap_atomic(v.bv_page);
1782 sum = csum_and_memcpy(p + v.bv_offset,
1783 (from += v.bv_len) - v.bv_len,
1784 v.bv_len, sum, off);
1788 sum = csum_and_memcpy(v.iov_base,
1789 (from += v.iov_len) - v.iov_len,
1790 v.iov_len, sum, off);
1793 char *p = kmap_atomic(v.bv_page);
1794 sum = csum_and_memcpy(p + v.bv_offset,
1795 (from += v.bv_len) - v.bv_len,
1796 v.bv_len, sum, off);
1801 csstate->csum = csum_shift(sum, csstate->off);
1802 csstate->off += bytes;
1805 EXPORT_SYMBOL(csum_and_copy_to_iter);
1807 size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp,
1810 #ifdef CONFIG_CRYPTO_HASH
1811 struct ahash_request *hash = hashp;
1812 struct scatterlist sg;
1815 copied = copy_to_iter(addr, bytes, i);
1816 sg_init_one(&sg, addr, copied);
1817 ahash_request_set_crypt(hash, &sg, NULL, copied);
1818 crypto_ahash_update(hash);
1824 EXPORT_SYMBOL(hash_and_copy_to_iter);
1826 static int iov_npages(const struct iov_iter *i, int maxpages)
1828 size_t skip = i->iov_offset, size = i->count;
1829 const struct iovec *p;
1832 for (p = i->iov; size; skip = 0, p++) {
1833 unsigned offs = offset_in_page(p->iov_base + skip);
1834 size_t len = min(p->iov_len - skip, size);
1838 npages += DIV_ROUND_UP(offs + len, PAGE_SIZE);
1839 if (unlikely(npages > maxpages))
1846 static int bvec_npages(const struct iov_iter *i, int maxpages)
1848 size_t skip = i->iov_offset, size = i->count;
1849 const struct bio_vec *p;
1852 for (p = i->bvec; size; skip = 0, p++) {
1853 unsigned offs = (p->bv_offset + skip) % PAGE_SIZE;
1854 size_t len = min(p->bv_len - skip, size);
1857 npages += DIV_ROUND_UP(offs + len, PAGE_SIZE);
1858 if (unlikely(npages > maxpages))
1864 int iov_iter_npages(const struct iov_iter *i, int maxpages)
1866 if (unlikely(!i->count))
1868 /* iovec and kvec have identical layouts */
1869 if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
1870 return iov_npages(i, maxpages);
1871 if (iov_iter_is_bvec(i))
1872 return bvec_npages(i, maxpages);
1873 if (iov_iter_is_pipe(i)) {
1874 unsigned int iter_head;
1881 data_start(i, &iter_head, &off);
1882 /* some of this one + all after this one */
1883 npages = pipe_space_for_user(iter_head, i->pipe->tail, i->pipe);
1884 return min(npages, maxpages);
1886 if (iov_iter_is_xarray(i)) {
1887 unsigned offset = (i->xarray_start + i->iov_offset) % PAGE_SIZE;
1888 int npages = DIV_ROUND_UP(offset + i->count, PAGE_SIZE);
1889 return min(npages, maxpages);
1893 EXPORT_SYMBOL(iov_iter_npages);
1895 const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags)
1898 if (unlikely(iov_iter_is_pipe(new))) {
1902 if (unlikely(iov_iter_is_discard(new) || iov_iter_is_xarray(new)))
1904 if (iov_iter_is_bvec(new))
1905 return new->bvec = kmemdup(new->bvec,
1906 new->nr_segs * sizeof(struct bio_vec),
1909 /* iovec and kvec have identical layout */
1910 return new->iov = kmemdup(new->iov,
1911 new->nr_segs * sizeof(struct iovec),
1914 EXPORT_SYMBOL(dup_iter);
1916 static int copy_compat_iovec_from_user(struct iovec *iov,
1917 const struct iovec __user *uvec, unsigned long nr_segs)
1919 const struct compat_iovec __user *uiov =
1920 (const struct compat_iovec __user *)uvec;
1921 int ret = -EFAULT, i;
1923 if (!user_access_begin(uiov, nr_segs * sizeof(*uiov)))
1926 for (i = 0; i < nr_segs; i++) {
1930 unsafe_get_user(len, &uiov[i].iov_len, uaccess_end);
1931 unsafe_get_user(buf, &uiov[i].iov_base, uaccess_end);
1933 /* check for compat_size_t not fitting in compat_ssize_t .. */
1938 iov[i].iov_base = compat_ptr(buf);
1939 iov[i].iov_len = len;
1948 static int copy_iovec_from_user(struct iovec *iov,
1949 const struct iovec __user *uvec, unsigned long nr_segs)
1953 if (copy_from_user(iov, uvec, nr_segs * sizeof(*uvec)))
1955 for (seg = 0; seg < nr_segs; seg++) {
1956 if ((ssize_t)iov[seg].iov_len < 0)
1963 struct iovec *iovec_from_user(const struct iovec __user *uvec,
1964 unsigned long nr_segs, unsigned long fast_segs,
1965 struct iovec *fast_iov, bool compat)
1967 struct iovec *iov = fast_iov;
1971 * SuS says "The readv() function *may* fail if the iovcnt argument was
1972 * less than or equal to 0, or greater than {IOV_MAX}." Linux has
1973 * traditionally returned zero for zero segments, so...
1977 if (nr_segs > UIO_MAXIOV)
1978 return ERR_PTR(-EINVAL);
1979 if (nr_segs > fast_segs) {
1980 iov = kmalloc_array(nr_segs, sizeof(struct iovec), GFP_KERNEL);
1982 return ERR_PTR(-ENOMEM);
1986 ret = copy_compat_iovec_from_user(iov, uvec, nr_segs);
1988 ret = copy_iovec_from_user(iov, uvec, nr_segs);
1990 if (iov != fast_iov)
1992 return ERR_PTR(ret);
1998 ssize_t __import_iovec(int type, const struct iovec __user *uvec,
1999 unsigned nr_segs, unsigned fast_segs, struct iovec **iovp,
2000 struct iov_iter *i, bool compat)
2002 ssize_t total_len = 0;
2006 iov = iovec_from_user(uvec, nr_segs, fast_segs, *iovp, compat);
2009 return PTR_ERR(iov);
2013 * According to the Single Unix Specification we should return EINVAL if
2014 * an element length is < 0 when cast to ssize_t or if the total length
2015 * would overflow the ssize_t return value of the system call.
2017 * Linux caps all read/write calls to MAX_RW_COUNT, and avoids the
2020 for (seg = 0; seg < nr_segs; seg++) {
2021 ssize_t len = (ssize_t)iov[seg].iov_len;
2023 if (!access_ok(iov[seg].iov_base, len)) {
2030 if (len > MAX_RW_COUNT - total_len) {
2031 len = MAX_RW_COUNT - total_len;
2032 iov[seg].iov_len = len;
2037 iov_iter_init(i, type, iov, nr_segs, total_len);
2046 * import_iovec() - Copy an array of &struct iovec from userspace
2047 * into the kernel, check that it is valid, and initialize a new
2048 * &struct iov_iter iterator to access it.
2050 * @type: One of %READ or %WRITE.
2051 * @uvec: Pointer to the userspace array.
2052 * @nr_segs: Number of elements in userspace array.
2053 * @fast_segs: Number of elements in *@iovp.
2054 * @iovp: (input and output parameter) Pointer to pointer to (usually small
2055 * on-stack) kernel array.
2056 * @i: Pointer to iterator that will be initialized on success.
2058 * If the array pointed to by *@iovp is large enough to hold all @nr_segs,
2059 * then this function places %NULL in *@iovp on return. Otherwise, a new
2060 * array will be allocated and the result placed in *@iovp. This means that
2061 * the caller may call kfree() on *@iovp regardless of whether the small
2062 * on-stack array was used or not (and regardless of whether this function
2063 * returns an error or not).
2065 * Return: Negative error code on error, bytes imported on success
2067 ssize_t import_iovec(int type, const struct iovec __user *uvec,
2068 unsigned nr_segs, unsigned fast_segs,
2069 struct iovec **iovp, struct iov_iter *i)
2071 return __import_iovec(type, uvec, nr_segs, fast_segs, iovp, i,
2072 in_compat_syscall());
2074 EXPORT_SYMBOL(import_iovec);
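/*
 * Illustrative sketch (not part of the original file): the usual syscall-side
 * pattern for the contract documented above.  A small on-stack array is
 * offered; import_iovec() may replace it with a heap allocation, and kfree()
 * of the returned pointer is always safe.  do_example_readv() and what is
 * done with the iterator are hypothetical.
 */
#if 0	/* example only -- not compiled */
static ssize_t do_example_readv(const struct iovec __user *uvec,
				unsigned long nr_segs)
{
	struct iovec iovstack[UIO_FASTIOV];
	struct iovec *iov = iovstack;
	struct iov_iter iter;
	ssize_t ret;

	ret = import_iovec(READ, uvec, nr_segs, ARRAY_SIZE(iovstack),
			   &iov, &iter);
	if (ret < 0)
		return ret;

	/* ... hand &iter to ->read_iter() or copy_to_iter() here ... */

	kfree(iov);	/* safe whether or not a heap array was allocated */
	return ret;
}
#endif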
2076 int import_single_range(int rw, void __user *buf, size_t len,
2077 struct iovec *iov, struct iov_iter *i)
2079 if (len > MAX_RW_COUNT)
2081 if (unlikely(!access_ok(buf, len)))
2084 iov->iov_base = buf;
2086 iov_iter_init(i, rw, iov, 1, len);
2089 EXPORT_SYMBOL(import_single_range);