1 // SPDX-License-Identifier: GPL-2.0-only
2 #include <crypto/hash.h>
3 #include <linux/export.h>
4 #include <linux/bvec.h>
5 #include <linux/fault-inject-usercopy.h>
6 #include <linux/uio.h>
7 #include <linux/pagemap.h>
8 #include <linux/highmem.h>
9 #include <linux/slab.h>
10 #include <linux/vmalloc.h>
11 #include <linux/splice.h>
12 #include <linux/compat.h>
13 #include <net/checksum.h>
14 #include <linux/scatterlist.h>
15 #include <linux/instrumented.h>
17 #define PIPE_PARANOIA /* for now */
19 #define iterate_iovec(i, n, __v, __p, skip, STEP) { \
23 __v.iov_len = min(n, __p->iov_len - skip); \
24 if (likely(__v.iov_len)) { \
25 __v.iov_base = __p->iov_base + skip; \
27 __v.iov_len -= left; \
28 skip += __v.iov_len; \
33 while (unlikely(!left && n)) { \
35 __v.iov_len = min(n, __p->iov_len); \
36 if (unlikely(!__v.iov_len)) \
38 __v.iov_base = __p->iov_base; \
40 __v.iov_len -= left; \
47 #define iterate_kvec(i, n, __v, __p, skip, STEP) { \
50 __v.iov_len = min(n, __p->iov_len - skip); \
51 if (likely(__v.iov_len)) { \
52 __v.iov_base = __p->iov_base + skip; \
54 skip += __v.iov_len; \
57 while (unlikely(n)) { \
59 __v.iov_len = min(n, __p->iov_len); \
60 if (unlikely(!__v.iov_len)) \
62 __v.iov_base = __p->iov_base; \
70 #define iterate_bvec(i, n, __v, __bi, skip, STEP) { \
71 struct bvec_iter __start; \
72 __start.bi_size = n; \
73 __start.bi_bvec_done = skip; \
75 for_each_bvec(__v, i->bvec, __bi, __start) { \
80 #define iterate_xarray(i, n, __v, skip, STEP) { \
81 struct page *head = NULL; \
82 size_t wanted = n, seg, offset; \
83 loff_t start = i->xarray_start + skip; \
84 pgoff_t index = start >> PAGE_SHIFT; \
87 XA_STATE(xas, i->xarray, index); \
90 xas_for_each(&xas, head, ULONG_MAX) { \
91 if (xas_retry(&xas, head)) \
93 if (WARN_ON(xa_is_value(head))) \
95 if (WARN_ON(PageHuge(head))) \
97 for (j = (head->index < index) ? index - head->index : 0; \
98 j < thp_nr_pages(head); j++) { \
99 __v.bv_page = head + j; \
100 offset = (i->xarray_start + skip) & ~PAGE_MASK; \
101 seg = PAGE_SIZE - offset; \
102 __v.bv_offset = offset; \
103 __v.bv_len = min(n, seg); \
106 skip += __v.bv_len; \
117 #define iterate_all_kinds(i, n, v, I, B, K, X) { \
119 size_t skip = i->iov_offset; \
120 if (unlikely(i->type & ITER_BVEC)) { \
122 struct bvec_iter __bi; \
123 iterate_bvec(i, n, v, __bi, skip, (B)) \
124 } else if (unlikely(i->type & ITER_KVEC)) { \
125 const struct kvec *kvec; \
127 iterate_kvec(i, n, v, kvec, skip, (K)) \
128 } else if (unlikely(i->type & ITER_DISCARD)) { \
129 } else if (unlikely(i->type & ITER_XARRAY)) { \
131 iterate_xarray(i, n, v, skip, (X)); \
133 const struct iovec *iov; \
135 iterate_iovec(i, n, v, iov, skip, (I)) \
140 #define iterate_and_advance(i, n, v, I, B, K, X) { \
141 if (unlikely(i->count < n)) \
144 size_t skip = i->iov_offset; \
145 if (unlikely(i->type & ITER_BVEC)) { \
146 const struct bio_vec *bvec = i->bvec; \
148 struct bvec_iter __bi; \
149 iterate_bvec(i, n, v, __bi, skip, (B)) \
150 i->bvec = __bvec_iter_bvec(i->bvec, __bi); \
151 i->nr_segs -= i->bvec - bvec; \
152 skip = __bi.bi_bvec_done; \
153 } else if (unlikely(i->type & ITER_KVEC)) { \
154 const struct kvec *kvec; \
156 iterate_kvec(i, n, v, kvec, skip, (K)) \
157 if (skip == kvec->iov_len) { \
161 i->nr_segs -= kvec - i->kvec; \
163 } else if (unlikely(i->type & ITER_DISCARD)) { \
165 } else if (unlikely(i->type & ITER_XARRAY)) { \
167 iterate_xarray(i, n, v, skip, (X)) \
169 const struct iovec *iov; \
171 iterate_iovec(i, n, v, iov, skip, (I)) \
172 if (skip == iov->iov_len) { \
176 i->nr_segs -= iov - i->iov; \
180 i->iov_offset = skip; \
184 static int copyout(void __user *to, const void *from, size_t n)
186 if (should_fail_usercopy())
188 if (access_ok(to, n)) {
189 instrument_copy_to_user(to, from, n);
190 n = raw_copy_to_user(to, from, n);
195 static int copyin(void *to, const void __user *from, size_t n)
197 if (should_fail_usercopy())
199 if (access_ok(from, n)) {
200 instrument_copy_from_user(to, from, n);
201 n = raw_copy_from_user(to, from, n);
206 static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t bytes,
209 size_t skip, copy, left, wanted;
210 const struct iovec *iov;
214 if (unlikely(bytes > i->count))
217 if (unlikely(!bytes))
223 skip = i->iov_offset;
224 buf = iov->iov_base + skip;
225 copy = min(bytes, iov->iov_len - skip);
227 if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_pages_writeable(buf, copy)) {
228 kaddr = kmap_atomic(page);
229 from = kaddr + offset;
231 /* first chunk, usually the only one */
232 left = copyout(buf, from, copy);
238 while (unlikely(!left && bytes)) {
241 copy = min(bytes, iov->iov_len);
242 left = copyout(buf, from, copy);
248 if (likely(!bytes)) {
249 kunmap_atomic(kaddr);
252 offset = from - kaddr;
254 kunmap_atomic(kaddr);
255 copy = min(bytes, iov->iov_len - skip);
257 /* Too bad - revert to non-atomic kmap */
260 from = kaddr + offset;
261 left = copyout(buf, from, copy);
266 while (unlikely(!left && bytes)) {
269 copy = min(bytes, iov->iov_len);
270 left = copyout(buf, from, copy);
279 if (skip == iov->iov_len) {
283 i->count -= wanted - bytes;
284 i->nr_segs -= iov - i->iov;
286 i->iov_offset = skip;
287 return wanted - bytes;
290 static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t bytes,
293 size_t skip, copy, left, wanted;
294 const struct iovec *iov;
298 if (unlikely(bytes > i->count))
301 if (unlikely(!bytes))
307 skip = i->iov_offset;
308 buf = iov->iov_base + skip;
309 copy = min(bytes, iov->iov_len - skip);
311 if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_pages_readable(buf, copy)) {
312 kaddr = kmap_atomic(page);
315 /* first chunk, usually the only one */
316 left = copyin(to, buf, copy);
322 while (unlikely(!left && bytes)) {
325 copy = min(bytes, iov->iov_len);
326 left = copyin(to, buf, copy);
332 if (likely(!bytes)) {
333 kunmap_atomic(kaddr);
338 kunmap_atomic(kaddr);
339 copy = min(bytes, iov->iov_len - skip);
341 /* Too bad - revert to non-atomic kmap */
345 left = copyin(to, buf, copy);
350 while (unlikely(!left && bytes)) {
353 copy = min(bytes, iov->iov_len);
354 left = copyin(to, buf, copy);
363 if (skip == iov->iov_len) {
367 i->count -= wanted - bytes;
368 i->nr_segs -= iov - i->iov;
370 i->iov_offset = skip;
371 return wanted - bytes;
375 static bool sanity(const struct iov_iter *i)
377 struct pipe_inode_info *pipe = i->pipe;
378 unsigned int p_head = pipe->head;
379 unsigned int p_tail = pipe->tail;
380 unsigned int p_mask = pipe->ring_size - 1;
381 unsigned int p_occupancy = pipe_occupancy(p_head, p_tail);
382 unsigned int i_head = i->head;
386 struct pipe_buffer *p;
387 if (unlikely(p_occupancy == 0))
388 goto Bad; // pipe must be non-empty
389 if (unlikely(i_head != p_head - 1))
390 goto Bad; // must be at the last buffer...
392 p = &pipe->bufs[i_head & p_mask];
393 if (unlikely(p->offset + p->len != i->iov_offset))
394 goto Bad; // ... at the end of segment
396 if (i_head != p_head)
397 goto Bad; // must be right after the last buffer
401 printk(KERN_ERR "idx = %d, offset = %zd\n", i_head, i->iov_offset);
402 printk(KERN_ERR "head = %d, tail = %d, buffers = %d\n",
403 p_head, p_tail, pipe->ring_size);
404 for (idx = 0; idx < pipe->ring_size; idx++)
405 printk(KERN_ERR "[%p %p %d %d]\n",
407 pipe->bufs[idx].page,
408 pipe->bufs[idx].offset,
409 pipe->bufs[idx].len);
414 #define sanity(i) true
417 static size_t copy_page_to_iter_pipe(struct page *page, size_t offset, size_t bytes,
420 struct pipe_inode_info *pipe = i->pipe;
421 struct pipe_buffer *buf;
422 unsigned int p_tail = pipe->tail;
423 unsigned int p_mask = pipe->ring_size - 1;
424 unsigned int i_head = i->head;
427 if (unlikely(bytes > i->count))
430 if (unlikely(!bytes))
437 buf = &pipe->bufs[i_head & p_mask];
439 if (offset == off && buf->page == page) {
440 /* merge with the last one */
442 i->iov_offset += bytes;
446 buf = &pipe->bufs[i_head & p_mask];
448 if (pipe_full(i_head, p_tail, pipe->max_usage))
451 buf->ops = &page_cache_pipe_buf_ops;
454 buf->offset = offset;
457 pipe->head = i_head + 1;
458 i->iov_offset = offset + bytes;
466 * Fault in one or more iovecs of the given iov_iter, to a maximum length of
467 * @bytes. For each iovec, fault in each page that constitutes the iovec.
469 * Return 0 on success, or non-zero if the memory could not be accessed (i.e.
470 * because it is an invalid address).
472 int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
474 size_t skip = i->iov_offset;
475 const struct iovec *iov;
479 if (iter_is_iovec(i)) {
480 iterate_iovec(i, bytes, v, iov, skip, ({
481 err = fault_in_pages_readable(v.iov_base, v.iov_len);
488 EXPORT_SYMBOL(iov_iter_fault_in_readable);
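/*
 * Editor's illustrative sketch (not part of the upstream file): the usual
 * buffered-write pattern, in the spirit of generic_perform_write() - fault
 * the user pages in first, then copy with page faults disabled.  The
 * example_*() name and its parameters are hypothetical.
 */
static ssize_t __maybe_unused example_fill_page_from_user(struct page *page,
		unsigned long offset, size_t bytes, struct iov_iter *i)
{
	size_t copied;

	/* Pre-fault the user pages so the atomic copy below rarely fails. */
	if (iov_iter_fault_in_readable(i, bytes))
		return -EFAULT;

	/* Copies into the kmap_atomic()'d page, so no page faults are taken. */
	copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
	iov_iter_advance(i, copied);
	return copied;
}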
490 void iov_iter_init(struct iov_iter *i, unsigned int direction,
491 const struct iovec *iov, unsigned long nr_segs,
494 WARN_ON(direction & ~(READ | WRITE));
495 direction &= READ | WRITE;
497 /* It will get better. Eventually... */
498 if (uaccess_kernel()) {
499 i->type = ITER_KVEC | direction;
500 i->kvec = (struct kvec *)iov;
502 i->type = ITER_IOVEC | direction;
505 i->nr_segs = nr_segs;
509 EXPORT_SYMBOL(iov_iter_init);
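/*
 * Editor's illustrative sketch (not part of the upstream file): wrapping a
 * single user buffer in an iterator, much as the sync read/write helpers in
 * fs/read_write.c do.  Names are hypothetical; the iovec must outlive any
 * use of the iterator.
 */
static void __maybe_unused example_iter_over_user_buf(struct iov_iter *iter,
		struct iovec *iov, void __user *buf, size_t len)
{
	iov->iov_base = buf;
	iov->iov_len = len;
	/* READ: the iterator describes the destination of a read(2). */
	iov_iter_init(iter, READ, iov, 1, len);
}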
511 static inline bool allocated(struct pipe_buffer *buf)
513 return buf->ops == &default_pipe_buf_ops;
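/*
 * Work out where new data would land in the pipe: the buffer index (head)
 * and offset to append at.  If appending to the current buffer is not
 * possible (it is not one of our allocated buffers, or it is already a
 * full page), start a fresh buffer at offset 0.
 */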
516 static inline void data_start(const struct iov_iter *i,
517 unsigned int *iter_headp, size_t *offp)
519 unsigned int p_mask = i->pipe->ring_size - 1;
520 unsigned int iter_head = i->head;
521 size_t off = i->iov_offset;
523 if (off && (!allocated(&i->pipe->bufs[iter_head & p_mask]) ||
528 *iter_headp = iter_head;
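/*
 * Make room in the pipe for up to @size bytes, allocating pages for new
 * buffers as needed.  Returns the amount of space actually secured (which
 * may be less than @size if the pipe fills up or an allocation fails) and
 * reports the starting head/offset through @iter_headp/@offp.
 */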
532 static size_t push_pipe(struct iov_iter *i, size_t size,
533 int *iter_headp, size_t *offp)
535 struct pipe_inode_info *pipe = i->pipe;
536 unsigned int p_tail = pipe->tail;
537 unsigned int p_mask = pipe->ring_size - 1;
538 unsigned int iter_head;
542 if (unlikely(size > i->count))
548 data_start(i, &iter_head, &off);
549 *iter_headp = iter_head;
552 left -= PAGE_SIZE - off;
554 pipe->bufs[iter_head & p_mask].len += size;
557 pipe->bufs[iter_head & p_mask].len = PAGE_SIZE;
560 while (!pipe_full(iter_head, p_tail, pipe->max_usage)) {
561 struct pipe_buffer *buf = &pipe->bufs[iter_head & p_mask];
562 struct page *page = alloc_page(GFP_USER);
566 buf->ops = &default_pipe_buf_ops;
569 buf->len = min_t(ssize_t, left, PAGE_SIZE);
572 pipe->head = iter_head;
580 static size_t copy_pipe_to_iter(const void *addr, size_t bytes,
583 struct pipe_inode_info *pipe = i->pipe;
584 unsigned int p_mask = pipe->ring_size - 1;
591 bytes = n = push_pipe(i, bytes, &i_head, &off);
595 size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
596 memcpy_to_page(pipe->bufs[i_head & p_mask].page, off, addr, chunk);
598 i->iov_offset = off + chunk;
608 static __wsum csum_and_memcpy(void *to, const void *from, size_t len,
609 __wsum sum, size_t off)
611 __wsum next = csum_partial_copy_nocheck(from, to, len);
612 return csum_block_add(sum, next, off);
615 static size_t csum_and_copy_to_pipe_iter(const void *addr, size_t bytes,
616 struct csum_state *csstate,
619 struct pipe_inode_info *pipe = i->pipe;
620 unsigned int p_mask = pipe->ring_size - 1;
621 __wsum sum = csstate->csum;
622 size_t off = csstate->off;
629 bytes = n = push_pipe(i, bytes, &i_head, &r);
633 size_t chunk = min_t(size_t, n, PAGE_SIZE - r);
634 char *p = kmap_atomic(pipe->bufs[i_head & p_mask].page);
635 sum = csum_and_memcpy(p + r, addr, chunk, sum, off);
638 i->iov_offset = r + chunk;
651 size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
653 const char *from = addr;
654 if (unlikely(iov_iter_is_pipe(i)))
655 return copy_pipe_to_iter(addr, bytes, i);
656 if (iter_is_iovec(i))
658 iterate_and_advance(i, bytes, v,
659 copyout(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len),
660 memcpy_to_page(v.bv_page, v.bv_offset,
661 (from += v.bv_len) - v.bv_len, v.bv_len),
662 memcpy(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len),
663 memcpy_to_page(v.bv_page, v.bv_offset,
664 (from += v.bv_len) - v.bv_len, v.bv_len)
669 EXPORT_SYMBOL(_copy_to_iter);
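/*
 * Editor's illustrative sketch (not part of the upstream file): a minimal
 * read()-style handler that pushes a kernel buffer out through the checked
 * copy_to_iter() wrapper.  The example_*() name and parameters are
 * hypothetical.
 */
static ssize_t __maybe_unused example_read_kernel_buf(struct iov_iter *to,
		const void *kbuf, size_t len)
{
	size_t copied = copy_to_iter(kbuf, len, to);

	/* A short copy normally means the destination buffer faulted. */
	if (!copied && len)
		return -EFAULT;
	return copied;
}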
671 #ifdef CONFIG_ARCH_HAS_COPY_MC
672 static int copyout_mc(void __user *to, const void *from, size_t n)
674 if (access_ok(to, n)) {
675 instrument_copy_to_user(to, from, n);
676 n = copy_mc_to_user((__force void *) to, from, n);
681 static unsigned long copy_mc_to_page(struct page *page, size_t offset,
682 const char *from, size_t len)
687 to = kmap_atomic(page);
688 ret = copy_mc_to_kernel(to + offset, from, len);
694 static size_t copy_mc_pipe_to_iter(const void *addr, size_t bytes,
697 struct pipe_inode_info *pipe = i->pipe;
698 unsigned int p_mask = pipe->ring_size - 1;
700 size_t n, off, xfer = 0;
705 bytes = n = push_pipe(i, bytes, &i_head, &off);
709 size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
712 rem = copy_mc_to_page(pipe->bufs[i_head & p_mask].page,
715 i->iov_offset = off + chunk - rem;
729 * _copy_mc_to_iter - copy to iter with source memory error exception handling
730 * @addr: source kernel address
731 * @bytes: total transfer length
732 * @i: destination iterator
734 * The pmem driver deploys this for the dax operation
735 * (dax_copy_to_iter()) for dax reads (bypass page-cache and the
736 * block-layer). Upon a machine check (#MC), read(2) aborts and returns
737 * either EIO or the number of bytes successfully copied.
739 * The main differences between this and typical _copy_to_iter() are:
741 * * Typical tail/residue handling after a fault retries the copy
742 * byte-by-byte until the fault happens again. Re-triggering machine
743 * checks is potentially fatal so the implementation uses source
744 * alignment and poison alignment assumptions to avoid re-triggering
745 * hardware exceptions.
747 * * ITER_KVEC, ITER_PIPE, and ITER_BVEC can return short copies.
748 * Compare to copy_to_iter() where only ITER_IOVEC attempts might return
749 * a short copy.
750 */
751 size_t _copy_mc_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
753 const char *from = addr;
754 unsigned long rem, curr_addr, s_addr = (unsigned long) addr;
756 if (unlikely(iov_iter_is_pipe(i)))
757 return copy_mc_pipe_to_iter(addr, bytes, i);
758 if (iter_is_iovec(i))
760 iterate_and_advance(i, bytes, v,
761 copyout_mc(v.iov_base, (from += v.iov_len) - v.iov_len,
764 rem = copy_mc_to_page(v.bv_page, v.bv_offset,
765 (from += v.bv_len) - v.bv_len, v.bv_len);
767 curr_addr = (unsigned long) from;
768 bytes = curr_addr - s_addr - rem;
773 rem = copy_mc_to_kernel(v.iov_base, (from += v.iov_len)
774 - v.iov_len, v.iov_len);
776 curr_addr = (unsigned long) from;
777 bytes = curr_addr - s_addr - rem;
782 rem = copy_mc_to_page(v.bv_page, v.bv_offset,
783 (from += v.bv_len) - v.bv_len, v.bv_len);
785 curr_addr = (unsigned long) from;
786 bytes = curr_addr - s_addr - rem;
788 i->iov_offset += bytes;
797 EXPORT_SYMBOL_GPL(_copy_mc_to_iter);
798 #endif /* CONFIG_ARCH_HAS_COPY_MC */
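/*
 * Editor's illustrative sketch (not part of the upstream file): a pmem/dax
 * flavoured read path would go through the copy_mc_to_iter() wrapper, which
 * falls back to plain copy_to_iter() on architectures without
 * CONFIG_ARCH_HAS_COPY_MC.  Names here are hypothetical.
 */
static ssize_t __maybe_unused example_dax_style_read(struct iov_iter *to,
		void *pmem_addr, size_t len)
{
	size_t copied = copy_mc_to_iter(pmem_addr, len, to);

	/* Callers such as read(2) typically turn a zero-byte copy into -EIO. */
	if (!copied && len)
		return -EIO;
	return copied;
}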
800 size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
803 if (unlikely(iov_iter_is_pipe(i))) {
807 if (iter_is_iovec(i))
809 iterate_and_advance(i, bytes, v,
810 copyin((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
811 memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
812 v.bv_offset, v.bv_len),
813 memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
814 memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
815 v.bv_offset, v.bv_len)
820 EXPORT_SYMBOL(_copy_from_iter);
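/*
 * Editor's illustrative sketch (not part of the upstream file): the mirror
 * image of the read example above - a write()-style handler pulling data
 * from the caller's iterator into a kernel buffer.  Names are hypothetical.
 */
static ssize_t __maybe_unused example_write_to_kernel_buf(void *kbuf,
		size_t len, struct iov_iter *from)
{
	size_t copied = copy_from_iter(kbuf, len, from);

	if (!copied && len)
		return -EFAULT;
	return copied;
}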
822 size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
825 if (unlikely(iov_iter_is_pipe(i))) {
829 iterate_and_advance(i, bytes, v,
830 __copy_from_user_inatomic_nocache((to += v.iov_len) - v.iov_len,
831 v.iov_base, v.iov_len),
832 memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
833 v.bv_offset, v.bv_len),
834 memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
835 memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
836 v.bv_offset, v.bv_len)
841 EXPORT_SYMBOL(_copy_from_iter_nocache);
843 #ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
845 * _copy_from_iter_flushcache - write destination through cpu cache
846 * @addr: destination kernel address
847 * @bytes: total transfer length
848 * @i: source iterator
850 * The pmem driver arranges for filesystem-dax to use this facility via
851 * dax_copy_from_iter() for ensuring that writes to persistent memory
852 * are flushed through the CPU cache. It is differentiated from
853 * _copy_from_iter_nocache() in that it guarantees all data is flushed for
854 * all iterator types. _copy_from_iter_nocache() only attempts to
855 * bypass the cache for the ITER_IOVEC case, and on some archs may use
856 * instructions that strand dirty-data in the cache.
858 size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
861 if (unlikely(iov_iter_is_pipe(i))) {
865 iterate_and_advance(i, bytes, v,
866 __copy_from_user_flushcache((to += v.iov_len) - v.iov_len,
867 v.iov_base, v.iov_len),
868 memcpy_page_flushcache((to += v.bv_len) - v.bv_len, v.bv_page,
869 v.bv_offset, v.bv_len),
870 memcpy_flushcache((to += v.iov_len) - v.iov_len, v.iov_base,
872 memcpy_page_flushcache((to += v.bv_len) - v.bv_len, v.bv_page,
873 v.bv_offset, v.bv_len)
878 EXPORT_SYMBOL_GPL(_copy_from_iter_flushcache);
881 static inline bool page_copy_sane(struct page *page, size_t offset, size_t n)
884 size_t v = n + offset;
887 * The general case needs to access the page order in order
888 * to compute the page size.
889 * However, we mostly deal with order-0 pages and thus can
890 * avoid a possible cache line miss for requests that fit all
891 * page orders.
892 */
893 if (n <= v && v <= PAGE_SIZE)
896 head = compound_head(page);
897 v += (page - head) << PAGE_SHIFT;
899 if (likely(n <= v && v <= (page_size(head))))
905 static size_t __copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
908 if (i->type & (ITER_BVEC | ITER_KVEC | ITER_XARRAY)) {
909 void *kaddr = kmap_atomic(page);
910 size_t wanted = copy_to_iter(kaddr + offset, bytes, i);
911 kunmap_atomic(kaddr);
913 } else if (unlikely(iov_iter_is_discard(i))) {
914 if (unlikely(i->count < bytes))
918 } else if (likely(!iov_iter_is_pipe(i)))
919 return copy_page_to_iter_iovec(page, offset, bytes, i);
921 return copy_page_to_iter_pipe(page, offset, bytes, i);
924 size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
928 if (unlikely(!page_copy_sane(page, offset, bytes)))
930 page += offset / PAGE_SIZE; // first subpage
933 size_t n = __copy_page_to_iter(page, offset,
934 min(bytes, (size_t)PAGE_SIZE - offset), i);
940 if (offset == PAGE_SIZE) {
947 EXPORT_SYMBOL(copy_page_to_iter);
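/*
 * Editor's illustrative sketch (not part of the upstream file): how a read
 * path might feed a run of page-cache pages to the caller's iterator.
 * copy_page_to_iter() advances the iterator itself, so the loop only has to
 * track its own offset and count.  Names are hypothetical.
 */
static ssize_t __maybe_unused example_send_pages(struct page **pages,
		unsigned int nr_pages, size_t offset, size_t count,
		struct iov_iter *to)
{
	ssize_t done = 0;
	unsigned int n;

	for (n = 0; n < nr_pages && count; n++) {
		size_t bytes = min_t(size_t, count, PAGE_SIZE - offset);
		size_t copied = copy_page_to_iter(pages[n], offset, bytes, to);

		done += copied;
		count -= copied;
		offset = 0;	/* only the first page may start mid-page */
		if (copied < bytes)
			break;	/* short copy: the user buffer faulted */
	}
	return done;
}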
949 size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
952 if (unlikely(!page_copy_sane(page, offset, bytes)))
954 if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
958 if (i->type & (ITER_BVEC | ITER_KVEC | ITER_XARRAY)) {
959 void *kaddr = kmap_atomic(page);
960 size_t wanted = _copy_from_iter(kaddr + offset, bytes, i);
961 kunmap_atomic(kaddr);
964 return copy_page_from_iter_iovec(page, offset, bytes, i);
966 EXPORT_SYMBOL(copy_page_from_iter);
968 static size_t pipe_zero(size_t bytes, struct iov_iter *i)
970 struct pipe_inode_info *pipe = i->pipe;
971 unsigned int p_mask = pipe->ring_size - 1;
978 bytes = n = push_pipe(i, bytes, &i_head, &off);
983 size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
984 memzero_page(pipe->bufs[i_head & p_mask].page, off, chunk);
986 i->iov_offset = off + chunk;
995 size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
997 if (unlikely(iov_iter_is_pipe(i)))
998 return pipe_zero(bytes, i);
999 iterate_and_advance(i, bytes, v,
1000 clear_user(v.iov_base, v.iov_len),
1001 memzero_page(v.bv_page, v.bv_offset, v.bv_len),
1002 memset(v.iov_base, 0, v.iov_len),
1003 memzero_page(v.bv_page, v.bv_offset, v.bv_len)
1008 EXPORT_SYMBOL(iov_iter_zero);
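/*
 * Editor's illustrative sketch (not part of the upstream file): a hole in a
 * sparse file can be serviced by zero-filling the iterator directly.  The
 * name is hypothetical.
 */
static size_t __maybe_unused example_read_hole(struct iov_iter *to, size_t len)
{
	return iov_iter_zero(min(len, iov_iter_count(to)), to);
}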
1010 size_t iov_iter_copy_from_user_atomic(struct page *page,
1011 struct iov_iter *i, unsigned long offset, size_t bytes)
1013 char *kaddr = kmap_atomic(page), *p = kaddr + offset;
1014 if (unlikely(!page_copy_sane(page, offset, bytes))) {
1015 kunmap_atomic(kaddr);
1018 if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
1019 kunmap_atomic(kaddr);
1023 iterate_all_kinds(i, bytes, v,
1024 copyin((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
1025 memcpy_from_page((p += v.bv_len) - v.bv_len, v.bv_page,
1026 v.bv_offset, v.bv_len),
1027 memcpy((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
1028 memcpy_from_page((p += v.bv_len) - v.bv_len, v.bv_page,
1029 v.bv_offset, v.bv_len)
1031 kunmap_atomic(kaddr);
1034 EXPORT_SYMBOL(iov_iter_copy_from_user_atomic);
1036 static inline void pipe_truncate(struct iov_iter *i)
1038 struct pipe_inode_info *pipe = i->pipe;
1039 unsigned int p_tail = pipe->tail;
1040 unsigned int p_head = pipe->head;
1041 unsigned int p_mask = pipe->ring_size - 1;
1043 if (!pipe_empty(p_head, p_tail)) {
1044 struct pipe_buffer *buf;
1045 unsigned int i_head = i->head;
1046 size_t off = i->iov_offset;
1049 buf = &pipe->bufs[i_head & p_mask];
1050 buf->len = off - buf->offset;
1053 while (p_head != i_head) {
1055 pipe_buf_release(pipe, &pipe->bufs[p_head & p_mask]);
1058 pipe->head = p_head;
1062 static void pipe_advance(struct iov_iter *i, size_t size)
1064 struct pipe_inode_info *pipe = i->pipe;
1066 struct pipe_buffer *buf;
1067 unsigned int p_mask = pipe->ring_size - 1;
1068 unsigned int i_head = i->head;
1069 size_t off = i->iov_offset, left = size;
1071 if (off) /* make it relative to the beginning of buffer */
1072 left += off - pipe->bufs[i_head & p_mask].offset;
1074 buf = &pipe->bufs[i_head & p_mask];
1075 if (left <= buf->len)
1081 i->iov_offset = buf->offset + left;
1084 /* ... and discard everything past that point */
1088 static void iov_iter_bvec_advance(struct iov_iter *i, size_t size)
1090 struct bvec_iter bi;
1092 bi.bi_size = i->count;
1093 bi.bi_bvec_done = i->iov_offset;
1095 bvec_iter_advance(i->bvec, &bi, size);
1097 i->bvec += bi.bi_idx;
1098 i->nr_segs -= bi.bi_idx;
1099 i->count = bi.bi_size;
1100 i->iov_offset = bi.bi_bvec_done;
1103 void iov_iter_advance(struct iov_iter *i, size_t size)
1105 if (unlikely(i->count < size))
1107 if (unlikely(iov_iter_is_pipe(i))) {
1108 pipe_advance(i, size);
1111 if (unlikely(iov_iter_is_discard(i))) {
1115 if (unlikely(iov_iter_is_xarray(i))) {
1116 i->iov_offset += size;
1120 if (iov_iter_is_bvec(i)) {
1121 iov_iter_bvec_advance(i, size);
1124 iterate_and_advance(i, size, v, 0, 0, 0, 0)
1126 EXPORT_SYMBOL(iov_iter_advance);
1128 void iov_iter_revert(struct iov_iter *i, size_t unroll)
1132 if (WARN_ON(unroll > MAX_RW_COUNT))
1135 if (unlikely(iov_iter_is_pipe(i))) {
1136 struct pipe_inode_info *pipe = i->pipe;
1137 unsigned int p_mask = pipe->ring_size - 1;
1138 unsigned int i_head = i->head;
1139 size_t off = i->iov_offset;
1141 struct pipe_buffer *b = &pipe->bufs[i_head & p_mask];
1142 size_t n = off - b->offset;
1148 if (!unroll && i_head == i->start_head) {
1153 b = &pipe->bufs[i_head & p_mask];
1154 off = b->offset + b->len;
1156 i->iov_offset = off;
1161 if (unlikely(iov_iter_is_discard(i)))
1163 if (unroll <= i->iov_offset) {
1164 i->iov_offset -= unroll;
1167 unroll -= i->iov_offset;
1168 if (iov_iter_is_xarray(i)) {
1169 BUG(); /* We should never go beyond the start of the specified
1170 * range since we might then be straying into pages that
1171 * aren't pinned.
1172 */
1173 } else if (iov_iter_is_bvec(i)) {
1174 const struct bio_vec *bvec = i->bvec;
1176 size_t n = (--bvec)->bv_len;
1180 i->iov_offset = n - unroll;
1185 } else { /* same logics for iovec and kvec */
1186 const struct iovec *iov = i->iov;
1188 size_t n = (--iov)->iov_len;
1192 i->iov_offset = n - unroll;
1199 EXPORT_SYMBOL(iov_iter_revert);
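/*
 * Editor's illustrative sketch (not part of the upstream file): the usual
 * advance/revert discipline.  A lower layer may consume part of the
 * iterator even when it fails, so the caller measures the consumption and
 * winds it back before retrying.  Names are hypothetical.
 */
static ssize_t __maybe_unused example_retry_once(struct iov_iter *from,
		ssize_t (*send)(struct iov_iter *))
{
	size_t before = iov_iter_count(from);
	ssize_t ret = send(from);

	if (ret < 0) {
		iov_iter_revert(from, before - iov_iter_count(from));
		ret = send(from);
	}
	return ret;
}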
1202 * Return the count of just the current iov_iter segment.
1204 size_t iov_iter_single_seg_count(const struct iov_iter *i)
1206 if (unlikely(iov_iter_is_pipe(i)))
1207 return i->count; // it is a silly place, anyway
1208 if (i->nr_segs == 1)
1210 if (unlikely(iov_iter_is_discard(i) || iov_iter_is_xarray(i)))
1212 if (iov_iter_is_bvec(i))
1213 return min(i->count, i->bvec->bv_len - i->iov_offset);
1215 return min(i->count, i->iov->iov_len - i->iov_offset);
1217 EXPORT_SYMBOL(iov_iter_single_seg_count);
1219 void iov_iter_kvec(struct iov_iter *i, unsigned int direction,
1220 const struct kvec *kvec, unsigned long nr_segs,
1223 WARN_ON(direction & ~(READ | WRITE));
1224 i->type = ITER_KVEC | (direction & (READ | WRITE));
1226 i->nr_segs = nr_segs;
1230 EXPORT_SYMBOL(iov_iter_kvec);
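/*
 * Editor's illustrative sketch (not part of the upstream file): wrapping a
 * plain kernel buffer for kernel-internal I/O, as kernel_read() and friends
 * do.  Names are hypothetical; the kvec must outlive the iterator's use.
 */
static void __maybe_unused example_iter_over_kernel_buf(struct iov_iter *iter,
		struct kvec *kv, void *buf, size_t len)
{
	kv->iov_base = buf;
	kv->iov_len = len;
	/* READ: the kernel buffer is the destination of the transfer. */
	iov_iter_kvec(iter, READ, kv, 1, len);
}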
1232 void iov_iter_bvec(struct iov_iter *i, unsigned int direction,
1233 const struct bio_vec *bvec, unsigned long nr_segs,
1236 WARN_ON(direction & ~(READ | WRITE));
1237 i->type = ITER_BVEC | (direction & (READ | WRITE));
1239 i->nr_segs = nr_segs;
1243 EXPORT_SYMBOL(iov_iter_bvec);
1245 void iov_iter_pipe(struct iov_iter *i, unsigned int direction,
1246 struct pipe_inode_info *pipe,
1249 BUG_ON(direction != READ);
1250 WARN_ON(pipe_full(pipe->head, pipe->tail, pipe->ring_size));
1251 i->type = ITER_PIPE | READ;
1253 i->head = pipe->head;
1256 i->start_head = i->head;
1258 EXPORT_SYMBOL(iov_iter_pipe);
1261 * iov_iter_xarray - Initialise an I/O iterator to use the pages in an xarray
1262 * @i: The iterator to initialise.
1263 * @direction: The direction of the transfer.
1264 * @xarray: The xarray to access.
1265 * @start: The start file position.
1266 * @count: The size of the I/O buffer in bytes.
1268 * Set up an I/O iterator to either draw data out of the pages attached to an
1269 * inode or to inject data into those pages. The pages *must* be prevented
1270 * from evaporation, either by taking a ref on them or locking them by the
1271 * caller.
1272 */
1273 void iov_iter_xarray(struct iov_iter *i, unsigned int direction,
1274 struct xarray *xarray, loff_t start, size_t count)
1276 BUG_ON(direction & ~1);
1277 i->type = ITER_XARRAY | (direction & (READ | WRITE));
1279 i->xarray_start = start;
1283 EXPORT_SYMBOL(iov_iter_xarray);
1286 * iov_iter_discard - Initialise an I/O iterator that discards data
1287 * @i: The iterator to initialise.
1288 * @direction: The direction of the transfer.
1289 * @count: The size of the I/O buffer in bytes.
1291 * Set up an I/O iterator that just discards everything that's written to it.
1292 * It's only available as a READ iterator.
1294 void iov_iter_discard(struct iov_iter *i, unsigned int direction, size_t count)
1296 BUG_ON(direction != READ);
1297 i->type = ITER_DISCARD | READ;
1301 EXPORT_SYMBOL(iov_iter_discard);
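/*
 * Editor's illustrative sketch (not part of the upstream file): a discard
 * iterator makes it cheap to throw bytes away, e.g. to skip over data a
 * receiver is not interested in.  The name is hypothetical.
 */
static void __maybe_unused example_setup_drain(struct iov_iter *iter,
		size_t to_skip)
{
	iov_iter_discard(iter, READ, to_skip);
}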
1303 unsigned long iov_iter_alignment(const struct iov_iter *i)
1305 unsigned long res = 0;
1306 size_t size = i->count;
1308 if (unlikely(iov_iter_is_pipe(i))) {
1309 unsigned int p_mask = i->pipe->ring_size - 1;
1311 if (size && i->iov_offset && allocated(&i->pipe->bufs[i->head & p_mask]))
1312 return size | i->iov_offset;
1315 if (unlikely(iov_iter_is_xarray(i)))
1316 return (i->xarray_start + i->iov_offset) | i->count;
1317 iterate_all_kinds(i, size, v,
1318 (res |= (unsigned long)v.iov_base | v.iov_len, 0),
1319 res |= v.bv_offset | v.bv_len,
1320 res |= (unsigned long)v.iov_base | v.iov_len,
1321 res |= v.bv_offset | v.bv_len
1325 EXPORT_SYMBOL(iov_iter_alignment);
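/*
 * Editor's illustrative sketch (not part of the upstream file): direct-I/O
 * paths commonly reject an iterator whose addresses or lengths are not
 * aligned to the device block size.  The name and @blkbits parameter are
 * hypothetical.
 */
static bool __maybe_unused example_dio_aligned(const struct iov_iter *iter,
		unsigned int blkbits)
{
	unsigned long mask = (1UL << blkbits) - 1;

	return (iov_iter_alignment(iter) & mask) == 0;
}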
1327 unsigned long iov_iter_gap_alignment(const struct iov_iter *i)
1329 unsigned long res = 0;
1330 size_t size = i->count;
1332 if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
1337 iterate_all_kinds(i, size, v,
1338 (res |= (!res ? 0 : (unsigned long)v.iov_base) |
1339 (size != v.iov_len ? size : 0), 0),
1340 (res |= (!res ? 0 : (unsigned long)v.bv_offset) |
1341 (size != v.bv_len ? size : 0)),
1342 (res |= (!res ? 0 : (unsigned long)v.iov_base) |
1343 (size != v.iov_len ? size : 0)),
1344 (res |= (!res ? 0 : (unsigned long)v.bv_offset) |
1345 (size != v.bv_len ? size : 0))
1349 EXPORT_SYMBOL(iov_iter_gap_alignment);
1351 static inline ssize_t __pipe_get_pages(struct iov_iter *i,
1353 struct page **pages,
1357 struct pipe_inode_info *pipe = i->pipe;
1358 unsigned int p_mask = pipe->ring_size - 1;
1359 ssize_t n = push_pipe(i, maxsize, &iter_head, start);
1366 get_page(*pages++ = pipe->bufs[iter_head & p_mask].page);
1374 static ssize_t pipe_get_pages(struct iov_iter *i,
1375 struct page **pages, size_t maxsize, unsigned maxpages,
1378 unsigned int iter_head, npages;
1387 data_start(i, &iter_head, start);
1388 /* Amount of free space: some of this one + all after this one */
1389 npages = pipe_space_for_user(iter_head, i->pipe->tail, i->pipe);
1390 capacity = min(npages, maxpages) * PAGE_SIZE - *start;
1392 return __pipe_get_pages(i, min(maxsize, capacity), pages, iter_head, start);
1395 static ssize_t iter_xarray_populate_pages(struct page **pages, struct xarray *xa,
1396 pgoff_t index, unsigned int nr_pages)
1398 XA_STATE(xas, xa, index);
1400 unsigned int ret = 0;
1403 for (page = xas_load(&xas); page; page = xas_next(&xas)) {
1404 if (xas_retry(&xas, page))
1407 /* Has the page moved or been split? */
1408 if (unlikely(page != xas_reload(&xas))) {
1413 pages[ret] = find_subpage(page, xas.xa_index);
1414 get_page(pages[ret]);
1415 if (++ret == nr_pages)
1422 static ssize_t iter_xarray_get_pages(struct iov_iter *i,
1423 struct page **pages, size_t maxsize,
1424 unsigned maxpages, size_t *_start_offset)
1426 unsigned nr, offset;
1427 pgoff_t index, count;
1428 size_t size = maxsize, actual;
1431 if (!size || !maxpages)
1434 pos = i->xarray_start + i->iov_offset;
1435 index = pos >> PAGE_SHIFT;
1436 offset = pos & ~PAGE_MASK;
1437 *_start_offset = offset;
1440 if (size > PAGE_SIZE - offset) {
1441 size -= PAGE_SIZE - offset;
1442 count += size >> PAGE_SHIFT;
1448 if (count > maxpages)
1451 nr = iter_xarray_populate_pages(pages, i->xarray, index, count);
1455 actual = PAGE_SIZE * nr;
1457 if (nr == count && size > 0) {
1458 unsigned last_offset = (nr > 1) ? 0 : offset;
1459 actual -= PAGE_SIZE - (last_offset + size);
1464 ssize_t iov_iter_get_pages(struct iov_iter *i,
1465 struct page **pages, size_t maxsize, unsigned maxpages,
1468 if (maxsize > i->count)
1471 if (unlikely(iov_iter_is_pipe(i)))
1472 return pipe_get_pages(i, pages, maxsize, maxpages, start);
1473 if (unlikely(iov_iter_is_xarray(i)))
1474 return iter_xarray_get_pages(i, pages, maxsize, maxpages, start);
1475 if (unlikely(iov_iter_is_discard(i)))
1478 iterate_all_kinds(i, maxsize, v, ({
1479 unsigned long addr = (unsigned long)v.iov_base;
1480 size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
1484 if (len > maxpages * PAGE_SIZE)
1485 len = maxpages * PAGE_SIZE;
1486 addr &= ~(PAGE_SIZE - 1);
1487 n = DIV_ROUND_UP(len, PAGE_SIZE);
1488 res = get_user_pages_fast(addr, n,
1489 iov_iter_rw(i) != WRITE ? FOLL_WRITE : 0,
1491 if (unlikely(res < 0))
1493 return (res == n ? len : res * PAGE_SIZE) - *start;
1495 /* can't be more than PAGE_SIZE */
1496 *start = v.bv_offset;
1497 get_page(*pages = v.bv_page);
1506 EXPORT_SYMBOL(iov_iter_get_pages);
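/*
 * Editor's illustrative sketch (not part of the upstream file): grabbing one
 * batch of pages for zero-copy I/O.  iov_iter_get_pages() takes a reference
 * on each page but does not advance the iterator, so the caller does that
 * (and later drops the references).  Names are hypothetical.
 */
static ssize_t __maybe_unused example_pin_segment(struct iov_iter *iter,
		struct page **pages, unsigned int maxpages)
{
	size_t offset;
	ssize_t bytes;

	bytes = iov_iter_get_pages(iter, pages, iov_iter_count(iter),
				   maxpages, &offset);
	if (bytes <= 0)
		return bytes;
	iov_iter_advance(iter, bytes);
	return bytes;
}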
1508 static struct page **get_pages_array(size_t n)
1510 return kvmalloc_array(n, sizeof(struct page *), GFP_KERNEL);
1513 static ssize_t pipe_get_pages_alloc(struct iov_iter *i,
1514 struct page ***pages, size_t maxsize,
1518 unsigned int iter_head, npages;
1527 data_start(i, &iter_head, start);
1528 /* Amount of free space: some of this one + all after this one */
1529 npages = pipe_space_for_user(iter_head, i->pipe->tail, i->pipe);
1530 n = npages * PAGE_SIZE - *start;
1534 npages = DIV_ROUND_UP(maxsize + *start, PAGE_SIZE);
1535 p = get_pages_array(npages);
1538 n = __pipe_get_pages(i, maxsize, p, iter_head, start);
1546 static ssize_t iter_xarray_get_pages_alloc(struct iov_iter *i,
1547 struct page ***pages, size_t maxsize,
1548 size_t *_start_offset)
1551 unsigned nr, offset;
1552 pgoff_t index, count;
1553 size_t size = maxsize, actual;
1559 pos = i->xarray_start + i->iov_offset;
1560 index = pos >> PAGE_SHIFT;
1561 offset = pos & ~PAGE_MASK;
1562 *_start_offset = offset;
1565 if (size > PAGE_SIZE - offset) {
1566 size -= PAGE_SIZE - offset;
1567 count += size >> PAGE_SHIFT;
1573 p = get_pages_array(count);
1578 nr = iter_xarray_populate_pages(p, i->xarray, index, count);
1582 actual = PAGE_SIZE * nr;
1584 if (nr == count && size > 0) {
1585 unsigned last_offset = (nr > 1) ? 0 : offset;
1586 actual -= PAGE_SIZE - (last_offset + size);
1591 ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
1592 struct page ***pages, size_t maxsize,
1597 if (maxsize > i->count)
1600 if (unlikely(iov_iter_is_pipe(i)))
1601 return pipe_get_pages_alloc(i, pages, maxsize, start);
1602 if (unlikely(iov_iter_is_xarray(i)))
1603 return iter_xarray_get_pages_alloc(i, pages, maxsize, start);
1604 if (unlikely(iov_iter_is_discard(i)))
1607 iterate_all_kinds(i, maxsize, v, ({
1608 unsigned long addr = (unsigned long)v.iov_base;
1609 size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
1613 addr &= ~(PAGE_SIZE - 1);
1614 n = DIV_ROUND_UP(len, PAGE_SIZE);
1615 p = get_pages_array(n);
1618 res = get_user_pages_fast(addr, n,
1619 iov_iter_rw(i) != WRITE ? FOLL_WRITE : 0, p);
1620 if (unlikely(res < 0)) {
1625 return (res == n ? len : res * PAGE_SIZE) - *start;
1627 /* can't be more than PAGE_SIZE */
1628 *start = v.bv_offset;
1629 *pages = p = get_pages_array(1);
1632 get_page(*p = v.bv_page);
1640 EXPORT_SYMBOL(iov_iter_get_pages_alloc);
1642 size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum,
1649 if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
1653 iterate_and_advance(i, bytes, v, ({
1654 next = csum_and_copy_from_user(v.iov_base,
1655 (to += v.iov_len) - v.iov_len,
1658 sum = csum_block_add(sum, next, off);
1661 next ? 0 : v.iov_len;
1663 char *p = kmap_atomic(v.bv_page);
1664 sum = csum_and_memcpy((to += v.bv_len) - v.bv_len,
1665 p + v.bv_offset, v.bv_len,
1670 sum = csum_and_memcpy((to += v.iov_len) - v.iov_len,
1671 v.iov_base, v.iov_len,
1675 char *p = kmap_atomic(v.bv_page);
1676 sum = csum_and_memcpy((to += v.bv_len) - v.bv_len,
1677 p + v.bv_offset, v.bv_len,
1686 EXPORT_SYMBOL(csum_and_copy_from_iter);
1688 size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *_csstate,
1691 struct csum_state *csstate = _csstate;
1692 const char *from = addr;
1696 if (unlikely(iov_iter_is_pipe(i)))
1697 return csum_and_copy_to_pipe_iter(addr, bytes, _csstate, i);
1699 sum = csstate->csum;
1701 if (unlikely(iov_iter_is_discard(i))) {
1702 WARN_ON(1); /* for now */
1705 iterate_and_advance(i, bytes, v, ({
1706 next = csum_and_copy_to_user((from += v.iov_len) - v.iov_len,
1710 sum = csum_block_add(sum, next, off);
1713 next ? 0 : v.iov_len;
1715 char *p = kmap_atomic(v.bv_page);
1716 sum = csum_and_memcpy(p + v.bv_offset,
1717 (from += v.bv_len) - v.bv_len,
1718 v.bv_len, sum, off);
1722 sum = csum_and_memcpy(v.iov_base,
1723 (from += v.iov_len) - v.iov_len,
1724 v.iov_len, sum, off);
1727 char *p = kmap_atomic(v.bv_page);
1728 sum = csum_and_memcpy(p + v.bv_offset,
1729 (from += v.bv_len) - v.bv_len,
1730 v.bv_len, sum, off);
1735 csstate->csum = sum;
1739 EXPORT_SYMBOL(csum_and_copy_to_iter);
1741 size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp,
1744 #ifdef CONFIG_CRYPTO_HASH
1745 struct ahash_request *hash = hashp;
1746 struct scatterlist sg;
1749 copied = copy_to_iter(addr, bytes, i);
1750 sg_init_one(&sg, addr, copied);
1751 ahash_request_set_crypt(hash, &sg, NULL, copied);
1752 crypto_ahash_update(hash);
1758 EXPORT_SYMBOL(hash_and_copy_to_iter);
1760 int iov_iter_npages(const struct iov_iter *i, int maxpages)
1762 size_t size = i->count;
1767 if (unlikely(iov_iter_is_discard(i)))
1770 if (unlikely(iov_iter_is_pipe(i))) {
1771 struct pipe_inode_info *pipe = i->pipe;
1772 unsigned int iter_head;
1778 data_start(i, &iter_head, &off);
1779 /* some of this one + all after this one */
1780 npages = pipe_space_for_user(iter_head, pipe->tail, pipe);
1781 if (npages >= maxpages)
1783 } else if (unlikely(iov_iter_is_xarray(i))) {
1786 offset = (i->xarray_start + i->iov_offset) & ~PAGE_MASK;
1789 if (size > PAGE_SIZE - offset) {
1790 size -= PAGE_SIZE - offset;
1791 npages += size >> PAGE_SHIFT;
1796 if (npages >= maxpages)
1798 } else iterate_all_kinds(i, size, v, ({
1799 unsigned long p = (unsigned long)v.iov_base;
1800 npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
1802 if (npages >= maxpages)
1806 if (npages >= maxpages)
1809 unsigned long p = (unsigned long)v.iov_base;
1810 npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
1812 if (npages >= maxpages)
1819 EXPORT_SYMBOL(iov_iter_npages);
1821 const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags)
1824 if (unlikely(iov_iter_is_pipe(new))) {
1828 if (unlikely(iov_iter_is_discard(new) || iov_iter_is_xarray(new)))
1830 if (iov_iter_is_bvec(new))
1831 return new->bvec = kmemdup(new->bvec,
1832 new->nr_segs * sizeof(struct bio_vec),
1835 /* iovec and kvec have identical layout */
1836 return new->iov = kmemdup(new->iov,
1837 new->nr_segs * sizeof(struct iovec),
1840 EXPORT_SYMBOL(dup_iter);
1842 static int copy_compat_iovec_from_user(struct iovec *iov,
1843 const struct iovec __user *uvec, unsigned long nr_segs)
1845 const struct compat_iovec __user *uiov =
1846 (const struct compat_iovec __user *)uvec;
1847 int ret = -EFAULT, i;
1849 if (!user_access_begin(uiov, nr_segs * sizeof(*uiov)))
1852 for (i = 0; i < nr_segs; i++) {
1856 unsafe_get_user(len, &uiov[i].iov_len, uaccess_end);
1857 unsafe_get_user(buf, &uiov[i].iov_base, uaccess_end);
1859 /* check for compat_size_t not fitting in compat_ssize_t .. */
1864 iov[i].iov_base = compat_ptr(buf);
1865 iov[i].iov_len = len;
1874 static int copy_iovec_from_user(struct iovec *iov,
1875 const struct iovec __user *uvec, unsigned long nr_segs)
1879 if (copy_from_user(iov, uvec, nr_segs * sizeof(*uvec)))
1881 for (seg = 0; seg < nr_segs; seg++) {
1882 if ((ssize_t)iov[seg].iov_len < 0)
1889 struct iovec *iovec_from_user(const struct iovec __user *uvec,
1890 unsigned long nr_segs, unsigned long fast_segs,
1891 struct iovec *fast_iov, bool compat)
1893 struct iovec *iov = fast_iov;
1897 * SuS says "The readv() function *may* fail if the iovcnt argument was
1898 * less than or equal to 0, or greater than {IOV_MAX}." Linux has
1899 * traditionally returned zero for zero segments, so...
1903 if (nr_segs > UIO_MAXIOV)
1904 return ERR_PTR(-EINVAL);
1905 if (nr_segs > fast_segs) {
1906 iov = kmalloc_array(nr_segs, sizeof(struct iovec), GFP_KERNEL);
1908 return ERR_PTR(-ENOMEM);
1912 ret = copy_compat_iovec_from_user(iov, uvec, nr_segs);
1914 ret = copy_iovec_from_user(iov, uvec, nr_segs);
1916 if (iov != fast_iov)
1918 return ERR_PTR(ret);
1924 ssize_t __import_iovec(int type, const struct iovec __user *uvec,
1925 unsigned nr_segs, unsigned fast_segs, struct iovec **iovp,
1926 struct iov_iter *i, bool compat)
1928 ssize_t total_len = 0;
1932 iov = iovec_from_user(uvec, nr_segs, fast_segs, *iovp, compat);
1935 return PTR_ERR(iov);
1939 * According to the Single Unix Specification we should return EINVAL if
1940 * an element length is < 0 when cast to ssize_t or if the total length
1941 * would overflow the ssize_t return value of the system call.
1943 * Linux caps all read/write calls to MAX_RW_COUNT, and avoids the
1944 * overflow case.
1945 */
1946 for (seg = 0; seg < nr_segs; seg++) {
1947 ssize_t len = (ssize_t)iov[seg].iov_len;
1949 if (!access_ok(iov[seg].iov_base, len)) {
1956 if (len > MAX_RW_COUNT - total_len) {
1957 len = MAX_RW_COUNT - total_len;
1958 iov[seg].iov_len = len;
1963 iov_iter_init(i, type, iov, nr_segs, total_len);
1972 * import_iovec() - Copy an array of &struct iovec from userspace
1973 * into the kernel, check that it is valid, and initialize a new
1974 * &struct iov_iter iterator to access it.
1976 * @type: One of %READ or %WRITE.
1977 * @uvec: Pointer to the userspace array.
1978 * @nr_segs: Number of elements in userspace array.
1979 * @fast_segs: Number of elements in @iov.
1980 * @iovp: (input and output parameter) Pointer to pointer to (usually small
1981 * on-stack) kernel array.
1982 * @i: Pointer to iterator that will be initialized on success.
1984 * If the array pointed to by *@iovp is large enough to hold all @nr_segs,
1985 * then this function places %NULL in *@iovp on return. Otherwise, a new
1986 * array will be allocated and the result placed in *@iovp. This means that
1987 * the caller may call kfree() on *@iovp regardless of whether the small
1988 * on-stack array was used or not (and regardless of whether this function
1989 * returns an error or not).
1991 * Return: Negative error code on error, bytes imported on success
1993 ssize_t import_iovec(int type, const struct iovec __user *uvec,
1994 unsigned nr_segs, unsigned fast_segs,
1995 struct iovec **iovp, struct iov_iter *i)
1997 return __import_iovec(type, uvec, nr_segs, fast_segs, iovp, i,
1998 in_compat_syscall());
2000 EXPORT_SYMBOL(import_iovec);
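/*
 * Editor's illustrative sketch (not part of the upstream file): the
 * readv(2)-style import dance with a small on-stack iovec array, mirroring
 * what do_readv() does.  Names are hypothetical.
 */
static ssize_t __maybe_unused example_import_readv(const struct iovec __user *uvec,
		unsigned int nr_segs, struct iov_iter *iter)
{
	struct iovec iovstack[UIO_FASTIOV];
	struct iovec *iov = iovstack;
	ssize_t ret;

	ret = import_iovec(READ, uvec, nr_segs, ARRAY_SIZE(iovstack),
			   &iov, iter);
	if (ret < 0)
		return ret;
	/* ... perform the I/O described by *iter here ... */
	kfree(iov);	/* NULL (a no-op) when the on-stack array was used */
	return ret;
}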
2002 int import_single_range(int rw, void __user *buf, size_t len,
2003 struct iovec *iov, struct iov_iter *i)
2005 if (len > MAX_RW_COUNT)
2007 if (unlikely(!access_ok(buf, len)))
2010 iov->iov_base = buf;
2012 iov_iter_init(i, rw, iov, 1, len);
2015 EXPORT_SYMBOL(import_single_range);
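/*
 * Editor's illustrative sketch (not part of the upstream file): the single
 * buffer variant used by plain write(2)-style paths.  The iovec must stay
 * live for as long as the iterator is used.  Names are hypothetical.
 */
static ssize_t __maybe_unused example_write_single(void __user *buf, size_t len,
		struct iov_iter *iter)
{
	struct iovec iov;
	int ret;

	ret = import_single_range(WRITE, buf, len, &iov, iter);
	if (ret)
		return ret;
	/* ... consume *iter here, while 'iov' is still in scope ... */
	return iov_iter_count(iter);
}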