1 // SPDX-License-Identifier: GPL-2.0-only
2 #include <crypto/hash.h>
3 #include <linux/export.h>
4 #include <linux/bvec.h>
5 #include <linux/fault-inject-usercopy.h>
7 #include <linux/pagemap.h>
8 #include <linux/slab.h>
9 #include <linux/vmalloc.h>
10 #include <linux/splice.h>
11 #include <linux/compat.h>
12 #include <net/checksum.h>
13 #include <linux/scatterlist.h>
14 #include <linux/instrumented.h>
16 #define PIPE_PARANOIA /* for now */
18 #define iterate_iovec(i, n, __v, __p, skip, STEP) { \
22 __v.iov_len = min(n, __p->iov_len - skip); \
23 if (likely(__v.iov_len)) { \
24 __v.iov_base = __p->iov_base + skip; \
26 __v.iov_len -= left; \
27 skip += __v.iov_len; \
32 while (unlikely(!left && n)) { \
34 __v.iov_len = min(n, __p->iov_len); \
35 if (unlikely(!__v.iov_len)) \
37 __v.iov_base = __p->iov_base; \
39 __v.iov_len -= left; \
46 #define iterate_kvec(i, n, __v, __p, skip, STEP) { \
49 __v.iov_len = min(n, __p->iov_len - skip); \
50 if (likely(__v.iov_len)) { \
51 __v.iov_base = __p->iov_base + skip; \
53 skip += __v.iov_len; \
56 while (unlikely(n)) { \
58 __v.iov_len = min(n, __p->iov_len); \
59 if (unlikely(!__v.iov_len)) \
61 __v.iov_base = __p->iov_base; \
69 #define iterate_bvec(i, n, __v, __bi, skip, STEP) { \
70 struct bvec_iter __start; \
71 __start.bi_size = n; \
72 __start.bi_bvec_done = skip; \
74 for_each_bvec(__v, i->bvec, __bi, __start) { \
79 #define iterate_xarray(i, n, __v, skip, STEP) { \
80 struct page *head = NULL; \
81 size_t wanted = n, seg, offset; \
82 loff_t start = i->xarray_start + skip; \
83 pgoff_t index = start >> PAGE_SHIFT; \
86 XA_STATE(xas, i->xarray, index); \
89 xas_for_each(&xas, head, ULONG_MAX) { \
90 if (xas_retry(&xas, head)) \
92 if (WARN_ON(xa_is_value(head))) \
94 if (WARN_ON(PageHuge(head))) \
96 for (j = (head->index < index) ? index - head->index : 0; \
97 j < thp_nr_pages(head); j++) { \
98 __v.bv_page = head + j; \
99 offset = (i->xarray_start + skip) & ~PAGE_MASK; \
100 seg = PAGE_SIZE - offset; \
101 __v.bv_offset = offset; \
102 __v.bv_len = min(n, seg); \
105 skip += __v.bv_len; \
116 #define iterate_all_kinds(i, n, v, I, B, K, X) { \
118 size_t skip = i->iov_offset; \
119 if (unlikely(i->type & ITER_BVEC)) { \
121 struct bvec_iter __bi; \
122 iterate_bvec(i, n, v, __bi, skip, (B)) \
123 } else if (unlikely(i->type & ITER_KVEC)) { \
124 const struct kvec *kvec; \
126 iterate_kvec(i, n, v, kvec, skip, (K)) \
127 } else if (unlikely(i->type & ITER_DISCARD)) { \
128 } else if (unlikely(i->type & ITER_XARRAY)) { \
130 iterate_xarray(i, n, v, skip, (X)); \
132 const struct iovec *iov; \
134 iterate_iovec(i, n, v, iov, skip, (I)) \
139 #define iterate_and_advance(i, n, v, I, B, K, X) { \
140 if (unlikely(i->count < n)) \
143 size_t skip = i->iov_offset; \
144 if (unlikely(i->type & ITER_BVEC)) { \
145 const struct bio_vec *bvec = i->bvec; \
147 struct bvec_iter __bi; \
148 iterate_bvec(i, n, v, __bi, skip, (B)) \
149 i->bvec = __bvec_iter_bvec(i->bvec, __bi); \
150 i->nr_segs -= i->bvec - bvec; \
151 skip = __bi.bi_bvec_done; \
152 } else if (unlikely(i->type & ITER_KVEC)) { \
153 const struct kvec *kvec; \
155 iterate_kvec(i, n, v, kvec, skip, (K)) \
156 if (skip == kvec->iov_len) { \
160 i->nr_segs -= kvec - i->kvec; \
162 } else if (unlikely(i->type & ITER_DISCARD)) { \
164 } else if (unlikely(i->type & ITER_XARRAY)) { \
166 iterate_xarray(i, n, v, skip, (X)) \
168 const struct iovec *iov; \
170 iterate_iovec(i, n, v, iov, skip, (I)) \
171 if (skip == iov->iov_len) { \
175 i->nr_segs -= iov - i->iov; \
179 i->iov_offset = skip; \
183 static int copyout(void __user *to, const void *from, size_t n)
185 if (should_fail_usercopy())
187 if (access_ok(to, n)) {
188 instrument_copy_to_user(to, from, n);
189 n = raw_copy_to_user(to, from, n);
194 static int copyin(void *to, const void __user *from, size_t n)
196 if (should_fail_usercopy())
198 if (access_ok(from, n)) {
199 instrument_copy_from_user(to, from, n);
200 n = raw_copy_from_user(to, from, n);
205 static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t bytes,
208 size_t skip, copy, left, wanted;
209 const struct iovec *iov;
213 if (unlikely(bytes > i->count))
216 if (unlikely(!bytes))
222 skip = i->iov_offset;
223 buf = iov->iov_base + skip;
224 copy = min(bytes, iov->iov_len - skip);
226 if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_pages_writeable(buf, copy)) {
227 kaddr = kmap_atomic(page);
228 from = kaddr + offset;
230 /* first chunk, usually the only one */
231 left = copyout(buf, from, copy);
237 while (unlikely(!left && bytes)) {
240 copy = min(bytes, iov->iov_len);
241 left = copyout(buf, from, copy);
247 if (likely(!bytes)) {
248 kunmap_atomic(kaddr);
251 offset = from - kaddr;
253 kunmap_atomic(kaddr);
254 copy = min(bytes, iov->iov_len - skip);
256 /* Too bad - revert to non-atomic kmap */
259 from = kaddr + offset;
260 left = copyout(buf, from, copy);
265 while (unlikely(!left && bytes)) {
268 copy = min(bytes, iov->iov_len);
269 left = copyout(buf, from, copy);
278 if (skip == iov->iov_len) {
282 i->count -= wanted - bytes;
283 i->nr_segs -= iov - i->iov;
285 i->iov_offset = skip;
286 return wanted - bytes;
289 static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t bytes,
292 size_t skip, copy, left, wanted;
293 const struct iovec *iov;
297 if (unlikely(bytes > i->count))
300 if (unlikely(!bytes))
306 skip = i->iov_offset;
307 buf = iov->iov_base + skip;
308 copy = min(bytes, iov->iov_len - skip);
310 if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_pages_readable(buf, copy)) {
311 kaddr = kmap_atomic(page);
314 /* first chunk, usually the only one */
315 left = copyin(to, buf, copy);
321 while (unlikely(!left && bytes)) {
324 copy = min(bytes, iov->iov_len);
325 left = copyin(to, buf, copy);
331 if (likely(!bytes)) {
332 kunmap_atomic(kaddr);
337 kunmap_atomic(kaddr);
338 copy = min(bytes, iov->iov_len - skip);
340 /* Too bad - revert to non-atomic kmap */
344 left = copyin(to, buf, copy);
349 while (unlikely(!left && bytes)) {
352 copy = min(bytes, iov->iov_len);
353 left = copyin(to, buf, copy);
362 if (skip == iov->iov_len) {
366 i->count -= wanted - bytes;
367 i->nr_segs -= iov - i->iov;
369 i->iov_offset = skip;
370 return wanted - bytes;
374 static bool sanity(const struct iov_iter *i)
376 struct pipe_inode_info *pipe = i->pipe;
377 unsigned int p_head = pipe->head;
378 unsigned int p_tail = pipe->tail;
379 unsigned int p_mask = pipe->ring_size - 1;
380 unsigned int p_occupancy = pipe_occupancy(p_head, p_tail);
381 unsigned int i_head = i->head;
385 struct pipe_buffer *p;
386 if (unlikely(p_occupancy == 0))
387 goto Bad; // pipe must be non-empty
388 if (unlikely(i_head != p_head - 1))
389 goto Bad; // must be at the last buffer...
391 p = &pipe->bufs[i_head & p_mask];
392 if (unlikely(p->offset + p->len != i->iov_offset))
393 goto Bad; // ... at the end of segment
395 if (i_head != p_head)
396 goto Bad; // must be right after the last buffer
400 printk(KERN_ERR "idx = %d, offset = %zd\n", i_head, i->iov_offset);
401 printk(KERN_ERR "head = %d, tail = %d, buffers = %d\n",
402 p_head, p_tail, pipe->ring_size);
403 for (idx = 0; idx < pipe->ring_size; idx++)
404 printk(KERN_ERR "[%p %p %d %d]\n",
406 pipe->bufs[idx].page,
407 pipe->bufs[idx].offset,
408 pipe->bufs[idx].len);
413 #define sanity(i) true
416 static size_t copy_page_to_iter_pipe(struct page *page, size_t offset, size_t bytes,
419 struct pipe_inode_info *pipe = i->pipe;
420 struct pipe_buffer *buf;
421 unsigned int p_tail = pipe->tail;
422 unsigned int p_mask = pipe->ring_size - 1;
423 unsigned int i_head = i->head;
426 if (unlikely(bytes > i->count))
429 if (unlikely(!bytes))
436 buf = &pipe->bufs[i_head & p_mask];
438 if (offset == off && buf->page == page) {
439 /* merge with the last one */
441 i->iov_offset += bytes;
445 buf = &pipe->bufs[i_head & p_mask];
447 if (pipe_full(i_head, p_tail, pipe->max_usage))
450 buf->ops = &page_cache_pipe_buf_ops;
453 buf->offset = offset;
456 pipe->head = i_head + 1;
457 i->iov_offset = offset + bytes;
465 * Fault in one or more iovecs of the given iov_iter, to a maximum length of
466 * bytes. For each iovec, fault in each page that constitutes the iovec.
468 * Return 0 on success, or non-zero if the memory could not be accessed (i.e.
469 * because it is an invalid address).
471 int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
473 size_t skip = i->iov_offset;
474 const struct iovec *iov;
478 if (!(i->type & (ITER_BVEC|ITER_KVEC))) {
479 iterate_iovec(i, bytes, v, iov, skip, ({
480 err = fault_in_pages_readable(v.iov_base, v.iov_len);
487 EXPORT_SYMBOL(iov_iter_fault_in_readable);
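/*
 * Illustrative sketch (not part of this file): callers normally fault the
 * user pages in *before* taking page locks, so the later atomic copy cannot
 * sleep on a fault.  Names such as "iter" and "chunk" are hypothetical.
 *
 *	if (unlikely(iov_iter_fault_in_readable(iter, chunk))) {
 *		status = -EFAULT;
 *		break;
 *	}
 *	// ... lock the destination page, then copy with the atomic helpers ...
 */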
489 void iov_iter_init(struct iov_iter *i, unsigned int direction,
490 const struct iovec *iov, unsigned long nr_segs,
493 WARN_ON(direction & ~(READ | WRITE));
494 direction &= READ | WRITE;
496 /* It will get better. Eventually... */
497 if (uaccess_kernel()) {
498 i->type = ITER_KVEC | direction;
499 i->kvec = (struct kvec *)iov;
501 i->type = ITER_IOVEC | direction;
504 i->nr_segs = nr_segs;
508 EXPORT_SYMBOL(iov_iter_init);
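/*
 * Illustrative sketch (not part of this file): initialising an iterator over
 * a single user iovec as the source of a write-style transfer.  "ubuf",
 * "len" and "kbuf" are hypothetical.
 *
 *	struct iovec iov = { .iov_base = ubuf, .iov_len = len };
 *	struct iov_iter iter;
 *
 *	iov_iter_init(&iter, WRITE, &iov, 1, len);
 *	copied = copy_from_iter(kbuf, len, &iter);
 */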
510 static void memzero_page(struct page *page, size_t offset, size_t len)
512 char *addr = kmap_atomic(page);
513 memset(addr + offset, 0, len);
517 static inline bool allocated(struct pipe_buffer *buf)
519 return buf->ops == &default_pipe_buf_ops;
522 static inline void data_start(const struct iov_iter *i,
523 unsigned int *iter_headp, size_t *offp)
525 unsigned int p_mask = i->pipe->ring_size - 1;
526 unsigned int iter_head = i->head;
527 size_t off = i->iov_offset;
529 if (off && (!allocated(&i->pipe->bufs[iter_head & p_mask]) ||
534 *iter_headp = iter_head;
538 static size_t push_pipe(struct iov_iter *i, size_t size,
539 int *iter_headp, size_t *offp)
541 struct pipe_inode_info *pipe = i->pipe;
542 unsigned int p_tail = pipe->tail;
543 unsigned int p_mask = pipe->ring_size - 1;
544 unsigned int iter_head;
548 if (unlikely(size > i->count))
554 data_start(i, &iter_head, &off);
555 *iter_headp = iter_head;
558 left -= PAGE_SIZE - off;
560 pipe->bufs[iter_head & p_mask].len += size;
563 pipe->bufs[iter_head & p_mask].len = PAGE_SIZE;
566 while (!pipe_full(iter_head, p_tail, pipe->max_usage)) {
567 struct pipe_buffer *buf = &pipe->bufs[iter_head & p_mask];
568 struct page *page = alloc_page(GFP_USER);
572 buf->ops = &default_pipe_buf_ops;
575 buf->len = min_t(ssize_t, left, PAGE_SIZE);
578 pipe->head = iter_head;
586 static size_t copy_pipe_to_iter(const void *addr, size_t bytes,
589 struct pipe_inode_info *pipe = i->pipe;
590 unsigned int p_mask = pipe->ring_size - 1;
597 bytes = n = push_pipe(i, bytes, &i_head, &off);
601 size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
602 memcpy_to_page(pipe->bufs[i_head & p_mask].page, off, addr, chunk);
604 i->iov_offset = off + chunk;
614 static __wsum csum_and_memcpy(void *to, const void *from, size_t len,
615 __wsum sum, size_t off)
617 __wsum next = csum_partial_copy_nocheck(from, to, len);
618 return csum_block_add(sum, next, off);
621 static size_t csum_and_copy_to_pipe_iter(const void *addr, size_t bytes,
622 struct csum_state *csstate,
625 struct pipe_inode_info *pipe = i->pipe;
626 unsigned int p_mask = pipe->ring_size - 1;
627 __wsum sum = csstate->csum;
628 size_t off = csstate->off;
635 bytes = n = push_pipe(i, bytes, &i_head, &r);
639 size_t chunk = min_t(size_t, n, PAGE_SIZE - r);
640 char *p = kmap_atomic(pipe->bufs[i_head & p_mask].page);
641 sum = csum_and_memcpy(p + r, addr, chunk, sum, off);
644 i->iov_offset = r + chunk;
657 size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
659 const char *from = addr;
660 if (unlikely(iov_iter_is_pipe(i)))
661 return copy_pipe_to_iter(addr, bytes, i);
662 if (iter_is_iovec(i))
664 iterate_and_advance(i, bytes, v,
665 copyout(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len),
666 memcpy_to_page(v.bv_page, v.bv_offset,
667 (from += v.bv_len) - v.bv_len, v.bv_len),
668 memcpy(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len),
669 memcpy_to_page(v.bv_page, v.bv_offset,
670 (from += v.bv_len) - v.bv_len, v.bv_len)
675 EXPORT_SYMBOL(_copy_to_iter);
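/*
 * Illustrative sketch (not part of this file): a read-style path copying a
 * kernel buffer out to whatever the iterator describes (user memory, bvec
 * pages, a pipe, ...).  "kbuf", "len" and "iter" are hypothetical.
 *
 *	size_t copied = copy_to_iter(kbuf, len, iter);
 *	if (copied != len)
 *		return -EFAULT;		// short copy: destination faulted
 */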
677 #ifdef CONFIG_ARCH_HAS_COPY_MC
678 static int copyout_mc(void __user *to, const void *from, size_t n)
680 if (access_ok(to, n)) {
681 instrument_copy_to_user(to, from, n);
682 n = copy_mc_to_user((__force void *) to, from, n);
687 static unsigned long copy_mc_to_page(struct page *page, size_t offset,
688 const char *from, size_t len)
693 to = kmap_atomic(page);
694 ret = copy_mc_to_kernel(to + offset, from, len);
700 static size_t copy_mc_pipe_to_iter(const void *addr, size_t bytes,
703 struct pipe_inode_info *pipe = i->pipe;
704 unsigned int p_mask = pipe->ring_size - 1;
706 size_t n, off, xfer = 0;
711 bytes = n = push_pipe(i, bytes, &i_head, &off);
715 size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
718 rem = copy_mc_to_page(pipe->bufs[i_head & p_mask].page,
721 i->iov_offset = off + chunk - rem;
735 * _copy_mc_to_iter - copy to iter with source memory error exception handling
736 * @addr: source kernel address
737 * @bytes: total transfer length
738 * @i: destination iterator
740 * The pmem driver deploys this for the dax operation
741 * (dax_copy_to_iter()) for dax reads (bypassing the page cache and the
742 * block layer). Upon #MC, read(2) aborts and returns EIO or the bytes
743 * successfully copied.
745 * The main differences between this and typical _copy_to_iter() are:
747 * * Typical tail/residue handling after a fault retries the copy
748 * byte-by-byte until the fault happens again. Re-triggering machine
749 * checks is potentially fatal so the implementation uses source
750 * alignment and poison alignment assumptions to avoid re-triggering
751 * hardware exceptions.
753 * * ITER_KVEC, ITER_PIPE, and ITER_BVEC can return short copies.
754 * Compare to copy_to_iter() where only ITER_IOVEC attempts might return a short copy.
757 size_t _copy_mc_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
759 const char *from = addr;
760 unsigned long rem, curr_addr, s_addr = (unsigned long) addr;
762 if (unlikely(iov_iter_is_pipe(i)))
763 return copy_mc_pipe_to_iter(addr, bytes, i);
764 if (iter_is_iovec(i))
766 iterate_and_advance(i, bytes, v,
767 copyout_mc(v.iov_base, (from += v.iov_len) - v.iov_len,
770 rem = copy_mc_to_page(v.bv_page, v.bv_offset,
771 (from += v.bv_len) - v.bv_len, v.bv_len);
773 curr_addr = (unsigned long) from;
774 bytes = curr_addr - s_addr - rem;
779 rem = copy_mc_to_kernel(v.iov_base, (from += v.iov_len)
780 - v.iov_len, v.iov_len);
782 curr_addr = (unsigned long) from;
783 bytes = curr_addr - s_addr - rem;
788 rem = copy_mc_to_page(v.bv_page, v.bv_offset,
789 (from += v.bv_len) - v.bv_len, v.bv_len);
791 curr_addr = (unsigned long) from;
792 bytes = curr_addr - s_addr - rem;
794 i->iov_offset += bytes;
803 EXPORT_SYMBOL_GPL(_copy_mc_to_iter);
804 #endif /* CONFIG_ARCH_HAS_COPY_MC */
806 size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
809 if (unlikely(iov_iter_is_pipe(i))) {
813 if (iter_is_iovec(i))
815 iterate_and_advance(i, bytes, v,
816 copyin((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
817 memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
818 v.bv_offset, v.bv_len),
819 memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
820 memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
821 v.bv_offset, v.bv_len)
826 EXPORT_SYMBOL(_copy_from_iter);
828 bool _copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i)
831 if (unlikely(iov_iter_is_pipe(i))) {
835 if (unlikely(i->count < bytes))
838 if (iter_is_iovec(i))
840 iterate_all_kinds(i, bytes, v, ({
841 if (copyin((to += v.iov_len) - v.iov_len,
842 v.iov_base, v.iov_len))
845 memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
846 v.bv_offset, v.bv_len),
847 memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
848 memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
849 v.bv_offset, v.bv_len)
852 iov_iter_advance(i, bytes);
855 EXPORT_SYMBOL(_copy_from_iter_full);
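/*
 * Illustrative sketch (not part of this file): the _full variant is handy
 * when a fixed-size header must arrive in one piece; on failure the
 * iterator is left unadvanced.  "msg" and "struct foo_hdr" are hypothetical.
 *
 *	struct foo_hdr hdr;
 *
 *	if (!copy_from_iter_full(&hdr, sizeof(hdr), &msg->msg_iter))
 *		return -EFAULT;
 */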
857 size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
860 if (unlikely(iov_iter_is_pipe(i))) {
864 iterate_and_advance(i, bytes, v,
865 __copy_from_user_inatomic_nocache((to += v.iov_len) - v.iov_len,
866 v.iov_base, v.iov_len),
867 memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
868 v.bv_offset, v.bv_len),
869 memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
870 memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
871 v.bv_offset, v.bv_len)
876 EXPORT_SYMBOL(_copy_from_iter_nocache);
878 #ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
880 * _copy_from_iter_flushcache - write destination through cpu cache
881 * @addr: destination kernel address
882 * @bytes: total transfer length
883 * @i: source iterator
885 * The pmem driver arranges for filesystem-dax to use this facility via
886 * dax_copy_from_iter() for ensuring that writes to persistent memory
887 * are flushed through the CPU cache. It is differentiated from
888 * _copy_from_iter_nocache() in that it guarantees all data is flushed for
889 * all iterator types. The _copy_from_iter_nocache() only attempts to
890 * bypass the cache for the ITER_IOVEC case, and on some archs may use
891 * instructions that strand dirty-data in the cache.
893 size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
896 if (unlikely(iov_iter_is_pipe(i))) {
900 iterate_and_advance(i, bytes, v,
901 __copy_from_user_flushcache((to += v.iov_len) - v.iov_len,
902 v.iov_base, v.iov_len),
903 memcpy_page_flushcache((to += v.bv_len) - v.bv_len, v.bv_page,
904 v.bv_offset, v.bv_len),
905 memcpy_flushcache((to += v.iov_len) - v.iov_len, v.iov_base,
907 memcpy_page_flushcache((to += v.bv_len) - v.bv_len, v.bv_page,
908 v.bv_offset, v.bv_len)
913 EXPORT_SYMBOL_GPL(_copy_from_iter_flushcache);
916 bool _copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i)
919 if (unlikely(iov_iter_is_pipe(i))) {
923 if (unlikely(i->count < bytes))
925 iterate_all_kinds(i, bytes, v, ({
926 if (__copy_from_user_inatomic_nocache((to += v.iov_len) - v.iov_len,
927 v.iov_base, v.iov_len))
930 memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
931 v.bv_offset, v.bv_len),
932 memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
933 memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
934 v.bv_offset, v.bv_len)
937 iov_iter_advance(i, bytes);
940 EXPORT_SYMBOL(_copy_from_iter_full_nocache);
942 static inline bool page_copy_sane(struct page *page, size_t offset, size_t n)
945 size_t v = n + offset;
948 * The general case needs to access the page order in order
949 * to compute the page size.
950 * However, we mostly deal with order-0 pages and thus can
951 * avoid a possible cache line miss for requests that fit entirely within an order-0 page.
954 if (n <= v && v <= PAGE_SIZE)
957 head = compound_head(page);
958 v += (page - head) << PAGE_SHIFT;
960 if (likely(n <= v && v <= (page_size(head))))
966 size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
969 if (unlikely(!page_copy_sane(page, offset, bytes)))
971 if (i->type & (ITER_BVEC | ITER_KVEC | ITER_XARRAY)) {
972 void *kaddr = kmap_atomic(page);
973 size_t wanted = copy_to_iter(kaddr + offset, bytes, i);
974 kunmap_atomic(kaddr);
976 } else if (unlikely(iov_iter_is_discard(i)))
978 else if (likely(!iov_iter_is_pipe(i)))
979 return copy_page_to_iter_iovec(page, offset, bytes, i);
981 return copy_page_to_iter_pipe(page, offset, bytes, i);
983 EXPORT_SYMBOL(copy_page_to_iter);
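/*
 * Illustrative sketch (not part of this file): a page-cache read actor
 * pushing part of an uptodate page to the destination iterator; the
 * iterator is advanced by the amount copied.  "page", "offset", "nbytes"
 * and "iter" are hypothetical.
 *
 *	size_t n = copy_page_to_iter(page, offset, nbytes, iter);
 *
 *	// n < nbytes indicates a fault in the destination or a full pipe
 */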
985 size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
988 if (unlikely(!page_copy_sane(page, offset, bytes)))
990 if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
994 if (i->type & (ITER_BVEC | ITER_KVEC | ITER_XARRAY)) {
995 void *kaddr = kmap_atomic(page);
996 size_t wanted = _copy_from_iter(kaddr + offset, bytes, i);
997 kunmap_atomic(kaddr);
1000 return copy_page_from_iter_iovec(page, offset, bytes, i);
1002 EXPORT_SYMBOL(copy_page_from_iter);
1004 static size_t pipe_zero(size_t bytes, struct iov_iter *i)
1006 struct pipe_inode_info *pipe = i->pipe;
1007 unsigned int p_mask = pipe->ring_size - 1;
1008 unsigned int i_head;
1014 bytes = n = push_pipe(i, bytes, &i_head, &off);
1019 size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
1020 memzero_page(pipe->bufs[i_head & p_mask].page, off, chunk);
1022 i->iov_offset = off + chunk;
1031 size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
1033 if (unlikely(iov_iter_is_pipe(i)))
1034 return pipe_zero(bytes, i);
1035 iterate_and_advance(i, bytes, v,
1036 clear_user(v.iov_base, v.iov_len),
1037 memzero_page(v.bv_page, v.bv_offset, v.bv_len),
1038 memset(v.iov_base, 0, v.iov_len),
1039 memzero_page(v.bv_page, v.bv_offset, v.bv_len)
1044 EXPORT_SYMBOL(iov_iter_zero);
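/*
 * Illustrative sketch (not part of this file): zero-filling a hole while
 * servicing a read, e.g. when a range of the file has no backing extent.
 * "hole_len" and "iter" are hypothetical.
 *
 *	if (iov_iter_zero(hole_len, iter) != hole_len)
 *		return -EFAULT;
 */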
1046 size_t iov_iter_copy_from_user_atomic(struct page *page,
1047 struct iov_iter *i, unsigned long offset, size_t bytes)
1049 char *kaddr = kmap_atomic(page), *p = kaddr + offset;
1050 if (unlikely(!page_copy_sane(page, offset, bytes))) {
1051 kunmap_atomic(kaddr);
1054 if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
1055 kunmap_atomic(kaddr);
1059 iterate_all_kinds(i, bytes, v,
1060 copyin((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
1061 memcpy_from_page((p += v.bv_len) - v.bv_len, v.bv_page,
1062 v.bv_offset, v.bv_len),
1063 memcpy((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
1064 memcpy_from_page((p += v.bv_len) - v.bv_len, v.bv_page,
1065 v.bv_offset, v.bv_len)
1067 kunmap_atomic(kaddr);
1070 EXPORT_SYMBOL(iov_iter_copy_from_user_atomic);
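/*
 * Illustrative sketch (not part of this file): the classic buffered-write
 * loop pairs this helper with iov_iter_fault_in_readable() and
 * iov_iter_advance(); the destination page is mapped with kmap_atomic(),
 * so the user pages are faulted in beforehand rather than during the copy.
 * Variable names are hypothetical.
 *
 *	copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
 *	// ... mark the page dirty, unlock it ...
 *	iov_iter_advance(i, copied);
 */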
1072 static inline void pipe_truncate(struct iov_iter *i)
1074 struct pipe_inode_info *pipe = i->pipe;
1075 unsigned int p_tail = pipe->tail;
1076 unsigned int p_head = pipe->head;
1077 unsigned int p_mask = pipe->ring_size - 1;
1079 if (!pipe_empty(p_head, p_tail)) {
1080 struct pipe_buffer *buf;
1081 unsigned int i_head = i->head;
1082 size_t off = i->iov_offset;
1085 buf = &pipe->bufs[i_head & p_mask];
1086 buf->len = off - buf->offset;
1089 while (p_head != i_head) {
1091 pipe_buf_release(pipe, &pipe->bufs[p_head & p_mask]);
1094 pipe->head = p_head;
1098 static void pipe_advance(struct iov_iter *i, size_t size)
1100 struct pipe_inode_info *pipe = i->pipe;
1101 if (unlikely(i->count < size))
1104 struct pipe_buffer *buf;
1105 unsigned int p_mask = pipe->ring_size - 1;
1106 unsigned int i_head = i->head;
1107 size_t off = i->iov_offset, left = size;
1109 if (off) /* make it relative to the beginning of buffer */
1110 left += off - pipe->bufs[i_head & p_mask].offset;
1112 buf = &pipe->bufs[i_head & p_mask];
1113 if (left <= buf->len)
1119 i->iov_offset = buf->offset + left;
1122 /* ... and discard everything past that point */
1126 static void iov_iter_bvec_advance(struct iov_iter *i, size_t size)
1128 struct bvec_iter bi;
1130 bi.bi_size = i->count;
1131 bi.bi_bvec_done = i->iov_offset;
1133 bvec_iter_advance(i->bvec, &bi, size);
1135 i->bvec += bi.bi_idx;
1136 i->nr_segs -= bi.bi_idx;
1137 i->count = bi.bi_size;
1138 i->iov_offset = bi.bi_bvec_done;
1141 void iov_iter_advance(struct iov_iter *i, size_t size)
1143 if (unlikely(iov_iter_is_pipe(i))) {
1144 pipe_advance(i, size);
1147 if (unlikely(iov_iter_is_discard(i))) {
1151 if (unlikely(iov_iter_is_xarray(i))) {
1152 size = min(size, i->count);
1153 i->iov_offset += size;
1157 if (iov_iter_is_bvec(i)) {
1158 iov_iter_bvec_advance(i, size);
1161 iterate_and_advance(i, size, v, 0, 0, 0, 0)
1163 EXPORT_SYMBOL(iov_iter_advance);
1165 void iov_iter_revert(struct iov_iter *i, size_t unroll)
1169 if (WARN_ON(unroll > MAX_RW_COUNT))
1172 if (unlikely(iov_iter_is_pipe(i))) {
1173 struct pipe_inode_info *pipe = i->pipe;
1174 unsigned int p_mask = pipe->ring_size - 1;
1175 unsigned int i_head = i->head;
1176 size_t off = i->iov_offset;
1178 struct pipe_buffer *b = &pipe->bufs[i_head & p_mask];
1179 size_t n = off - b->offset;
1185 if (!unroll && i_head == i->start_head) {
1190 b = &pipe->bufs[i_head & p_mask];
1191 off = b->offset + b->len;
1193 i->iov_offset = off;
1198 if (unlikely(iov_iter_is_discard(i)))
1200 if (unroll <= i->iov_offset) {
1201 i->iov_offset -= unroll;
1204 unroll -= i->iov_offset;
1205 if (iov_iter_is_xarray(i)) {
1206 BUG(); /* We should never go beyond the start of the specified
1207 * range since we might then be straying into pages that aren't pinned. */
1210 } else if (iov_iter_is_bvec(i)) {
1211 const struct bio_vec *bvec = i->bvec;
1213 size_t n = (--bvec)->bv_len;
1217 i->iov_offset = n - unroll;
1222 } else { /* same logics for iovec and kvec */
1223 const struct iovec *iov = i->iov;
1225 size_t n = (--iov)->iov_len;
1229 i->iov_offset = n - unroll;
1236 EXPORT_SYMBOL(iov_iter_revert);
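/*
 * Illustrative sketch (not part of this file): a caller that consumed part
 * of the iterator and then hit an error can wind it back so the data is not
 * lost.  "iter", "consumed" and submit_something() are hypothetical.
 *
 *	iov_iter_advance(iter, consumed);
 *	ret = submit_something(req);
 *	if (ret < 0)
 *		iov_iter_revert(iter, consumed);
 */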
1239 * Return the count of just the current iov_iter segment.
1241 size_t iov_iter_single_seg_count(const struct iov_iter *i)
1243 if (unlikely(iov_iter_is_pipe(i)))
1244 return i->count; // it is a silly place, anyway
1245 if (i->nr_segs == 1)
1247 if (unlikely(iov_iter_is_discard(i) || iov_iter_is_xarray(i)))
1249 if (iov_iter_is_bvec(i))
1250 return min(i->count, i->bvec->bv_len - i->iov_offset);
1252 return min(i->count, i->iov->iov_len - i->iov_offset);
1254 EXPORT_SYMBOL(iov_iter_single_seg_count);
1256 void iov_iter_kvec(struct iov_iter *i, unsigned int direction,
1257 const struct kvec *kvec, unsigned long nr_segs,
1260 WARN_ON(direction & ~(READ | WRITE));
1261 i->type = ITER_KVEC | (direction & (READ | WRITE));
1263 i->nr_segs = nr_segs;
1267 EXPORT_SYMBOL(iov_iter_kvec);
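/*
 * Illustrative sketch (not part of this file): wrapping a kernel buffer so
 * it can be handed to an interface that expects an iov_iter destination.
 * "kbuf" and "len" are hypothetical.
 *
 *	struct kvec kv = { .iov_base = kbuf, .iov_len = len };
 *	struct iov_iter iter;
 *
 *	iov_iter_kvec(&iter, READ, &kv, 1, len);
 */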
1269 void iov_iter_bvec(struct iov_iter *i, unsigned int direction,
1270 const struct bio_vec *bvec, unsigned long nr_segs,
1273 WARN_ON(direction & ~(READ | WRITE));
1274 i->type = ITER_BVEC | (direction & (READ | WRITE));
1276 i->nr_segs = nr_segs;
1280 EXPORT_SYMBOL(iov_iter_bvec);
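/*
 * Illustrative sketch (not part of this file): describing a single page
 * fragment with a bio_vec, as networking and block code commonly do when
 * the data source is already in pages.  "page" and "len" are hypothetical.
 *
 *	struct bio_vec bv = { .bv_page = page, .bv_len = len, .bv_offset = 0 };
 *	struct iov_iter iter;
 *
 *	iov_iter_bvec(&iter, WRITE, &bv, 1, len);
 */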
1282 void iov_iter_pipe(struct iov_iter *i, unsigned int direction,
1283 struct pipe_inode_info *pipe,
1286 BUG_ON(direction != READ);
1287 WARN_ON(pipe_full(pipe->head, pipe->tail, pipe->ring_size));
1288 i->type = ITER_PIPE | READ;
1290 i->head = pipe->head;
1293 i->start_head = i->head;
1295 EXPORT_SYMBOL(iov_iter_pipe);
1298 * iov_iter_xarray - Initialise an I/O iterator to use the pages in an xarray
1299 * @i: The iterator to initialise.
1300 * @direction: The direction of the transfer.
1301 * @xarray: The xarray to access.
1302 * @start: The start file position.
1303 * @count: The size of the I/O buffer in bytes.
1305 * Set up an I/O iterator to either draw data out of the pages attached to an
1306 * inode or to inject data into those pages. The pages *must* be prevented
1307 * from evaporation, either by taking a ref on them or locking them by the caller.
1310 void iov_iter_xarray(struct iov_iter *i, unsigned int direction,
1311 struct xarray *xarray, loff_t start, size_t count)
1313 BUG_ON(direction & ~1);
1314 i->type = ITER_XARRAY | (direction & (READ | WRITE));
1316 i->xarray_start = start;
1320 EXPORT_SYMBOL(iov_iter_xarray);
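/*
 * Illustrative sketch (not part of this file): pointing an iterator at the
 * pages already cached in an address_space, e.g. for a network filesystem
 * writing cached data back.  "mapping", "pos" and "len" are hypothetical,
 * and the pages are assumed to be held or locked by the caller.
 *
 *	struct iov_iter iter;
 *
 *	iov_iter_xarray(&iter, WRITE, &mapping->i_pages, pos, len);
 */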
1323 * iov_iter_discard - Initialise an I/O iterator that discards data
1324 * @i: The iterator to initialise.
1325 * @direction: The direction of the transfer.
1326 * @count: The size of the I/O buffer in bytes.
1328 * Set up an I/O iterator that just discards everything that's written to it.
1329 * It's only available as a READ iterator.
1331 void iov_iter_discard(struct iov_iter *i, unsigned int direction, size_t count)
1333 BUG_ON(direction != READ);
1334 i->type = ITER_DISCARD | READ;
1338 EXPORT_SYMBOL(iov_iter_discard);
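/*
 * Illustrative sketch (not part of this file): draining data nobody wants
 * to keep, e.g. skipping over part of a stream.  "count" is hypothetical.
 *
 *	struct iov_iter iter;
 *
 *	iov_iter_discard(&iter, READ, count);
 *	// anything "copied" to iter is simply thrown away
 */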
1340 unsigned long iov_iter_alignment(const struct iov_iter *i)
1342 unsigned long res = 0;
1343 size_t size = i->count;
1345 if (unlikely(iov_iter_is_pipe(i))) {
1346 unsigned int p_mask = i->pipe->ring_size - 1;
1348 if (size && i->iov_offset && allocated(&i->pipe->bufs[i->head & p_mask]))
1349 return size | i->iov_offset;
1352 if (unlikely(iov_iter_is_xarray(i)))
1353 return (i->xarray_start + i->iov_offset) | i->count;
1354 iterate_all_kinds(i, size, v,
1355 (res |= (unsigned long)v.iov_base | v.iov_len, 0),
1356 res |= v.bv_offset | v.bv_len,
1357 res |= (unsigned long)v.iov_base | v.iov_len,
1358 res |= v.bv_offset | v.bv_len
1362 EXPORT_SYMBOL(iov_iter_alignment);
1364 unsigned long iov_iter_gap_alignment(const struct iov_iter *i)
1366 unsigned long res = 0;
1367 size_t size = i->count;
1369 if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
1374 iterate_all_kinds(i, size, v,
1375 (res |= (!res ? 0 : (unsigned long)v.iov_base) |
1376 (size != v.iov_len ? size : 0), 0),
1377 (res |= (!res ? 0 : (unsigned long)v.bv_offset) |
1378 (size != v.bv_len ? size : 0)),
1379 (res |= (!res ? 0 : (unsigned long)v.iov_base) |
1380 (size != v.iov_len ? size : 0)),
1381 (res |= (!res ? 0 : (unsigned long)v.bv_offset) |
1382 (size != v.bv_len ? size : 0))
1386 EXPORT_SYMBOL(iov_iter_gap_alignment);
1388 static inline ssize_t __pipe_get_pages(struct iov_iter *i,
1390 struct page **pages,
1394 struct pipe_inode_info *pipe = i->pipe;
1395 unsigned int p_mask = pipe->ring_size - 1;
1396 ssize_t n = push_pipe(i, maxsize, &iter_head, start);
1403 get_page(*pages++ = pipe->bufs[iter_head & p_mask].page);
1411 static ssize_t pipe_get_pages(struct iov_iter *i,
1412 struct page **pages, size_t maxsize, unsigned maxpages,
1415 unsigned int iter_head, npages;
1424 data_start(i, &iter_head, start);
1425 /* Amount of free space: some of this one + all after this one */
1426 npages = pipe_space_for_user(iter_head, i->pipe->tail, i->pipe);
1427 capacity = min(npages, maxpages) * PAGE_SIZE - *start;
1429 return __pipe_get_pages(i, min(maxsize, capacity), pages, iter_head, start);
1432 static ssize_t iter_xarray_populate_pages(struct page **pages, struct xarray *xa,
1433 pgoff_t index, unsigned int nr_pages)
1435 XA_STATE(xas, xa, index);
1437 unsigned int ret = 0;
1440 for (page = xas_load(&xas); page; page = xas_next(&xas)) {
1441 if (xas_retry(&xas, page))
1444 /* Has the page moved or been split? */
1445 if (unlikely(page != xas_reload(&xas))) {
1450 pages[ret] = find_subpage(page, xas.xa_index);
1451 get_page(pages[ret]);
1452 if (++ret == nr_pages)
1459 static ssize_t iter_xarray_get_pages(struct iov_iter *i,
1460 struct page **pages, size_t maxsize,
1461 unsigned maxpages, size_t *_start_offset)
1463 unsigned nr, offset;
1464 pgoff_t index, count;
1465 size_t size = maxsize, actual;
1468 if (!size || !maxpages)
1471 pos = i->xarray_start + i->iov_offset;
1472 index = pos >> PAGE_SHIFT;
1473 offset = pos & ~PAGE_MASK;
1474 *_start_offset = offset;
1477 if (size > PAGE_SIZE - offset) {
1478 size -= PAGE_SIZE - offset;
1479 count += size >> PAGE_SHIFT;
1485 if (count > maxpages)
1488 nr = iter_xarray_populate_pages(pages, i->xarray, index, count);
1492 actual = PAGE_SIZE * nr;
1494 if (nr == count && size > 0) {
1495 unsigned last_offset = (nr > 1) ? 0 : offset;
1496 actual -= PAGE_SIZE - (last_offset + size);
1501 ssize_t iov_iter_get_pages(struct iov_iter *i,
1502 struct page **pages, size_t maxsize, unsigned maxpages,
1505 if (maxsize > i->count)
1508 if (unlikely(iov_iter_is_pipe(i)))
1509 return pipe_get_pages(i, pages, maxsize, maxpages, start);
1510 if (unlikely(iov_iter_is_xarray(i)))
1511 return iter_xarray_get_pages(i, pages, maxsize, maxpages, start);
1512 if (unlikely(iov_iter_is_discard(i)))
1515 iterate_all_kinds(i, maxsize, v, ({
1516 unsigned long addr = (unsigned long)v.iov_base;
1517 size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
1521 if (len > maxpages * PAGE_SIZE)
1522 len = maxpages * PAGE_SIZE;
1523 addr &= ~(PAGE_SIZE - 1);
1524 n = DIV_ROUND_UP(len, PAGE_SIZE);
1525 res = get_user_pages_fast(addr, n,
1526 iov_iter_rw(i) != WRITE ? FOLL_WRITE : 0,
1528 if (unlikely(res < 0))
1530 return (res == n ? len : res * PAGE_SIZE) - *start;
1532 /* can't be more than PAGE_SIZE */
1533 *start = v.bv_offset;
1534 get_page(*pages = v.bv_page);
1543 EXPORT_SYMBOL(iov_iter_get_pages);
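/*
 * Illustrative sketch (not part of this file): pinning the pages backing
 * the next part of the iterator for zero-copy I/O.  The return value is a
 * byte count, not a page count.  All names are hypothetical.
 *
 *	struct page *pages[16];
 *	size_t off;
 *	ssize_t n;
 *
 *	n = iov_iter_get_pages(iter, pages, bytes, ARRAY_SIZE(pages), &off);
 *	if (n < 0)
 *		return n;
 *	// n bytes start at offset "off" within pages[0]; put_page() each
 *	// pinned page when the I/O completes
 */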
1545 static struct page **get_pages_array(size_t n)
1547 return kvmalloc_array(n, sizeof(struct page *), GFP_KERNEL);
1550 static ssize_t pipe_get_pages_alloc(struct iov_iter *i,
1551 struct page ***pages, size_t maxsize,
1555 unsigned int iter_head, npages;
1564 data_start(i, &iter_head, start);
1565 /* Amount of free space: some of this one + all after this one */
1566 npages = pipe_space_for_user(iter_head, i->pipe->tail, i->pipe);
1567 n = npages * PAGE_SIZE - *start;
1571 npages = DIV_ROUND_UP(maxsize + *start, PAGE_SIZE);
1572 p = get_pages_array(npages);
1575 n = __pipe_get_pages(i, maxsize, p, iter_head, start);
1583 static ssize_t iter_xarray_get_pages_alloc(struct iov_iter *i,
1584 struct page ***pages, size_t maxsize,
1585 size_t *_start_offset)
1588 unsigned nr, offset;
1589 pgoff_t index, count;
1590 size_t size = maxsize, actual;
1596 pos = i->xarray_start + i->iov_offset;
1597 index = pos >> PAGE_SHIFT;
1598 offset = pos & ~PAGE_MASK;
1599 *_start_offset = offset;
1602 if (size > PAGE_SIZE - offset) {
1603 size -= PAGE_SIZE - offset;
1604 count += size >> PAGE_SHIFT;
1610 p = get_pages_array(count);
1615 nr = iter_xarray_populate_pages(p, i->xarray, index, count);
1619 actual = PAGE_SIZE * nr;
1621 if (nr == count && size > 0) {
1622 unsigned last_offset = (nr > 1) ? 0 : offset;
1623 actual -= PAGE_SIZE - (last_offset + size);
1628 ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
1629 struct page ***pages, size_t maxsize,
1634 if (maxsize > i->count)
1637 if (unlikely(iov_iter_is_pipe(i)))
1638 return pipe_get_pages_alloc(i, pages, maxsize, start);
1639 if (unlikely(iov_iter_is_xarray(i)))
1640 return iter_xarray_get_pages_alloc(i, pages, maxsize, start);
1641 if (unlikely(iov_iter_is_discard(i)))
1644 iterate_all_kinds(i, maxsize, v, ({
1645 unsigned long addr = (unsigned long)v.iov_base;
1646 size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
1650 addr &= ~(PAGE_SIZE - 1);
1651 n = DIV_ROUND_UP(len, PAGE_SIZE);
1652 p = get_pages_array(n);
1655 res = get_user_pages_fast(addr, n,
1656 iov_iter_rw(i) != WRITE ? FOLL_WRITE : 0, p);
1657 if (unlikely(res < 0)) {
1662 return (res == n ? len : res * PAGE_SIZE) - *start;
1664 /* can't be more than PAGE_SIZE */
1665 *start = v.bv_offset;
1666 *pages = p = get_pages_array(1);
1669 get_page(*p = v.bv_page);
1677 EXPORT_SYMBOL(iov_iter_get_pages_alloc);
1679 size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum,
1686 if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
1690 iterate_and_advance(i, bytes, v, ({
1691 next = csum_and_copy_from_user(v.iov_base,
1692 (to += v.iov_len) - v.iov_len,
1695 sum = csum_block_add(sum, next, off);
1698 next ? 0 : v.iov_len;
1700 char *p = kmap_atomic(v.bv_page);
1701 sum = csum_and_memcpy((to += v.bv_len) - v.bv_len,
1702 p + v.bv_offset, v.bv_len,
1707 sum = csum_and_memcpy((to += v.iov_len) - v.iov_len,
1708 v.iov_base, v.iov_len,
1712 char *p = kmap_atomic(v.bv_page);
1713 sum = csum_and_memcpy((to += v.bv_len) - v.bv_len,
1714 p + v.bv_offset, v.bv_len,
1723 EXPORT_SYMBOL(csum_and_copy_from_iter);
1725 bool csum_and_copy_from_iter_full(void *addr, size_t bytes, __wsum *csum,
1732 if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
1736 if (unlikely(i->count < bytes))
1738 iterate_all_kinds(i, bytes, v, ({
1739 next = csum_and_copy_from_user(v.iov_base,
1740 (to += v.iov_len) - v.iov_len,
1744 sum = csum_block_add(sum, next, off);
1748 char *p = kmap_atomic(v.bv_page);
1749 sum = csum_and_memcpy((to += v.bv_len) - v.bv_len,
1750 p + v.bv_offset, v.bv_len,
1755 sum = csum_and_memcpy((to += v.iov_len) - v.iov_len,
1756 v.iov_base, v.iov_len,
1760 char *p = kmap_atomic(v.bv_page);
1761 sum = csum_and_memcpy((to += v.bv_len) - v.bv_len,
1762 p + v.bv_offset, v.bv_len,
1769 iov_iter_advance(i, bytes);
1772 EXPORT_SYMBOL(csum_and_copy_from_iter_full);
1774 size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *_csstate,
1777 struct csum_state *csstate = _csstate;
1778 const char *from = addr;
1782 if (unlikely(iov_iter_is_pipe(i)))
1783 return csum_and_copy_to_pipe_iter(addr, bytes, _csstate, i);
1785 sum = csstate->csum;
1787 if (unlikely(iov_iter_is_discard(i))) {
1788 WARN_ON(1); /* for now */
1791 iterate_and_advance(i, bytes, v, ({
1792 next = csum_and_copy_to_user((from += v.iov_len) - v.iov_len,
1796 sum = csum_block_add(sum, next, off);
1799 next ? 0 : v.iov_len;
1801 char *p = kmap_atomic(v.bv_page);
1802 sum = csum_and_memcpy(p + v.bv_offset,
1803 (from += v.bv_len) - v.bv_len,
1804 v.bv_len, sum, off);
1808 sum = csum_and_memcpy(v.iov_base,
1809 (from += v.iov_len) - v.iov_len,
1810 v.iov_len, sum, off);
1813 char *p = kmap_atomic(v.bv_page);
1814 sum = csum_and_memcpy(p + v.bv_offset,
1815 (from += v.bv_len) - v.bv_len,
1816 v.bv_len, sum, off);
1821 csstate->csum = sum;
1825 EXPORT_SYMBOL(csum_and_copy_to_iter);
1827 size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp,
1830 #ifdef CONFIG_CRYPTO_HASH
1831 struct ahash_request *hash = hashp;
1832 struct scatterlist sg;
1835 copied = copy_to_iter(addr, bytes, i);
1836 sg_init_one(&sg, addr, copied);
1837 ahash_request_set_crypt(hash, &sg, NULL, copied);
1838 crypto_ahash_update(hash);
1844 EXPORT_SYMBOL(hash_and_copy_to_iter);
1846 int iov_iter_npages(const struct iov_iter *i, int maxpages)
1848 size_t size = i->count;
1853 if (unlikely(iov_iter_is_discard(i)))
1856 if (unlikely(iov_iter_is_pipe(i))) {
1857 struct pipe_inode_info *pipe = i->pipe;
1858 unsigned int iter_head;
1864 data_start(i, &iter_head, &off);
1865 /* some of this one + all after this one */
1866 npages = pipe_space_for_user(iter_head, pipe->tail, pipe);
1867 if (npages >= maxpages)
1869 } else if (unlikely(iov_iter_is_xarray(i))) {
1872 offset = (i->xarray_start + i->iov_offset) & ~PAGE_MASK;
1875 if (size > PAGE_SIZE - offset) {
1876 size -= PAGE_SIZE - offset;
1877 npages += size >> PAGE_SHIFT;
1882 if (npages >= maxpages)
1884 } else iterate_all_kinds(i, size, v, ({
1885 unsigned long p = (unsigned long)v.iov_base;
1886 npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
1888 if (npages >= maxpages)
1892 if (npages >= maxpages)
1895 unsigned long p = (unsigned long)v.iov_base;
1896 npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
1898 if (npages >= maxpages)
1905 EXPORT_SYMBOL(iov_iter_npages);
1907 const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags)
1910 if (unlikely(iov_iter_is_pipe(new))) {
1914 if (unlikely(iov_iter_is_discard(new) || iov_iter_is_xarray(new)))
1916 if (iov_iter_is_bvec(new))
1917 return new->bvec = kmemdup(new->bvec,
1918 new->nr_segs * sizeof(struct bio_vec),
1921 /* iovec and kvec have identical layout */
1922 return new->iov = kmemdup(new->iov,
1923 new->nr_segs * sizeof(struct iovec),
1926 EXPORT_SYMBOL(dup_iter);
1928 static int copy_compat_iovec_from_user(struct iovec *iov,
1929 const struct iovec __user *uvec, unsigned long nr_segs)
1931 const struct compat_iovec __user *uiov =
1932 (const struct compat_iovec __user *)uvec;
1933 int ret = -EFAULT, i;
1935 if (!user_access_begin(uiov, nr_segs * sizeof(*uiov)))
1938 for (i = 0; i < nr_segs; i++) {
1942 unsafe_get_user(len, &uiov[i].iov_len, uaccess_end);
1943 unsafe_get_user(buf, &uiov[i].iov_base, uaccess_end);
1945 /* check for compat_size_t not fitting in compat_ssize_t .. */
1950 iov[i].iov_base = compat_ptr(buf);
1951 iov[i].iov_len = len;
1960 static int copy_iovec_from_user(struct iovec *iov,
1961 const struct iovec __user *uvec, unsigned long nr_segs)
1965 if (copy_from_user(iov, uvec, nr_segs * sizeof(*uvec)))
1967 for (seg = 0; seg < nr_segs; seg++) {
1968 if ((ssize_t)iov[seg].iov_len < 0)
1975 struct iovec *iovec_from_user(const struct iovec __user *uvec,
1976 unsigned long nr_segs, unsigned long fast_segs,
1977 struct iovec *fast_iov, bool compat)
1979 struct iovec *iov = fast_iov;
1983 * SuS says "The readv() function *may* fail if the iovcnt argument was
1984 * less than or equal to 0, or greater than {IOV_MAX}. Linux has
1985 * traditionally returned zero for zero segments, so...
1989 if (nr_segs > UIO_MAXIOV)
1990 return ERR_PTR(-EINVAL);
1991 if (nr_segs > fast_segs) {
1992 iov = kmalloc_array(nr_segs, sizeof(struct iovec), GFP_KERNEL);
1994 return ERR_PTR(-ENOMEM);
1998 ret = copy_compat_iovec_from_user(iov, uvec, nr_segs);
2000 ret = copy_iovec_from_user(iov, uvec, nr_segs);
2002 if (iov != fast_iov)
2004 return ERR_PTR(ret);
2010 ssize_t __import_iovec(int type, const struct iovec __user *uvec,
2011 unsigned nr_segs, unsigned fast_segs, struct iovec **iovp,
2012 struct iov_iter *i, bool compat)
2014 ssize_t total_len = 0;
2018 iov = iovec_from_user(uvec, nr_segs, fast_segs, *iovp, compat);
2021 return PTR_ERR(iov);
2025 * According to the Single Unix Specification we should return EINVAL if
2026 * an element length is < 0 when cast to ssize_t or if the total length
2027 * would overflow the ssize_t return value of the system call.
2029 * Linux caps all read/write calls to MAX_RW_COUNT, and avoids the overflow case.
2032 for (seg = 0; seg < nr_segs; seg++) {
2033 ssize_t len = (ssize_t)iov[seg].iov_len;
2035 if (!access_ok(iov[seg].iov_base, len)) {
2042 if (len > MAX_RW_COUNT - total_len) {
2043 len = MAX_RW_COUNT - total_len;
2044 iov[seg].iov_len = len;
2049 iov_iter_init(i, type, iov, nr_segs, total_len);
2058 * import_iovec() - Copy an array of &struct iovec from userspace
2059 * into the kernel, check that it is valid, and initialize a new
2060 * &struct iov_iter iterator to access it.
2062 * @type: One of %READ or %WRITE.
2063 * @uvec: Pointer to the userspace array.
2064 * @nr_segs: Number of elements in userspace array.
2065 * @fast_segs: Number of elements in @iov.
2066 * @iovp: (input and output parameter) Pointer to pointer to (usually small
2067 * on-stack) kernel array.
2068 * @i: Pointer to iterator that will be initialized on success.
2070 * If the array pointed to by *@iovp is large enough to hold all @nr_segs,
2071 * then this function places %NULL in *@iovp on return. Otherwise, a new
2072 * array will be allocated and the result placed in *@iovp. This means that
2073 * the caller may call kfree() on *@iovp regardless of whether the small
2074 * on-stack array was used or not (and regardless of whether this function
2075 * returns an error or not).
2077 * Return: Negative error code on error, bytes imported on success
2079 ssize_t import_iovec(int type, const struct iovec __user *uvec,
2080 unsigned nr_segs, unsigned fast_segs,
2081 struct iovec **iovp, struct iov_iter *i)
2083 return __import_iovec(type, uvec, nr_segs, fast_segs, iovp, i,
2084 in_compat_syscall());
2086 EXPORT_SYMBOL(import_iovec);
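/*
 * Illustrative sketch (not part of this file): the usual readv()/writev()
 * syscall pattern with a small on-stack array and a kfree() of whatever
 * import_iovec() left in "iov".  do_the_read() and the other names are
 * hypothetical.
 *
 *	struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
 *	struct iov_iter iter;
 *	ssize_t ret;
 *
 *	ret = import_iovec(READ, uvec, nr_segs, ARRAY_SIZE(iovstack),
 *			   &iov, &iter);
 *	if (ret < 0)
 *		return ret;
 *	ret = do_the_read(file, &iter);
 *	kfree(iov);
 */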
2088 int import_single_range(int rw, void __user *buf, size_t len,
2089 struct iovec *iov, struct iov_iter *i)
2091 if (len > MAX_RW_COUNT)
2093 if (unlikely(!access_ok(buf, len)))
2096 iov->iov_base = buf;
2098 iov_iter_init(i, rw, iov, 1, len);
2101 EXPORT_SYMBOL(import_single_range);
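/*
 * Illustrative sketch (not part of this file): the plain read(2)/write(2)
 * path wraps a single user range.  "ubuf" and "len" are hypothetical.
 *
 *	struct iovec iov;
 *	struct iov_iter iter;
 *
 *	ret = import_single_range(READ, ubuf, len, &iov, &iter);
 *	if (ret)
 *		return ret;
 */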
2103 int iov_iter_for_each_range(struct iov_iter *i, size_t bytes,
2104 int (*f)(struct kvec *vec, void *context),
2112 iterate_all_kinds(i, bytes, v, -EINVAL, ({
2113 w.iov_base = kmap(v.bv_page) + v.bv_offset;
2114 w.iov_len = v.bv_len;
2115 err = f(&w, context);
2119 err = f(&w, context);}), ({
2120 w.iov_base = kmap(v.bv_page) + v.bv_offset;
2121 w.iov_len = v.bv_len;
2122 err = f(&w, context);
2128 EXPORT_SYMBOL(iov_iter_for_each_range);